in_source_id (string, length 13-58) | issue (string, length 3-241k) | before_files (list, length 0-3) | after_files (list, length 0-3) | pr_diff (string, length 109-107M, may be null)
---|---|---|---|---|
pulp__pulpcore-4641 | pulp_file version is set to 3.40.0.dev
**Version**
pulpcore 3.40.0
**Describe the bug**
Status API reports pulp_file version as 3.40.0.dev
| [
{
"content": "from pulpcore.plugin import PulpPluginAppConfig\n\n\nclass PulpFilePluginAppConfig(PulpPluginAppConfig):\n \"\"\"\n Entry point for pulp_file plugin.\n \"\"\"\n\n name = \"pulp_file.app\"\n label = \"file\"\n version = \"3.40.0.dev\"\n python_package_name = \"pulp_file\" # TODO Add python_module_name\n domain_compatible = True\n",
"path": "pulp_file/app/__init__.py"
}
] | [
{
"content": "from pulpcore.plugin import PulpPluginAppConfig\n\n\nclass PulpFilePluginAppConfig(PulpPluginAppConfig):\n \"\"\"\n Entry point for pulp_file plugin.\n \"\"\"\n\n name = \"pulp_file.app\"\n label = \"file\"\n version = \"3.41.0.dev\"\n python_package_name = \"pulp_file\" # TODO Add python_module_name\n domain_compatible = True\n",
"path": "pulp_file/app/__init__.py"
}
] | diff --git a/.bumpversion.cfg b/.bumpversion.cfg
index 540db5cc96..aba3e10392 100644
--- a/.bumpversion.cfg
+++ b/.bumpversion.cfg
@@ -19,3 +19,5 @@ values =
[bumpversion:file:./setup.py]
[bumpversion:file:./docs/conf.py]
+
+[bumpversion:file:pulp_file/app/__init__.py]
diff --git a/CHANGES/pulp_file/4633.bugfix b/CHANGES/pulp_file/4633.bugfix
new file mode 100644
index 0000000000..36a8711407
--- /dev/null
+++ b/CHANGES/pulp_file/4633.bugfix
@@ -0,0 +1 @@
+Fix pulp_file advertised version.
diff --git a/pulp_file/app/__init__.py b/pulp_file/app/__init__.py
index fe1d7362c6..d92d113268 100644
--- a/pulp_file/app/__init__.py
+++ b/pulp_file/app/__init__.py
@@ -8,6 +8,6 @@ class PulpFilePluginAppConfig(PulpPluginAppConfig):
name = "pulp_file.app"
label = "file"
- version = "3.40.0.dev"
+ version = "3.41.0.dev"
python_package_name = "pulp_file" # TODO Add python_module_name
domain_compatible = True
|
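The record above fixes the advertised pulp_file version by registering the plugin's `__init__.py` with bumpversion so future bumps keep it in sync. As a minimal verification sketch (not part of the PR; the base URL/port and the exact status payload shape with `versions` entries carrying `component`/`version` keys are assumptions about a typical Pulp deployment):

```python
# Hypothetical check, not from the PR: read Pulp's status API and print the
# component versions it advertises. URL, port, and payload shape are assumed.
import requests

def advertised_versions(base_url="http://localhost:24817"):
    status = requests.get(f"{base_url}/pulp/api/v3/status/").json()
    return {item["component"]: item["version"] for item in status.get("versions", [])}

if __name__ == "__main__":
    for component, version in advertised_versions().items():
        print(component, version)  # pulp_file should now report 3.41.0.dev
```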
DataBiosphere__toil-239 | Jenkins should only deploy to PyPI when building off the master branch
| [
{
"content": "from setuptools import setup, find_packages\n\nsetup(\n name='toil',\n version='3.0.4',\n description='Pipeline management software for clusters.',\n author='Benedict Paten',\n author_email='[email protected]',\n url=\"https://github.com/BD2KGenomics/toil\",\n install_requires=['bd2k-python-lib>=1.7.dev1'],\n extras_require={\n 'mesos': [\n 'mesos.interface==0.22.0',\n 'psutil==3.0.1' ],\n 'aws': [\n 'boto==2.38.0' ] },\n package_dir={ '': 'src' },\n packages=find_packages( 'src', exclude=[ '*.test' ] ),\n entry_points={\n 'console_scripts': [\n 'toilKill = toil.utils.toilKill:main',\n 'toilStatus = toil.utils.toilStatus:main',\n 'toilStats = toil.utils.toilStats:main',\n 'toilRestarts = toil.utils.toilRestarts:main',\n 'multijob = toil.batchSystems.multijob:main',\n 'toil-mesos-executor = toil.batchSystems.mesos.executor:main [mesos]'] } )\n",
"path": "setup.py"
}
] | [
{
"content": "from setuptools import setup, find_packages\n\nsetup(\n name='toil',\n version='3.0.5.dev1',\n description='Pipeline management software for clusters.',\n author='Benedict Paten',\n author_email='[email protected]',\n url=\"https://github.com/BD2KGenomics/toil\",\n install_requires=['bd2k-python-lib>=1.7.dev1'],\n extras_require={\n 'mesos': [\n 'mesos.interface==0.22.0',\n 'psutil==3.0.1' ],\n 'aws': [\n 'boto==2.38.0' ] },\n package_dir={ '': 'src' },\n packages=find_packages( 'src', exclude=[ '*.test' ] ),\n entry_points={\n 'console_scripts': [\n 'toilKill = toil.utils.toilKill:main',\n 'toilStatus = toil.utils.toilStatus:main',\n 'toilStats = toil.utils.toilStats:main',\n 'toilRestarts = toil.utils.toilRestarts:main',\n 'multijob = toil.batchSystems.multijob:main',\n 'toil-mesos-executor = toil.batchSystems.mesos.executor:main [mesos]'] } )\n",
"path": "setup.py"
}
] | diff --git a/Makefile b/Makefile
index 252cda9d12..a854a9d914 100644
--- a/Makefile
+++ b/Makefile
@@ -76,7 +76,11 @@ check_running_on_jenkins:
@test -n "$$BUILD_NUMBER" || ( echo "\033[0;31mThis target should only be invoked on Jenkins.\033[0m" ; false )
pypi: check_clean_working_copy check_running_on_jenkins
- $(python) setup.py egg_info --tag-build=dev$$BUILD_NUMBER register sdist bdist_egg upload
+ test "$$(git rev-parse --verify remotes/origin/master)" != "$$(git rev-parse --verify HEAD)" \
+ && echo "Not on master branch, silently skipping deployment to PyPI." \
+ || $(python) setup.py egg_info --tag-build=build$$BUILD_NUMBER register sdist bdist_egg upload
pypi_stable: check_clean_working_copy check_running_on_jenkins
- $(python) setup.py egg_info register sdist bdist_egg upload
\ No newline at end of file
+ test "$$(git rev-parse --verify remotes/origin/master)" != "$$(git rev-parse --verify HEAD)" \
+ && echo "Not on master branch, silently skipping deployment to PyPI." \
+ || $(python) setup.py egg_info register sdist bdist_egg upload
diff --git a/setup.py b/setup.py
index 34c01ab695..85111970c2 100755
--- a/setup.py
+++ b/setup.py
@@ -2,7 +2,7 @@
setup(
name='toil',
- version='3.0.4',
+ version='3.0.5.dev1',
description='Pipeline management software for clusters.',
author='Benedict Paten',
author_email='[email protected]',
|
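The Makefile change above deploys to PyPI only when the checked-out commit matches `remotes/origin/master`. The sketch below restates that gate in Python purely for illustration; it is not part of the toil build, and the function names and the `deploy` callback are hypothetical.

```python
# Illustrative only: the "deploy only from master" gate from the Makefile,
# expressed with subprocess calls that mirror the git commands in the diff.
import subprocess

def rev_parse(ref):
    return subprocess.check_output(["git", "rev-parse", "--verify", ref]).strip()

def deploy_if_on_master(deploy):
    if rev_parse("HEAD") == rev_parse("remotes/origin/master"):
        deploy()
    else:
        print("Not on master branch, silently skipping deployment to PyPI.")
```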
google__mobly-524 | Fix pytest warnings in Python 3
`pytest` currently produces the following warnings:
mobly/mobly/test_runner.py:181: PytestWarning: cannot collect test class 'TestRunner' because it has a __init__ constructor
class TestRunner(object):
mobly/tests/mobly/base_instrumentation_test_test.py:179: DeprecationWarning: Please use assertEqual instead.
expected_completed_and_passed)
mobly/tests/mobly/base_instrumentation_test_test.py:179: DeprecationWarning: Please use assertEqual instead.
expected_completed_and_passed)
mobly/tests/mobly/base_instrumentation_test_test.py:192: DeprecationWarning: Please use assertEqual instead.
self.assertEquals(actual_test.begin_time, expected_begin_time)
mobly/tests/mobly/base_instrumentation_test_test.py:193: DeprecationWarning: Please use assertEqual instead.
self.assertEquals(actual_test.end_time, expected_end_time)
mobly/tests/mobly/base_instrumentation_test_test.py:179: DeprecationWarning: Please use assertEqual instead.
expected_completed_and_passed)
mobly/tests/mobly/base_instrumentation_test_test.py:192: DeprecationWarning: Please use assertEqual instead.
self.assertEquals(actual_test.begin_time, expected_begin_time)
mobly/tests/mobly/base_instrumentation_test_test.py:193: DeprecationWarning: Please use assertEqual instead.
self.assertEquals(actual_test.end_time, expected_end_time)
mobly/tests/mobly/base_instrumentation_test_test.py:192: DeprecationWarning: Please use assertEqual instead.
self.assertEquals(actual_test.begin_time, expected_begin_time)
mobly/tests/mobly/base_instrumentation_test_test.py:193: DeprecationWarning: Please use assertEqual instead.
self.assertEquals(actual_test.end_time, expected_end_time)
mobly/tests/mobly/base_instrumentation_test_test.py:179: DeprecationWarning: Please use assertEqual instead.
expected_completed_and_passed)
mobly/tests/mobly/base_instrumentation_test_test.py:179: DeprecationWarning: Please use assertEqual instead.
expected_completed_and_passed)
mobly/tests/mobly/base_instrumentation_test_test.py:179: DeprecationWarning: Please use assertEqual instead.
expected_completed_and_passed)
mobly/tests/mobly/base_instrumentation_test_test.py:192: DeprecationWarning: Please use assertEqual instead.
self.assertEquals(actual_test.begin_time, expected_begin_time)
mobly/tests/mobly/base_instrumentation_test_test.py:193: DeprecationWarning: Please use assertEqual instead.
self.assertEquals(actual_test.end_time, expected_end_time)
mobly/tests/mobly/base_instrumentation_test_test.py:192: DeprecationWarning: Please use assertEqual instead.
self.assertEquals(actual_test.begin_time, expected_begin_time)
mobly/tests/mobly/base_instrumentation_test_test.py:193: DeprecationWarning: Please use assertEqual instead.
self.assertEquals(actual_test.end_time, expected_end_time)
mobly/tests/mobly/base_instrumentation_test_test.py:179: DeprecationWarning: Please use assertEqual instead.
expected_completed_and_passed)
mobly/tests/mobly/base_instrumentation_test_test.py:179: DeprecationWarning: Please use assertEqual instead.
expected_completed_and_passed)
mobly/tests/mobly/base_instrumentation_test_test.py:192: DeprecationWarning: Please use assertEqual instead.
self.assertEquals(actual_test.begin_time, expected_begin_time)
mobly/tests/mobly/base_instrumentation_test_test.py:193: DeprecationWarning: Please use assertEqual instead.
self.assertEquals(actual_test.end_time, expected_end_time)
mobly/tests/mobly/base_instrumentation_test_test.py:179: DeprecationWarning: Please use assertEqual instead.
expected_completed_and_passed)
mobly/tests/mobly/base_instrumentation_test_test.py:179: DeprecationWarning: Please use assertEqual instead.
expected_completed_and_passed)
mobly/tests/mobly/output_test.py:171: DeprecationWarning: Please use assertNotEqual instead.
self.assertNotEquals(output_dir1, output_dir2)
mobly/tests/mobly/output_test.py:205: DeprecationWarning: Please use assertNotEqual instead.
self.assertNotEquals(output_dir1, output_dir2)
-- Docs: https://docs.pytest.org/en/latest/warnings.html
| [
{
"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport platform\nimport setuptools\nfrom setuptools.command import test\nimport sys\n\ninstall_requires = [\n 'future', 'portpicker', 'psutil>=5.4.4', 'pyserial', 'pyyaml',\n 'timeout_decorator'\n]\n\nif sys.version_info < (3, ):\n install_requires.extend([\n 'enum34',\n # \"futures\" is needed for py2 compatibility and it only works in 2.7\n 'futures',\n ])\n\nif platform.system() == 'Windows':\n install_requires.append('pywin32')\n\n\nclass PyTest(test.test):\n \"\"\"Class used to execute unit tests using PyTest. This allows us to execute\n unit tests without having to install the package.\n \"\"\"\n\n def finalize_options(self):\n test.test.finalize_options(self)\n self.test_args = ['-x', \"tests\"]\n self.test_suite = True\n\n def run_tests(self):\n import pytest\n errno = pytest.main(self.test_args)\n sys.exit(errno)\n\n\ndef main():\n setuptools.setup(\n name='mobly',\n version='1.7.5',\n maintainer='Ang Li',\n maintainer_email='[email protected]',\n description='Automation framework for special end-to-end test cases',\n license='Apache2.0',\n url='https://github.com/google/mobly',\n download_url='https://github.com/google/mobly/tarball/1.7.5',\n packages=setuptools.find_packages(),\n include_package_data=False,\n scripts=['tools/sl4a_shell.py', 'tools/snippet_shell.py'],\n tests_require=[\n 'mock',\n 'pytest',\n 'pytz',\n ],\n install_requires=install_requires,\n cmdclass={'test': PyTest},\n )\n\n\nif __name__ == '__main__':\n main()\n",
"path": "setup.py"
}
] | [
{
"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport platform\nimport setuptools\nfrom setuptools.command import test\nimport sys\n\ninstall_requires = [\n 'future', 'portpicker', 'psutil>=5.4.4', 'pyserial', 'pyyaml',\n 'timeout_decorator'\n]\n\nif sys.version_info < (3, ):\n install_requires.extend([\n 'enum34',\n # \"futures\" is needed for py2 compatibility and it only works in 2.7\n 'futures',\n ])\n\nif platform.system() == 'Windows':\n install_requires.append('pywin32')\n\n\nclass PyTest(test.test):\n \"\"\"Class used to execute unit tests using PyTest. This allows us to execute\n unit tests without having to install the package.\n \"\"\"\n\n def finalize_options(self):\n test.test.finalize_options(self)\n self.test_args = ['-x', \"tests/mobly\"]\n self.test_suite = True\n\n def run_tests(self):\n import pytest\n errno = pytest.main(self.test_args)\n sys.exit(errno)\n\n\ndef main():\n setuptools.setup(\n name='mobly',\n version='1.7.5',\n maintainer='Ang Li',\n maintainer_email='[email protected]',\n description='Automation framework for special end-to-end test cases',\n license='Apache2.0',\n url='https://github.com/google/mobly',\n download_url='https://github.com/google/mobly/tarball/1.7.5',\n packages=setuptools.find_packages(),\n include_package_data=False,\n scripts=['tools/sl4a_shell.py', 'tools/snippet_shell.py'],\n tests_require=[\n 'mock',\n 'pytest',\n 'pytz',\n ],\n install_requires=install_requires,\n cmdclass={'test': PyTest},\n )\n\n\nif __name__ == '__main__':\n main()\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index 9e593a80..adc1df5d 100755
--- a/setup.py
+++ b/setup.py
@@ -40,7 +40,7 @@ class PyTest(test.test):
def finalize_options(self):
test.test.finalize_options(self)
- self.test_args = ['-x', "tests"]
+ self.test_args = ['-x', "tests/mobly"]
self.test_suite = True
def run_tests(self):
|
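The PR above narrows the pytest invocation to `tests/mobly`; the DeprecationWarnings in the report come from the deprecated `assertEquals`/`assertNotEquals` aliases. A minimal, self-contained sketch of the non-deprecated spellings (the test class and values are invented, not taken from the mobly suite):

```python
# Hypothetical example showing the assertion rename the warnings point at.
import unittest

class ExampleTest(unittest.TestCase):
    def test_renamed_asserts(self):
        # assertEquals/assertNotEquals are deprecated aliases in Python 3;
        # the non-plural forms do the same thing without the warning.
        self.assertEqual(1 + 1, 2)
        self.assertNotEqual("output_dir1", "output_dir2")

if __name__ == "__main__":
    unittest.main()
```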
carpentries__amy-430 | Skills should be displayed in sorted order
Skills are currently displayed in a more-or-less random order (based, I presume, on the order in which they were added to the DB). They should be sorted, either alphabetically (which would put all 'dc' before all 'swc') or by the second part (e.g., by what's after the '/').
| [
{
"content": "import datetime\nimport re\n\nfrom django.contrib.auth.models import (\n AbstractBaseUser, BaseUserManager, PermissionsMixin)\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.urlresolvers import reverse\nfrom django.db import models\nfrom django.db.models import Q\n\nfrom django_countries.fields import CountryField\nimport reversion\n\n#------------------------------------------------------------\n\nSTR_SHORT = 10 # length of short strings\nSTR_MED = 40 # length of medium strings\nSTR_LONG = 100 # length of long strings\nSTR_REG_KEY = 20 # length of Eventbrite registration key\n\n#------------------------------------------------------------\n\nclass Site(models.Model):\n '''Represent a site where workshops are hosted.'''\n\n domain = models.CharField(max_length=STR_LONG, unique=True)\n fullname = models.CharField(max_length=STR_LONG, unique=True)\n country = CountryField(null=True, blank=True)\n notes = models.TextField(default=\"\", blank=True)\n\n def __str__(self):\n return self.domain\n\n def get_absolute_url(self):\n return reverse('site_details', args=[str(self.domain)])\n\n#------------------------------------------------------------\n\nclass Airport(models.Model):\n '''Represent an airport (used to locate instructors).'''\n\n iata = models.CharField(max_length=STR_SHORT, unique=True, verbose_name=\"IATA code\",\n help_text='<a href=\"https://www.world-airport-codes.com/\">Look up code</a>')\n fullname = models.CharField(max_length=STR_LONG, unique=True, verbose_name=\"Airport name\")\n country = CountryField()\n latitude = models.FloatField()\n longitude = models.FloatField()\n\n def __str__(self):\n return '{0}: {1}'.format(self.iata, self.fullname)\n\n def get_absolute_url(self):\n return reverse('airport_details', args=[str(self.iata)])\n\n#------------------------------------------------------------\n\nclass PersonManager(BaseUserManager):\n \"\"\"\n Create users and superusers from command line.\n\n For example:\n\n $ python manage.py createsuperuser\n \"\"\"\n\n def create_user(self, username, personal, family, email, password=None):\n \"\"\"\n Create and save a normal (not-super) user.\n \"\"\"\n user = self.model(\n username=username, personal=personal, family=family,\n email=self.normalize_email(email),\n is_superuser=False)\n user.set_password(password)\n user.save(using=self._db)\n return user\n\n def create_superuser(self, username, personal, family, email, password):\n \"\"\"\n Create and save a superuser.\n \"\"\"\n user = self.model(\n username=username, personal=personal, family=family,\n email=self.normalize_email(email),\n is_superuser=True)\n user.set_password(password)\n user.save(using=self._db)\n return user\n\n\[email protected]\nclass Person(AbstractBaseUser, PermissionsMixin):\n '''Represent a single person.'''\n MALE = 'M'\n FEMALE = 'F'\n OTHER = 'O'\n GENDER_CHOICES = (\n (MALE, 'Male'),\n (FEMALE, 'Female'),\n (OTHER, 'Other'),\n )\n\n # These attributes should always contain field names of Person\n PERSON_UPLOAD_FIELDS = ('personal', 'middle', 'family', 'email')\n PERSON_TASK_EXTRA_FIELDS = ('event', 'role')\n PERSON_TASK_UPLOAD_FIELDS = PERSON_UPLOAD_FIELDS + PERSON_TASK_EXTRA_FIELDS\n\n personal = models.CharField(max_length=STR_LONG)\n middle = models.CharField(max_length=STR_LONG, null=True, blank=True)\n family = models.CharField(max_length=STR_LONG)\n email = models.CharField(max_length=STR_LONG, unique=True, null=True, blank=True)\n gender = models.CharField(max_length=1, choices=GENDER_CHOICES, null=True, 
blank=True)\n may_contact = models.BooleanField(default=True)\n airport = models.ForeignKey(Airport, null=True, blank=True, on_delete=models.PROTECT)\n github = models.CharField(max_length=STR_MED, unique=True, null=True, blank=True)\n twitter = models.CharField(max_length=STR_MED, unique=True, null=True, blank=True)\n url = models.CharField(max_length=STR_LONG, null=True, blank=True)\n username = models.CharField(max_length=STR_MED, unique=True)\n notes = models.TextField(default=\"\", blank=True)\n\n badges = models.ManyToManyField(\"Badge\", through=\"Award\")\n lessons = models.ManyToManyField(\"Lesson\", through=\"Qualification\")\n domains = models.ManyToManyField(\"KnowledgeDomain\")\n\n USERNAME_FIELD = 'username'\n REQUIRED_FIELDS = [\n 'personal',\n 'family',\n 'email',\n ]\n\n objects = PersonManager()\n\n def get_full_name(self):\n middle = ''\n if self.middle is not None:\n middle = ' {0}'.format(self.middle)\n return '{0}{1} {2}'.format(self.personal, middle, self.family)\n\n def get_short_name(self):\n return self.personal\n\n def __str__(self):\n result = self.get_full_name()\n if self.email is not None:\n result += ' <' + self.email + '>'\n return result\n\n def get_absolute_url(self):\n return reverse('person_details', args=[str(self.id)])\n\n @property\n def is_staff(self):\n \"\"\"\n Required for logging into admin panel at '/admin/'.\n \"\"\"\n return self.is_superuser\n\n def save(self, *args, **kwargs):\n # save empty string as NULL to the database - otherwise there are\n # issues with UNIQUE constraint failing\n self.middle = self.middle or None\n self.email = self.email or None\n self.gender = self.gender or None\n self.airport = self.airport or None\n self.github = self.github or None\n self.twitter = self.twitter or None\n self.url = self.url or None\n super().save(*args, **kwargs)\n\n\n#------------------------------------------------------------\n\nclass Tag(models.Model):\n '''Label for grouping events.'''\n\n name = models.CharField(max_length=STR_MED, unique=True)\n details = models.CharField(max_length=STR_LONG)\n\n def __str__(self):\n return self.name\n\n#------------------------------------------------------------\n\n# In order to make our custom filters chainable, we have to\n# define them on the QuerySet, not the Manager - see\n# http://www.dabapps.com/blog/higher-level-query-api-django-orm/\nclass EventQuerySet(models.query.QuerySet):\n '''Handles finding past, ongoing and upcoming events'''\n\n def past_events(self):\n '''Return past events.\n\n Past events are those which started before today, and\n which either ended before today or whose end is NULL\n '''\n\n # All events that started before today\n queryset = self.filter(start__lt=datetime.date.today())\n\n # Of those events, only those that also ended before today\n # or where the end date is NULL\n ended_before_today = models.Q(end__lt=datetime.date.today())\n end_is_null = models.Q(end__isnull=True)\n\n queryset = queryset.filter(ended_before_today | end_is_null)\n\n return queryset\n\n def upcoming_events(self):\n '''Return published upcoming events.\n\n Upcoming events are those which start after today. Published\n events are those which have a URL. 
Events are ordered by date,\n soonest first.\n '''\n\n queryset = self.filter(start__gt=datetime.date.today())\\\n .filter(url__isnull=False)\\\n .order_by('start')\n return queryset\n\n def ongoing_events(self):\n '''Return ongoing events.\n\n Ongoing events are those which start after today.\n '''\n\n # All events that start before or on today\n queryset = self.filter(start__lte=datetime.date.today())\n\n # Of those, only the ones that finish after or on today\n queryset = queryset.filter(end__gte=datetime.date.today())\n\n return queryset\n\n def unpublished_events(self):\n '''Return events without URLs that are upcoming or have unknown starts.\n\n Events are ordered by slug and then by serial number.'''\n\n future_without_url = Q(start__gte=datetime.date.today(), url__isnull=True)\n unknown_start = Q(start__isnull=True)\n return self.filter(future_without_url | unknown_start)\\\n .order_by('slug', 'id')\n\n def uninvoiced_events(self):\n '''Return a queryset for events that have not yet been invoiced.\n\n These are events that have an admin fee, are not marked as invoiced, and have occurred.\n Events are sorted oldest first.'''\n\n return self.past_events().filter(admin_fee__gt=0)\\\n .exclude(invoiced=True)\\\n .order_by('start')\n\nclass EventManager(models.Manager):\n '''A custom manager which is essentially a proxy for EventQuerySet'''\n\n def get_queryset(self):\n \"\"\"Attach our custom query set to the manager.\"\"\"\n return EventQuerySet(self.model, using=self._db)\n\n # Proxy methods so we can call our custom filters from the manager\n # without explicitly creating an EventQuerySet first - see\n # reference above\n\n def past_events(self):\n return self.get_queryset().past_events()\n\n def ongoing_events(self):\n return self.get_queryset().ongoing_events()\n\n def upcoming_events(self):\n return self.get_queryset().upcoming_events()\n\n def unpublished_events(self):\n return self.get_queryset().unpublished_events()\n\n def uninvoiced_events(self):\n return self.get_queryset().uninvoiced_events()\n\n\[email protected]\nclass Event(models.Model):\n '''Represent a single event.'''\n\n site = models.ForeignKey(Site, on_delete=models.PROTECT)\n tags = models.ManyToManyField(Tag)\n organizer = models.ForeignKey(Site, related_name='organizer', null=True,\n blank=True, on_delete=models.PROTECT)\n start = models.DateField(null=True, blank=True,\n help_text='Setting this and url \"publishes\" the event.')\n end = models.DateField(null=True, blank=True)\n slug = models.CharField(max_length=STR_LONG, null=True, blank=True, unique=True)\n url = models.CharField(max_length=STR_LONG, unique=True, null=True, blank=True,\n help_text='Setting this and startdate \"publishes\" the event.')\n reg_key = models.CharField(max_length=STR_REG_KEY, null=True, blank=True, verbose_name=\"Eventbrite key\")\n attendance = models.IntegerField(null=True, blank=True)\n admin_fee = models.DecimalField(max_digits=6, decimal_places=2, null=True, blank=True)\n invoiced = models.NullBooleanField(default=False, blank=True)\n notes = models.TextField(default=\"\", blank=True)\n\n class Meta:\n ordering = ('-start', )\n\n # Set the custom manager\n objects = EventManager()\n\n def __str__(self):\n return self.get_ident()\n\n def get_absolute_url(self):\n return reverse('event_details', args=[self.get_ident()])\n\n def get_ident(self):\n if self.slug:\n return str(self.slug)\n return str(self.id)\n\n @staticmethod\n def get_by_ident(ident):\n '''\n Select event that matches given identifier.\n If ident is an int, 
search for matching primary-key;\n otherwise get matching slug. May throw DoesNotExist error.\n '''\n try:\n return Event.objects.get(pk=int(ident))\n except ValueError:\n return Event.objects.get(slug=ident)\n\n def save(self, *args, **kwargs):\n self.slug = self.slug or None\n self.url = self.url or None\n super(Event, self).save(*args, **kwargs)\n\n\n#------------------------------------------------------------\n\nclass Role(models.Model):\n '''Enumerate roles in workshops.'''\n\n name = models.CharField(max_length=STR_MED)\n\n def __str__(self):\n return self.name\n\n#------------------------------------------------------------\n\n\nclass TaskManager(models.Manager):\n def instructors(self):\n \"\"\"Fetch tasks with role 'instructor'.\"\"\"\n return self.get_queryset().filter(role__name=\"instructor\")\n\n def learners(self):\n \"\"\"Fetch tasks with role 'learner'.\"\"\"\n return self.get_queryset().filter(role__name=\"learner\")\n\n def helpers(self):\n \"\"\"Fetch tasks with role 'helper'.\"\"\"\n return self.get_queryset().filter(role__name=\"helper\")\n\n\nclass Task(models.Model):\n '''Represent who did what at events.'''\n\n event = models.ForeignKey(Event)\n person = models.ForeignKey(Person)\n role = models.ForeignKey(Role)\n\n objects = TaskManager()\n\n class Meta:\n unique_together = (\"event\", \"person\", \"role\")\n\n def __str__(self):\n return '{0}/{1}={2}'.format(self.event, self.person, self.role)\n\n def get_absolute_url(self):\n return reverse('task_details', kwargs={'task_id': self.id})\n\n#------------------------------------------------------------\n\nclass Lesson(models.Model):\n '''Represent a lesson someone might teach.'''\n\n name = models.CharField(max_length=STR_MED)\n\n def __str__(self):\n return self.name\n\n#------------------------------------------------------------\n\nclass Qualification(models.Model):\n '''What is someone qualified to teach?'''\n\n person = models.ForeignKey(Person)\n lesson = models.ForeignKey(Lesson)\n\n def __str__(self):\n return '{0}/{1}'.format(self.person, self.lesson)\n\n#------------------------------------------------------------\n\nclass Badge(models.Model):\n '''Represent a badge we award.'''\n\n name = models.CharField(max_length=STR_MED, unique=True)\n title = models.CharField(max_length=STR_MED)\n criteria = models.CharField(max_length=STR_LONG)\n\n def __str__(self):\n return self.title\n\n def get_absolute_url(self):\n return reverse('badge_details', args=[self.name])\n\n#------------------------------------------------------------\n\nclass Award(models.Model):\n '''Represent a particular badge earned by a person.'''\n\n person = models.ForeignKey(Person)\n badge = models.ForeignKey(Badge)\n awarded = models.DateField()\n event = models.ForeignKey(Event, null=True, blank=True)\n\n class Meta:\n unique_together = (\"person\", \"badge\", )\n\n def __str__(self):\n return '{0}/{1}/{2}/{3}'.format(self.person, self.badge, self.awarded, self.event)\n\n#------------------------------------------------------------\n\nclass KnowledgeDomain(models.Model):\n \"\"\"Represent a knowledge domain a person is engaged in.\"\"\"\n name = models.CharField(max_length=STR_LONG)\n\n def __str__(self):\n return self.name\n",
"path": "workshops/models.py"
}
] | [
{
"content": "import datetime\nimport re\n\nfrom django.contrib.auth.models import (\n AbstractBaseUser, BaseUserManager, PermissionsMixin)\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.urlresolvers import reverse\nfrom django.db import models\nfrom django.db.models import Q\n\nfrom django_countries.fields import CountryField\nimport reversion\n\n#------------------------------------------------------------\n\nSTR_SHORT = 10 # length of short strings\nSTR_MED = 40 # length of medium strings\nSTR_LONG = 100 # length of long strings\nSTR_REG_KEY = 20 # length of Eventbrite registration key\n\n#------------------------------------------------------------\n\nclass Site(models.Model):\n '''Represent a site where workshops are hosted.'''\n\n domain = models.CharField(max_length=STR_LONG, unique=True)\n fullname = models.CharField(max_length=STR_LONG, unique=True)\n country = CountryField(null=True, blank=True)\n notes = models.TextField(default=\"\", blank=True)\n\n def __str__(self):\n return self.domain\n\n def get_absolute_url(self):\n return reverse('site_details', args=[str(self.domain)])\n\n#------------------------------------------------------------\n\nclass Airport(models.Model):\n '''Represent an airport (used to locate instructors).'''\n\n iata = models.CharField(max_length=STR_SHORT, unique=True, verbose_name=\"IATA code\",\n help_text='<a href=\"https://www.world-airport-codes.com/\">Look up code</a>')\n fullname = models.CharField(max_length=STR_LONG, unique=True, verbose_name=\"Airport name\")\n country = CountryField()\n latitude = models.FloatField()\n longitude = models.FloatField()\n\n def __str__(self):\n return '{0}: {1}'.format(self.iata, self.fullname)\n\n def get_absolute_url(self):\n return reverse('airport_details', args=[str(self.iata)])\n\n#------------------------------------------------------------\n\nclass PersonManager(BaseUserManager):\n \"\"\"\n Create users and superusers from command line.\n\n For example:\n\n $ python manage.py createsuperuser\n \"\"\"\n\n def create_user(self, username, personal, family, email, password=None):\n \"\"\"\n Create and save a normal (not-super) user.\n \"\"\"\n user = self.model(\n username=username, personal=personal, family=family,\n email=self.normalize_email(email),\n is_superuser=False)\n user.set_password(password)\n user.save(using=self._db)\n return user\n\n def create_superuser(self, username, personal, family, email, password):\n \"\"\"\n Create and save a superuser.\n \"\"\"\n user = self.model(\n username=username, personal=personal, family=family,\n email=self.normalize_email(email),\n is_superuser=True)\n user.set_password(password)\n user.save(using=self._db)\n return user\n\n\[email protected]\nclass Person(AbstractBaseUser, PermissionsMixin):\n '''Represent a single person.'''\n MALE = 'M'\n FEMALE = 'F'\n OTHER = 'O'\n GENDER_CHOICES = (\n (MALE, 'Male'),\n (FEMALE, 'Female'),\n (OTHER, 'Other'),\n )\n\n # These attributes should always contain field names of Person\n PERSON_UPLOAD_FIELDS = ('personal', 'middle', 'family', 'email')\n PERSON_TASK_EXTRA_FIELDS = ('event', 'role')\n PERSON_TASK_UPLOAD_FIELDS = PERSON_UPLOAD_FIELDS + PERSON_TASK_EXTRA_FIELDS\n\n personal = models.CharField(max_length=STR_LONG)\n middle = models.CharField(max_length=STR_LONG, null=True, blank=True)\n family = models.CharField(max_length=STR_LONG)\n email = models.CharField(max_length=STR_LONG, unique=True, null=True, blank=True)\n gender = models.CharField(max_length=1, choices=GENDER_CHOICES, null=True, 
blank=True)\n may_contact = models.BooleanField(default=True)\n airport = models.ForeignKey(Airport, null=True, blank=True, on_delete=models.PROTECT)\n github = models.CharField(max_length=STR_MED, unique=True, null=True, blank=True)\n twitter = models.CharField(max_length=STR_MED, unique=True, null=True, blank=True)\n url = models.CharField(max_length=STR_LONG, null=True, blank=True)\n username = models.CharField(max_length=STR_MED, unique=True)\n notes = models.TextField(default=\"\", blank=True)\n\n badges = models.ManyToManyField(\"Badge\", through=\"Award\")\n lessons = models.ManyToManyField(\"Lesson\", through=\"Qualification\")\n domains = models.ManyToManyField(\"KnowledgeDomain\")\n\n USERNAME_FIELD = 'username'\n REQUIRED_FIELDS = [\n 'personal',\n 'family',\n 'email',\n ]\n\n objects = PersonManager()\n\n def get_full_name(self):\n middle = ''\n if self.middle is not None:\n middle = ' {0}'.format(self.middle)\n return '{0}{1} {2}'.format(self.personal, middle, self.family)\n\n def get_short_name(self):\n return self.personal\n\n def __str__(self):\n result = self.get_full_name()\n if self.email is not None:\n result += ' <' + self.email + '>'\n return result\n\n def get_absolute_url(self):\n return reverse('person_details', args=[str(self.id)])\n\n @property\n def is_staff(self):\n \"\"\"\n Required for logging into admin panel at '/admin/'.\n \"\"\"\n return self.is_superuser\n\n def save(self, *args, **kwargs):\n # save empty string as NULL to the database - otherwise there are\n # issues with UNIQUE constraint failing\n self.middle = self.middle or None\n self.email = self.email or None\n self.gender = self.gender or None\n self.airport = self.airport or None\n self.github = self.github or None\n self.twitter = self.twitter or None\n self.url = self.url or None\n super().save(*args, **kwargs)\n\n\n#------------------------------------------------------------\n\nclass Tag(models.Model):\n '''Label for grouping events.'''\n\n name = models.CharField(max_length=STR_MED, unique=True)\n details = models.CharField(max_length=STR_LONG)\n\n def __str__(self):\n return self.name\n\n#------------------------------------------------------------\n\n# In order to make our custom filters chainable, we have to\n# define them on the QuerySet, not the Manager - see\n# http://www.dabapps.com/blog/higher-level-query-api-django-orm/\nclass EventQuerySet(models.query.QuerySet):\n '''Handles finding past, ongoing and upcoming events'''\n\n def past_events(self):\n '''Return past events.\n\n Past events are those which started before today, and\n which either ended before today or whose end is NULL\n '''\n\n # All events that started before today\n queryset = self.filter(start__lt=datetime.date.today())\n\n # Of those events, only those that also ended before today\n # or where the end date is NULL\n ended_before_today = models.Q(end__lt=datetime.date.today())\n end_is_null = models.Q(end__isnull=True)\n\n queryset = queryset.filter(ended_before_today | end_is_null)\n\n return queryset\n\n def upcoming_events(self):\n '''Return published upcoming events.\n\n Upcoming events are those which start after today. Published\n events are those which have a URL. 
Events are ordered by date,\n soonest first.\n '''\n\n queryset = self.filter(start__gt=datetime.date.today())\\\n .filter(url__isnull=False)\\\n .order_by('start')\n return queryset\n\n def ongoing_events(self):\n '''Return ongoing events.\n\n Ongoing events are those which start after today.\n '''\n\n # All events that start before or on today\n queryset = self.filter(start__lte=datetime.date.today())\n\n # Of those, only the ones that finish after or on today\n queryset = queryset.filter(end__gte=datetime.date.today())\n\n return queryset\n\n def unpublished_events(self):\n '''Return events without URLs that are upcoming or have unknown starts.\n\n Events are ordered by slug and then by serial number.'''\n\n future_without_url = Q(start__gte=datetime.date.today(), url__isnull=True)\n unknown_start = Q(start__isnull=True)\n return self.filter(future_without_url | unknown_start)\\\n .order_by('slug', 'id')\n\n def uninvoiced_events(self):\n '''Return a queryset for events that have not yet been invoiced.\n\n These are events that have an admin fee, are not marked as invoiced, and have occurred.\n Events are sorted oldest first.'''\n\n return self.past_events().filter(admin_fee__gt=0)\\\n .exclude(invoiced=True)\\\n .order_by('start')\n\nclass EventManager(models.Manager):\n '''A custom manager which is essentially a proxy for EventQuerySet'''\n\n def get_queryset(self):\n \"\"\"Attach our custom query set to the manager.\"\"\"\n return EventQuerySet(self.model, using=self._db)\n\n # Proxy methods so we can call our custom filters from the manager\n # without explicitly creating an EventQuerySet first - see\n # reference above\n\n def past_events(self):\n return self.get_queryset().past_events()\n\n def ongoing_events(self):\n return self.get_queryset().ongoing_events()\n\n def upcoming_events(self):\n return self.get_queryset().upcoming_events()\n\n def unpublished_events(self):\n return self.get_queryset().unpublished_events()\n\n def uninvoiced_events(self):\n return self.get_queryset().uninvoiced_events()\n\n\[email protected]\nclass Event(models.Model):\n '''Represent a single event.'''\n\n site = models.ForeignKey(Site, on_delete=models.PROTECT)\n tags = models.ManyToManyField(Tag)\n organizer = models.ForeignKey(Site, related_name='organizer', null=True,\n blank=True, on_delete=models.PROTECT)\n start = models.DateField(null=True, blank=True,\n help_text='Setting this and url \"publishes\" the event.')\n end = models.DateField(null=True, blank=True)\n slug = models.CharField(max_length=STR_LONG, null=True, blank=True, unique=True)\n url = models.CharField(max_length=STR_LONG, unique=True, null=True, blank=True,\n help_text='Setting this and startdate \"publishes\" the event.')\n reg_key = models.CharField(max_length=STR_REG_KEY, null=True, blank=True, verbose_name=\"Eventbrite key\")\n attendance = models.IntegerField(null=True, blank=True)\n admin_fee = models.DecimalField(max_digits=6, decimal_places=2, null=True, blank=True)\n invoiced = models.NullBooleanField(default=False, blank=True)\n notes = models.TextField(default=\"\", blank=True)\n\n class Meta:\n ordering = ('-start', )\n\n # Set the custom manager\n objects = EventManager()\n\n def __str__(self):\n return self.get_ident()\n\n def get_absolute_url(self):\n return reverse('event_details', args=[self.get_ident()])\n\n def get_ident(self):\n if self.slug:\n return str(self.slug)\n return str(self.id)\n\n @staticmethod\n def get_by_ident(ident):\n '''\n Select event that matches given identifier.\n If ident is an int, 
search for matching primary-key;\n otherwise get matching slug. May throw DoesNotExist error.\n '''\n try:\n return Event.objects.get(pk=int(ident))\n except ValueError:\n return Event.objects.get(slug=ident)\n\n def save(self, *args, **kwargs):\n self.slug = self.slug or None\n self.url = self.url or None\n super(Event, self).save(*args, **kwargs)\n\n\n#------------------------------------------------------------\n\nclass Role(models.Model):\n '''Enumerate roles in workshops.'''\n\n name = models.CharField(max_length=STR_MED)\n\n def __str__(self):\n return self.name\n\n#------------------------------------------------------------\n\n\nclass TaskManager(models.Manager):\n def instructors(self):\n \"\"\"Fetch tasks with role 'instructor'.\"\"\"\n return self.get_queryset().filter(role__name=\"instructor\")\n\n def learners(self):\n \"\"\"Fetch tasks with role 'learner'.\"\"\"\n return self.get_queryset().filter(role__name=\"learner\")\n\n def helpers(self):\n \"\"\"Fetch tasks with role 'helper'.\"\"\"\n return self.get_queryset().filter(role__name=\"helper\")\n\n\nclass Task(models.Model):\n '''Represent who did what at events.'''\n\n event = models.ForeignKey(Event)\n person = models.ForeignKey(Person)\n role = models.ForeignKey(Role)\n\n objects = TaskManager()\n\n class Meta:\n unique_together = (\"event\", \"person\", \"role\")\n\n def __str__(self):\n return '{0}/{1}={2}'.format(self.event, self.person, self.role)\n\n def get_absolute_url(self):\n return reverse('task_details', kwargs={'task_id': self.id})\n\n#------------------------------------------------------------\n\nclass Lesson(models.Model):\n '''Represent a lesson someone might teach.'''\n\n name = models.CharField(max_length=STR_MED)\n\n def __str__(self):\n return self.name\n\n class Meta:\n ordering = ['name']\n\n#------------------------------------------------------------\n\nclass Qualification(models.Model):\n '''What is someone qualified to teach?'''\n\n person = models.ForeignKey(Person)\n lesson = models.ForeignKey(Lesson)\n\n def __str__(self):\n return '{0}/{1}'.format(self.person, self.lesson)\n\n#------------------------------------------------------------\n\nclass Badge(models.Model):\n '''Represent a badge we award.'''\n\n name = models.CharField(max_length=STR_MED, unique=True)\n title = models.CharField(max_length=STR_MED)\n criteria = models.CharField(max_length=STR_LONG)\n\n def __str__(self):\n return self.title\n\n def get_absolute_url(self):\n return reverse('badge_details', args=[self.name])\n\n#------------------------------------------------------------\n\nclass Award(models.Model):\n '''Represent a particular badge earned by a person.'''\n\n person = models.ForeignKey(Person)\n badge = models.ForeignKey(Badge)\n awarded = models.DateField()\n event = models.ForeignKey(Event, null=True, blank=True)\n\n class Meta:\n unique_together = (\"person\", \"badge\", )\n\n def __str__(self):\n return '{0}/{1}/{2}/{3}'.format(self.person, self.badge, self.awarded, self.event)\n\n#------------------------------------------------------------\n\nclass KnowledgeDomain(models.Model):\n \"\"\"Represent a knowledge domain a person is engaged in.\"\"\"\n name = models.CharField(max_length=STR_LONG)\n\n def __str__(self):\n return self.name\n",
"path": "workshops/models.py"
}
] | diff --git a/workshops/models.py b/workshops/models.py
index 0fd73428d..d52ca94f5 100644
--- a/workshops/models.py
+++ b/workshops/models.py
@@ -391,6 +391,9 @@ class Lesson(models.Model):
def __str__(self):
return self.name
+ class Meta:
+ ordering = ['name']
+
#------------------------------------------------------------
class Qualification(models.Model):
diff --git a/workshops/test/test_person.py b/workshops/test/test_person.py
index c21ff1426..b15763cc2 100644
--- a/workshops/test/test_person.py
+++ b/workshops/test/test_person.py
@@ -425,8 +425,8 @@ def setUp(self):
self.badge1 = Badge.objects.create(name='Badge1')
self.badge2 = Badge.objects.create(name='Badge2')
- self.lesson1 = Lesson.objects.get(name='swc/python')
- self.lesson2 = Lesson.objects.get(name='dc/spreadsheets')
+ self.lesson1 = Lesson.objects.get(name='dc/spreadsheets')
+ self.lesson2 = Lesson.objects.get(name='swc/python')
self.domain1 = KnowledgeDomain.objects.get(pk=1) # space sciences
self.domain2 = KnowledgeDomain.objects.get(pk=2) # geo* sciences
|
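The fix above adds `ordering = ['name']` to `Lesson.Meta`, so default querysets, and therefore the skills listing, come back alphabetically. A minimal sketch of the same Django pattern, assuming a configured Django project (the model would live inside an installed app) and using an invented `Skill` model as a stand-in:

```python
# Sketch of the default-ordering pattern; "Skill" is hypothetical, not the real model.
from django.db import models

class Skill(models.Model):
    name = models.CharField(max_length=40)

    class Meta:
        ordering = ['name']  # 'dc/...' entries sort before 'swc/...'

    def __str__(self):
        return self.name

# Skill.objects.all() now returns rows ordered by name without an explicit
# .order_by('name') at each call site.
```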
magenta__magenta-629 | ValueError: Cell returns tuple of states, but the flag state_is_tuple is not set. State size is: (LSTMStateTuple(c=128, h=128), LSTMStateTuple(c=128, h=128))
Hey guys,
I've just set up my conda environment and packages. When I run the bazel test //magenta/... command, the test //magenta/models/shared:events_rnn_graph_test fails. I am new to this project, so hopefully someone can point me in the right direction! For your info, I have installed all the required packages according to setup.py and confirmed the installation with the 'pip freeze' and 'conda list' commands.
Thanks in advance!
Simon
Below is the error message from the log file:
```
ERROR: testBuildGraphWithAttention (__main__.EventSequenceRNNGraphTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/private/var/tmp/_bazel_simonttk/2d57163c72209284de52b06652358cc7/execroot/magenta/bazel-out/local-opt/bin/magenta/models/shared/events_rnn_graph_test.runfiles/__main__/magenta/models/shared/events_rnn_graph_test.py", line 58, in testBuildGraphWithAttention
'train', self.config, sequence_example_file_paths=['test'])
File "/private/var/tmp/_bazel_simonttk/2d57163c72209284de52b06652358cc7/execroot/magenta/bazel-out/local-opt/bin/magenta/models/shared/events_rnn_graph_test.runfiles/__main__/magenta/models/shared/events_rnn_graph.py", line 98, in build_graph
attn_length=hparams.attn_length)
File "/private/var/tmp/_bazel_simonttk/2d57163c72209284de52b06652358cc7/execroot/magenta/bazel-out/local-opt/bin/magenta/models/shared/events_rnn_graph_test.runfiles/__main__/magenta/models/shared/events_rnn_graph.py", line 47, in make_rnn_cell
cell = tf.contrib.rnn.AttentionCellWrapper(cell, attn_length)
File "/Users/simonttk/anaconda2/envs/magenta/lib/python2.7/site-packages/tensorflow/contrib/rnn/python/ops/rnn_cell.py", line 1077, in __init__
% str(cell.state_size))
ValueError: Cell returns tuple of states, but the flag state_is_tuple is not set. State size is: (LSTMStateTuple(c=128, h=128), LSTMStateTuple(c=128, h=128))
```
| [
{
"content": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Provides function to build an event sequence RNN model's graph.\"\"\"\n\n# internal imports\nimport tensorflow as tf\nimport magenta\n\n\ndef make_rnn_cell(rnn_layer_sizes,\n dropout_keep_prob=1.0,\n attn_length=0,\n base_cell=tf.contrib.rnn.BasicLSTMCell):\n \"\"\"Makes a RNN cell from the given hyperparameters.\n\n Args:\n rnn_layer_sizes: A list of integer sizes (in units) for each layer of the\n RNN.\n dropout_keep_prob: The float probability to keep the output of any given\n sub-cell.\n attn_length: The size of the attention vector.\n base_cell: The base tf.contrib.rnn.RNNCell to use for sub-cells.\n\n Returns:\n A tf.contrib.rnn.MultiRNNCell based on the given hyperparameters.\n \"\"\"\n cells = []\n for num_units in rnn_layer_sizes:\n cell = base_cell(num_units)\n cell = tf.contrib.rnn.DropoutWrapper(\n cell, output_keep_prob=dropout_keep_prob)\n cells.append(cell)\n\n cell = tf.contrib.rnn.MultiRNNCell(cells)\n if attn_length:\n cell = tf.contrib.rnn.AttentionCellWrapper(cell, attn_length)\n\n return cell\n\n\ndef build_graph(mode, config, sequence_example_file_paths=None):\n \"\"\"Builds the TensorFlow graph.\n\n Args:\n mode: 'train', 'eval', or 'generate'. Only mode related ops are added to\n the graph.\n config: An EventSequenceRnnConfig containing the encoder/decoder and HParams\n to use.\n sequence_example_file_paths: A list of paths to TFRecord files containing\n tf.train.SequenceExample protos. Only needed for training and\n evaluation. May be a sharded file of the form.\n\n Returns:\n A tf.Graph instance which contains the TF ops.\n\n Raises:\n ValueError: If mode is not 'train', 'eval', or 'generate'.\n \"\"\"\n if mode not in ('train', 'eval', 'generate'):\n raise ValueError(\"The mode parameter must be 'train', 'eval', \"\n \"or 'generate'. 
The mode parameter was: %s\" % mode)\n\n hparams = config.hparams\n encoder_decoder = config.encoder_decoder\n\n tf.logging.info('hparams = %s', hparams.values())\n\n input_size = encoder_decoder.input_size\n num_classes = encoder_decoder.num_classes\n no_event_label = encoder_decoder.default_event_label\n\n with tf.Graph().as_default() as graph:\n inputs, labels, lengths, = None, None, None\n\n if mode == 'train' or mode == 'eval':\n inputs, labels, lengths = magenta.common.get_padded_batch(\n sequence_example_file_paths, hparams.batch_size, input_size)\n\n elif mode == 'generate':\n inputs = tf.placeholder(tf.float32, [hparams.batch_size, None,\n input_size])\n\n cell = make_rnn_cell(\n hparams.rnn_layer_sizes,\n dropout_keep_prob=(\n 1.0 if mode == 'generate' else hparams.dropout_keep_prob),\n attn_length=hparams.attn_length)\n\n initial_state = cell.zero_state(hparams.batch_size, tf.float32)\n\n outputs, final_state = tf.nn.dynamic_rnn(\n cell, inputs, initial_state=initial_state, swap_memory=True)\n\n outputs_flat = tf.reshape(outputs, [-1, cell.output_size])\n logits_flat = tf.contrib.layers.linear(outputs_flat, num_classes)\n\n if mode == 'train' or mode == 'eval':\n labels_flat = tf.reshape(labels, [-1])\n mask = tf.sequence_mask(lengths)\n if hparams.skip_first_n_losses:\n skip = tf.minimum(lengths, hparams.skip_first_n_losses)\n skip_mask = tf.sequence_mask(skip, maxlen=tf.reduce_max(lengths))\n mask = tf.logical_and(mask, tf.logical_not(skip_mask))\n mask = tf.cast(mask, tf.float32)\n mask_flat = tf.reshape(mask, [-1])\n\n num_logits = tf.to_float(tf.reduce_sum(lengths))\n\n with tf.control_dependencies(\n [tf.Assert(tf.greater(num_logits, 0.), [num_logits])]):\n softmax_cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=labels_flat, logits=logits_flat)\n loss = tf.reduce_sum(mask_flat * softmax_cross_entropy) / num_logits\n perplexity = (tf.reduce_sum(mask_flat * tf.exp(softmax_cross_entropy)) /\n num_logits)\n\n correct_predictions = tf.to_float(\n tf.nn.in_top_k(logits_flat, labels_flat, 1)) * mask_flat\n accuracy = tf.reduce_sum(correct_predictions) / num_logits * 100\n\n event_positions = (\n tf.to_float(tf.not_equal(labels_flat, no_event_label)) * mask_flat)\n event_accuracy = (\n tf.reduce_sum(tf.multiply(correct_predictions, event_positions)) /\n tf.reduce_sum(event_positions) * 100)\n\n no_event_positions = (\n tf.to_float(tf.equal(labels_flat, no_event_label)) * mask_flat)\n no_event_accuracy = (\n tf.reduce_sum(tf.multiply(correct_predictions, no_event_positions)) /\n tf.reduce_sum(no_event_positions) * 100)\n\n global_step = tf.Variable(0, trainable=False, name='global_step')\n\n tf.add_to_collection('loss', loss)\n tf.add_to_collection('perplexity', perplexity)\n tf.add_to_collection('accuracy', accuracy)\n tf.add_to_collection('global_step', global_step)\n\n summaries = [\n tf.summary.scalar('loss', loss),\n tf.summary.scalar('perplexity', perplexity),\n tf.summary.scalar('accuracy', accuracy),\n tf.summary.scalar(\n 'event_accuracy', event_accuracy),\n tf.summary.scalar(\n 'no_event_accuracy', no_event_accuracy),\n ]\n\n if mode == 'train':\n learning_rate = tf.train.exponential_decay(\n hparams.initial_learning_rate, global_step, hparams.decay_steps,\n hparams.decay_rate, staircase=True, name='learning_rate')\n\n opt = tf.train.AdamOptimizer(learning_rate)\n params = tf.trainable_variables()\n gradients = tf.gradients(loss, params)\n clipped_gradients, _ = tf.clip_by_global_norm(gradients,\n hparams.clip_norm)\n train_op = 
opt.apply_gradients(zip(clipped_gradients, params),\n global_step)\n tf.add_to_collection('learning_rate', learning_rate)\n tf.add_to_collection('train_op', train_op)\n\n summaries.append(tf.summary.scalar(\n 'learning_rate', learning_rate))\n\n if mode == 'eval':\n summary_op = tf.summary.merge(summaries)\n tf.add_to_collection('summary_op', summary_op)\n\n elif mode == 'generate':\n temperature = tf.placeholder(tf.float32, [])\n softmax_flat = tf.nn.softmax(\n tf.div(logits_flat, tf.fill([num_classes], temperature)))\n softmax = tf.reshape(softmax_flat, [hparams.batch_size, -1, num_classes])\n\n tf.add_to_collection('inputs', inputs)\n tf.add_to_collection('initial_state', initial_state)\n tf.add_to_collection('final_state', final_state)\n tf.add_to_collection('temperature', temperature)\n tf.add_to_collection('softmax', softmax)\n\n return graph\n",
"path": "magenta/models/shared/events_rnn_graph.py"
}
] | [
{
"content": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Provides function to build an event sequence RNN model's graph.\"\"\"\n\n# internal imports\nimport tensorflow as tf\nimport magenta\n\n\ndef make_rnn_cell(rnn_layer_sizes,\n dropout_keep_prob=1.0,\n attn_length=0,\n base_cell=tf.contrib.rnn.BasicLSTMCell):\n \"\"\"Makes a RNN cell from the given hyperparameters.\n\n Args:\n rnn_layer_sizes: A list of integer sizes (in units) for each layer of the\n RNN.\n dropout_keep_prob: The float probability to keep the output of any given\n sub-cell.\n attn_length: The size of the attention vector.\n base_cell: The base tf.contrib.rnn.RNNCell to use for sub-cells.\n\n Returns:\n A tf.contrib.rnn.MultiRNNCell based on the given hyperparameters.\n \"\"\"\n cells = []\n for num_units in rnn_layer_sizes:\n cell = base_cell(num_units)\n cell = tf.contrib.rnn.DropoutWrapper(\n cell, output_keep_prob=dropout_keep_prob)\n cells.append(cell)\n\n cell = tf.contrib.rnn.MultiRNNCell(cells)\n if attn_length:\n cell = tf.contrib.rnn.AttentionCellWrapper(\n cell, attn_length, state_is_tuple=True)\n\n return cell\n\n\ndef build_graph(mode, config, sequence_example_file_paths=None):\n \"\"\"Builds the TensorFlow graph.\n\n Args:\n mode: 'train', 'eval', or 'generate'. Only mode related ops are added to\n the graph.\n config: An EventSequenceRnnConfig containing the encoder/decoder and HParams\n to use.\n sequence_example_file_paths: A list of paths to TFRecord files containing\n tf.train.SequenceExample protos. Only needed for training and\n evaluation. May be a sharded file of the form.\n\n Returns:\n A tf.Graph instance which contains the TF ops.\n\n Raises:\n ValueError: If mode is not 'train', 'eval', or 'generate'.\n \"\"\"\n if mode not in ('train', 'eval', 'generate'):\n raise ValueError(\"The mode parameter must be 'train', 'eval', \"\n \"or 'generate'. 
The mode parameter was: %s\" % mode)\n\n hparams = config.hparams\n encoder_decoder = config.encoder_decoder\n\n tf.logging.info('hparams = %s', hparams.values())\n\n input_size = encoder_decoder.input_size\n num_classes = encoder_decoder.num_classes\n no_event_label = encoder_decoder.default_event_label\n\n with tf.Graph().as_default() as graph:\n inputs, labels, lengths, = None, None, None\n\n if mode == 'train' or mode == 'eval':\n inputs, labels, lengths = magenta.common.get_padded_batch(\n sequence_example_file_paths, hparams.batch_size, input_size)\n\n elif mode == 'generate':\n inputs = tf.placeholder(tf.float32, [hparams.batch_size, None,\n input_size])\n\n cell = make_rnn_cell(\n hparams.rnn_layer_sizes,\n dropout_keep_prob=(\n 1.0 if mode == 'generate' else hparams.dropout_keep_prob),\n attn_length=hparams.attn_length)\n\n initial_state = cell.zero_state(hparams.batch_size, tf.float32)\n\n outputs, final_state = tf.nn.dynamic_rnn(\n cell, inputs, initial_state=initial_state, swap_memory=True)\n\n outputs_flat = tf.reshape(outputs, [-1, cell.output_size])\n logits_flat = tf.contrib.layers.linear(outputs_flat, num_classes)\n\n if mode == 'train' or mode == 'eval':\n labels_flat = tf.reshape(labels, [-1])\n mask = tf.sequence_mask(lengths)\n if hparams.skip_first_n_losses:\n skip = tf.minimum(lengths, hparams.skip_first_n_losses)\n skip_mask = tf.sequence_mask(skip, maxlen=tf.reduce_max(lengths))\n mask = tf.logical_and(mask, tf.logical_not(skip_mask))\n mask = tf.cast(mask, tf.float32)\n mask_flat = tf.reshape(mask, [-1])\n\n num_logits = tf.to_float(tf.reduce_sum(lengths))\n\n with tf.control_dependencies(\n [tf.Assert(tf.greater(num_logits, 0.), [num_logits])]):\n softmax_cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=labels_flat, logits=logits_flat)\n loss = tf.reduce_sum(mask_flat * softmax_cross_entropy) / num_logits\n perplexity = (tf.reduce_sum(mask_flat * tf.exp(softmax_cross_entropy)) /\n num_logits)\n\n correct_predictions = tf.to_float(\n tf.nn.in_top_k(logits_flat, labels_flat, 1)) * mask_flat\n accuracy = tf.reduce_sum(correct_predictions) / num_logits * 100\n\n event_positions = (\n tf.to_float(tf.not_equal(labels_flat, no_event_label)) * mask_flat)\n event_accuracy = (\n tf.reduce_sum(tf.multiply(correct_predictions, event_positions)) /\n tf.reduce_sum(event_positions) * 100)\n\n no_event_positions = (\n tf.to_float(tf.equal(labels_flat, no_event_label)) * mask_flat)\n no_event_accuracy = (\n tf.reduce_sum(tf.multiply(correct_predictions, no_event_positions)) /\n tf.reduce_sum(no_event_positions) * 100)\n\n global_step = tf.Variable(0, trainable=False, name='global_step')\n\n tf.add_to_collection('loss', loss)\n tf.add_to_collection('perplexity', perplexity)\n tf.add_to_collection('accuracy', accuracy)\n tf.add_to_collection('global_step', global_step)\n\n summaries = [\n tf.summary.scalar('loss', loss),\n tf.summary.scalar('perplexity', perplexity),\n tf.summary.scalar('accuracy', accuracy),\n tf.summary.scalar(\n 'event_accuracy', event_accuracy),\n tf.summary.scalar(\n 'no_event_accuracy', no_event_accuracy),\n ]\n\n if mode == 'train':\n learning_rate = tf.train.exponential_decay(\n hparams.initial_learning_rate, global_step, hparams.decay_steps,\n hparams.decay_rate, staircase=True, name='learning_rate')\n\n opt = tf.train.AdamOptimizer(learning_rate)\n params = tf.trainable_variables()\n gradients = tf.gradients(loss, params)\n clipped_gradients, _ = tf.clip_by_global_norm(gradients,\n hparams.clip_norm)\n train_op = 
opt.apply_gradients(zip(clipped_gradients, params),\n global_step)\n tf.add_to_collection('learning_rate', learning_rate)\n tf.add_to_collection('train_op', train_op)\n\n summaries.append(tf.summary.scalar(\n 'learning_rate', learning_rate))\n\n if mode == 'eval':\n summary_op = tf.summary.merge(summaries)\n tf.add_to_collection('summary_op', summary_op)\n\n elif mode == 'generate':\n temperature = tf.placeholder(tf.float32, [])\n softmax_flat = tf.nn.softmax(\n tf.div(logits_flat, tf.fill([num_classes], temperature)))\n softmax = tf.reshape(softmax_flat, [hparams.batch_size, -1, num_classes])\n\n tf.add_to_collection('inputs', inputs)\n tf.add_to_collection('initial_state', initial_state)\n tf.add_to_collection('final_state', final_state)\n tf.add_to_collection('temperature', temperature)\n tf.add_to_collection('softmax', softmax)\n\n return graph\n",
"path": "magenta/models/shared/events_rnn_graph.py"
}
] | diff --git a/magenta/models/shared/events_rnn_graph.py b/magenta/models/shared/events_rnn_graph.py
index c4e6f7dd02..3b58b22d7e 100644
--- a/magenta/models/shared/events_rnn_graph.py
+++ b/magenta/models/shared/events_rnn_graph.py
@@ -44,7 +44,8 @@ def make_rnn_cell(rnn_layer_sizes,
cell = tf.contrib.rnn.MultiRNNCell(cells)
if attn_length:
- cell = tf.contrib.rnn.AttentionCellWrapper(cell, attn_length)
+ cell = tf.contrib.rnn.AttentionCellWrapper(
+ cell, attn_length, state_is_tuple=True)
return cell
|
projectmesa__mesa-826 | Push new Mesa release
We are overdue for an official release. Before I push one, does anyone have anything they really want to get in, or should I just tag and release?
Discuss.
| [
{
"content": "# -*- coding: utf-8 -*-\n\"\"\"\nMesa Agent-Based Modeling Framework\n\nCore Objects: Model, and Agent.\n\n\"\"\"\nimport datetime\n\nfrom .model import Model\nfrom .agent import Agent\n\n\n__all__ = [\"Model\", \"Agent\"]\n\n__title__ = \"mesa\"\n__version__ = \"0.8.6\"\n__license__ = \"Apache 2.0\"\n__copyright__ = \"Copyright %s Project Mesa Team\" % datetime.date.today().year\n",
"path": "mesa/__init__.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n\"\"\"\nMesa Agent-Based Modeling Framework\n\nCore Objects: Model, and Agent.\n\n\"\"\"\nimport datetime\n\nfrom .model import Model\nfrom .agent import Agent\n\n\n__all__ = [\"Model\", \"Agent\"]\n\n__title__ = \"mesa\"\n__version__ = \"0.8.7\"\n__license__ = \"Apache 2.0\"\n__copyright__ = \"Copyright %s Project Mesa Team\" % datetime.date.today().year\n",
"path": "mesa/__init__.py"
}
] | diff --git a/HISTORY.rst b/HISTORY.rst
index 9a6869ac774..2be43bafb32 100644
--- a/HISTORY.rst
+++ b/HISTORY.rst
@@ -3,6 +3,74 @@
Release History
---------------
+0.8.7 (2020-05-XX) Lake Havasu City
++++++++++++++++++++++++++++++++++++++++++++
+
+**Improvements**
+
+* Enable BatchRunner to run specified set of parameter combinations #651 (#607)
+* Restructured runcontrol.js #661
+* Add pipenv support for mesa #678
+* Increase test coverage and change to codecov #692
+* Updates Travis to explicitly set the dist to be Xenial #699
+* time: Remove resolved TODO on random seed of random scheduler #708
+* hex_snowflake: Update description to be more informative #712
+* Added Coverall to Codecov in Contributing file #734
+* Makes opening the browser optional when launching the server #755 #754
+* NetworkGrid: Update to networkx 2.4 API #763
+* Apply black to mesa/ directory #775
+* Updated travis to 3.8 and updated gitignore #777
+* Add information (to docstring) on image as agent portrayal shape #791
+* Change grid empties from list to set #649 (improves speed)
+* Adding mypy annotation
+ * space: Add type annotation to Grid class #779
+ * add Mypy annotation to time, agent, and model #792
+ * space: Add mypy annotation to the remaining methods/functions #796
+* Docs related
+ * Bulk merge of docs from 'docs' to 'master' #684
+ * Created useful snippets code section in the docs #668 #669
+ * Updating index.rst #672
+ * Clarify runserver snippet in index.rst #682
+ * Add documentation for feature (pipenv) added in #678 #683
+ * Add docs for BatchRunner to support Variable and Fixed Parameter Contribution #679 #683
+ * Resources #651 in docs branch #691. This preps for #683 to be merged.
+ * intro tutorial: Clarify a function that is not defined in the class #705
+ * Updates formatting the readme Docs markdown #737
+* Examples related
+ * Schelling: Separate text-only viz into run_ascii.py #706
+ * examples/Readme.md: Update description to be consistent with the folder names #707
+
+**Fixes**
+
+* Fixes link to update code coverage module - Updates Removing last link to coveralls and replacing to codecoverage #748
+* Fixes D3 Network Visualization to update (rather than overwrite) #765 #767
+* Fix parameter order in initializing SingleGrid object #770 #769
+* Updating pipenv link #773
+* Fixed pip install from github by specifying egg #802
+* Compatibility fixes
+ * Fixes VisualizationServer to be compatible with recent versions of Tornado #655
+ * Fixes #749 networkx incompatibility #750
+* Fixing typos
+ * Fixes documentation typos in example code #695 #696
+ * Fixes typo in ModularServer's last parameter #711
+ * Fixed typo in BarChartModule line 100 #747
+ * Fix typo in documentation #809
+* Doc fixes (not relating to typos)
+ * Update tutorial to point to correct repo location #671 #670
+ * Updating sphinx and reverting issues #674 #675 #677 #681
+ * Fixes code blocks that weren't showing up in the tutorial #686
+ * Remove figure from advanced tutorial showing the empty visualization #729
+ * Removes git clone from tutorial - Update intro_tutorial.rst #730
+ * Fixes citation links in docs tutorial section #736
+ * Fix histogram in advanced tutorial #794 #610
+ * Fixes Advanced Tutorial #elements #804 #803
+* Fixes to examples
+ * Fixing test_random_walk bug - wolf sheep. #821
+ * Fixes shape_example server launch #762 #756
+ * Fixing broken table in pd_grid example #824
+
+
+
0.8.6 (2019-05-02) Lake Havasu City
+++++++++++++++++++++++++++++++++++++++++++
diff --git a/mesa/__init__.py b/mesa/__init__.py
index c54d71e9eb0..a2598b0890d 100644
--- a/mesa/__init__.py
+++ b/mesa/__init__.py
@@ -14,6 +14,6 @@
__all__ = ["Model", "Agent"]
__title__ = "mesa"
-__version__ = "0.8.6"
+__version__ = "0.8.7"
__license__ = "Apache 2.0"
__copyright__ = "Copyright %s Project Mesa Team" % datetime.date.today().year
|
mindsdb__mindsdb-28 | IndexError: list index out of range when missing predict value
**Is your feature request related to a problem? Please describe.**
When an empty string is provided as the predict value, e.g.:
```
result = mdb.predict(predict=' ', model_name='home_rentals')
```
an `IndexError: list index out of range` error is thrown.
**Describe the solution you'd like**
A user-friendly error message should be raised instead, e.g.:
ValueError: Please provide valid predict value
**Additional context**
We can check for empty predict values in https://github.com/mindsdb/main/blob/76c691c4b18a4723626dfcbff8228da614d93e8b/mindsdb/libs/controllers/mindsdb_controller.py#L170 and raise a ValueError if predict is not provided.
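For illustration, a minimal sketch of the suggested guard (the standalone helper name below is hypothetical; in the controller the same check would simply sit at the top of `predict()` before the transaction is built):
```
def validate_predict_value(predict):
    """Reject empty predict values (None, '', or []) with a clear error."""
    if not predict:
        raise ValueError('Please provide valid predict value.')
    # Mirror the controller's normalization: a single column name becomes a one-item list.
    return [predict] if not isinstance(predict, list) else predict

print(validate_predict_value('rental_price'))  # ['rental_price']
# validate_predict_value('')  # would raise ValueError: Please provide valid predict value.
```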
| [
{
"content": "import sqlite3\nimport pandas\nimport requests\nimport logging\nimport os\nimport platform\nimport _thread\nimport uuid\nimport traceback\nimport urllib\n\nfrom mindsdb.libs.helpers.sqlite_helpers import *\nfrom mindsdb.libs.helpers.multi_data_source import getDS\nfrom mindsdb.config import SQLITE_FILE\nimport mindsdb.config as CONFIG\n\nfrom mindsdb.libs.data_types.transaction_metadata import TransactionMetadata\nfrom mindsdb.libs.controllers.session_controller import SessionController\nfrom mindsdb.libs.constants.mindsdb import *\n\nfrom mindsdb.version import mindsdb_version as MINDSDB_VERSION\n\nfrom pathlib import Path\n\nclass MindsDBController:\n\n def __init__(self, file=SQLITE_FILE):\n \"\"\"\n\n :param file:\n \"\"\"\n\n self.setConfigs()\n\n _thread.start_new_thread(MindsDBController.checkForUpdates, ())\n self.session = SessionController()\n self.storage_file = file\n self.conn = sqlite3.connect(file)\n self.conn.create_aggregate(\"first_value\", 1, FirstValueAgg)\n self.conn.create_aggregate(\"array_agg_json\", 2, ArrayAggJSON)\n\n def setConfigs(self):\n \"\"\"\n This sets the config settings for this mindsdb instance\n :return:\n \"\"\"\n # set logging settings\n logging.basicConfig(**CONFIG.PROXY_LOG_CONFIG)\n\n # set the mindsdb storage folder\n storage_ok = True # default state\n\n # if it does not exist try to create it\n if not os.path.exists(CONFIG.MINDSDB_STORAGE_PATH):\n try:\n logging.info('{folder} does not exist, creating it now'.format(folder=CONFIG.MINDSDB_STORAGE_PATH))\n path = Path(CONFIG.MINDSDB_STORAGE_PATH)\n path.mkdir(exist_ok=True, parents=True)\n except:\n logging.info(traceback.format_exc())\n storage_ok = False\n logging.error('MindsDB storate foldler: {folder} does not exist and could not be created'.format(folder=CONFIG.MINDSDB_STORAGE_PATH))\n\n # If storage path is not writable, raise an exception as this can no longer be\n if not os.access(CONFIG.MINDSDB_STORAGE_PATH, os.W_OK) or storage_ok == False:\n error_message = '''Cannot write into storage path, please either set the config variable mindsdb.config.set('MINDSDB_STORAGE_PATH',<path>) or give write access to {folder}'''\n raise ValueError(error_message.format(folder=CONFIG.MINDSDB_STORAGE_PATH))\n\n\n def addTable(self, ds, as_table):\n \"\"\"\n\n :param ds:\n :param as_table:\n :return:\n \"\"\"\n\n ds.df.to_sql(as_table, self.conn, if_exists='replace', index=False)\n\n def query(self, query):\n \"\"\"\n\n :param query:\n :return:\n \"\"\"\n\n cur = self.conn.cursor()\n return cur.execute(query)\n\n def queryToDF(self, query):\n \"\"\"\n\n :param query:\n :return:\n \"\"\"\n\n return pandas.read_sql_query(query, self.conn)\n\n\n def setUserEmail(self, email):\n \"\"\"\n\n :param email:\n :return:\n \"\"\"\n email_file = CONFIG.MINDSDB_STORAGE_PATH + '/../email.mdb_base'\n\n\n try:\n open(email_file, 'w').write(email)\n return True\n except:\n logging.warning('Cannot store token, Please add write permissions to file:' + email_file)\n return False\n\n def getUserEmail(self):\n \"\"\"\n\n :return:\n \"\"\"\n email_file = CONFIG.MINDSDB_STORAGE_PATH + '/../email.mdb_base'\n email_file_path = Path(email_file)\n\n try:\n if email_file_path.is_file():\n email = open(email_file, 'r').read()\n return email\n else:\n return None\n except:\n logging.warning('Cannot read email, Please add write permissions to file:' + email_file)\n return None\n\n def learn(self, predict, from_file=None, from_data = None, model_name='mdsb_model', test_from_data=None, group_by = None, window_size = 
MODEL_GROUP_BY_DEAFAULT_LIMIT, order_by = [], breakpoint = PHASE_END, ignore_columns = []):\n \"\"\"\n\n :param from_query:\n :param predict:\n :param model_name:\n :param test_query:\n :return:\n \"\"\"\n\n from_ds = getDS(from_data) if from_file is None else getDS(from_file)\n test_from_ds = test_from_data if test_from_data is None else getDS(test_from_data)\n\n transaction_type = TRANSACTION_LEARN\n\n predict_columns = [predict] if type(predict) != type([]) else predict\n\n transaction_metadata = TransactionMetadata()\n transaction_metadata.model_name = model_name\n transaction_metadata.model_predict_columns = predict_columns\n transaction_metadata.model_group_by = group_by\n transaction_metadata.model_order_by = order_by if type(order_by) == type([]) else [order_by]\n transaction_metadata.window_size = window_size\n transaction_metadata.type = transaction_type\n transaction_metadata.from_data = from_ds\n transaction_metadata.test_from_data = test_from_ds\n transaction_metadata.ignore_columns = ignore_columns\n\n self.startInfoServer()\n self.session.newTransaction(transaction_metadata, breakpoint)\n\n\n def startInfoServer(self):\n pass\n\n def predict(self, predict, from_data = None, when={}, model_name='mdsb_model', breakpoint= PHASE_END):\n \"\"\"\n\n :param predict:\n :param when:\n :param model_name:\n :return:\n \"\"\"\n\n transaction_type = TRANSACTION_PREDICT\n\n from_ds = None if from_data is None else getDS(from_data)\n\n predict_columns = [predict] if type(predict) != type([]) else predict\n\n transaction_metadata = TransactionMetadata()\n transaction_metadata.model_name = model_name\n transaction_metadata.model_predict_columns = predict_columns\n transaction_metadata.model_when_conditions = when\n transaction_metadata.type = transaction_type\n transaction_metadata.storage_file = self.storage_file\n transaction_metadata.from_data = from_ds\n\n transaction = self.session.newTransaction(transaction_metadata, breakpoint)\n\n return transaction.output_data\n\n @staticmethod\n def checkForUpdates():\n # tmp files\n uuid_file = CONFIG.MINDSDB_STORAGE_PATH + '/../uuid.mdb_base'\n mdb_file = CONFIG.MINDSDB_STORAGE_PATH + '/start.mdb_base'\n\n uuid_file_path = Path(uuid_file)\n if uuid_file_path.is_file():\n uuid_str = open(uuid_file).read()\n else:\n uuid_str = str(uuid.uuid4())\n try:\n open(uuid_file, 'w').write(uuid_str)\n except:\n logging.warning('Cannot store token, Please add write permissions to file:' + uuid_file)\n uuid_str = uuid_str + '.NO_WRITE'\n\n file_path = Path(mdb_file)\n if file_path.is_file():\n token = open(mdb_file).read()\n else:\n token = '{system}|{version}|{uid}'.format(system=platform.system(), version=MINDSDB_VERSION, uid=uuid_str)\n try:\n open(mdb_file,'w').write(token)\n except:\n logging.warning('Cannot store token, Please add write permissions to file:'+mdb_file)\n token = token+'.NO_WRITE'\n extra = urllib.parse.quote_plus(token)\n try:\n r = requests.get('http://mindsdb.com/updates/check/{extra}'.format(extra=extra), headers={'referer': 'http://check.mindsdb.com/?token={token}'.format(token=token)})\n except:\n logging.warning('Could not check for updates')\n return\n try:\n # TODO: Extract version, compare with version in version.py\n ret = r.json()\n\n if 'version' in ret and ret['version']!= MINDSDB_VERSION:\n logging.warning(\"There is a new version of MindsDB {version}, please do:\\n pip3 uninstall mindsdb\\n pip2 install mindsdb --user\".format(version=ret['version']))\n else:\n logging.debug('MindsDB is up to date!')\n\n except:\n\n 
logging.warning('could not check for MindsDB updates')\n\n\n",
"path": "mindsdb/libs/controllers/mindsdb_controller.py"
}
] | [
{
"content": "import sqlite3\nimport pandas\nimport requests\nimport logging\nimport os\nimport platform\nimport _thread\nimport uuid\nimport traceback\nimport urllib\n\nfrom mindsdb.libs.helpers.sqlite_helpers import *\nfrom mindsdb.libs.helpers.multi_data_source import getDS\nfrom mindsdb.config import SQLITE_FILE\nimport mindsdb.config as CONFIG\n\nfrom mindsdb.libs.data_types.transaction_metadata import TransactionMetadata\nfrom mindsdb.libs.controllers.session_controller import SessionController\nfrom mindsdb.libs.constants.mindsdb import *\n\nfrom mindsdb.version import mindsdb_version as MINDSDB_VERSION\n\nfrom pathlib import Path\n\nclass MindsDBController:\n\n def __init__(self, file=SQLITE_FILE):\n \"\"\"\n\n :param file:\n \"\"\"\n\n self.setConfigs()\n\n _thread.start_new_thread(MindsDBController.checkForUpdates, ())\n self.session = SessionController()\n self.storage_file = file\n self.conn = sqlite3.connect(file)\n self.conn.create_aggregate(\"first_value\", 1, FirstValueAgg)\n self.conn.create_aggregate(\"array_agg_json\", 2, ArrayAggJSON)\n\n def setConfigs(self):\n \"\"\"\n This sets the config settings for this mindsdb instance\n :return:\n \"\"\"\n # set logging settings\n logging.basicConfig(**CONFIG.PROXY_LOG_CONFIG)\n\n # set the mindsdb storage folder\n storage_ok = True # default state\n\n # if it does not exist try to create it\n if not os.path.exists(CONFIG.MINDSDB_STORAGE_PATH):\n try:\n logging.info('{folder} does not exist, creating it now'.format(folder=CONFIG.MINDSDB_STORAGE_PATH))\n path = Path(CONFIG.MINDSDB_STORAGE_PATH)\n path.mkdir(exist_ok=True, parents=True)\n except:\n logging.info(traceback.format_exc())\n storage_ok = False\n logging.error('MindsDB storate foldler: {folder} does not exist and could not be created'.format(folder=CONFIG.MINDSDB_STORAGE_PATH))\n\n # If storage path is not writable, raise an exception as this can no longer be\n if not os.access(CONFIG.MINDSDB_STORAGE_PATH, os.W_OK) or storage_ok == False:\n error_message = '''Cannot write into storage path, please either set the config variable mindsdb.config.set('MINDSDB_STORAGE_PATH',<path>) or give write access to {folder}'''\n raise ValueError(error_message.format(folder=CONFIG.MINDSDB_STORAGE_PATH))\n\n\n def addTable(self, ds, as_table):\n \"\"\"\n\n :param ds:\n :param as_table:\n :return:\n \"\"\"\n\n ds.df.to_sql(as_table, self.conn, if_exists='replace', index=False)\n\n def query(self, query):\n \"\"\"\n\n :param query:\n :return:\n \"\"\"\n\n cur = self.conn.cursor()\n return cur.execute(query)\n\n def queryToDF(self, query):\n \"\"\"\n\n :param query:\n :return:\n \"\"\"\n\n return pandas.read_sql_query(query, self.conn)\n\n\n def setUserEmail(self, email):\n \"\"\"\n\n :param email:\n :return:\n \"\"\"\n email_file = CONFIG.MINDSDB_STORAGE_PATH + '/../email.mdb_base'\n\n\n try:\n open(email_file, 'w').write(email)\n return True\n except:\n logging.warning('Cannot store token, Please add write permissions to file:' + email_file)\n return False\n\n def getUserEmail(self):\n \"\"\"\n\n :return:\n \"\"\"\n email_file = CONFIG.MINDSDB_STORAGE_PATH + '/../email.mdb_base'\n email_file_path = Path(email_file)\n\n try:\n if email_file_path.is_file():\n email = open(email_file, 'r').read()\n return email\n else:\n return None\n except:\n logging.warning('Cannot read email, Please add write permissions to file:' + email_file)\n return None\n\n def learn(self, predict, from_file=None, from_data = None, model_name='mdsb_model', test_from_data=None, group_by = None, window_size = 
MODEL_GROUP_BY_DEAFAULT_LIMIT, order_by = [], breakpoint = PHASE_END, ignore_columns = []):\n \"\"\"\n\n :param from_query:\n :param predict:\n :param model_name:\n :param test_query:\n :return:\n \"\"\"\n\n from_ds = getDS(from_data) if from_file is None else getDS(from_file)\n test_from_ds = test_from_data if test_from_data is None else getDS(test_from_data)\n\n transaction_type = TRANSACTION_LEARN\n\n predict_columns = [predict] if type(predict) != type([]) else predict\n\n transaction_metadata = TransactionMetadata()\n transaction_metadata.model_name = model_name\n transaction_metadata.model_predict_columns = predict_columns\n transaction_metadata.model_group_by = group_by\n transaction_metadata.model_order_by = order_by if type(order_by) == type([]) else [order_by]\n transaction_metadata.window_size = window_size\n transaction_metadata.type = transaction_type\n transaction_metadata.from_data = from_ds\n transaction_metadata.test_from_data = test_from_ds\n transaction_metadata.ignore_columns = ignore_columns\n\n self.startInfoServer()\n self.session.newTransaction(transaction_metadata, breakpoint)\n\n\n def startInfoServer(self):\n pass\n\n def predict(self, predict, from_data = None, when={}, model_name='mdsb_model', breakpoint= PHASE_END):\n \"\"\"\n\n :param predict:\n :param when:\n :param model_name:\n :return:\n \"\"\"\n \n if not predict:\n raise ValueError('Please provide valid predict value.')\n\n transaction_type = TRANSACTION_PREDICT\n\n from_ds = None if from_data is None else getDS(from_data)\n\n predict_columns = [predict] if type(predict) != type([]) else predict\n\n transaction_metadata = TransactionMetadata()\n transaction_metadata.model_name = model_name\n transaction_metadata.model_predict_columns = predict_columns\n transaction_metadata.model_when_conditions = when\n transaction_metadata.type = transaction_type\n transaction_metadata.storage_file = self.storage_file\n transaction_metadata.from_data = from_ds\n\n transaction = self.session.newTransaction(transaction_metadata, breakpoint)\n\n return transaction.output_data\n\n @staticmethod\n def checkForUpdates():\n # tmp files\n uuid_file = CONFIG.MINDSDB_STORAGE_PATH + '/../uuid.mdb_base'\n mdb_file = CONFIG.MINDSDB_STORAGE_PATH + '/start.mdb_base'\n\n uuid_file_path = Path(uuid_file)\n if uuid_file_path.is_file():\n uuid_str = open(uuid_file).read()\n else:\n uuid_str = str(uuid.uuid4())\n try:\n open(uuid_file, 'w').write(uuid_str)\n except:\n logging.warning('Cannot store token, Please add write permissions to file:' + uuid_file)\n uuid_str = uuid_str + '.NO_WRITE'\n\n file_path = Path(mdb_file)\n if file_path.is_file():\n token = open(mdb_file).read()\n else:\n token = '{system}|{version}|{uid}'.format(system=platform.system(), version=MINDSDB_VERSION, uid=uuid_str)\n try:\n open(mdb_file,'w').write(token)\n except:\n logging.warning('Cannot store token, Please add write permissions to file:'+mdb_file)\n token = token+'.NO_WRITE'\n extra = urllib.parse.quote_plus(token)\n try:\n r = requests.get('http://mindsdb.com/updates/check/{extra}'.format(extra=extra), headers={'referer': 'http://check.mindsdb.com/?token={token}'.format(token=token)})\n except:\n logging.warning('Could not check for updates')\n return\n try:\n # TODO: Extract version, compare with version in version.py\n ret = r.json()\n\n if 'version' in ret and ret['version']!= MINDSDB_VERSION:\n logging.warning(\"There is a new version of MindsDB {version}, please do:\\n pip3 uninstall mindsdb\\n pip2 install mindsdb 
--user\".format(version=ret['version']))\n else:\n logging.debug('MindsDB is up to date!')\n\n except:\n\n logging.warning('could not check for MindsDB updates')\n\n\n",
"path": "mindsdb/libs/controllers/mindsdb_controller.py"
}
] | diff --git a/mindsdb/libs/controllers/mindsdb_controller.py b/mindsdb/libs/controllers/mindsdb_controller.py
index 149e2533f88..4d70d3ae767 100644
--- a/mindsdb/libs/controllers/mindsdb_controller.py
+++ b/mindsdb/libs/controllers/mindsdb_controller.py
@@ -174,6 +174,9 @@ def predict(self, predict, from_data = None, when={}, model_name='mdsb_model', b
:param model_name:
:return:
"""
+
+ if not predict:
+ raise ValueError('Please provide valid predict value.')
transaction_type = TRANSACTION_PREDICT
|
pytorch__ignite-2826 | WandBLogger and TensorboardLogger have different APIs for logging audio
## 🚀 Feature
The following code doesn't work:
```
logger = WandBLogger()
logger.writer
```
This is how you would typically add audio with a tensorboard logger:
```
logger.writer.add_audio('mixture', x.t(), engine.state.epoch)
```
The workaround (similar to the one discussed in https://github.com/Lightning-AI/lightning/issues/7028) would be to use the underlying `_wandb` object:
```
logger._wandb.log({"validation": [wandb.Audio(x.t(), caption="mixture", sample_rate=44100)]})
logger._wandb.log({"validation": [wandb.Audio(x.t(), caption="vocals", sample_rate=44100)]})
```
Is there a proposal for a standardized media logging API?
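One possible direction, sketched below under the assumption that each logger keeps a handle to its backend: forward unknown attribute lookups to the wrapped writer, so calls such as `add_audio` work directly on the logger. The class name is illustrative only, not the actual ignite API:
```
from torch.utils.tensorboard import SummaryWriter

class DelegatingTbLogger:
    """Illustrative sketch: expose the full SummaryWriter API through the logger."""

    def __init__(self, *args, **kwargs):
        self.writer = SummaryWriter(*args, **kwargs)

    def __getattr__(self, attr):
        # Invoked only when the attribute is not found on the logger itself,
        # so e.g. logger.add_audio(...) falls through to the underlying writer.
        return getattr(self.writer, attr)

logger = DelegatingTbLogger(log_dir="experiments/tb_logs")
# logger.add_audio('mixture', x.t(), engine.state.epoch)  # now works without logger.writer
```
A `WandBLogger` could in principle delegate to its underlying run object in the same way, which would narrow the API gap without introducing a new media-logging abstraction.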
| [
{
"content": "\"\"\"TensorBoard logger and its helper handlers.\"\"\"\nfrom typing import Any, Callable, List, Optional, Union\n\nfrom torch.optim import Optimizer\n\nfrom ignite.contrib.handlers.base_logger import (\n BaseLogger,\n BaseOptimizerParamsHandler,\n BaseOutputHandler,\n BaseWeightsHandler,\n BaseWeightsScalarHandler,\n)\nfrom ignite.engine import Engine, Events\nfrom ignite.handlers import global_step_from_engine\n\n__all__ = [\n \"TensorboardLogger\",\n \"OptimizerParamsHandler\",\n \"OutputHandler\",\n \"WeightsScalarHandler\",\n \"WeightsHistHandler\",\n \"GradsScalarHandler\",\n \"GradsHistHandler\",\n \"global_step_from_engine\",\n]\n\n\nclass TensorboardLogger(BaseLogger):\n \"\"\"\n TensorBoard handler to log metrics, model/optimizer parameters, gradients during the training and validation.\n\n By default, this class favors `tensorboardX <https://github.com/lanpa/tensorboardX>`_ package if installed:\n\n .. code-block:: bash\n\n pip install tensorboardX\n\n otherwise, it falls back to using\n `PyTorch's SummaryWriter\n <https://pytorch.org/docs/stable/tensorboard.html>`_\n (>=v1.2.0).\n\n Args:\n args: Positional arguments accepted from\n `SummaryWriter\n <https://pytorch.org/docs/stable/tensorboard.html>`_.\n kwargs: Keyword arguments accepted from\n `SummaryWriter\n <https://pytorch.org/docs/stable/tensorboard.html>`_.\n For example, `log_dir` to setup path to the directory where to log.\n\n Examples:\n .. code-block:: python\n\n from ignite.contrib.handlers.tensorboard_logger import *\n\n # Create a logger\n tb_logger = TensorboardLogger(log_dir=\"experiments/tb_logs\")\n\n # Attach the logger to the trainer to log training loss at each iteration\n tb_logger.attach_output_handler(\n trainer,\n event_name=Events.ITERATION_COMPLETED,\n tag=\"training\",\n output_transform=lambda loss: {\"loss\": loss}\n )\n\n # Attach the logger to the evaluator on the training dataset and log NLL, Accuracy metrics after each epoch\n # We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch\n # of the `trainer` instead of `train_evaluator`.\n tb_logger.attach_output_handler(\n train_evaluator,\n event_name=Events.EPOCH_COMPLETED,\n tag=\"training\",\n metric_names=[\"nll\", \"accuracy\"],\n global_step_transform=global_step_from_engine(trainer),\n )\n\n # Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after\n # each epoch. We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch of the\n # `trainer` instead of `evaluator`.\n tb_logger.attach_output_handler(\n evaluator,\n event_name=Events.EPOCH_COMPLETED,\n tag=\"validation\",\n metric_names=[\"nll\", \"accuracy\"],\n global_step_transform=global_step_from_engine(trainer)),\n )\n\n # Attach the logger to the trainer to log optimizer's parameters, e.g. 
learning rate at each iteration\n tb_logger.attach_opt_params_handler(\n trainer,\n event_name=Events.ITERATION_STARTED,\n optimizer=optimizer,\n param_name='lr' # optional\n )\n\n # Attach the logger to the trainer to log model's weights norm after each iteration\n tb_logger.attach(\n trainer,\n event_name=Events.ITERATION_COMPLETED,\n log_handler=WeightsScalarHandler(model)\n )\n\n # Attach the logger to the trainer to log model's weights as a histogram after each epoch\n tb_logger.attach(\n trainer,\n event_name=Events.EPOCH_COMPLETED,\n log_handler=WeightsHistHandler(model)\n )\n\n # Attach the logger to the trainer to log model's gradients norm after each iteration\n tb_logger.attach(\n trainer,\n event_name=Events.ITERATION_COMPLETED,\n log_handler=GradsScalarHandler(model)\n )\n\n # Attach the logger to the trainer to log model's gradients as a histogram after each epoch\n tb_logger.attach(\n trainer,\n event_name=Events.EPOCH_COMPLETED,\n log_handler=GradsHistHandler(model)\n )\n\n # We need to close the logger when we are done\n tb_logger.close()\n\n It is also possible to use the logger as context manager:\n\n .. code-block:: python\n\n from ignite.contrib.handlers.tensorboard_logger import *\n\n with TensorboardLogger(log_dir=\"experiments/tb_logs\") as tb_logger:\n\n trainer = Engine(update_fn)\n # Attach the logger to the trainer to log training loss at each iteration\n tb_logger.attach_output_handler(\n trainer,\n event_name=Events.ITERATION_COMPLETED,\n tag=\"training\",\n output_transform=lambda loss: {\"loss\": loss}\n )\n\n \"\"\"\n\n def __init__(self, *args: Any, **kwargs: Any):\n try:\n from tensorboardX import SummaryWriter\n except ImportError:\n try:\n from torch.utils.tensorboard import SummaryWriter # type: ignore[no-redef]\n except ImportError:\n raise ModuleNotFoundError(\n \"This contrib module requires either tensorboardX or torch >= 1.2.0. \"\n \"You may install tensorboardX with command: \\n pip install tensorboardX \\n\"\n \"or upgrade PyTorch using your package manager of choice (pip or conda).\"\n )\n\n self.writer = SummaryWriter(*args, **kwargs)\n\n def close(self) -> None:\n self.writer.close()\n\n def _create_output_handler(self, *args: Any, **kwargs: Any) -> \"OutputHandler\":\n return OutputHandler(*args, **kwargs)\n\n def _create_opt_params_handler(self, *args: Any, **kwargs: Any) -> \"OptimizerParamsHandler\":\n return OptimizerParamsHandler(*args, **kwargs)\n\n\nclass OutputHandler(BaseOutputHandler):\n \"\"\"Helper handler to log engine's output, engine's state attributes and/or metrics\n\n Args:\n tag: common title for all produced plots. For example, \"training\"\n metric_names: list of metric names to plot or a string \"all\" to plot all available\n metrics.\n output_transform: output transform function to prepare `engine.state.output` as a number.\n For example, `output_transform = lambda output: output`\n This function can also return a dictionary, e.g `{\"loss\": loss1, \"another_loss\": loss2}` to label the plot\n with corresponding keys.\n global_step_transform: global step transform function to output a desired global step.\n Input of the function is `(engine, event_name)`. Output of function should be an integer.\n Default is None, global_step based on attached engine. If provided,\n uses function output as global_step. 
To setup global step from another engine, please use\n :meth:`~ignite.contrib.handlers.tensorboard_logger.global_step_from_engine`.\n state_attributes: list of attributes of the ``trainer.state`` to plot.\n\n Examples:\n .. code-block:: python\n\n from ignite.contrib.handlers.tensorboard_logger import *\n\n # Create a logger\n tb_logger = TensorboardLogger(log_dir=\"experiments/tb_logs\")\n\n # Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after\n # each epoch. We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch\n # of the `trainer`:\n tb_logger.attach(\n evaluator,\n log_handler=OutputHandler(\n tag=\"validation\",\n metric_names=[\"nll\", \"accuracy\"],\n global_step_transform=global_step_from_engine(trainer)\n ),\n event_name=Events.EPOCH_COMPLETED\n )\n # or equivalently\n tb_logger.attach_output_handler(\n evaluator,\n event_name=Events.EPOCH_COMPLETED,\n tag=\"validation\",\n metric_names=[\"nll\", \"accuracy\"],\n global_step_transform=global_step_from_engine(trainer)\n )\n\n Another example, where model is evaluated every 500 iterations:\n\n .. code-block:: python\n\n from ignite.contrib.handlers.tensorboard_logger import *\n\n @trainer.on(Events.ITERATION_COMPLETED(every=500))\n def evaluate(engine):\n evaluator.run(validation_set, max_epochs=1)\n\n tb_logger = TensorboardLogger(log_dir=\"experiments/tb_logs\")\n\n def global_step_transform(*args, **kwargs):\n return trainer.state.iteration\n\n # Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after\n # every 500 iterations. Since evaluator engine does not have access to the training iteration, we\n # provide a global_step_transform to return the trainer.state.iteration for the global_step, each time\n # evaluator metrics are plotted on Tensorboard.\n\n tb_logger.attach_output_handler(\n evaluator,\n event_name=Events.EPOCH_COMPLETED,\n tag=\"validation\",\n metrics=[\"nll\", \"accuracy\"],\n global_step_transform=global_step_transform\n )\n\n Another example where the State Attributes ``trainer.state.alpha`` and ``trainer.state.beta``\n are also logged along with the NLL and Accuracy after each iteration:\n\n .. code-block:: python\n\n tb_logger.attach(\n trainer,\n log_handler=OutputHandler(\n tag=\"training\",\n metric_names=[\"nll\", \"accuracy\"],\n state_attributes=[\"alpha\", \"beta\"],\n ),\n event_name=Events.ITERATION_COMPLETED\n )\n\n Example of `global_step_transform`:\n\n .. code-block:: python\n\n def global_step_transform(engine, event_name):\n return engine.state.get_event_attrib_value(event_name)\n\n .. 
versionchanged:: 0.4.7\n accepts an optional list of `state_attributes`\n \"\"\"\n\n def __init__(\n self,\n tag: str,\n metric_names: Optional[List[str]] = None,\n output_transform: Optional[Callable] = None,\n global_step_transform: Optional[Callable[[Engine, Union[str, Events]], int]] = None,\n state_attributes: Optional[List[str]] = None,\n ):\n super(OutputHandler, self).__init__(\n tag, metric_names, output_transform, global_step_transform, state_attributes\n )\n\n def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[str, Events]) -> None:\n\n if not isinstance(logger, TensorboardLogger):\n raise RuntimeError(\"Handler 'OutputHandler' works only with TensorboardLogger\")\n\n metrics = self._setup_output_metrics_state_attrs(engine, key_tuple=False)\n\n global_step = self.global_step_transform(engine, event_name)\n if not isinstance(global_step, int):\n raise TypeError(\n f\"global_step must be int, got {type(global_step)}.\"\n \" Please check the output of global_step_transform.\"\n )\n\n for key, value in metrics.items():\n logger.writer.add_scalar(key, value, global_step)\n\n\nclass OptimizerParamsHandler(BaseOptimizerParamsHandler):\n \"\"\"Helper handler to log optimizer parameters\n\n Args:\n optimizer: torch optimizer or any object with attribute ``param_groups``\n as a sequence.\n param_name: parameter name\n tag: common title for all produced plots. For example, \"generator\"\n\n Examples:\n .. code-block:: python\n\n from ignite.contrib.handlers.tensorboard_logger import *\n\n # Create a logger\n tb_logger = TensorboardLogger(log_dir=\"experiments/tb_logs\")\n\n # Attach the logger to the trainer to log optimizer's parameters, e.g. learning rate at each iteration\n tb_logger.attach(\n trainer,\n log_handler=OptimizerParamsHandler(optimizer),\n event_name=Events.ITERATION_STARTED\n )\n # or equivalently\n tb_logger.attach_opt_params_handler(\n trainer,\n event_name=Events.ITERATION_STARTED,\n optimizer=optimizer\n )\n \"\"\"\n\n def __init__(self, optimizer: Optimizer, param_name: str = \"lr\", tag: Optional[str] = None):\n super(OptimizerParamsHandler, self).__init__(optimizer, param_name, tag)\n\n def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[str, Events]) -> None:\n if not isinstance(logger, TensorboardLogger):\n raise RuntimeError(\"Handler OptimizerParamsHandler works only with TensorboardLogger\")\n\n global_step = engine.state.get_event_attrib_value(event_name)\n tag_prefix = f\"{self.tag}/\" if self.tag else \"\"\n params = {\n f\"{tag_prefix}{self.param_name}/group_{i}\": float(param_group[self.param_name])\n for i, param_group in enumerate(self.optimizer.param_groups)\n }\n\n for k, v in params.items():\n logger.writer.add_scalar(k, v, global_step)\n\n\nclass WeightsScalarHandler(BaseWeightsScalarHandler):\n \"\"\"Helper handler to log model's weights as scalars.\n Handler, upon construction, iterates over named parameters of the model and keep\n reference to ones permitted by `whitelist`. Then at every call, applies\n reduction function to each parameter, produces a scalar and logs it.\n\n Args:\n model: model to log weights\n reduction: function to reduce parameters into scalar\n tag: common title for all produced plots. For example, \"generator\"\n whitelist: specific weights to log. Should be list of model's submodules\n or parameters names, or a callable which gets weight along with its name\n and determines if it should be logged. 
Names should be fully-qualified.\n For more information please refer to `PyTorch docs\n <https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module.get_submodule>`_.\n If not given, all of model's weights are logged.\n\n Examples:\n .. code-block:: python\n\n from ignite.contrib.handlers.tensorboard_logger import *\n\n # Create a logger\n tb_logger = TensorboardLogger(log_dir=\"experiments/tb_logs\")\n\n # Attach the logger to the trainer to log model's weights norm after each iteration\n tb_logger.attach(\n trainer,\n event_name=Events.ITERATION_COMPLETED,\n log_handler=WeightsScalarHandler(model, reduction=torch.norm)\n )\n\n .. code-block:: python\n\n from ignite.contrib.handlers.tensorboard_logger import *\n\n tb_logger = TensorboardLogger(log_dir=\"experiments/tb_logs\")\n\n # Log only `fc` weights\n tb_logger.attach(\n trainer,\n event_name=Events.ITERATION_COMPLETED,\n log_handler=WeightsScalarHandler(\n model,\n whitelist=['fc']\n )\n )\n\n .. code-block:: python\n\n from ignite.contrib.handlers.tensorboard_logger import *\n\n tb_logger = TensorboardLogger(log_dir=\"experiments/tb_logs\")\n\n # Log weights which have `bias` in their names\n def has_bias_in_name(n, p):\n return 'bias' in n\n\n tb_logger.attach(\n trainer,\n event_name=Events.ITERATION_COMPLETED,\n log_handler=WeightsScalarHandler(model, whitelist=has_bias_in_name)\n )\n\n .. versionchanged:: 0.4.9\n optional argument `whitelist` added.\n \"\"\"\n\n def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[str, Events]) -> None:\n\n if not isinstance(logger, TensorboardLogger):\n raise RuntimeError(\"Handler 'WeightsScalarHandler' works only with TensorboardLogger\")\n\n global_step = engine.state.get_event_attrib_value(event_name)\n tag_prefix = f\"{self.tag}/\" if self.tag else \"\"\n for name, p in self.weights:\n\n name = name.replace(\".\", \"/\")\n logger.writer.add_scalar(\n f\"{tag_prefix}weights_{self.reduction.__name__}/{name}\",\n self.reduction(p.data),\n global_step,\n )\n\n\nclass WeightsHistHandler(BaseWeightsHandler):\n \"\"\"Helper handler to log model's weights as histograms.\n\n Args:\n model: model to log weights\n tag: common title for all produced plots. For example, \"generator\"\n whitelist: specific weights to log. Should be list of model's submodules\n or parameters names, or a callable which gets weight along with its name\n and determines if it should be logged. Names should be fully-qualified.\n For more information please refer to `PyTorch docs\n <https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module.get_submodule>`_.\n If not given, all of model's weights are logged.\n\n Examples:\n .. code-block:: python\n\n from ignite.contrib.handlers.tensorboard_logger import *\n\n # Create a logger\n tb_logger = TensorboardLogger(log_dir=\"experiments/tb_logs\")\n\n # Attach the logger to the trainer to log model's weights norm after each iteration\n tb_logger.attach(\n trainer,\n event_name=Events.ITERATION_COMPLETED,\n log_handler=WeightsHistHandler(model)\n )\n\n .. code-block:: python\n\n from ignite.contrib.handlers.tensorboard_logger import *\n\n tb_logger = TensorboardLogger(log_dir=\"experiments/tb_logs\")\n\n # Log weights of `fc` layer\n weights = ['fc']\n\n # Attach the logger to the trainer to log weights norm after each iteration\n tb_logger.attach(\n trainer,\n event_name=Events.ITERATION_COMPLETED,\n log_handler=WeightsHistHandler(model, whitelist=weights)\n )\n\n .. 
code-block:: python\n\n from ignite.contrib.handlers.tensorboard_logger import *\n\n tb_logger = TensorboardLogger(log_dir=\"experiments/tb_logs\")\n\n # Log weights which name include 'conv'.\n weight_selector = lambda name, p: 'conv' in name\n\n # Attach the logger to the trainer to log weights norm after each iteration\n tb_logger.attach(\n trainer,\n event_name=Events.ITERATION_COMPLETED,\n log_handler=WeightsHistHandler(model, whitelist=weight_selector)\n )\n\n .. versionchanged:: 0.4.9\n optional argument `whitelist` added.\n \"\"\"\n\n def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[str, Events]) -> None:\n if not isinstance(logger, TensorboardLogger):\n raise RuntimeError(\"Handler 'WeightsHistHandler' works only with TensorboardLogger\")\n\n global_step = engine.state.get_event_attrib_value(event_name)\n tag_prefix = f\"{self.tag}/\" if self.tag else \"\"\n for name, p in self.weights:\n\n name = name.replace(\".\", \"/\")\n logger.writer.add_histogram(\n tag=f\"{tag_prefix}weights/{name}\", values=p.data.cpu().numpy(), global_step=global_step\n )\n\n\nclass GradsScalarHandler(BaseWeightsScalarHandler):\n \"\"\"Helper handler to log model's gradients as scalars.\n Handler, upon construction, iterates over named parameters of the model and keep\n reference to ones permitted by the `whitelist`. Then at every call, applies\n reduction function to each parameter's gradient, produces a scalar and logs it.\n\n Args:\n model: model to log weights\n reduction: function to reduce parameters into scalar\n tag: common title for all produced plots. For example, \"generator\"\n whitelist: specific gradients to log. Should be list of model's submodules\n or parameters names, or a callable which gets weight along with its name\n and determines if its gradient should be logged. Names should be\n fully-qualified. For more information please refer to `PyTorch docs\n <https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module.get_submodule>`_.\n If not given, all of model's gradients are logged.\n\n Examples:\n .. code-block:: python\n\n from ignite.contrib.handlers.tensorboard_logger import *\n\n # Create a logger\n tb_logger = TensorboardLogger(log_dir=\"experiments/tb_logs\")\n\n # Attach the logger to the trainer to log model's gradients norm after each iteration\n tb_logger.attach(\n trainer,\n event_name=Events.ITERATION_COMPLETED,\n log_handler=GradsScalarHandler(model, reduction=torch.norm)\n )\n\n .. code-block:: python\n\n from ignite.contrib.handlers.tensorboard_logger import *\n\n tb_logger = TensorboardLogger(log_dir=\"experiments/tb_logs\")\n\n # Log gradient of `base`\n tb_logger.attach(\n trainer,\n event_name=Events.ITERATION_COMPLETED,\n log_handler=GradsScalarHandler(\n model,\n reduction=torch.norm,\n whitelist=['base']\n )\n )\n\n .. code-block:: python\n\n from ignite.contrib.handlers.tensorboard_logger import *\n\n tb_logger = TensorboardLogger(log_dir=\"experiments/tb_logs\")\n\n # Log gradient of weights which belong to a `fc` layer\n def is_in_fc_layer(n, p):\n return 'fc' in n\n\n tb_logger.attach(\n trainer,\n event_name=Events.ITERATION_COMPLETED,\n log_handler=GradsScalarHandler(model, whitelist=is_in_fc_layer)\n )\n\n .. 
versionchanged:: 0.4.9\n optional argument `whitelist` added.\n \"\"\"\n\n def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[str, Events]) -> None:\n if not isinstance(logger, TensorboardLogger):\n raise RuntimeError(\"Handler 'GradsScalarHandler' works only with TensorboardLogger\")\n\n global_step = engine.state.get_event_attrib_value(event_name)\n tag_prefix = f\"{self.tag}/\" if self.tag else \"\"\n for name, p in self.weights:\n if p.grad is None:\n continue\n\n name = name.replace(\".\", \"/\")\n logger.writer.add_scalar(\n f\"{tag_prefix}grads_{self.reduction.__name__}/{name}\", self.reduction(p.grad), global_step\n )\n\n\nclass GradsHistHandler(BaseWeightsHandler):\n \"\"\"Helper handler to log model's gradients as histograms.\n\n Args:\n model: model to log weights\n tag: common title for all produced plots. For example, \"generator\"\n whitelist: specific gradients to log. Should be list of model's submodules\n or parameters names, or a callable which gets weight along with its name\n and determines if its gradient should be logged. Names should be\n fully-qualified. For more information please refer to `PyTorch docs\n <https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module.get_submodule>`_.\n If not given, all of model's gradients are logged.\n\n Examples:\n .. code-block:: python\n\n from ignite.contrib.handlers.tensorboard_logger import *\n\n # Create a logger\n tb_logger = TensorboardLogger(log_dir=\"experiments/tb_logs\")\n\n # Attach the logger to the trainer to log model's weights norm after each iteration\n tb_logger.attach(\n trainer,\n event_name=Events.ITERATION_COMPLETED,\n log_handler=GradsHistHandler(model)\n )\n\n .. code-block:: python\n\n from ignite.contrib.handlers.tensorboard_logger import *\n\n tb_logger = TensorboardLogger(log_dir=\"experiments/tb_logs\")\n\n # Log gradient of `fc.bias`\n tb_logger.attach(\n trainer,\n event_name=Events.ITERATION_COMPLETED,\n log_handler=GradsHistHandler(model, whitelist=['fc.bias'])\n )\n\n .. code-block:: python\n\n from ignite.contrib.handlers.tensorboard_logger import *\n\n tb_logger = TensorboardLogger(log_dir=\"experiments/tb_logs\")\n\n # Log gradient of weights which have shape (2, 1)\n def has_shape_2_1(n, p):\n return p.shape == (2,1)\n\n tb_logger.attach(\n trainer,\n event_name=Events.ITERATION_COMPLETED,\n log_handler=GradsHistHandler(model, whitelist=has_shape_2_1)\n )\n\n .. versionchanged:: 0.4.9\n optional argument `whitelist` added.\n \"\"\"\n\n def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[str, Events]) -> None:\n if not isinstance(logger, TensorboardLogger):\n raise RuntimeError(\"Handler 'GradsHistHandler' works only with TensorboardLogger\")\n\n global_step = engine.state.get_event_attrib_value(event_name)\n tag_prefix = f\"{self.tag}/\" if self.tag else \"\"\n for name, p in self.weights:\n if p.grad is None:\n continue\n\n name = name.replace(\".\", \"/\")\n logger.writer.add_histogram(\n tag=f\"{tag_prefix}grads/{name}\", values=p.grad.cpu().numpy(), global_step=global_step\n )\n",
"path": "ignite/contrib/handlers/tensorboard_logger.py"
}
] | [
{
"content": "\"\"\"TensorBoard logger and its helper handlers.\"\"\"\nfrom typing import Any, Callable, List, Optional, Union\n\nfrom torch.optim import Optimizer\n\nfrom ignite.contrib.handlers.base_logger import (\n BaseLogger,\n BaseOptimizerParamsHandler,\n BaseOutputHandler,\n BaseWeightsHandler,\n BaseWeightsScalarHandler,\n)\nfrom ignite.engine import Engine, Events\nfrom ignite.handlers import global_step_from_engine\n\n__all__ = [\n \"TensorboardLogger\",\n \"OptimizerParamsHandler\",\n \"OutputHandler\",\n \"WeightsScalarHandler\",\n \"WeightsHistHandler\",\n \"GradsScalarHandler\",\n \"GradsHistHandler\",\n \"global_step_from_engine\",\n]\n\n\nclass TensorboardLogger(BaseLogger):\n \"\"\"\n TensorBoard handler to log metrics, model/optimizer parameters, gradients during the training and validation.\n\n By default, this class favors `tensorboardX <https://github.com/lanpa/tensorboardX>`_ package if installed:\n\n .. code-block:: bash\n\n pip install tensorboardX\n\n otherwise, it falls back to using\n `PyTorch's SummaryWriter\n <https://pytorch.org/docs/stable/tensorboard.html>`_\n (>=v1.2.0).\n\n Args:\n args: Positional arguments accepted from\n `SummaryWriter\n <https://pytorch.org/docs/stable/tensorboard.html>`_.\n kwargs: Keyword arguments accepted from\n `SummaryWriter\n <https://pytorch.org/docs/stable/tensorboard.html>`_.\n For example, `log_dir` to setup path to the directory where to log.\n\n Examples:\n .. code-block:: python\n\n from ignite.contrib.handlers.tensorboard_logger import *\n\n # Create a logger\n tb_logger = TensorboardLogger(log_dir=\"experiments/tb_logs\")\n\n # Attach the logger to the trainer to log training loss at each iteration\n tb_logger.attach_output_handler(\n trainer,\n event_name=Events.ITERATION_COMPLETED,\n tag=\"training\",\n output_transform=lambda loss: {\"loss\": loss}\n )\n\n # Attach the logger to the evaluator on the training dataset and log NLL, Accuracy metrics after each epoch\n # We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch\n # of the `trainer` instead of `train_evaluator`.\n tb_logger.attach_output_handler(\n train_evaluator,\n event_name=Events.EPOCH_COMPLETED,\n tag=\"training\",\n metric_names=[\"nll\", \"accuracy\"],\n global_step_transform=global_step_from_engine(trainer),\n )\n\n # Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after\n # each epoch. We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch of the\n # `trainer` instead of `evaluator`.\n tb_logger.attach_output_handler(\n evaluator,\n event_name=Events.EPOCH_COMPLETED,\n tag=\"validation\",\n metric_names=[\"nll\", \"accuracy\"],\n global_step_transform=global_step_from_engine(trainer)),\n )\n\n # Attach the logger to the trainer to log optimizer's parameters, e.g. 
learning rate at each iteration\n tb_logger.attach_opt_params_handler(\n trainer,\n event_name=Events.ITERATION_STARTED,\n optimizer=optimizer,\n param_name='lr' # optional\n )\n\n # Attach the logger to the trainer to log model's weights norm after each iteration\n tb_logger.attach(\n trainer,\n event_name=Events.ITERATION_COMPLETED,\n log_handler=WeightsScalarHandler(model)\n )\n\n # Attach the logger to the trainer to log model's weights as a histogram after each epoch\n tb_logger.attach(\n trainer,\n event_name=Events.EPOCH_COMPLETED,\n log_handler=WeightsHistHandler(model)\n )\n\n # Attach the logger to the trainer to log model's gradients norm after each iteration\n tb_logger.attach(\n trainer,\n event_name=Events.ITERATION_COMPLETED,\n log_handler=GradsScalarHandler(model)\n )\n\n # Attach the logger to the trainer to log model's gradients as a histogram after each epoch\n tb_logger.attach(\n trainer,\n event_name=Events.EPOCH_COMPLETED,\n log_handler=GradsHistHandler(model)\n )\n\n # We need to close the logger when we are done\n tb_logger.close()\n\n It is also possible to use the logger as context manager:\n\n .. code-block:: python\n\n from ignite.contrib.handlers.tensorboard_logger import *\n\n with TensorboardLogger(log_dir=\"experiments/tb_logs\") as tb_logger:\n\n trainer = Engine(update_fn)\n # Attach the logger to the trainer to log training loss at each iteration\n tb_logger.attach_output_handler(\n trainer,\n event_name=Events.ITERATION_COMPLETED,\n tag=\"training\",\n output_transform=lambda loss: {\"loss\": loss}\n )\n\n \"\"\"\n\n def __init__(self, *args: Any, **kwargs: Any):\n try:\n from tensorboardX import SummaryWriter\n except ImportError:\n try:\n from torch.utils.tensorboard import SummaryWriter # type: ignore[no-redef]\n except ImportError:\n raise ModuleNotFoundError(\n \"This contrib module requires either tensorboardX or torch >= 1.2.0. \"\n \"You may install tensorboardX with command: \\n pip install tensorboardX \\n\"\n \"or upgrade PyTorch using your package manager of choice (pip or conda).\"\n )\n\n self.writer = SummaryWriter(*args, **kwargs)\n\n def __getattr__(self, attr: Any) -> Any:\n return getattr(self.writer, attr)\n\n def close(self) -> None:\n self.writer.close()\n\n def _create_output_handler(self, *args: Any, **kwargs: Any) -> \"OutputHandler\":\n return OutputHandler(*args, **kwargs)\n\n def _create_opt_params_handler(self, *args: Any, **kwargs: Any) -> \"OptimizerParamsHandler\":\n return OptimizerParamsHandler(*args, **kwargs)\n\n\nclass OutputHandler(BaseOutputHandler):\n \"\"\"Helper handler to log engine's output, engine's state attributes and/or metrics\n\n Args:\n tag: common title for all produced plots. For example, \"training\"\n metric_names: list of metric names to plot or a string \"all\" to plot all available\n metrics.\n output_transform: output transform function to prepare `engine.state.output` as a number.\n For example, `output_transform = lambda output: output`\n This function can also return a dictionary, e.g `{\"loss\": loss1, \"another_loss\": loss2}` to label the plot\n with corresponding keys.\n global_step_transform: global step transform function to output a desired global step.\n Input of the function is `(engine, event_name)`. Output of function should be an integer.\n Default is None, global_step based on attached engine. If provided,\n uses function output as global_step. 
To setup global step from another engine, please use\n :meth:`~ignite.contrib.handlers.tensorboard_logger.global_step_from_engine`.\n state_attributes: list of attributes of the ``trainer.state`` to plot.\n\n Examples:\n .. code-block:: python\n\n from ignite.contrib.handlers.tensorboard_logger import *\n\n # Create a logger\n tb_logger = TensorboardLogger(log_dir=\"experiments/tb_logs\")\n\n # Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after\n # each epoch. We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch\n # of the `trainer`:\n tb_logger.attach(\n evaluator,\n log_handler=OutputHandler(\n tag=\"validation\",\n metric_names=[\"nll\", \"accuracy\"],\n global_step_transform=global_step_from_engine(trainer)\n ),\n event_name=Events.EPOCH_COMPLETED\n )\n # or equivalently\n tb_logger.attach_output_handler(\n evaluator,\n event_name=Events.EPOCH_COMPLETED,\n tag=\"validation\",\n metric_names=[\"nll\", \"accuracy\"],\n global_step_transform=global_step_from_engine(trainer)\n )\n\n Another example, where model is evaluated every 500 iterations:\n\n .. code-block:: python\n\n from ignite.contrib.handlers.tensorboard_logger import *\n\n @trainer.on(Events.ITERATION_COMPLETED(every=500))\n def evaluate(engine):\n evaluator.run(validation_set, max_epochs=1)\n\n tb_logger = TensorboardLogger(log_dir=\"experiments/tb_logs\")\n\n def global_step_transform(*args, **kwargs):\n return trainer.state.iteration\n\n # Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after\n # every 500 iterations. Since evaluator engine does not have access to the training iteration, we\n # provide a global_step_transform to return the trainer.state.iteration for the global_step, each time\n # evaluator metrics are plotted on Tensorboard.\n\n tb_logger.attach_output_handler(\n evaluator,\n event_name=Events.EPOCH_COMPLETED,\n tag=\"validation\",\n metrics=[\"nll\", \"accuracy\"],\n global_step_transform=global_step_transform\n )\n\n Another example where the State Attributes ``trainer.state.alpha`` and ``trainer.state.beta``\n are also logged along with the NLL and Accuracy after each iteration:\n\n .. code-block:: python\n\n tb_logger.attach(\n trainer,\n log_handler=OutputHandler(\n tag=\"training\",\n metric_names=[\"nll\", \"accuracy\"],\n state_attributes=[\"alpha\", \"beta\"],\n ),\n event_name=Events.ITERATION_COMPLETED\n )\n\n Example of `global_step_transform`:\n\n .. code-block:: python\n\n def global_step_transform(engine, event_name):\n return engine.state.get_event_attrib_value(event_name)\n\n .. 
versionchanged:: 0.4.7\n accepts an optional list of `state_attributes`\n \"\"\"\n\n def __init__(\n self,\n tag: str,\n metric_names: Optional[List[str]] = None,\n output_transform: Optional[Callable] = None,\n global_step_transform: Optional[Callable[[Engine, Union[str, Events]], int]] = None,\n state_attributes: Optional[List[str]] = None,\n ):\n super(OutputHandler, self).__init__(\n tag, metric_names, output_transform, global_step_transform, state_attributes\n )\n\n def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[str, Events]) -> None:\n\n if not isinstance(logger, TensorboardLogger):\n raise RuntimeError(\"Handler 'OutputHandler' works only with TensorboardLogger\")\n\n metrics = self._setup_output_metrics_state_attrs(engine, key_tuple=False)\n\n global_step = self.global_step_transform(engine, event_name)\n if not isinstance(global_step, int):\n raise TypeError(\n f\"global_step must be int, got {type(global_step)}.\"\n \" Please check the output of global_step_transform.\"\n )\n\n for key, value in metrics.items():\n logger.writer.add_scalar(key, value, global_step)\n\n\nclass OptimizerParamsHandler(BaseOptimizerParamsHandler):\n \"\"\"Helper handler to log optimizer parameters\n\n Args:\n optimizer: torch optimizer or any object with attribute ``param_groups``\n as a sequence.\n param_name: parameter name\n tag: common title for all produced plots. For example, \"generator\"\n\n Examples:\n .. code-block:: python\n\n from ignite.contrib.handlers.tensorboard_logger import *\n\n # Create a logger\n tb_logger = TensorboardLogger(log_dir=\"experiments/tb_logs\")\n\n # Attach the logger to the trainer to log optimizer's parameters, e.g. learning rate at each iteration\n tb_logger.attach(\n trainer,\n log_handler=OptimizerParamsHandler(optimizer),\n event_name=Events.ITERATION_STARTED\n )\n # or equivalently\n tb_logger.attach_opt_params_handler(\n trainer,\n event_name=Events.ITERATION_STARTED,\n optimizer=optimizer\n )\n \"\"\"\n\n def __init__(self, optimizer: Optimizer, param_name: str = \"lr\", tag: Optional[str] = None):\n super(OptimizerParamsHandler, self).__init__(optimizer, param_name, tag)\n\n def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[str, Events]) -> None:\n if not isinstance(logger, TensorboardLogger):\n raise RuntimeError(\"Handler OptimizerParamsHandler works only with TensorboardLogger\")\n\n global_step = engine.state.get_event_attrib_value(event_name)\n tag_prefix = f\"{self.tag}/\" if self.tag else \"\"\n params = {\n f\"{tag_prefix}{self.param_name}/group_{i}\": float(param_group[self.param_name])\n for i, param_group in enumerate(self.optimizer.param_groups)\n }\n\n for k, v in params.items():\n logger.writer.add_scalar(k, v, global_step)\n\n\nclass WeightsScalarHandler(BaseWeightsScalarHandler):\n \"\"\"Helper handler to log model's weights as scalars.\n Handler, upon construction, iterates over named parameters of the model and keep\n reference to ones permitted by `whitelist`. Then at every call, applies\n reduction function to each parameter, produces a scalar and logs it.\n\n Args:\n model: model to log weights\n reduction: function to reduce parameters into scalar\n tag: common title for all produced plots. For example, \"generator\"\n whitelist: specific weights to log. Should be list of model's submodules\n or parameters names, or a callable which gets weight along with its name\n and determines if it should be logged. 
Names should be fully-qualified.\n For more information please refer to `PyTorch docs\n <https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module.get_submodule>`_.\n If not given, all of model's weights are logged.\n\n Examples:\n .. code-block:: python\n\n from ignite.contrib.handlers.tensorboard_logger import *\n\n # Create a logger\n tb_logger = TensorboardLogger(log_dir=\"experiments/tb_logs\")\n\n # Attach the logger to the trainer to log model's weights norm after each iteration\n tb_logger.attach(\n trainer,\n event_name=Events.ITERATION_COMPLETED,\n log_handler=WeightsScalarHandler(model, reduction=torch.norm)\n )\n\n .. code-block:: python\n\n from ignite.contrib.handlers.tensorboard_logger import *\n\n tb_logger = TensorboardLogger(log_dir=\"experiments/tb_logs\")\n\n # Log only `fc` weights\n tb_logger.attach(\n trainer,\n event_name=Events.ITERATION_COMPLETED,\n log_handler=WeightsScalarHandler(\n model,\n whitelist=['fc']\n )\n )\n\n .. code-block:: python\n\n from ignite.contrib.handlers.tensorboard_logger import *\n\n tb_logger = TensorboardLogger(log_dir=\"experiments/tb_logs\")\n\n # Log weights which have `bias` in their names\n def has_bias_in_name(n, p):\n return 'bias' in n\n\n tb_logger.attach(\n trainer,\n event_name=Events.ITERATION_COMPLETED,\n log_handler=WeightsScalarHandler(model, whitelist=has_bias_in_name)\n )\n\n .. versionchanged:: 0.4.9\n optional argument `whitelist` added.\n \"\"\"\n\n def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[str, Events]) -> None:\n\n if not isinstance(logger, TensorboardLogger):\n raise RuntimeError(\"Handler 'WeightsScalarHandler' works only with TensorboardLogger\")\n\n global_step = engine.state.get_event_attrib_value(event_name)\n tag_prefix = f\"{self.tag}/\" if self.tag else \"\"\n for name, p in self.weights:\n\n name = name.replace(\".\", \"/\")\n logger.writer.add_scalar(\n f\"{tag_prefix}weights_{self.reduction.__name__}/{name}\",\n self.reduction(p.data),\n global_step,\n )\n\n\nclass WeightsHistHandler(BaseWeightsHandler):\n \"\"\"Helper handler to log model's weights as histograms.\n\n Args:\n model: model to log weights\n tag: common title for all produced plots. For example, \"generator\"\n whitelist: specific weights to log. Should be list of model's submodules\n or parameters names, or a callable which gets weight along with its name\n and determines if it should be logged. Names should be fully-qualified.\n For more information please refer to `PyTorch docs\n <https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module.get_submodule>`_.\n If not given, all of model's weights are logged.\n\n Examples:\n .. code-block:: python\n\n from ignite.contrib.handlers.tensorboard_logger import *\n\n # Create a logger\n tb_logger = TensorboardLogger(log_dir=\"experiments/tb_logs\")\n\n # Attach the logger to the trainer to log model's weights norm after each iteration\n tb_logger.attach(\n trainer,\n event_name=Events.ITERATION_COMPLETED,\n log_handler=WeightsHistHandler(model)\n )\n\n .. code-block:: python\n\n from ignite.contrib.handlers.tensorboard_logger import *\n\n tb_logger = TensorboardLogger(log_dir=\"experiments/tb_logs\")\n\n # Log weights of `fc` layer\n weights = ['fc']\n\n # Attach the logger to the trainer to log weights norm after each iteration\n tb_logger.attach(\n trainer,\n event_name=Events.ITERATION_COMPLETED,\n log_handler=WeightsHistHandler(model, whitelist=weights)\n )\n\n .. 
code-block:: python\n\n from ignite.contrib.handlers.tensorboard_logger import *\n\n tb_logger = TensorboardLogger(log_dir=\"experiments/tb_logs\")\n\n # Log weights which name include 'conv'.\n weight_selector = lambda name, p: 'conv' in name\n\n # Attach the logger to the trainer to log weights norm after each iteration\n tb_logger.attach(\n trainer,\n event_name=Events.ITERATION_COMPLETED,\n log_handler=WeightsHistHandler(model, whitelist=weight_selector)\n )\n\n .. versionchanged:: 0.4.9\n optional argument `whitelist` added.\n \"\"\"\n\n def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[str, Events]) -> None:\n if not isinstance(logger, TensorboardLogger):\n raise RuntimeError(\"Handler 'WeightsHistHandler' works only with TensorboardLogger\")\n\n global_step = engine.state.get_event_attrib_value(event_name)\n tag_prefix = f\"{self.tag}/\" if self.tag else \"\"\n for name, p in self.weights:\n\n name = name.replace(\".\", \"/\")\n logger.writer.add_histogram(\n tag=f\"{tag_prefix}weights/{name}\", values=p.data.cpu().numpy(), global_step=global_step\n )\n\n\nclass GradsScalarHandler(BaseWeightsScalarHandler):\n \"\"\"Helper handler to log model's gradients as scalars.\n Handler, upon construction, iterates over named parameters of the model and keep\n reference to ones permitted by the `whitelist`. Then at every call, applies\n reduction function to each parameter's gradient, produces a scalar and logs it.\n\n Args:\n model: model to log weights\n reduction: function to reduce parameters into scalar\n tag: common title for all produced plots. For example, \"generator\"\n whitelist: specific gradients to log. Should be list of model's submodules\n or parameters names, or a callable which gets weight along with its name\n and determines if its gradient should be logged. Names should be\n fully-qualified. For more information please refer to `PyTorch docs\n <https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module.get_submodule>`_.\n If not given, all of model's gradients are logged.\n\n Examples:\n .. code-block:: python\n\n from ignite.contrib.handlers.tensorboard_logger import *\n\n # Create a logger\n tb_logger = TensorboardLogger(log_dir=\"experiments/tb_logs\")\n\n # Attach the logger to the trainer to log model's gradients norm after each iteration\n tb_logger.attach(\n trainer,\n event_name=Events.ITERATION_COMPLETED,\n log_handler=GradsScalarHandler(model, reduction=torch.norm)\n )\n\n .. code-block:: python\n\n from ignite.contrib.handlers.tensorboard_logger import *\n\n tb_logger = TensorboardLogger(log_dir=\"experiments/tb_logs\")\n\n # Log gradient of `base`\n tb_logger.attach(\n trainer,\n event_name=Events.ITERATION_COMPLETED,\n log_handler=GradsScalarHandler(\n model,\n reduction=torch.norm,\n whitelist=['base']\n )\n )\n\n .. code-block:: python\n\n from ignite.contrib.handlers.tensorboard_logger import *\n\n tb_logger = TensorboardLogger(log_dir=\"experiments/tb_logs\")\n\n # Log gradient of weights which belong to a `fc` layer\n def is_in_fc_layer(n, p):\n return 'fc' in n\n\n tb_logger.attach(\n trainer,\n event_name=Events.ITERATION_COMPLETED,\n log_handler=GradsScalarHandler(model, whitelist=is_in_fc_layer)\n )\n\n .. 
versionchanged:: 0.4.9\n optional argument `whitelist` added.\n \"\"\"\n\n def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[str, Events]) -> None:\n if not isinstance(logger, TensorboardLogger):\n raise RuntimeError(\"Handler 'GradsScalarHandler' works only with TensorboardLogger\")\n\n global_step = engine.state.get_event_attrib_value(event_name)\n tag_prefix = f\"{self.tag}/\" if self.tag else \"\"\n for name, p in self.weights:\n if p.grad is None:\n continue\n\n name = name.replace(\".\", \"/\")\n logger.writer.add_scalar(\n f\"{tag_prefix}grads_{self.reduction.__name__}/{name}\", self.reduction(p.grad), global_step\n )\n\n\nclass GradsHistHandler(BaseWeightsHandler):\n \"\"\"Helper handler to log model's gradients as histograms.\n\n Args:\n model: model to log weights\n tag: common title for all produced plots. For example, \"generator\"\n whitelist: specific gradients to log. Should be list of model's submodules\n or parameters names, or a callable which gets weight along with its name\n and determines if its gradient should be logged. Names should be\n fully-qualified. For more information please refer to `PyTorch docs\n <https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module.get_submodule>`_.\n If not given, all of model's gradients are logged.\n\n Examples:\n .. code-block:: python\n\n from ignite.contrib.handlers.tensorboard_logger import *\n\n # Create a logger\n tb_logger = TensorboardLogger(log_dir=\"experiments/tb_logs\")\n\n # Attach the logger to the trainer to log model's weights norm after each iteration\n tb_logger.attach(\n trainer,\n event_name=Events.ITERATION_COMPLETED,\n log_handler=GradsHistHandler(model)\n )\n\n .. code-block:: python\n\n from ignite.contrib.handlers.tensorboard_logger import *\n\n tb_logger = TensorboardLogger(log_dir=\"experiments/tb_logs\")\n\n # Log gradient of `fc.bias`\n tb_logger.attach(\n trainer,\n event_name=Events.ITERATION_COMPLETED,\n log_handler=GradsHistHandler(model, whitelist=['fc.bias'])\n )\n\n .. code-block:: python\n\n from ignite.contrib.handlers.tensorboard_logger import *\n\n tb_logger = TensorboardLogger(log_dir=\"experiments/tb_logs\")\n\n # Log gradient of weights which have shape (2, 1)\n def has_shape_2_1(n, p):\n return p.shape == (2,1)\n\n tb_logger.attach(\n trainer,\n event_name=Events.ITERATION_COMPLETED,\n log_handler=GradsHistHandler(model, whitelist=has_shape_2_1)\n )\n\n .. versionchanged:: 0.4.9\n optional argument `whitelist` added.\n \"\"\"\n\n def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[str, Events]) -> None:\n if not isinstance(logger, TensorboardLogger):\n raise RuntimeError(\"Handler 'GradsHistHandler' works only with TensorboardLogger\")\n\n global_step = engine.state.get_event_attrib_value(event_name)\n tag_prefix = f\"{self.tag}/\" if self.tag else \"\"\n for name, p in self.weights:\n if p.grad is None:\n continue\n\n name = name.replace(\".\", \"/\")\n logger.writer.add_histogram(\n tag=f\"{tag_prefix}grads/{name}\", values=p.grad.cpu().numpy(), global_step=global_step\n )\n",
"path": "ignite/contrib/handlers/tensorboard_logger.py"
}
] | diff --git a/ignite/contrib/handlers/tensorboard_logger.py b/ignite/contrib/handlers/tensorboard_logger.py
index 042d19198320..f8b002e3020b 100644
--- a/ignite/contrib/handlers/tensorboard_logger.py
+++ b/ignite/contrib/handlers/tensorboard_logger.py
@@ -160,6 +160,9 @@ def __init__(self, *args: Any, **kwargs: Any):
self.writer = SummaryWriter(*args, **kwargs)
+ def __getattr__(self, attr: Any) -> Any:
+ return getattr(self.writer, attr)
+
def close(self) -> None:
self.writer.close()
diff --git a/tests/ignite/contrib/handlers/test_tensorboard_logger.py b/tests/ignite/contrib/handlers/test_tensorboard_logger.py
index 7645eddd335f..60c8a1f4483c 100644
--- a/tests/ignite/contrib/handlers/test_tensorboard_logger.py
+++ b/tests/ignite/contrib/handlers/test_tensorboard_logger.py
@@ -32,6 +32,17 @@ def test_optimizer_params_handler_wrong_setup():
handler(mock_engine, mock_logger, Events.ITERATION_STARTED)
+def test_getattr_method():
+ # Create a mock SummaryWriter object
+ mock_writer = MagicMock()
+    # Assign the mock object to the writer attribute of a TensorboardLogger instance
+ logger = TensorboardLogger()
+ logger.writer = mock_writer
+    # Test that a method passed through the __getattr__ method calls the corresponding method on the mock object
+ logger.add_scalar("loss", 0.5)
+ mock_writer.add_scalar.assert_called_once_with("loss", 0.5)
+
+
def test_optimizer_params():
optimizer = torch.optim.SGD([torch.tensor(0.0)], lr=0.01)
|
mosaicml__composer-79 | Add Colab Example
* Add Example Jupyter notebook to the examples folder
* Add "Open in Colab" to the README.md
| [
{
"content": "# Copyright 2021 MosaicML. All Rights Reserved.\n\nimport os\nimport sys\n\nimport setuptools\nfrom setuptools import setup\n\n\ndef package_files(directory):\n # from https://stackoverflow.com/a/36693250\n paths = []\n for (path, directories, filenames) in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join('..', path, filename))\n return paths\n\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\ninstall_requires = [\n \"pyyaml>=5.4.1\",\n \"tqdm>=4.62.3\",\n \"torchmetrics>=0.5.1\",\n \"torch_optimizer==0.1.0\",\n \"torchvision>=0.9.0\",\n \"torch>=1.9\",\n \"argparse>=1.4.0\",\n \"yahp>=0.0.10\",\n]\nextra_deps = {}\n\nextra_deps['base'] = []\n\nextra_deps['dev'] = [\n 'junitparser>=2.1.1',\n 'coverage[toml]>=6.1.1',\n 'pytest>=6.2.0',\n 'yapf>=0.13.0',\n 'isort>=5.9.3',\n 'yamllint>=1.26.2',\n 'pytest-timeout>=1.4.2',\n 'recommonmark>=0.7.1',\n 'sphinx>=4.2.0',\n 'sphinx_copybutton>=0.4.0',\n 'sphinx_markdown_tables>=0.0.15',\n 'sphinx-argparse>=0.3.1',\n 'sphinxcontrib.katex>=0.8.6',\n 'sphinxext.opengraph>=0.4.2',\n 'sphinx_rtd_theme>=1.0.0',\n 'myst-parser>=0.15.2',\n]\nextra_deps['wandb'] = ['wandb>=0.12.2']\n\nextra_deps['nlp'] = [\n 'transformers>=4.11.3',\n 'datasets>=1.14.0',\n]\n\nextra_deps['unet'] = [\n 'monai>=0.7.0',\n 'scikit-learn>=1.0.1',\n]\n\nextra_deps['all'] = set(dep for deps in extra_deps.values() for dep in deps)\n\nsetup(\n name=\"mosaicml\",\n version=\"0.2.4\",\n author=\"MosaicML\",\n author_email=\"[email protected]\",\n description=\"composing methods for ML training efficiency\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/mosaicml/composer\",\n include_package_data=True,\n package_data={\n \"composer\": ['py.typed'],\n \"\": package_files('composer/yamls'),\n },\n packages=setuptools.find_packages(include=[\"composer\"]),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n ],\n install_requires=install_requires,\n entry_points={\n 'console_scripts': ['composer = composer.cli.launcher:main',],\n },\n extras_require=extra_deps,\n dependency_links=['https://developer.download.nvidia.com/compute/redist'],\n python_requires='>=3.7',\n ext_package=\"composer\",\n)\n\n# only visible if user installs with verbose -v flag\n# Printing to stdout as not to interfere with setup.py CLI flags (e.g. --version)\nprint(\"*\" * 20, file=sys.stderr)\nprint(\n \"\\nNOTE: For best performance, we recommend installing Pillow-SIMD \"\n \"\\nfor accelerated image processing operations. To install:\"\n \"\\n\\n\\t pip uninstall pillow && pip install pillow-simd\\n\",\n file=sys.stderr)\nprint(\"*\" * 20, file=sys.stderr)\n",
"path": "setup.py"
}
] | [
{
"content": "# Copyright 2021 MosaicML. All Rights Reserved.\n\nimport os\nimport sys\n\nimport setuptools\nfrom setuptools import setup\n\n\ndef package_files(directory):\n # from https://stackoverflow.com/a/36693250\n paths = []\n for (path, directories, filenames) in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join('..', path, filename))\n return paths\n\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\ninstall_requires = [\n \"pyyaml>=5.4.1\",\n \"tqdm>=4.62.3\",\n \"torchmetrics>=0.5.1\",\n \"torch_optimizer==0.1.0\",\n \"torchvision>=0.9.0\",\n \"torch>=1.9\",\n \"argparse>=1.4.0\",\n \"yahp>=0.0.10\",\n]\nextra_deps = {}\n\nextra_deps['base'] = []\n\nextra_deps['dev'] = [\n 'junitparser>=2.1.1',\n 'coverage[toml]>=6.1.1',\n 'pytest>=6.2.0',\n 'yapf>=0.13.0',\n 'isort>=5.9.3',\n 'yamllint>=1.26.2',\n 'pytest-timeout>=1.4.2',\n 'recommonmark>=0.7.1',\n 'sphinx>=4.2.0',\n 'sphinx_copybutton>=0.4.0',\n 'sphinx_markdown_tables>=0.0.15',\n 'sphinx-argparse>=0.3.1',\n 'sphinxcontrib.katex>=0.8.6',\n 'sphinxext.opengraph>=0.4.2',\n 'sphinx_rtd_theme>=1.0.0',\n 'testbook>=0.4.2',\n 'myst-parser>=0.15.2',\n]\nextra_deps['wandb'] = ['wandb>=0.12.2']\n\nextra_deps['nlp'] = [\n 'transformers>=4.11.3',\n 'datasets>=1.14.0',\n]\n\nextra_deps['unet'] = [\n 'monai>=0.7.0',\n 'scikit-learn>=1.0.1',\n]\n\nextra_deps['all'] = set(dep for deps in extra_deps.values() for dep in deps)\n\nsetup(\n name=\"mosaicml\",\n version=\"0.2.4\",\n author=\"MosaicML\",\n author_email=\"[email protected]\",\n description=\"composing methods for ML training efficiency\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/mosaicml/composer\",\n include_package_data=True,\n package_data={\n \"composer\": ['py.typed'],\n \"\": package_files('composer/yamls'),\n },\n packages=setuptools.find_packages(include=[\"composer\"]),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n ],\n install_requires=install_requires,\n entry_points={\n 'console_scripts': ['composer = composer.cli.launcher:main',],\n },\n extras_require=extra_deps,\n dependency_links=['https://developer.download.nvidia.com/compute/redist'],\n python_requires='>=3.7',\n ext_package=\"composer\",\n)\n\n# only visible if user installs with verbose -v flag\n# Printing to stdout as not to interfere with setup.py CLI flags (e.g. --version)\nprint(\"*\" * 20, file=sys.stderr)\nprint(\n \"\\nNOTE: For best performance, we recommend installing Pillow-SIMD \"\n \"\\nfor accelerated image processing operations. To install:\"\n \"\\n\\n\\t pip uninstall pillow && pip install pillow-simd\\n\",\n file=sys.stderr)\nprint(\"*\" * 20, file=sys.stderr)\n",
"path": "setup.py"
}
] | diff --git a/README.md b/README.md
index 8caf8f42ca..d50b6b9df0 100644
--- a/README.md
+++ b/README.md
@@ -7,11 +7,18 @@ The library features:
* Standardized approach to implement and compose efficiency methods, extended from two-way callbacks ([Howard et al, 2020](https://arxiv.org/abs/2002.04688))
* Easy way to access our methods either directly for your trainer loops, or through the MosaicML Trainer.
+[](https://colab.research.google.com/github/mosaicml/composer/blob/main/examples/composer.ipynb)
+
+
+## Installing Composer
+
To install `Composer`:
```
pip install mosaicml
```
+## Using Composer
+
A few ways to use `Composer`:
1. Import the functional form of our methods:
diff --git a/examples/composer.ipynb b/examples/composer.ipynb
new file mode 100644
index 0000000000..91544e8826
--- /dev/null
+++ b/examples/composer.ipynb
@@ -0,0 +1,104 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "tags": [
+ "requirements"
+ ]
+ },
+ "outputs": [],
+ "source": [
+ "!pip install mosaicml"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "tags": [
+ "imports"
+ ]
+ },
+ "outputs": [],
+ "source": [
+ "import torch\n",
+ "\n",
+ "from composer import trainer, algorithms"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "tags": [
+ "hparams"
+ ]
+ },
+ "outputs": [],
+ "source": [
+ "if torch.cuda.is_available():\n",
+ " trainer_hparams = trainer.load(\"classify_mnist\")\n",
+ "else:\n",
+ " trainer_hparams = trainer.load(\"classify_mnist_cpu\")\n",
+ "\n",
+ "trainer_hparams.algorithms = algorithms.load_multiple(\n",
+ " \"blurpool\",\n",
+ " \"scale_schedule\")\n",
+ "trainer_hparams.set_datadir(\"~/datasets\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "tags": [
+ "trainer"
+ ]
+ },
+ "outputs": [],
+ "source": [
+ "mosaic_trainer = trainer_hparams.initialize_object()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "tags": [
+ "train"
+ ]
+ },
+ "outputs": [],
+ "source": [
+ "mosaic_trainer.fit()"
+ ]
+ }
+ ],
+ "metadata": {
+ "celltoolbar": "Tags",
+ "interpreter": {
+ "hash": "40ad569553f4172ee5f9f9f1cdecfe3a03f28f5ebfb04d4146b885c5108ed381"
+ },
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.9.7"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/setup.py b/setup.py
index 0040c525b9..bd31c35993 100755
--- a/setup.py
+++ b/setup.py
@@ -49,6 +49,7 @@ def package_files(directory):
'sphinxcontrib.katex>=0.8.6',
'sphinxext.opengraph>=0.4.2',
'sphinx_rtd_theme>=1.0.0',
+ 'testbook>=0.4.2',
'myst-parser>=0.15.2',
]
extra_deps['wandb'] = ['wandb>=0.12.2']
diff --git a/tests/examples/__init__.py b/tests/examples/__init__.py
new file mode 100644
index 0000000000..0929d967ff
--- /dev/null
+++ b/tests/examples/__init__.py
@@ -0,0 +1 @@
+# Copyright 2021 MosaicML. All Rights Reserved.
diff --git a/tests/examples/test_composer_ipynb.py b/tests/examples/test_composer_ipynb.py
new file mode 100644
index 0000000000..ea594e6125
--- /dev/null
+++ b/tests/examples/test_composer_ipynb.py
@@ -0,0 +1,22 @@
+# Copyright 2021 MosaicML. All Rights Reserved.
+
+import os
+
+import pytest
+import testbook
+import testbook.client
+
+import composer
+
+examples_path = os.path.join(os.path.dirname(composer.__file__), '..', 'examples')
+
+
+@testbook.testbook(os.path.join(examples_path, 'composer.ipynb'))
+@pytest.mark.timeout(120) # long timeout to download the dataset (if needed) and train one epoch
+def test_composer_notebook(tb: testbook.client.TestbookNotebookClient):
+ tb.execute_cell("imports")
+ tb.execute_cell("hparams")
+ tb.inject("trainer_hparams.max_epochs = 1")
+ tb.execute_cell("trainer")
+ assert tb.get('mosaic_trainer').state.max_epochs == 1
+ tb.execute_cell("train")
|
beeware__toga-928 | toga-demo alias doesn't work on Windows
## Expected Behavior
Examples in the documentation should work. I have to specify version 0.2.15 for anything to run properly - the normal pip installation of toga installs the dev builds that are not functioning.
## Current Behavior
They all fail with various errors of missing items, etc.
```
C:\Users\bubth\Development\togatest> pip install --pre toga-demo
Collecting toga-demo
Downloading https://files.pythonhosted.org/packages/33/05/61d94bccdfe6831eb60fc59cd79c60d7780983d07df984d82e2a8f298b8b
/toga_demo-0.3.0.dev19-py3-none-any.whl (616kB)
|████████████████████████████████| 624kB 819kB/s
Collecting toga==0.3.0.dev18 (from toga-demo)
Downloading https://files.pythonhosted.org/packages/9c/cd/4ec127b063c9b1c6f045791e7613e05247dc30e0cb817bccf09de9377ecf
/toga-0.3.0.dev18-py3-none-any.whl
Collecting toga-winforms==0.3.0.dev18; sys_platform == "win32" (from toga==0.3.0.dev18->toga-demo)
Downloading https://files.pythonhosted.org/packages/81/67/6e16ddc4c4286a4b6f08005c66006524e305c3befca01df34f509ef76202
/toga_winforms-0.3.0.dev18-py3-none-any.whl
Collecting toga-core==0.3.0.dev18 (from toga-winforms==0.3.0.dev18; sys_platform == "win32"->toga==0.3.0.dev18->toga-dem
o)
/toga_core-0.3.0.dev18-py3-none-any.whl (512kB)
|████████████████████████████████| 522kB 6.8MB/s
Requirement already satisfied: pythonnet in c:\program files\python37\lib\site-packages (from toga-winforms==0.3.0.dev18Requirement already satisfied: importlib-metadata; python_version < "3.8" in c:\users\bubth\appdata\roaming\python\pythotoga-demo) (0.18)
Collecting travertino>=0.1.0 (from toga-core==0.3.0.dev18->toga-winforms==0.3.0.dev18; sys_platform == "win32"->toga==0.3.0.dev18->toga-demo)
Downloading https://files.pythonhosted.org/packages/4c/78/b33e38d372707fbf2c461d1bde6797a12c8d20f97279db63cb57dc24eacb/travertino-0.1.3-py3-none-any.whl
Requirement already satisfied: zipp>=0.5 in c:\users\bubth\appdata\roaming\python\python37\site-packages (from importlib-metadata; python_version < "3.8"->toga-core==0.3.0.dev18->toga-winforms==0.3.0.dev18; sys_platform == "win32"->toga==0.3.0.dev18->toga-demo) (0.5.2)
Installing collected packages: travertino, toga-core, toga-winforms, toga, toga-demo
Found existing installation: toga-core 0.2.15
Uninstalling toga-core-0.2.15:
Successfully uninstalled toga-core-0.2.15
Found existing installation: toga-winforms 0.2.15
Uninstalling toga-winforms-0.2.15:
Successfully uninstalled toga-winforms-0.2.15
Found existing installation: toga 0.2.15
Uninstalling toga-0.2.15:
Successfully uninstalled toga-0.2.15
Successfully installed toga-0.3.0.dev18 toga-core-0.3.0.dev18 toga-demo-0.3.0.dev19 toga-winforms-0.3.0.dev18 travertino-0.1.3
WARNING: You are using pip version 19.2.1, however version 20.1.1 is available.
You should consider upgrading via the 'python -m pip install --upgrade pip' command.
C:\Users\bubth\Development\togatest> python --versoin
unknown option --versoin
usage: C:\Program Files\Python37\python.exe [option] ... [-c cmd | -m mod | file | -] [arg] ...
Try `python -h' for more information.
C:\Users\bubth\Development\togatest> python --version
Python 3.7.3
C:\Users\bubth\Development\togatest> toga-demo
Traceback (most recent call last):
File "c:\program files\python37\lib\runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "c:\program files\python37\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "C:\Program Files\Python37\Scripts\toga-demo.exe\__main__.py", line 9, in <module>
File "c:\program files\python37\lib\site-packages\toga_demo\__main__.py", line 5, in run
main().main_loop()
File "c:\program files\python37\lib\site-packages\toga_demo\app.py", line 98, in main
return TogaDemo('Toga Demo', 'org.beeware.toga-demo')
File "c:\program files\python37\lib\site-packages\toga\app.py", line 184, in __init__
self.icon = 'resources/{app_name}'.format(app_name=self.app_name)
File "c:\program files\python37\lib\site-packages\toga\app.py", line 317, in icon
self._icon.bind(self.factory)
File "c:\program files\python37\lib\site-packages\toga\icons.py", line 41, in bind
resource_path = factory.paths.app
File "c:\program files\python37\lib\site-packages\toga_winforms\paths.py", line 10, in app
return Path(sys.modules[App.app.module_name].__file__).parent
KeyError: ''
C:\Users\bubth\Development\togatest>
```
```
Traceback (most recent call last):
File ".\test.py", line 2, in <module>
from toga.style.pack import Pack, ROW, CENTER, COLUMN
ModuleNotFoundError: No module named 'toga.style'
```
```
C:\Users\bubth\Development\togatest> python .\test.py
Traceback (most recent call last):
File ".\test.py", line 24, in <module>
main().main_loop()
File "C:\Program Files\Python37\lib\site-packages\toga_winforms\app.py", line 49, in main_loop
self._startup()
File "C:\Program Files\Python37\lib\site-packages\toga_winforms\app.py", line 41, in _startup
self.startup()
File "C:\Program Files\Python37\lib\site-packages\toga\interface\app.py", line 144, in startup
self.main_window.content = self._startup_method(self)
File ".\test.py", line 9, in build
box = toga.Box()
File "C:\Program Files\Python37\lib\site-packages\toga_winforms\widgets\box.py", line 10, in __init__
super().__init__(id=id, style=style, children=children)
File "C:\Program Files\Python37\lib\site-packages\toga\interface\widgets\box.py", line 21, in __init__
super().__init__(id=id, style=style, children=children)
File "C:\Program Files\Python37\lib\site-packages\toga\interface\widgets\base.py", line 144, in __init__
self.style = CSS()
File "C:\Program Files\Python37\lib\site-packages\toga\interface\widgets\base.py", line 170, in style
self._style = value.bind(self)
AttributeError: 'CSS' object has no attribute 'bind'
```
## Steps to reproduce
<!--- Provide a set of steps describing how to reproduce this bug. If you have a live example, provide the link below -->
1. Be on windows
2. install toga
3. Follow the browser tutorial or hello world tutorial
## Your Environment
<!--- Provide details on your current environment you found the bug in -->
* Python Version (list the specific version number)
```
C:\Users\bubth\Development\togatest> python --version
Python 3.7.3
```
* Operating System and Version (select from the following and list the specific version number; if your OS is not listed, list that as well)
```
OS Name Microsoft Windows 10 Pro
Version 10.0.19041 Build 19041
Other OS Description Not Available
OS Manufacturer Microsoft Corporation
System Name LAPPYTOPPY
System Manufacturer Micro-Star International Co., Ltd.
System Model GP73 Leopard 8RF
System Type x64-based PC
System SKU 17C5.1
Processor Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz, 2201 Mhz, 6 Core(s), 12 Logical Processor(s)
BIOS Version/Date American Megatrends Inc. E17C5IMS.10A, 7/13/2018
SMBIOS Version 3.1
Embedded Controller Version 255.255
BIOS Mode UEFI
BaseBoard Manufacturer Micro-Star International Co., Ltd.
BaseBoard Product MS-17C5
BaseBoard Version REV:1.0
Platform Role Mobile
Secure Boot State On
PCR7 Configuration Elevation Required to View
Windows Directory C:\WINDOWS
System Directory C:\WINDOWS\system32
Boot Device \Device\HarddiskVolume3
Locale United States
Hardware Abstraction Layer Version = "10.0.19041.1"
User Name LAPPYTOPPY\bubth
Time Zone Mountain Daylight Time
Installed Physical Memory (RAM) 16.0 GB
Total Physical Memory 15.8 GB
Available Physical Memory 4.19 GB
Total Virtual Memory 18.2 GB
Available Virtual Memory 4.69 GB
Page File Space 2.38 GB
Page File C:\pagefile.sys
Kernel DMA Protection Off
Virtualization-based security Running
Virtualization-based security Required Security Properties
Virtualization-based security Available Security Properties Base Virtualization Support, Secure Boot, DMA Protection, SMM Security Mitigations 1.0, Mode Based Execution Control
Virtualization-based security Services Configured
Virtualization-based security Services Running
Device Encryption Support Elevation Required to View
A hypervisor has been detected. Features required for Hyper-V will not be displayed.
```
* Toga Version (list the specific version number or git hash)
```
C:\Users\bubth\Development\togatest> python
Python 3.7.3 (v3.7.3:ef4ec6ed12, Mar 25 2019, 22:22:05) [MSC v.1916 64 bit (AMD64)] on win32
Type "help", "copyright", "credits" or "license" for more information.
>>> import toga
>>> toga.__version__
'0.3.0.dev18'
```
* Toga Target (the type of app you are trying to generate)
- [ ] android
- [ ] cocoa
- [ ] django
- [ ] gtk
- [ ] iOS
- [ ] tvOS
- [ ] watchOS
- [x ] winforms
- [ ] win32
- [ ] Other (please specify)
| [
{
"content": "#!/usr/bin/env python\nimport io\n\nfrom setuptools import setup, find_packages\n\n\nwith io.open('README.rst', encoding='utf8') as readme:\n long_description = readme.read()\n\n\nsetup(\n name='toga-demo',\n version='0.3.0.dev20',\n description='A demonstration of the capabilities of the Toga widget toolkit.',\n long_description=long_description,\n author='Russell Keith-Magee',\n author_email='[email protected]',\n url='http://beeware.org/toga-demo',\n include_package_data=True,\n packages=find_packages(),\n python_requires='>=3.5',\n package_data={\n 'toga_demo': ['resources/*.icns', 'resources/*.png'],\n },\n install_requires=[\n 'toga==0.3.0.dev18'\n ],\n entry_points={\n 'console_scripts': [\n 'toga-demo = toga_demo.__main__:run',\n ]\n },\n license='New BSD',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Software Development',\n 'Topic :: Utilities',\n ],\n options={\n 'app': {\n 'formal_name': 'Toga Demo',\n 'bundle': 'org.beeware',\n },\n 'ios': {\n 'app_requires': [\n 'toga-ios==0.3.0.dev20',\n ]\n },\n 'django': {\n 'app_requires': [\n 'toga-django==0.3.0.dev20',\n ]\n },\n 'macos': {\n 'app_requires': [\n 'toga-cocoa==0.3.0.dev20',\n ]\n },\n 'linux': {\n 'app_requires': [\n 'toga-gtk==0.3.0.dev20',\n ]\n },\n 'windows': {\n 'app_requires': [\n 'toga-winform==0.3.0.dev20',\n ]\n },\n 'android': {\n 'app_requires': [\n 'toga-android==0.3.0.dev20',\n ]\n }\n }\n)\n",
"path": "demo/setup.py"
}
] | [
{
"content": "#!/usr/bin/env python\nimport io\n\nfrom setuptools import setup, find_packages\n\n\nwith io.open('README.rst', encoding='utf8') as readme:\n long_description = readme.read()\n\n\nsetup(\n name='toga-demo',\n version='0.3.0.dev20',\n description='A demonstration of the capabilities of the Toga widget toolkit.',\n long_description=long_description,\n author='Russell Keith-Magee',\n author_email='[email protected]',\n url='http://beeware.org/toga-demo',\n include_package_data=True,\n packages=find_packages(),\n python_requires='>=3.5',\n package_data={\n 'toga_demo': ['resources/*.icns', 'resources/*.png'],\n },\n install_requires=[\n 'toga==0.3.0.dev20'\n ],\n entry_points={\n 'console_scripts': [\n 'toga-demo = toga_demo.__main__:run',\n ]\n },\n license='New BSD',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Software Development',\n 'Topic :: Utilities',\n ],\n options={\n 'app': {\n 'formal_name': 'Toga Demo',\n 'bundle': 'org.beeware',\n },\n 'ios': {\n 'app_requires': [\n 'toga-ios==0.3.0.dev20',\n ]\n },\n 'django': {\n 'app_requires': [\n 'toga-django==0.3.0.dev20',\n ]\n },\n 'macos': {\n 'app_requires': [\n 'toga-cocoa==0.3.0.dev20',\n ]\n },\n 'linux': {\n 'app_requires': [\n 'toga-gtk==0.3.0.dev20',\n ]\n },\n 'windows': {\n 'app_requires': [\n 'toga-winform==0.3.0.dev20',\n ]\n },\n 'android': {\n 'app_requires': [\n 'toga-android==0.3.0.dev20',\n ]\n }\n }\n)\n",
"path": "demo/setup.py"
}
] | diff --git a/demo/pyproject.toml b/demo/pyproject.toml
index 29077abdbe..abf54d10f8 100644
--- a/demo/pyproject.toml
+++ b/demo/pyproject.toml
@@ -1,5 +1,3 @@
-[build-system]
-requires = ["briefcase"]
[tool.briefcase]
project_name = "Toga Demo"
diff --git a/demo/setup.py b/demo/setup.py
index c8776abf61..1e6ac8011e 100644
--- a/demo/setup.py
+++ b/demo/setup.py
@@ -23,7 +23,7 @@
'toga_demo': ['resources/*.icns', 'resources/*.png'],
},
install_requires=[
- 'toga==0.3.0.dev18'
+ 'toga==0.3.0.dev20'
],
entry_points={
'console_scripts': [
|
streamlit__streamlit-6348 | experimental_get_query_params won't work before rerun
### Checklist
- [X] I have searched the [existing issues](https://github.com/streamlit/streamlit/issues) for similar issues.
- [X] I added a very descriptive title to this issue.
- [X] I have provided sufficient information below to help reproduce this issue.
### Summary
Users cannot get the right query_params before a rerun.
### Reproducible Code Example
```Python
import streamlit as st
st.experimental_set_query_params(param=3)
st.write(st.experimental_get_query_params())
```
### Steps To Reproduce
Run the script. `{"param": 3}` does not appear on the first run; it only shows up after a rerun, once the query string in the browser has already changed.
### Expected Behavior
Show `{"param": 3}`
### Current Behavior
Shows an empty dict.
### Is this a regression?
- [X] Yes, this used to work in a previous version.
### Debug info
- Streamlit version: 1.20.0
- Python version: 3.10.6
- Operating System: Linux
- Browser: Chrome
- Virtual environment: None
### Additional Information
In previous versions, `set_query_params` set `ctx.query_string = parse.urlencode(query_params, doseq=True)` immediately.
But in 1.20 this line was removed, while `get_query_params` still reads the value from `ctx.query_string`.
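For reference, a minimal sketch of the behaviour I would expect from `set_query_params`, reusing the helpers it already relies on (`get_script_run_ctx`, `ForwardMsg`, and the module-level `_ensure_no_embed_params`); the only change is keeping `ctx.query_string` in sync so `get_query_params` sees the new value within the same run:

```python
# Sketch only; names and imports follow lib/streamlit/commands/query_params.py.
# _ensure_no_embed_params is the module-level helper defined in that same file.
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
from streamlit.runtime.scriptrunner import get_script_run_ctx


def set_query_params(**query_params):
    ctx = get_script_run_ctx()
    if ctx is None:
        return
    msg = ForwardMsg()
    msg.page_info_changed.query_string = _ensure_no_embed_params(
        query_params, ctx.query_string
    )
    # Keep the script run context in sync so a later get_query_params()
    # call in the same script run sees the new value, not only after a rerun.
    ctx.query_string = msg.page_info_changed.query_string
    ctx.enqueue(msg)
```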
### Are you willing to submit a PR?
- [x] Yes, I am willing to submit a PR!
| [
{
"content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport urllib.parse as parse\nfrom typing import Any, Dict, List\n\nfrom streamlit import util\nfrom streamlit.errors import StreamlitAPIException\nfrom streamlit.proto.ForwardMsg_pb2 import ForwardMsg\nfrom streamlit.runtime.metrics_util import gather_metrics\nfrom streamlit.runtime.scriptrunner import get_script_run_ctx\n\nEMBED_QUERY_PARAM = \"embed\"\nEMBED_OPTIONS_QUERY_PARAM = \"embed_options\"\nEMBED_QUERY_PARAMS_KEYS = [EMBED_QUERY_PARAM, EMBED_OPTIONS_QUERY_PARAM]\n\n\n@gather_metrics(\"experimental_get_query_params\")\ndef get_query_params() -> Dict[str, List[str]]:\n \"\"\"Return the query parameters that is currently showing in the browser's URL bar.\n\n Returns\n -------\n dict\n The current query parameters as a dict. \"Query parameters\" are the part of the URL that comes\n after the first \"?\".\n\n Example\n -------\n Let's say the user's web browser is at\n `http://localhost:8501/?show_map=True&selected=asia&selected=america`.\n Then, you can get the query parameters using the following:\n\n >>> import streamlit as st\n >>>\n >>> st.experimental_get_query_params()\n {\"show_map\": [\"True\"], \"selected\": [\"asia\", \"america\"]}\n\n Note that the values in the returned dict are *always* lists. This is\n because we internally use Python's urllib.parse.parse_qs(), which behaves\n this way. And this behavior makes sense when you consider that every item\n in a query string is potentially a 1-element array.\n\n \"\"\"\n ctx = get_script_run_ctx()\n if ctx is None:\n return {}\n # Return new query params dict, but without embed, embed_options query params\n return util.exclude_key_query_params(\n parse.parse_qs(ctx.query_string), keys_to_exclude=EMBED_QUERY_PARAMS_KEYS\n )\n\n\n@gather_metrics(\"experimental_set_query_params\")\ndef set_query_params(**query_params: Any) -> None:\n \"\"\"Set the query parameters that are shown in the browser's URL bar.\n\n .. warning::\n Query param `embed` cannot be set using this method.\n\n Parameters\n ----------\n **query_params : dict\n The query parameters to set, as key-value pairs.\n\n Example\n -------\n\n To point the user's web browser to something like\n \"http://localhost:8501/?show_map=True&selected=asia&selected=america\",\n you would do the following:\n\n >>> import streamlit as st\n >>>\n >>> st.experimental_set_query_params(\n ... show_map=True,\n ... selected=[\"asia\", \"america\"],\n ... )\n\n \"\"\"\n ctx = get_script_run_ctx()\n if ctx is None:\n return\n\n msg = ForwardMsg()\n msg.page_info_changed.query_string = _ensure_no_embed_params(\n query_params, ctx.query_string\n )\n ctx.enqueue(msg)\n\n\ndef _ensure_no_embed_params(\n query_params: Dict[str, List[str]], query_string: str\n) -> str:\n \"\"\"Ensures there are no embed params set (raises StreamlitAPIException) if there is a try,\n also makes sure old param values in query_string are preserved. 
Returns query_string : str.\"\"\"\n # Get query params dict without embed, embed_options params\n query_params_without_embed = util.exclude_key_query_params(\n query_params, keys_to_exclude=EMBED_QUERY_PARAMS_KEYS\n )\n if query_params != query_params_without_embed:\n raise StreamlitAPIException(\n \"Query param embed and embed_options (case-insensitive) cannot be set using set_query_params method.\"\n )\n\n all_current_params = parse.parse_qs(query_string)\n current_embed_params = parse.urlencode(\n {\n EMBED_QUERY_PARAM: [\n param\n for param in util.extract_key_query_params(\n all_current_params, param_key=EMBED_QUERY_PARAM\n )\n ],\n EMBED_OPTIONS_QUERY_PARAM: [\n param\n for param in util.extract_key_query_params(\n all_current_params, param_key=EMBED_OPTIONS_QUERY_PARAM\n )\n ],\n },\n doseq=True,\n )\n query_string = parse.urlencode(query_params, doseq=True)\n\n if query_string:\n separator = \"&\" if current_embed_params else \"\"\n return separator.join([query_string, current_embed_params])\n return current_embed_params\n",
"path": "lib/streamlit/commands/query_params.py"
}
] | [
{
"content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport urllib.parse as parse\nfrom typing import Any, Dict, List\n\nfrom streamlit import util\nfrom streamlit.errors import StreamlitAPIException\nfrom streamlit.proto.ForwardMsg_pb2 import ForwardMsg\nfrom streamlit.runtime.metrics_util import gather_metrics\nfrom streamlit.runtime.scriptrunner import get_script_run_ctx\n\nEMBED_QUERY_PARAM = \"embed\"\nEMBED_OPTIONS_QUERY_PARAM = \"embed_options\"\nEMBED_QUERY_PARAMS_KEYS = [EMBED_QUERY_PARAM, EMBED_OPTIONS_QUERY_PARAM]\n\n\n@gather_metrics(\"experimental_get_query_params\")\ndef get_query_params() -> Dict[str, List[str]]:\n \"\"\"Return the query parameters that is currently showing in the browser's URL bar.\n\n Returns\n -------\n dict\n The current query parameters as a dict. \"Query parameters\" are the part of the URL that comes\n after the first \"?\".\n\n Example\n -------\n Let's say the user's web browser is at\n `http://localhost:8501/?show_map=True&selected=asia&selected=america`.\n Then, you can get the query parameters using the following:\n\n >>> import streamlit as st\n >>>\n >>> st.experimental_get_query_params()\n {\"show_map\": [\"True\"], \"selected\": [\"asia\", \"america\"]}\n\n Note that the values in the returned dict are *always* lists. This is\n because we internally use Python's urllib.parse.parse_qs(), which behaves\n this way. And this behavior makes sense when you consider that every item\n in a query string is potentially a 1-element array.\n\n \"\"\"\n ctx = get_script_run_ctx()\n if ctx is None:\n return {}\n # Return new query params dict, but without embed, embed_options query params\n return util.exclude_key_query_params(\n parse.parse_qs(ctx.query_string), keys_to_exclude=EMBED_QUERY_PARAMS_KEYS\n )\n\n\n@gather_metrics(\"experimental_set_query_params\")\ndef set_query_params(**query_params: Any) -> None:\n \"\"\"Set the query parameters that are shown in the browser's URL bar.\n\n .. warning::\n Query param `embed` cannot be set using this method.\n\n Parameters\n ----------\n **query_params : dict\n The query parameters to set, as key-value pairs.\n\n Example\n -------\n\n To point the user's web browser to something like\n \"http://localhost:8501/?show_map=True&selected=asia&selected=america\",\n you would do the following:\n\n >>> import streamlit as st\n >>>\n >>> st.experimental_set_query_params(\n ... show_map=True,\n ... selected=[\"asia\", \"america\"],\n ... 
)\n\n \"\"\"\n ctx = get_script_run_ctx()\n if ctx is None:\n return\n\n msg = ForwardMsg()\n msg.page_info_changed.query_string = _ensure_no_embed_params(\n query_params, ctx.query_string\n )\n ctx.query_string = msg.page_info_changed.query_string\n ctx.enqueue(msg)\n\n\ndef _ensure_no_embed_params(\n query_params: Dict[str, List[str]], query_string: str\n) -> str:\n \"\"\"Ensures there are no embed params set (raises StreamlitAPIException) if there is a try,\n also makes sure old param values in query_string are preserved. Returns query_string : str.\"\"\"\n # Get query params dict without embed, embed_options params\n query_params_without_embed = util.exclude_key_query_params(\n query_params, keys_to_exclude=EMBED_QUERY_PARAMS_KEYS\n )\n if query_params != query_params_without_embed:\n raise StreamlitAPIException(\n \"Query param embed and embed_options (case-insensitive) cannot be set using set_query_params method.\"\n )\n\n all_current_params = parse.parse_qs(query_string)\n current_embed_params = parse.urlencode(\n {\n EMBED_QUERY_PARAM: [\n param\n for param in util.extract_key_query_params(\n all_current_params, param_key=EMBED_QUERY_PARAM\n )\n ],\n EMBED_OPTIONS_QUERY_PARAM: [\n param\n for param in util.extract_key_query_params(\n all_current_params, param_key=EMBED_OPTIONS_QUERY_PARAM\n )\n ],\n },\n doseq=True,\n )\n query_string = parse.urlencode(query_params, doseq=True)\n\n if query_string:\n separator = \"&\" if current_embed_params else \"\"\n return separator.join([query_string, current_embed_params])\n return current_embed_params\n",
"path": "lib/streamlit/commands/query_params.py"
}
] | diff --git a/lib/streamlit/commands/query_params.py b/lib/streamlit/commands/query_params.py
index b15e753aa1d6..7b632f3391ed 100644
--- a/lib/streamlit/commands/query_params.py
+++ b/lib/streamlit/commands/query_params.py
@@ -97,6 +97,7 @@ def set_query_params(**query_params: Any) -> None:
msg.page_info_changed.query_string = _ensure_no_embed_params(
query_params, ctx.query_string
)
+ ctx.query_string = msg.page_info_changed.query_string
ctx.enqueue(msg)
diff --git a/lib/tests/streamlit/streamlit_test.py b/lib/tests/streamlit/streamlit_test.py
index 168fde6b07c8..c6cd234b8e18 100644
--- a/lib/tests/streamlit/streamlit_test.py
+++ b/lib/tests/streamlit/streamlit_test.py
@@ -693,6 +693,13 @@ def test_set_query_params_exceptions(self):
with self.assertRaises(StreamlitAPIException):
st.experimental_set_query_params(embed_options="show_colored_line")
+ def test_get_query_params_after_set_query_params(self):
+ """Test valid st.set_query_params sends protobuf message."""
+ p_set = dict(x=["a"])
+ st.experimental_set_query_params(**p_set)
+ p_get = st.experimental_get_query_params()
+ self.assertEqual(p_get, p_set)
+
@parameterized.expand([(st.error,), (st.warning,), (st.info,), (st.success,)])
def test_st_alert_exceptions(self, alert_func):
"""Test that alert functions throw an exception when a non-emoji is given as an icon."""
|
EleutherAI__gpt-neox-1024 | 'attention.bias' and 'attention.masked_bias' not in `hf_layer.state_dict()` when converting gpt-neox model to huggingface
**Describe the bug**
A clear and concise description of what the bug is.
I encounter the following error when I am converting GPTNeoX models to Huggingface using the `tools/convert_module_to_hf.py` script.
```
(gpt-neox) johnny@ink-lucy:~/gpt-neox$ bash haveibeentrainedon/wikitext/pilot/convert_to_hf.sh
[2023-08-18 23:37:21,695] [INFO] [real_accelerator.py:133:get_accelerator] Setting ds_accelerator to cuda (auto detect)
> building GPT2BPETokenizer tokenizer ...
> padded vocab (size: 50257) with 47 dummy tokens (new size: 50304)
Saving weights in fp16 precision...
0%| | 0/24 [00:00<?, ?it/s]
Traceback (most recent call last):
File "./tools/convert_module_to_hf.py", line 307, in <module>
hf_model = convert(args.input_dir, loaded_config, args.output_dir)
File "./tools/convert_module_to_hf.py", line 230, in convert
state_dict["attention.bias"] = hf_layer.state_dict()["attention.bias"]
KeyError: 'attention.bias'
```
**Expected behavior**
Successful conversion.
**Proposed solution**
If you comment out lines 230 and 231, the script will run through. From an eyeballing of the results, it doesn't seem like language modelling performance seriously degraded. Could this be some code that was supposed to be taken out?
**Additional context**
This is for a model trained with the config `configs/pythia/410m.yml`
| [
{
"content": "# Copyright (c) 2021, EleutherAI\n# This file is based on code by the authors denoted below and has been modified from its original version.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport os\nfrom abc import ABC, abstractmethod\nfrom multiprocessing import cpu_count\n\n\"\"\"\nThis registry is for automatically downloading and extracting datasets.\n\nTo register a class you need to inherit the DataDownloader class, and provide name and url attributes, and (optionally)\nthe number of documents.\n\nWhen done, add it to the DATA_DOWNLOADERS dict. The function process_data runs the pre-processing for the selected\ndataset.\n\"\"\"\n\nGPT2_VOCAB_URL = \"https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json\"\nGPT2_MERGE_URL = \"https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt\"\n\n\nclass DataDownloader(ABC):\n \"\"\"Dataset registry class to automatically download / extract datasets\"\"\"\n\n def __init__(\n self,\n tokenizer_type=None,\n merge_file=None,\n vocab_file=None,\n data_dir=None,\n force_redownload=None,\n num_workers=None,\n ):\n if tokenizer_type is None:\n tokenizer_type = \"GPT2BPETokenizer\"\n if data_dir is None:\n data_dir = os.environ.get(\"DATA_DIR\", \"./data\")\n if merge_file is None:\n merge_file = f\"{data_dir}/gpt2-merges.txt\"\n if force_redownload is None:\n force_redownload = False\n if vocab_file is None:\n if tokenizer_type == \"GPT2BPETokenizer\":\n vocab_file = f\"{data_dir}/gpt2-vocab.json\"\n elif tokenizer_type == \"HFGPT2Tokenizer\":\n vocab_file = \"gpt2\"\n elif tokenizer_type == \"CharLevelTokenizer\":\n pass\n else:\n assert vocab_file is not None, \"No vocab file provided\"\n if num_workers is None:\n num_workers = cpu_count()\n self._tokenizer_type = tokenizer_type\n self._merge_file = merge_file\n self._vocab_file = vocab_file\n self._data_dir = data_dir\n self._force_redownload = force_redownload\n self._num_workers = num_workers\n\n @property\n def base_dir(self):\n \"\"\"base data directory\"\"\"\n return self._data_dir\n\n @property\n @abstractmethod\n def name(self):\n \"\"\"name of dataset\"\"\"\n pass\n\n @property\n @abstractmethod\n def urls(self):\n \"\"\"URLs from which to download dataset\"\"\"\n pass\n\n @property\n def tokenizer_type(self):\n \"\"\"tokenizer type to use when tokenizing data\"\"\"\n return self._tokenizer_type\n\n @property\n def merge_file(self):\n \"\"\"Merge file for tokenizer\"\"\"\n return self._merge_file\n\n @property\n def vocab_file(self):\n \"\"\"Vocab file for tokenizer\"\"\"\n return self._vocab_file\n\n @property\n def num_workers(self):\n \"\"\"Number of workers to use in preprocessing\"\"\"\n return self._num_workers\n\n @property\n def num_docs(self):\n \"\"\"Number of documents in the dataset (if known)\"\"\"\n return None\n\n @property\n def ftfy(self):\n \"\"\"Use ftfy (https://github.com/LuminosoInsight/python-ftfy) to fix text encodings\"\"\"\n return False\n\n def exists(self):\n \"\"\"Checks if the dataset is present\"\"\"\n return 
os.path.isdir(f\"{self.base_dir}/{self.name}\")\n\n def download(self):\n \"\"\"downloads dataset\"\"\"\n os.makedirs(os.path.join(self.base_dir, self.name), exist_ok=True)\n for url in self.urls:\n try:\n os_cmd = f\"wget {url} -O {os.path.join(self.base_dir, self.name, os.path.basename(url))}\"\n if os.system(os_cmd) != 0:\n raise Exception(\n f\"Cannot download file at URL {url}: server may be down\"\n )\n except Exception as e:\n raise Exception(f\"Download error: {e}\")\n\n def tokenize(self):\n \"\"\"tokenizes dataset\"\"\"\n parent_folder = os.path.join(self.base_dir, self.name)\n jsonl_filepath = \",\".join(\n [os.path.join(parent_folder, os.path.basename(url)) for url in self.urls]\n )\n\n cmd = f\"python tools/preprocess_data.py \\\n --input {jsonl_filepath} \\\n --output-prefix {parent_folder}/{self.name} \\\n --vocab {self.vocab_file} \\\n --dataset-impl mmap \\\n --tokenizer-type {self.tokenizer_type} \\\n --merge-file {self.merge_file} \\\n --append-eod \\\n --workers {self.num_workers} \"\n\n if self.num_docs is not None:\n cmd += f\"--num-docs {self.num_docs} \"\n\n if self.ftfy:\n cmd += f\"--ftfy \"\n\n os.system(cmd)\n\n def prepare(self):\n if self._force_redownload:\n self.download()\n else:\n if not self.exists():\n self.download()\n\n self.tokenize()\n\n\nclass Enron(DataDownloader):\n name = \"enron\"\n urls = [\"http://eaidata.bmk.sh/data/enron_emails.jsonl.zst\"]\n num_docs = 517401\n\n\nclass PileSubset(DataDownloader):\n name = \"pile_00\"\n urls = [\"https://the-eye.eu/public/AI/pile/train/00.jsonl.zst\"]\n\n\nclass Pile(DataDownloader):\n name = \"pile\"\n urls = [\n f\"https://the-eye.eu/public/AI/pile/train/{i:02}.jsonl.zst\" for i in range(30)\n ]\n\n\nclass Github(DataDownloader):\n name = \"github\"\n urls = [\"http://eaidata.bmk.sh/data/github_small.jsonl.zst\"]\n\n\nclass ArXiv(DataDownloader):\n name = \"arxiv\"\n urls = [\n \"https://the-eye.eu/public/AI/pile_preliminary_components/2020-09-08-arxiv-extracts-nofallback-until-2007-068.tar.gz\"\n ]\n\n\nclass EuroParl(DataDownloader):\n name = \"europarl\"\n urls = [\n \"https://the-eye.eu/public/AI/pile_preliminary_components/EuroParliamentProceedings_1996_2011.jsonl.zst\"\n ]\n\n\nclass FreeLaw(DataDownloader):\n name = \"freelaw\"\n urls = [\n \"https://the-eye.eu/public/AI/pile_preliminary_components/FreeLaw_Opinions.jsonl.zst\"\n ]\n\n\nclass NiH(DataDownloader):\n name = \"nih\"\n urls = [\n \"https://the-eye.eu/public/AI/pile_preliminary_components/NIH_ExPORTER_awarded_grant_text.jsonl.zst\"\n ]\n\n\nclass PubMed(DataDownloader):\n name = \"pubmed\"\n urls = [\n \"https://the-eye.eu/public/AI/pile_preliminary_components/PMC_extracts.tar.gz\"\n ]\n\n\nclass Books1(DataDownloader):\n name = \"books1\"\n urls = [\"https://the-eye.eu/public/AI/pile_preliminary_components/books1.tar.gz\"]\n\n\nclass Books3(DataDownloader):\n name = \"books3\"\n urls = [\"https://the-eye.eu/public/AI/pile_preliminary_components/books3.tar.gz\"]\n\n\nclass HackerNews(DataDownloader):\n name = \"hackernews\"\n urls = [\"https://the-eye.eu/public/AI/pile_preliminary_components/hn.tar.gz\"]\n num_docs = 373000\n\n\nclass OpenWebText2(DataDownloader):\n name = \"openwebtext2\"\n urls = [\n \"https://the-eye.eu/public/AI/pile_preliminary_components/openwebtext2.jsonl.zst.tar\"\n ]\n num_docs = 17103000\n\n\nclass StackExchange(DataDownloader):\n name = \"stackexchange\"\n urls = [\n \"https://the-eye.eu/public/AI/pile_preliminary_components/stackexchange_dataset.tar\"\n ]\n\n\nclass UbuntuIRC(DataDownloader):\n name = 
\"ubuntu_irc\"\n urls = [\n \"https://the-eye.eu/public/AI/pile_preliminary_components/ubuntu_irc_until_2020_9_1.jsonl.zst\"\n ]\n\n\nclass YoutubeSubtitles(DataDownloader):\n name = \"youtube_subtitles\"\n urls = [\n \"https://the-eye.eu/public/AI/pile_preliminary_components/yt_subs.jsonl.zst\"\n ]\n\n\nclass C4(DataDownloader):\n name = \"c4\"\n urls = [\n f\"https://the-eye.eu/eleuther_staging/c4/en/c4-train.{i:05}-of-01024.json.gz\"\n for i in range(1024)\n ]\n\n\nclass C4OpenWebText(DataDownloader):\n name = \"c4_openwebtext\"\n urls = [\n f\"https://the-eye.eu/eleuther_staging/c4/realnewslike/c4-train.{i:05}-of-00512.json.gz\"\n for i in range(512)\n ]\n\n\nclass Enwik8(DataDownloader):\n name = \"enwik8\"\n urls = [\"https://data.deepai.org/enwik8.zip\"]\n\n\ndef maybe_download_gpt2_tokenizer_data(tokenizer_type, data_dir):\n if tokenizer_type is None or tokenizer_type == \"GPT2BPETokenizer\":\n GPT2_VOCAB_FP = f\"{data_dir}//gpt2-vocab.json\"\n GPT2_MERGE_FP = f\"{data_dir}/gpt2-merges.txt\"\n if not os.path.isfile(GPT2_VOCAB_FP):\n os.system(f\"wget {GPT2_VOCAB_URL} -O {GPT2_VOCAB_FP}\")\n if not os.path.isfile(GPT2_MERGE_FP):\n os.system(f\"wget {GPT2_MERGE_URL} -O {GPT2_MERGE_FP}\")\n\n\nDATA_DOWNLOADERS = {\n \"pass\": \"pass\",\n \"enron\": Enron,\n \"pile_subset\": PileSubset,\n \"pile\": Pile,\n \"github\": Github,\n \"arxiv\": ArXiv,\n \"europarl\": EuroParl,\n \"freelaw\": FreeLaw,\n \"nih\": NiH,\n \"pubmed\": PubMed,\n \"books1\": Books1,\n \"books3\": Books3,\n \"hackernews\": HackerNews,\n \"openwebtext2\": OpenWebText2,\n \"stackexchange\": StackExchange,\n \"ubuntu_irc\": UbuntuIRC,\n \"youtube_subtitles\": YoutubeSubtitles,\n \"c4\": C4,\n \"c4_openwebtext\": C4OpenWebText,\n \"enwik8\": Enwik8,\n}\n\n\ndef prepare_dataset(\n dataset_name: str,\n tokenizer_type: str = None,\n data_dir: str = None,\n vocab_file: str = None,\n merge_file: str = None,\n force_redownload: bool = None,\n num_workers: int = None,\n):\n \"\"\"\n Downloads + tokenizes a dataset in the registry (dataset_name) and saves output .npy files to data_dir.\n \"\"\"\n if data_dir is None:\n data_dir = os.environ.get(\"DATA_DIR\", \"./data\")\n os.makedirs(data_dir, exist_ok=True)\n maybe_download_gpt2_tokenizer_data(tokenizer_type, data_dir)\n DownloaderClass = DATA_DOWNLOADERS.get(dataset_name.lower(), None)\n if DownloaderClass is None:\n raise NotImplementedError(\n f'Dataset \"{dataset_name}\" not recognized - please choose from {list(DATA_DOWNLOADERS.keys())}'\n )\n elif DownloaderClass == \"pass\":\n # pass on building dataset (for unit tests)\n pass\n else:\n num_workers = 1 if dataset_name == \"enwik8\" else num_workers\n d = DownloaderClass(\n tokenizer_type=tokenizer_type,\n vocab_file=vocab_file,\n merge_file=merge_file,\n data_dir=data_dir,\n force_redownload=force_redownload,\n num_workers=num_workers,\n )\n d.prepare()\n",
"path": "tools/corpora.py"
}
] | [
{
"content": "# Copyright (c) 2021, EleutherAI\n# This file is based on code by the authors denoted below and has been modified from its original version.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport os\nfrom abc import ABC, abstractmethod\nfrom multiprocessing import cpu_count\n\n\"\"\"\nThis registry is for automatically downloading and extracting datasets.\n\nTo register a class you need to inherit the DataDownloader class, and provide name and url attributes, and (optionally)\nthe number of documents.\n\nWhen done, add it to the DATA_DOWNLOADERS dict. The function process_data runs the pre-processing for the selected\ndataset.\n\"\"\"\n\nGPT2_VOCAB_URL = \"https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json\"\nGPT2_MERGE_URL = \"https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt\"\n\n\nclass DataDownloader(ABC):\n \"\"\"Dataset registry class to automatically download / extract datasets\"\"\"\n\n def __init__(\n self,\n tokenizer_type=None,\n merge_file=None,\n vocab_file=None,\n data_dir=None,\n force_redownload=None,\n num_workers=None,\n ):\n if tokenizer_type is None:\n tokenizer_type = \"GPT2BPETokenizer\"\n if data_dir is None:\n data_dir = os.environ.get(\"DATA_DIR\", \"./data\")\n if merge_file is None:\n merge_file = f\"{data_dir}/gpt2-merges.txt\"\n if force_redownload is None:\n force_redownload = False\n if vocab_file is None:\n if tokenizer_type == \"GPT2BPETokenizer\":\n vocab_file = f\"{data_dir}/gpt2-vocab.json\"\n elif tokenizer_type == \"HFGPT2Tokenizer\":\n vocab_file = \"gpt2\"\n elif tokenizer_type == \"CharLevelTokenizer\":\n pass\n else:\n assert vocab_file is not None, \"No vocab file provided\"\n if num_workers is None:\n num_workers = cpu_count()\n self._tokenizer_type = tokenizer_type\n self._merge_file = merge_file\n self._vocab_file = vocab_file\n self._data_dir = data_dir\n self._force_redownload = force_redownload\n self._num_workers = num_workers\n\n @property\n def base_dir(self):\n \"\"\"base data directory\"\"\"\n return self._data_dir\n\n @property\n @abstractmethod\n def name(self):\n \"\"\"name of dataset\"\"\"\n pass\n\n @property\n @abstractmethod\n def urls(self):\n \"\"\"URLs from which to download dataset\"\"\"\n pass\n\n @property\n def tokenizer_type(self):\n \"\"\"tokenizer type to use when tokenizing data\"\"\"\n return self._tokenizer_type\n\n @property\n def merge_file(self):\n \"\"\"Merge file for tokenizer\"\"\"\n return self._merge_file\n\n @property\n def vocab_file(self):\n \"\"\"Vocab file for tokenizer\"\"\"\n return self._vocab_file\n\n @property\n def num_workers(self):\n \"\"\"Number of workers to use in preprocessing\"\"\"\n return self._num_workers\n\n @property\n def num_docs(self):\n \"\"\"Number of documents in the dataset (if known)\"\"\"\n return None\n\n @property\n def ftfy(self):\n \"\"\"Use ftfy (https://github.com/LuminosoInsight/python-ftfy) to fix text encodings\"\"\"\n return False\n\n def exists(self):\n \"\"\"Checks if the dataset is present\"\"\"\n return 
os.path.isdir(f\"{self.base_dir}/{self.name}\")\n\n def download(self):\n \"\"\"downloads dataset\"\"\"\n os.makedirs(os.path.join(self.base_dir, self.name), exist_ok=True)\n for url in self.urls:\n try:\n os_cmd = f\"wget {url} -O {os.path.join(self.base_dir, self.name, os.path.basename(url))}\"\n if os.system(os_cmd) != 0:\n raise Exception(\n f\"Cannot download file at URL {url}: server may be down\"\n )\n except Exception as e:\n raise Exception(f\"Download error: {e}\")\n\n def tokenize(self):\n \"\"\"tokenizes dataset\"\"\"\n parent_folder = os.path.join(self.base_dir, self.name)\n jsonl_filepath = \",\".join(\n [os.path.join(parent_folder, os.path.basename(url)) for url in self.urls]\n )\n\n cmd = f\"python tools/preprocess_data.py \\\n --input {jsonl_filepath} \\\n --output-prefix {parent_folder}/{self.name} \\\n --vocab {self.vocab_file} \\\n --dataset-impl mmap \\\n --tokenizer-type {self.tokenizer_type} \\\n --merge-file {self.merge_file} \\\n --append-eod \\\n --workers {self.num_workers} \"\n\n if self.num_docs is not None:\n cmd += f\"--num-docs {self.num_docs} \"\n\n if self.ftfy:\n cmd += f\"--ftfy \"\n\n os.system(cmd)\n\n def prepare(self):\n if self._force_redownload:\n self.download()\n else:\n if not self.exists():\n self.download()\n\n self.tokenize()\n\n\nclass Enron(DataDownloader):\n name = \"enron\"\n urls = [\"http://eaidata.bmk.sh/data/enron_emails.jsonl.zst\"]\n num_docs = 517401\n\n\nclass PileSubset(DataDownloader):\n name = \"pile_00\"\n urls = [\"https://the-eye.eu/public/AI/pile/train/00.jsonl.zst\"]\n\n\nclass Pile(DataDownloader):\n name = \"pile\"\n urls = [\n f\"https://the-eye.eu/public/AI/pile/train/{i:02}.jsonl.zst\" for i in range(30)\n ]\n\n\nclass Github(DataDownloader):\n name = \"github\"\n urls = [\"http://eaidata.bmk.sh/data/github_small.jsonl.zst\"]\n\n\nclass ArXiv(DataDownloader):\n name = \"arxiv\"\n urls = [\n \"https://the-eye.eu/public/AI/pile_preliminary_components/2020-09-08-arxiv-extracts-nofallback-until-2007-068.tar.gz\"\n ]\n\n\nclass EuroParl(DataDownloader):\n name = \"europarl\"\n urls = [\n \"https://the-eye.eu/public/AI/pile_preliminary_components/EuroParliamentProceedings_1996_2011.jsonl.zst\"\n ]\n\n\nclass FreeLaw(DataDownloader):\n name = \"freelaw\"\n urls = [\n \"https://the-eye.eu/public/AI/pile_preliminary_components/FreeLaw_Opinions.jsonl.zst\"\n ]\n\n\nclass NiH(DataDownloader):\n name = \"nih\"\n urls = [\n \"https://the-eye.eu/public/AI/pile_preliminary_components/NIH_ExPORTER_awarded_grant_text.jsonl.zst\"\n ]\n\n\nclass PubMed(DataDownloader):\n name = \"pubmed\"\n urls = [\n \"https://the-eye.eu/public/AI/pile_preliminary_components/PMC_extracts.tar.gz\"\n ]\n\n\nclass Books1(DataDownloader):\n name = \"books1\"\n urls = [\"https://the-eye.eu/public/AI/pile_preliminary_components/books1.tar.gz\"]\n\n\nclass Books3(DataDownloader):\n name = \"books3\"\n urls = [\"https://the-eye.eu/public/AI/pile_preliminary_components/books3.tar.gz\"]\n\n\nclass HackerNews(DataDownloader):\n name = \"hackernews\"\n urls = [\"https://the-eye.eu/public/AI/pile_preliminary_components/hn.tar.gz\"]\n num_docs = 373000\n\n\nclass OpenWebText2(DataDownloader):\n name = \"openwebtext2\"\n urls = [\n \"https://the-eye.eu/public/AI/pile_preliminary_components/openwebtext2.jsonl.zst.tar\"\n ]\n num_docs = 17103000\n\n\nclass StackExchange(DataDownloader):\n name = \"stackexchange\"\n urls = [\n \"https://the-eye.eu/public/AI/pile_preliminary_components/stackexchange_dataset.tar\"\n ]\n\n\nclass UbuntuIRC(DataDownloader):\n name = 
\"ubuntu_irc\"\n urls = [\n \"https://the-eye.eu/public/AI/pile_preliminary_components/ubuntu_irc_until_2020_9_1.jsonl.zst\"\n ]\n\n\nclass YoutubeSubtitles(DataDownloader):\n name = \"youtube_subtitles\"\n urls = [\n \"https://the-eye.eu/public/AI/pile_preliminary_components/yt_subs.jsonl.zst\"\n ]\n\n\nclass C4(DataDownloader):\n name = \"c4\"\n urls = [\n f\"https://the-eye.eu/eleuther_staging/c4/en/c4-train.{i:05}-of-01024.json.gz\"\n for i in range(1024)\n ]\n\n\nclass C4OpenWebText(DataDownloader):\n name = \"c4_openwebtext\"\n urls = [\n f\"https://the-eye.eu/eleuther_staging/c4/realnewslike/c4-train.{i:05}-of-00512.json.gz\"\n for i in range(512)\n ]\n\n\nclass Enwik8(DataDownloader):\n name = \"enwik8\"\n urls = [\"http://mattmahoney.net/dc/enwik8.zip\"]\n\n\ndef maybe_download_gpt2_tokenizer_data(tokenizer_type, data_dir):\n if tokenizer_type is None or tokenizer_type == \"GPT2BPETokenizer\":\n GPT2_VOCAB_FP = f\"{data_dir}//gpt2-vocab.json\"\n GPT2_MERGE_FP = f\"{data_dir}/gpt2-merges.txt\"\n if not os.path.isfile(GPT2_VOCAB_FP):\n os.system(f\"wget {GPT2_VOCAB_URL} -O {GPT2_VOCAB_FP}\")\n if not os.path.isfile(GPT2_MERGE_FP):\n os.system(f\"wget {GPT2_MERGE_URL} -O {GPT2_MERGE_FP}\")\n\n\nDATA_DOWNLOADERS = {\n \"pass\": \"pass\",\n \"enron\": Enron,\n \"pile_subset\": PileSubset,\n \"pile\": Pile,\n \"github\": Github,\n \"arxiv\": ArXiv,\n \"europarl\": EuroParl,\n \"freelaw\": FreeLaw,\n \"nih\": NiH,\n \"pubmed\": PubMed,\n \"books1\": Books1,\n \"books3\": Books3,\n \"hackernews\": HackerNews,\n \"openwebtext2\": OpenWebText2,\n \"stackexchange\": StackExchange,\n \"ubuntu_irc\": UbuntuIRC,\n \"youtube_subtitles\": YoutubeSubtitles,\n \"c4\": C4,\n \"c4_openwebtext\": C4OpenWebText,\n \"enwik8\": Enwik8,\n}\n\n\ndef prepare_dataset(\n dataset_name: str,\n tokenizer_type: str = None,\n data_dir: str = None,\n vocab_file: str = None,\n merge_file: str = None,\n force_redownload: bool = None,\n num_workers: int = None,\n):\n \"\"\"\n Downloads + tokenizes a dataset in the registry (dataset_name) and saves output .npy files to data_dir.\n \"\"\"\n if data_dir is None:\n data_dir = os.environ.get(\"DATA_DIR\", \"./data\")\n os.makedirs(data_dir, exist_ok=True)\n maybe_download_gpt2_tokenizer_data(tokenizer_type, data_dir)\n DownloaderClass = DATA_DOWNLOADERS.get(dataset_name.lower(), None)\n if DownloaderClass is None:\n raise NotImplementedError(\n f'Dataset \"{dataset_name}\" not recognized - please choose from {list(DATA_DOWNLOADERS.keys())}'\n )\n elif DownloaderClass == \"pass\":\n # pass on building dataset (for unit tests)\n pass\n else:\n num_workers = 1 if dataset_name == \"enwik8\" else num_workers\n d = DownloaderClass(\n tokenizer_type=tokenizer_type,\n vocab_file=vocab_file,\n merge_file=merge_file,\n data_dir=data_dir,\n force_redownload=force_redownload,\n num_workers=num_workers,\n )\n d.prepare()\n",
"path": "tools/corpora.py"
}
] | diff --git a/configs/neox_arguments.md b/configs/neox_arguments.md
index b8367075a..f7cc3f084 100644
--- a/configs/neox_arguments.md
+++ b/configs/neox_arguments.md
@@ -111,7 +111,7 @@ Logging Arguments
- **git_hash**: str
- Default = 16485ee
+ Default = 7bdda99
current git hash of repository
diff --git a/requirements/requirements.txt b/requirements/requirements.txt
index 3f3a70882..88e49f073 100644
--- a/requirements/requirements.txt
+++ b/requirements/requirements.txt
@@ -12,4 +12,4 @@ sentencepiece
six
tiktoken>=0.1.2
tokenizers>=0.12.1
-transformers>=4.24.0
+transformers==4.30.2
diff --git a/tools/corpora.py b/tools/corpora.py
index b9e846454..35977b908 100644
--- a/tools/corpora.py
+++ b/tools/corpora.py
@@ -290,7 +290,7 @@ class C4OpenWebText(DataDownloader):
class Enwik8(DataDownloader):
name = "enwik8"
- urls = ["https://data.deepai.org/enwik8.zip"]
+ urls = ["http://mattmahoney.net/dc/enwik8.zip"]
def maybe_download_gpt2_tokenizer_data(tokenizer_type, data_dir):
|
scoutapp__scout_apm_python-583 | Support Python 3.9
Python 3.9 will be released on 2020-10-05.
Here are some steps before its release:
* Start testing with prerelease
After release:
* Ensure tests run with released version
* Add 3.9 PyPI classifier (see the sketch below)
* Enable Python wheel building in release
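
For the classifier step, the concrete change is a single new entry in the `classifiers` list of `setup.py`. A sketch only; the authoritative change is the patch further down this row:

```python
# Sketch of the PyPI classifier addition for Python 3.9; the neighbouring
# entries already exist in setup.py.
classifiers = [
    # ...
    "Programming Language :: Python :: 3.8",
    "Programming Language :: Python :: 3.9",  # new entry
]
```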
| [
{
"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nimport sys\n\nfrom setuptools import Extension, find_packages, setup\n\nwith open(\"README.md\", \"r\") as fp:\n long_description = fp.read()\n\npackages = find_packages(\"src\")\nif sys.version_info < (3, 6):\n packages = [p for p in packages if not p.startswith(\"scout_apm.async_\")]\n\ncompile_extensions = (\n # Python 3+\n sys.version_info >= (3,)\n # Not Jython\n and not sys.platform.startswith(\"java\")\n # Not PyPy\n and \"__pypy__\" not in sys.builtin_module_names\n # Not explicitly disabled\n and (os.environ.get(\"SCOUT_DISABLE_EXTENSIONS\", \"\") == \"\")\n)\nif compile_extensions:\n ext_modules = [\n Extension(\n name=str(\"scout_apm.core._objtrace\"),\n sources=[str(\"src/scout_apm/core/_objtrace.c\")],\n optional=True,\n )\n ]\nelse:\n ext_modules = []\n\nsetup(\n name=\"scout_apm\",\n version=\"2.16.2\",\n description=\"Scout Application Performance Monitoring Agent\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/scoutapp/scout_apm_python\",\n project_urls={\n \"Documentation\": \"https://docs.scoutapm.com/#python-agent\",\n \"Changelog\": (\n \"https://github.com/scoutapp/scout_apm_python/blob/master/CHANGELOG.md\"\n ),\n },\n author=\"Scout\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n zip_safe=False,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4\",\n packages=packages,\n package_dir={str(\"\"): str(\"src\")},\n ext_modules=ext_modules,\n entry_points={\n \"console_scripts\": [\n \"core-agent-manager = scout_apm.core.cli.core_agent_manager:main\"\n ]\n },\n install_requires=[\n 'asgiref ; python_version >= \"3.5\"',\n 'importlib-metadata ; python_version < \"3.8\"',\n \"psutil>=5,<6\",\n 'urllib3[secure] < 1.25 ; python_version < \"3.5\"',\n 'urllib3[secure] < 2 ; python_version >= \"3.5\"',\n \"wrapt>=1.10,<2.0\",\n ],\n keywords=\"apm performance monitoring development\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Framework :: Bottle\",\n \"Framework :: Django\",\n \"Framework :: Django :: 1.8\",\n \"Framework :: Django :: 1.9\",\n \"Framework :: Django :: 1.10\",\n \"Framework :: Django :: 1.11\",\n \"Framework :: Django :: 2.0\",\n \"Framework :: Django :: 2.1\",\n \"Framework :: Django :: 2.2\",\n \"Framework :: Django :: 3.0\",\n \"Framework :: Django :: 3.1\",\n \"Framework :: Flask\",\n \"Framework :: Pyramid\",\n \"Intended Audience :: Developers\",\n \"Topic :: System :: Monitoring\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: MacOS\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n)\n",
"path": "setup.py"
}
] | [
{
"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nimport sys\n\nfrom setuptools import Extension, find_packages, setup\n\nwith open(\"README.md\", \"r\") as fp:\n long_description = fp.read()\n\npackages = find_packages(\"src\")\nif sys.version_info < (3, 6):\n packages = [p for p in packages if not p.startswith(\"scout_apm.async_\")]\n\ncompile_extensions = (\n # Python 3+\n sys.version_info >= (3,)\n # Not Jython\n and not sys.platform.startswith(\"java\")\n # Not PyPy\n and \"__pypy__\" not in sys.builtin_module_names\n # Not explicitly disabled\n and (os.environ.get(\"SCOUT_DISABLE_EXTENSIONS\", \"\") == \"\")\n)\nif compile_extensions:\n ext_modules = [\n Extension(\n name=str(\"scout_apm.core._objtrace\"),\n sources=[str(\"src/scout_apm/core/_objtrace.c\")],\n optional=True,\n )\n ]\nelse:\n ext_modules = []\n\nsetup(\n name=\"scout_apm\",\n version=\"2.16.2\",\n description=\"Scout Application Performance Monitoring Agent\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/scoutapp/scout_apm_python\",\n project_urls={\n \"Documentation\": \"https://docs.scoutapm.com/#python-agent\",\n \"Changelog\": (\n \"https://github.com/scoutapp/scout_apm_python/blob/master/CHANGELOG.md\"\n ),\n },\n author=\"Scout\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n zip_safe=False,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4\",\n packages=packages,\n package_dir={str(\"\"): str(\"src\")},\n ext_modules=ext_modules,\n entry_points={\n \"console_scripts\": [\n \"core-agent-manager = scout_apm.core.cli.core_agent_manager:main\"\n ]\n },\n install_requires=[\n 'asgiref ; python_version >= \"3.5\"',\n 'importlib-metadata ; python_version < \"3.8\"',\n \"psutil>=5,<6\",\n 'urllib3[secure] < 1.25 ; python_version < \"3.5\"',\n 'urllib3[secure] < 2 ; python_version >= \"3.5\"',\n \"wrapt>=1.10,<2.0\",\n ],\n keywords=\"apm performance monitoring development\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Framework :: Bottle\",\n \"Framework :: Django\",\n \"Framework :: Django :: 1.8\",\n \"Framework :: Django :: 1.9\",\n \"Framework :: Django :: 1.10\",\n \"Framework :: Django :: 1.11\",\n \"Framework :: Django :: 2.0\",\n \"Framework :: Django :: 2.1\",\n \"Framework :: Django :: 2.2\",\n \"Framework :: Django :: 3.0\",\n \"Framework :: Django :: 3.1\",\n \"Framework :: Flask\",\n \"Framework :: Pyramid\",\n \"Intended Audience :: Developers\",\n \"Topic :: System :: Monitoring\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: MacOS\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n ],\n)\n",
"path": "setup.py"
}
] | diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 88366fde..4e3aa315 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -21,6 +21,7 @@ jobs:
- 3.6
- 3.7
- 3.8
+ - 3.9
services:
elasticsearch:
@@ -47,7 +48,7 @@ jobs:
steps:
- uses: actions/checkout@v2
- - uses: actions/[email protected]
+ - uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Upgrade packaging tools
@@ -56,5 +57,5 @@ jobs:
run: python -m pip install --upgrade tox
- name: Run tox targets for ${{ matrix.python-version }}
run: |
- ENV_PREFIX=$(tr -d "." <<< "py${{ matrix.python-version }}-")
- TOXENV=$(tox --listenvs | grep $ENV_PREFIX | tr '\n' ',') python -m tox
+ ENV_PREFIX=$(tr -C -d "0-9" <<< "${{ matrix.python-version }}")
+ TOXENV=$(tox --listenvs | grep "^py$ENV_PREFIX" | tr '\n' ',') python -m tox
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 95da24af..cb811ebf 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,12 @@
# Changelog
+## Pending
+
+### Added
+
+- Support Python 3.9.
+ ([PR #583](https://github.com/scoutapp/scout_apm_python/pull/583))
+
## [2.16.2] 2020-09-17
- Moved core agent on Linux to default to the musl version, rather than try
diff --git a/setup.py b/setup.py
index 2e6caef2..f33ae372 100644
--- a/setup.py
+++ b/setup.py
@@ -98,5 +98,6 @@
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
],
)
diff --git a/tests/compat.py b/tests/compat.py
index 35dd36cf..72c2b416 100644
--- a/tests/compat.py
+++ b/tests/compat.py
@@ -44,4 +44,17 @@ def nullcontext(obj):
yield obj
-__all__ = ["mock", "nullcontext", "TemporaryDirectory"]
+if sys.version_info >= (3, 4):
+ from contextlib import suppress
+else:
+ from contextlib import contextmanager
+
+ @contextmanager
+ def suppress(*exceptions):
+ try:
+ yield
+ except exceptions:
+ pass
+
+
+__all__ = ["mock", "nullcontext", "suppress", "TemporaryDirectory"]
diff --git a/tests/integration/django_app.py b/tests/integration/django_app.py
index 078186b5..4ec7d181 100644
--- a/tests/integration/django_app.py
+++ b/tests/integration/django_app.py
@@ -1,11 +1,15 @@
# coding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
+import sys
+
import django
import wrapt
from django.conf import settings
from django.template.response import TemplateResponse
+from tests.compat import suppress
+
config = {
"ALLOWED_HOSTS": ["*"],
"DATABASES": {
@@ -118,22 +122,22 @@ def sql_kwargs(request):
def sql_type_errors(request):
with connection.cursor() as cursor:
- try:
+ with suppress(TypeError):
cursor.execute()
- except TypeError:
- pass
- try:
+
+ if sys.version_info >= (3, 9):
+ exc_type = TypeError
+ else:
+ exc_type = ValueError
+
+ with suppress(exc_type):
cursor.execute(sql=None)
- except ValueError:
- pass
- try:
+
+ with suppress(TypeError):
cursor.executemany()
- except TypeError:
- pass
- try:
+
+ with suppress(TypeError):
cursor.executemany(sql=None, param_list=[(1,)])
- except TypeError:
- pass
return HttpResponse("Done")
diff --git a/tox.ini b/tox.ini
index abf2debe..d16a6c1a 100644
--- a/tox.ini
+++ b/tox.ini
@@ -6,9 +6,9 @@ envlist =
{py27,py34,py35,py36}-django111
{py34,py35,py36,py37,py38}-django20
{py35,py36,py37,py38}-django21
- {py35,py36,py37,py38}-django22
- {py36,py37,py38}-django30
- {py36,py37,py38}-django31
+ {py35,py36,py37,py38,py39}-django22
+ {py36,py37,py38,py39}-django30
+ {py36,py37,py38,py39}-django31
[testenv]
passenv =
|
rlworkgroup__garage-714 | Cannot plot during training
```python
from garage.experiment import LocalRunner, run_experiment
from garage.np.baselines import LinearFeatureBaseline
from garage.tf.algos import TRPO
from garage.tf.envs import TfEnv
from garage.tf.policies import CategoricalMLPPolicy
def run_task(*_):
with LocalRunner() as runner:
env = TfEnv(env_name='CartPole-v1')
policy = CategoricalMLPPolicy(
name='policy', env_spec=env.spec, hidden_sizes=(32, 32))
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = TRPO(
env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
discount=0.99,
max_kl_step=0.01)
runner.setup(algo, env)
runner.train(n_epochs=100, batch_size=4000,plot=True)
run_experiment(
run_task,
snapshot_mode='last',
seed=4,
n_parallel=4,
plot=True,
use_tf=False,
use_gpu=False
)
```
##########################################################
3) Why was viskit removed? I cannot find it in garage.
Thanks! I really like rllab and garage.
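
On the plotting question: judging from the `LocalRunner` source bundled in this row, `train(plot=True)` only records the flag in `train_args`; `self.plot` (initialised to `False` in `__init__`) is never updated, so `start_worker()` never launches the `Plotter`. A minimal workaround sketch under that assumption — it simply sets the attribute by hand before training; names and behaviour are taken from the bundled source, not from garage documentation:

```python
from garage.experiment import LocalRunner, run_experiment
from garage.np.baselines import LinearFeatureBaseline
from garage.tf.algos import TRPO
from garage.tf.envs import TfEnv
from garage.tf.policies import CategoricalMLPPolicy


def run_task(*_):
    with LocalRunner() as runner:
        env = TfEnv(env_name='CartPole-v1')
        policy = CategoricalMLPPolicy(
            name='policy', env_spec=env.spec, hidden_sizes=(32, 32))
        baseline = LinearFeatureBaseline(env_spec=env.spec)
        algo = TRPO(env_spec=env.spec, policy=policy, baseline=baseline,
                    max_path_length=100, discount=0.99, max_kl_step=0.01)
        runner.setup(algo, env)
        # Workaround: _train() never copies its `plot` argument onto the
        # runner, and start_worker() only creates the Plotter when
        # self.plot is truthy, so set it explicitly.
        runner.plot = True
        runner.train(n_epochs=100, batch_size=4000, plot=True)


run_experiment(run_task, snapshot_mode='last', seed=4, plot=True)
```

The upstream fix (in the diff below) is the one-line `self.plot = plot` assignment inside `_train()`.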
| [
{
"content": "\"\"\"\nThe local runner for tensorflow algorithms.\n\nA runner setup context for algorithms during initialization and\npipelines data between sampler and algorithm during training.\n\"\"\"\nimport copy\nimport time\nfrom types import SimpleNamespace\n\nfrom dowel import logger, tabular\nimport tensorflow as tf\n\nfrom garage.experiment import snapshotter\n\n# Note: Optional module should be imported ad hoc to break circular dependency.\n\n\nclass LocalRunner:\n \"\"\"This class implements a local runner for tensorflow algorithms.\n\n A local runner provides a default tensorflow session using python context.\n This is useful for those experiment components (e.g. policy) that require a\n tensorflow session during construction.\n\n Use Runner.setup(algo, env) to setup algorithm and environement for runner\n and Runner.train() to start training.\n\n Examples:\n with LocalRunner() as runner:\n env = gym.make('CartPole-v1')\n policy = CategoricalMLPPolicy(\n env_spec=env.spec,\n hidden_sizes=(32, 32))\n algo = TRPO(\n env=env,\n policy=policy,\n baseline=baseline,\n max_path_length=100,\n discount=0.99,\n max_kl_step=0.01)\n runner.setup(algo, env)\n runner.train(n_epochs=100, batch_size=4000)\n\n \"\"\"\n\n def __init__(self, sess=None, max_cpus=1):\n \"\"\"Create a new local runner.\n\n Args:\n max_cpus(int): The maximum number of parallel sampler workers.\n sess(tf.Session): An optional tensorflow session.\n A new session will be created immediately if not provided.\n\n Note:\n The local runner will set up a joblib task pool of size max_cpus\n possibly later used by BatchSampler. If BatchSampler is not used,\n the processes in the pool will remain dormant.\n\n This setup is required to use tensorflow in a multiprocess\n environment before a tensorflow session is created\n because tensorflow is not fork-safe.\n\n See https://github.com/tensorflow/tensorflow/issues/2448.\n\n \"\"\"\n if max_cpus > 1:\n from garage.sampler import singleton_pool\n singleton_pool.initialize(max_cpus)\n self.sess = sess or tf.Session()\n self.sess_entered = False\n self.has_setup = False\n self.plot = False\n\n self.setup_args = None\n self.train_args = None\n\n def __enter__(self):\n \"\"\"Set self.sess as the default session.\n\n Returns:\n This local runner.\n\n \"\"\"\n if tf.get_default_session() is not self.sess:\n self.sess.__enter__()\n self.sess_entered = True\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n \"\"\"Leave session.\"\"\"\n if tf.get_default_session() is self.sess and self.sess_entered:\n self.sess.__exit__(exc_type, exc_val, exc_tb)\n self.sess_entered = False\n\n def setup(self, algo, env, sampler_cls=None, sampler_args=None):\n \"\"\"Set up runner for algorithm and environment.\n\n This method saves algo and env within runner and creates a sampler.\n\n Note:\n After setup() is called all variables in session should have been\n initialized. 
setup() respects existing values in session so\n policy weights can be loaded before setup().\n\n Args:\n algo(garage.np.algos.RLAlgorithm): An algorithm instance.\n env(garage.envs.GarageEnv): An environement instance.\n sampler_cls(garage.sampler.Sampler): A sampler class.\n sampler_args(dict): Arguments to be passed to sampler constructor.\n\n \"\"\"\n self.algo = algo\n self.env = env\n self.policy = self.algo.policy\n\n if sampler_args is None:\n sampler_args = {}\n\n if sampler_cls is None:\n from garage.tf.algos.batch_polopt import BatchPolopt\n if isinstance(algo, BatchPolopt):\n if self.policy.vectorized:\n from garage.tf.samplers import OnPolicyVectorizedSampler\n sampler_cls = OnPolicyVectorizedSampler\n else:\n from garage.tf.samplers import BatchSampler\n sampler_cls = BatchSampler\n else:\n from garage.tf.samplers import OffPolicyVectorizedSampler\n sampler_cls = OffPolicyVectorizedSampler\n\n self.sampler = sampler_cls(algo, env, **sampler_args)\n\n self.initialize_tf_vars()\n logger.log(self.sess.graph)\n self.has_setup = True\n\n self.setup_args = SimpleNamespace(\n sampler_cls=sampler_cls, sampler_args=sampler_args)\n\n def initialize_tf_vars(self):\n \"\"\"Initialize all uninitialized variables in session.\"\"\"\n with tf.name_scope('initialize_tf_vars'):\n uninited_set = [\n e.decode()\n for e in self.sess.run(tf.report_uninitialized_variables())\n ]\n self.sess.run(\n tf.variables_initializer([\n v for v in tf.global_variables()\n if v.name.split(':')[0] in uninited_set\n ]))\n\n def start_worker(self):\n \"\"\"Start Plotter and Sampler workers.\"\"\"\n self.sampler.start_worker()\n if self.plot:\n from garage.tf.plotter import Plotter\n self.plotter = Plotter(self.env, self.policy)\n self.plotter.start()\n\n def shutdown_worker(self):\n \"\"\"Shutdown Plotter and Sampler workers.\"\"\"\n self.sampler.shutdown_worker()\n if self.plot:\n self.plotter.close()\n\n def obtain_samples(self, itr, batch_size):\n \"\"\"Obtain one batch of samples.\n\n Args:\n itr(int): Index of iteration (epoch).\n batch_size(int): Number of steps in batch.\n This is a hint that the sampler may or may not respect.\n\n Returns:\n One batch of samples.\n\n \"\"\"\n if self.train_args.n_epoch_cycles == 1:\n logger.log('Obtaining samples...')\n return self.sampler.obtain_samples(itr, batch_size)\n\n def save(self, epoch, paths=None):\n \"\"\"Save snapshot of current batch.\n\n Args:\n itr(int): Index of iteration (epoch).\n paths(dict): Batch of samples after preprocessed.\n\n \"\"\"\n assert self.has_setup\n\n logger.log('Saving snapshot...')\n\n params = dict()\n # Save arguments\n params['setup_args'] = self.setup_args\n params['train_args'] = self.train_args\n\n # Save states\n params['env'] = self.env\n params['algo'] = self.algo\n if paths:\n params['paths'] = paths\n params['last_epoch'] = epoch\n snapshotter.save_itr_params(epoch, params)\n\n logger.log('Saved')\n\n def restore(self, snapshot_dir, from_epoch='last'):\n \"\"\"Restore experiment from snapshot.\n\n Args:\n snapshot_dir(str): Directory of snapshot.\n from_epoch(str or int): The epoch to restore from.\n Can be 'first', 'last' or a number.\n Not applicable when snapshot_mode='last'.\n\n Returns:\n A SimpleNamespace for train()'s arguments.\n\n Examples:\n 1. Resume experiment immediately.\n with LocalRunner() as runner:\n runner.restore(snapshot_dir)\n runner.resume()\n\n 2. 
Resume experiment with modified training arguments.\n with LocalRunner() as runner:\n runner.restore(snapshot_dir, resume_now=False)\n runner.resume(n_epochs=20)\n\n Note:\n When resume via command line, new snapshots will be\n saved into the SAME directory if not specified.\n\n When resume programmatically, snapshot directory should be\n specify manually or through run_experiment() interface.\n\n \"\"\"\n snapshotter.snapshot_dir = snapshot_dir\n saved = snapshotter.load(from_epoch)\n\n self.setup_args = saved['setup_args']\n self.train_args = saved['train_args']\n\n self.setup(\n env=saved['env'],\n algo=saved['algo'],\n sampler_cls=self.setup_args.sampler_cls,\n sampler_args=self.setup_args.sampler_args)\n\n n_epochs = self.train_args.n_epochs\n last_epoch = saved['last_epoch']\n n_epoch_cycles = self.train_args.n_epoch_cycles\n batch_size = self.train_args.batch_size\n store_paths = self.train_args.store_paths\n pause_for_plot = self.train_args.pause_for_plot\n\n fmt = '{:<20} {:<15}'\n logger.log('Restore from snapshot saved in %s' % snapshot_dir)\n logger.log(fmt.format('Train Args', 'Value'))\n logger.log(fmt.format('n_epochs', n_epochs))\n logger.log(fmt.format('last_epoch', last_epoch))\n logger.log(fmt.format('n_epoch_cycles', n_epoch_cycles))\n logger.log(fmt.format('batch_size', batch_size))\n logger.log(fmt.format('store_paths', store_paths))\n logger.log(fmt.format('pause_for_plot', pause_for_plot))\n\n self.train_args.start_epoch = last_epoch + 1\n return copy.copy(self.train_args)\n\n def log_diagnostics(self, pause_for_plot=False):\n \"\"\"Log diagnostics.\n\n Args:\n pause_for_plot(bool): Pause for plot.\n\n \"\"\"\n logger.log('Time %.2f s' % (time.time() - self.start_time))\n logger.log('EpochTime %.2f s' % (time.time() - self.itr_start_time))\n logger.log(tabular)\n if self.plot:\n self.plotter.update_plot(self.policy, self.algo.max_path_length)\n if pause_for_plot:\n input('Plotting evaluation run: Press Enter to \" \"continue...')\n\n def train(self,\n n_epochs,\n batch_size,\n n_epoch_cycles=1,\n plot=False,\n store_paths=False,\n pause_for_plot=False):\n \"\"\"Start training.\n\n Args:\n n_epochs(int): Number of epochs.\n batch_size(int): Number of environment steps in one batch.\n n_epoch_cycles(int): Number of batches of samples in each epoch.\n This is only useful for off-policy algorithm.\n For on-policy algorithm this value should always be 1.\n plot(bool): Visualize policy by doing rollout after each epoch.\n store_paths(bool): Save paths in snapshot.\n pause_for_plot(bool): Pause for plot.\n\n Returns:\n The average return in last epoch cycle.\n\n \"\"\"\n return self._train(\n n_epochs=n_epochs,\n n_epoch_cycles=n_epoch_cycles,\n batch_size=batch_size,\n plot=plot,\n store_paths=store_paths,\n pause_for_plot=pause_for_plot,\n start_epoch=0)\n\n def resume(self,\n n_epochs=None,\n batch_size=None,\n n_epoch_cycles=None,\n plot=None,\n store_paths=None,\n pause_for_plot=None):\n \"\"\"Resume from restored experiment.\n\n This method provides the same interface as train().\n\n If not specified, an argument will default to the\n saved arguments from the last call to train().\n\n Returns:\n The average return in last epoch cycle.\n\n \"\"\"\n assert self.train_args is not None, (\n 'You must call restore() before resume().')\n\n return self._train(\n n_epochs=n_epochs or self.train_args.n_epochs,\n n_epoch_cycles=n_epoch_cycles or self.train_args.n_epoch_cycles,\n batch_size=batch_size or self.train_args.batch_size,\n plot=plot or self.train_args.plot,\n 
store_paths=store_paths or self.train_args.store_paths,\n pause_for_plot=pause_for_plot or self.train_args.pause_for_plot,\n start_epoch=self.train_args.start_epoch)\n\n def _train(self,\n n_epochs,\n n_epoch_cycles,\n batch_size,\n plot,\n store_paths,\n pause_for_plot,\n start_epoch=0):\n \"\"\"Start actual training.\n\n Args:\n n_epochs(int): Number of epochs.\n n_epoch_cycles(int): Number of batches of samples in each epoch.\n This is only useful for off-policy algorithm.\n For on-policy algorithm this value should always be 1.\n batch_size(int): Number of steps in batch.\n plot(bool): Visualize policy by doing rollout after each epoch.\n store_paths(bool): Save paths in snapshot.\n pause_for_plot(bool): Pause for plot.\n start_epoch: (internal) The starting epoch.\n Use for experiment resuming.\n\n Returns:\n The average return in last epoch cycle.\n\n \"\"\"\n assert self.has_setup, ('Use Runner.setup() to setup runner before '\n 'training.')\n\n # Save arguments for restore\n self.train_args = SimpleNamespace(\n n_epochs=n_epochs,\n n_epoch_cycles=n_epoch_cycles,\n batch_size=batch_size,\n plot=plot,\n store_paths=store_paths,\n pause_for_plot=pause_for_plot,\n start_epoch=start_epoch)\n\n self.start_worker()\n\n self.start_time = time.time()\n itr = start_epoch * n_epoch_cycles\n\n last_return = None\n for epoch in range(start_epoch, n_epochs):\n self.itr_start_time = time.time()\n paths = None\n with logger.prefix('epoch #%d | ' % epoch):\n for cycle in range(n_epoch_cycles):\n paths = self.obtain_samples(itr, batch_size)\n last_return = self.algo.train_once(itr, paths)\n itr += 1\n self.save(epoch, paths if store_paths else None)\n self.log_diagnostics(pause_for_plot)\n logger.dump_all(itr)\n tabular.clear()\n\n self.shutdown_worker()\n\n return last_return\n",
"path": "src/garage/experiment/local_tf_runner.py"
}
] | [
{
"content": "\"\"\"\nThe local runner for tensorflow algorithms.\n\nA runner setup context for algorithms during initialization and\npipelines data between sampler and algorithm during training.\n\"\"\"\nimport copy\nimport time\nfrom types import SimpleNamespace\n\nfrom dowel import logger, tabular\nimport tensorflow as tf\n\nfrom garage.experiment import snapshotter\n\n# Note: Optional module should be imported ad hoc to break circular dependency.\n\n\nclass LocalRunner:\n \"\"\"This class implements a local runner for tensorflow algorithms.\n\n A local runner provides a default tensorflow session using python context.\n This is useful for those experiment components (e.g. policy) that require a\n tensorflow session during construction.\n\n Use Runner.setup(algo, env) to setup algorithm and environement for runner\n and Runner.train() to start training.\n\n Examples:\n with LocalRunner() as runner:\n env = gym.make('CartPole-v1')\n policy = CategoricalMLPPolicy(\n env_spec=env.spec,\n hidden_sizes=(32, 32))\n algo = TRPO(\n env=env,\n policy=policy,\n baseline=baseline,\n max_path_length=100,\n discount=0.99,\n max_kl_step=0.01)\n runner.setup(algo, env)\n runner.train(n_epochs=100, batch_size=4000)\n\n \"\"\"\n\n def __init__(self, sess=None, max_cpus=1):\n \"\"\"Create a new local runner.\n\n Args:\n max_cpus(int): The maximum number of parallel sampler workers.\n sess(tf.Session): An optional tensorflow session.\n A new session will be created immediately if not provided.\n\n Note:\n The local runner will set up a joblib task pool of size max_cpus\n possibly later used by BatchSampler. If BatchSampler is not used,\n the processes in the pool will remain dormant.\n\n This setup is required to use tensorflow in a multiprocess\n environment before a tensorflow session is created\n because tensorflow is not fork-safe.\n\n See https://github.com/tensorflow/tensorflow/issues/2448.\n\n \"\"\"\n if max_cpus > 1:\n from garage.sampler import singleton_pool\n singleton_pool.initialize(max_cpus)\n self.sess = sess or tf.Session()\n self.sess_entered = False\n self.has_setup = False\n self.plot = False\n\n self.setup_args = None\n self.train_args = None\n\n def __enter__(self):\n \"\"\"Set self.sess as the default session.\n\n Returns:\n This local runner.\n\n \"\"\"\n if tf.get_default_session() is not self.sess:\n self.sess.__enter__()\n self.sess_entered = True\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n \"\"\"Leave session.\"\"\"\n if tf.get_default_session() is self.sess and self.sess_entered:\n self.sess.__exit__(exc_type, exc_val, exc_tb)\n self.sess_entered = False\n\n def setup(self, algo, env, sampler_cls=None, sampler_args=None):\n \"\"\"Set up runner for algorithm and environment.\n\n This method saves algo and env within runner and creates a sampler.\n\n Note:\n After setup() is called all variables in session should have been\n initialized. 
setup() respects existing values in session so\n policy weights can be loaded before setup().\n\n Args:\n algo(garage.np.algos.RLAlgorithm): An algorithm instance.\n env(garage.envs.GarageEnv): An environement instance.\n sampler_cls(garage.sampler.Sampler): A sampler class.\n sampler_args(dict): Arguments to be passed to sampler constructor.\n\n \"\"\"\n self.algo = algo\n self.env = env\n self.policy = self.algo.policy\n\n if sampler_args is None:\n sampler_args = {}\n\n if sampler_cls is None:\n from garage.tf.algos.batch_polopt import BatchPolopt\n if isinstance(algo, BatchPolopt):\n if self.policy.vectorized:\n from garage.tf.samplers import OnPolicyVectorizedSampler\n sampler_cls = OnPolicyVectorizedSampler\n else:\n from garage.tf.samplers import BatchSampler\n sampler_cls = BatchSampler\n else:\n from garage.tf.samplers import OffPolicyVectorizedSampler\n sampler_cls = OffPolicyVectorizedSampler\n\n self.sampler = sampler_cls(algo, env, **sampler_args)\n\n self.initialize_tf_vars()\n logger.log(self.sess.graph)\n self.has_setup = True\n\n self.setup_args = SimpleNamespace(\n sampler_cls=sampler_cls, sampler_args=sampler_args)\n\n def initialize_tf_vars(self):\n \"\"\"Initialize all uninitialized variables in session.\"\"\"\n with tf.name_scope('initialize_tf_vars'):\n uninited_set = [\n e.decode()\n for e in self.sess.run(tf.report_uninitialized_variables())\n ]\n self.sess.run(\n tf.variables_initializer([\n v for v in tf.global_variables()\n if v.name.split(':')[0] in uninited_set\n ]))\n\n def start_worker(self):\n \"\"\"Start Plotter and Sampler workers.\"\"\"\n self.sampler.start_worker()\n if self.plot:\n from garage.tf.plotter import Plotter\n self.plotter = Plotter(self.env, self.policy)\n self.plotter.start()\n\n def shutdown_worker(self):\n \"\"\"Shutdown Plotter and Sampler workers.\"\"\"\n self.sampler.shutdown_worker()\n if self.plot:\n self.plotter.close()\n\n def obtain_samples(self, itr, batch_size):\n \"\"\"Obtain one batch of samples.\n\n Args:\n itr(int): Index of iteration (epoch).\n batch_size(int): Number of steps in batch.\n This is a hint that the sampler may or may not respect.\n\n Returns:\n One batch of samples.\n\n \"\"\"\n if self.train_args.n_epoch_cycles == 1:\n logger.log('Obtaining samples...')\n return self.sampler.obtain_samples(itr, batch_size)\n\n def save(self, epoch, paths=None):\n \"\"\"Save snapshot of current batch.\n\n Args:\n itr(int): Index of iteration (epoch).\n paths(dict): Batch of samples after preprocessed.\n\n \"\"\"\n assert self.has_setup\n\n logger.log('Saving snapshot...')\n\n params = dict()\n # Save arguments\n params['setup_args'] = self.setup_args\n params['train_args'] = self.train_args\n\n # Save states\n params['env'] = self.env\n params['algo'] = self.algo\n if paths:\n params['paths'] = paths\n params['last_epoch'] = epoch\n snapshotter.save_itr_params(epoch, params)\n\n logger.log('Saved')\n\n def restore(self, snapshot_dir, from_epoch='last'):\n \"\"\"Restore experiment from snapshot.\n\n Args:\n snapshot_dir(str): Directory of snapshot.\n from_epoch(str or int): The epoch to restore from.\n Can be 'first', 'last' or a number.\n Not applicable when snapshot_mode='last'.\n\n Returns:\n A SimpleNamespace for train()'s arguments.\n\n Examples:\n 1. Resume experiment immediately.\n with LocalRunner() as runner:\n runner.restore(snapshot_dir)\n runner.resume()\n\n 2. 
Resume experiment with modified training arguments.\n with LocalRunner() as runner:\n runner.restore(snapshot_dir, resume_now=False)\n runner.resume(n_epochs=20)\n\n Note:\n When resume via command line, new snapshots will be\n saved into the SAME directory if not specified.\n\n When resume programmatically, snapshot directory should be\n specify manually or through run_experiment() interface.\n\n \"\"\"\n snapshotter.snapshot_dir = snapshot_dir\n saved = snapshotter.load(from_epoch)\n\n self.setup_args = saved['setup_args']\n self.train_args = saved['train_args']\n\n self.setup(\n env=saved['env'],\n algo=saved['algo'],\n sampler_cls=self.setup_args.sampler_cls,\n sampler_args=self.setup_args.sampler_args)\n\n n_epochs = self.train_args.n_epochs\n last_epoch = saved['last_epoch']\n n_epoch_cycles = self.train_args.n_epoch_cycles\n batch_size = self.train_args.batch_size\n store_paths = self.train_args.store_paths\n pause_for_plot = self.train_args.pause_for_plot\n\n fmt = '{:<20} {:<15}'\n logger.log('Restore from snapshot saved in %s' % snapshot_dir)\n logger.log(fmt.format('Train Args', 'Value'))\n logger.log(fmt.format('n_epochs', n_epochs))\n logger.log(fmt.format('last_epoch', last_epoch))\n logger.log(fmt.format('n_epoch_cycles', n_epoch_cycles))\n logger.log(fmt.format('batch_size', batch_size))\n logger.log(fmt.format('store_paths', store_paths))\n logger.log(fmt.format('pause_for_plot', pause_for_plot))\n\n self.train_args.start_epoch = last_epoch + 1\n return copy.copy(self.train_args)\n\n def log_diagnostics(self, pause_for_plot=False):\n \"\"\"Log diagnostics.\n\n Args:\n pause_for_plot(bool): Pause for plot.\n\n \"\"\"\n logger.log('Time %.2f s' % (time.time() - self.start_time))\n logger.log('EpochTime %.2f s' % (time.time() - self.itr_start_time))\n logger.log(tabular)\n if self.plot:\n self.plotter.update_plot(self.policy, self.algo.max_path_length)\n if pause_for_plot:\n input('Plotting evaluation run: Press Enter to \" \"continue...')\n\n def train(self,\n n_epochs,\n batch_size,\n n_epoch_cycles=1,\n plot=False,\n store_paths=False,\n pause_for_plot=False):\n \"\"\"Start training.\n\n Args:\n n_epochs(int): Number of epochs.\n batch_size(int): Number of environment steps in one batch.\n n_epoch_cycles(int): Number of batches of samples in each epoch.\n This is only useful for off-policy algorithm.\n For on-policy algorithm this value should always be 1.\n plot(bool): Visualize policy by doing rollout after each epoch.\n store_paths(bool): Save paths in snapshot.\n pause_for_plot(bool): Pause for plot.\n\n Returns:\n The average return in last epoch cycle.\n\n \"\"\"\n return self._train(\n n_epochs=n_epochs,\n n_epoch_cycles=n_epoch_cycles,\n batch_size=batch_size,\n plot=plot,\n store_paths=store_paths,\n pause_for_plot=pause_for_plot,\n start_epoch=0)\n\n def resume(self,\n n_epochs=None,\n batch_size=None,\n n_epoch_cycles=None,\n plot=None,\n store_paths=None,\n pause_for_plot=None):\n \"\"\"Resume from restored experiment.\n\n This method provides the same interface as train().\n\n If not specified, an argument will default to the\n saved arguments from the last call to train().\n\n Returns:\n The average return in last epoch cycle.\n\n \"\"\"\n assert self.train_args is not None, (\n 'You must call restore() before resume().')\n\n return self._train(\n n_epochs=n_epochs or self.train_args.n_epochs,\n n_epoch_cycles=n_epoch_cycles or self.train_args.n_epoch_cycles,\n batch_size=batch_size or self.train_args.batch_size,\n plot=plot or self.train_args.plot,\n 
store_paths=store_paths or self.train_args.store_paths,\n pause_for_plot=pause_for_plot or self.train_args.pause_for_plot,\n start_epoch=self.train_args.start_epoch)\n\n def _train(self,\n n_epochs,\n n_epoch_cycles,\n batch_size,\n plot,\n store_paths,\n pause_for_plot,\n start_epoch=0):\n \"\"\"Start actual training.\n\n Args:\n n_epochs(int): Number of epochs.\n n_epoch_cycles(int): Number of batches of samples in each epoch.\n This is only useful for off-policy algorithm.\n For on-policy algorithm this value should always be 1.\n batch_size(int): Number of steps in batch.\n plot(bool): Visualize policy by doing rollout after each epoch.\n store_paths(bool): Save paths in snapshot.\n pause_for_plot(bool): Pause for plot.\n start_epoch: (internal) The starting epoch.\n Use for experiment resuming.\n\n Returns:\n The average return in last epoch cycle.\n\n \"\"\"\n assert self.has_setup, ('Use Runner.setup() to setup runner before '\n 'training.')\n\n # Save arguments for restore\n self.train_args = SimpleNamespace(\n n_epochs=n_epochs,\n n_epoch_cycles=n_epoch_cycles,\n batch_size=batch_size,\n plot=plot,\n store_paths=store_paths,\n pause_for_plot=pause_for_plot,\n start_epoch=start_epoch)\n\n self.plot = plot\n self.start_worker()\n\n self.start_time = time.time()\n itr = start_epoch * n_epoch_cycles\n\n last_return = None\n for epoch in range(start_epoch, n_epochs):\n self.itr_start_time = time.time()\n paths = None\n with logger.prefix('epoch #%d | ' % epoch):\n for cycle in range(n_epoch_cycles):\n paths = self.obtain_samples(itr, batch_size)\n last_return = self.algo.train_once(itr, paths)\n itr += 1\n self.save(epoch, paths if store_paths else None)\n self.log_diagnostics(pause_for_plot)\n logger.dump_all(itr)\n tabular.clear()\n\n self.shutdown_worker()\n\n return last_return\n",
"path": "src/garage/experiment/local_tf_runner.py"
}
] | diff --git a/src/garage/experiment/local_tf_runner.py b/src/garage/experiment/local_tf_runner.py
index 8ecc20931c..5573f7fcfe 100644
--- a/src/garage/experiment/local_tf_runner.py
+++ b/src/garage/experiment/local_tf_runner.py
@@ -388,6 +388,7 @@ def _train(self,
pause_for_plot=pause_for_plot,
start_epoch=start_epoch)
+ self.plot = plot
self.start_worker()
self.start_time = time.time()
diff --git a/tests/garage/experiment/test_local_tf_runner.py b/tests/garage/experiment/test_local_tf_runner.py
index 9b5c31e1aa..0715775158 100644
--- a/tests/garage/experiment/test_local_tf_runner.py
+++ b/tests/garage/experiment/test_local_tf_runner.py
@@ -5,6 +5,7 @@
from garage.sampler import singleton_pool
from garage.tf.algos import VPG
from garage.tf.envs import TfEnv
+from garage.tf.plotter import Plotter
from garage.tf.policies import CategoricalMLPPolicy
from garage.tf.samplers import BatchSampler
from tests.fixtures import TfGraphTestCase
@@ -45,7 +46,6 @@ def test_batch_sampler(self):
policy=policy,
baseline=baseline,
max_path_length=1,
- whole_paths=True,
discount=0.99)
runner.setup(
@@ -62,7 +62,8 @@ def test_batch_sampler(self):
runner.start_worker()
- paths = runner.sampler.obtain_samples(0, 8)
+ paths = runner.sampler.obtain_samples(
+ 0, batch_size=8, whole_paths=True)
self.assertGreaterEqual(
len(paths), max_cpus, 'BatchSampler should sample more than '
'max_cpus={} trajectories'.format(max_cpus))
@@ -103,3 +104,27 @@ def test_external_sess(self):
pass
# sess should still be the default session here.
tf.no_op().run()
+
+ def test_set_plot(self):
+ with LocalRunner() as runner:
+ env = TfEnv(env_name='CartPole-v1')
+
+ policy = CategoricalMLPPolicy(
+ name='policy', env_spec=env.spec, hidden_sizes=(8, 8))
+
+ baseline = LinearFeatureBaseline(env_spec=env.spec)
+
+ algo = VPG(
+ env_spec=env.spec,
+ policy=policy,
+ baseline=baseline,
+ max_path_length=100,
+ discount=0.99,
+ optimizer_args=dict(
+ tf_optimizer_args=dict(learning_rate=0.01, )))
+
+ runner.setup(algo, env)
+ runner.train(n_epochs=1, batch_size=100, plot=True)
+
+ assert isinstance(runner.plotter, Plotter), (
+ 'self.plotter in LocalRunner should be set to Plotter.')
|
python-pillow__Pillow-4455 | PIL cannot read JPEG comment
### What did you do?
I want PIL to read the JPEG comment (marker: 0xFF 0xFE).
I took an image with an attached JPEG comment - verified with exiftool & IrfanView that the comment exists.
```python
from PIL import Image, JpegImagePlugin
pic = Image.open(<path_to_pic_with_JPEG_comment>)
print(pic.info)
```
### What did you expect to happen?
I expected the JPEG comment to appear in the `info` dict.
### What actually happened?
> {'jfif': 257, 'jfif_version': (1, 1), 'dpi': (96, 96), 'jfif_unit': 1, 'jfif_density': (96, 96), 'exif': b'...'}
### What are your OS, Python and Pillow versions?
* OS: W7x64
* Python: Python 3.8.1 x64
* Pillow: Pillow 7.0.0
I cannot attach an image via GitHub ("Something went really wrong, ..."), so here is the file (5.61 KiB) (I downloaded it and verified it's byte-identical to the uploaded one): [image hosted on postimg.cc](https://postimg.cc/BLrFc0kf)
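
As a stopgap, the comment can still be read today: in the `JpegImagePlugin` source bundled in this row, the COM (0xFFFE) handler stores the segment on the image object's `app`/`applist` attributes rather than in `info`. A minimal sketch under that assumption; the filename is a placeholder:

```python
from PIL import Image

pic = Image.open("pic_with_jpeg_comment.jpg")  # placeholder path
# JpegImageFile keeps raw marker segments on .app / .applist; in this version
# the COM handler puts the comment bytes there instead of in pic.info.
comment = pic.app.get("COM")  # bytes of the comment segment, or None
print(comment)
```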
| [
{
"content": "#\n# The Python Imaging Library.\n# $Id$\n#\n# JPEG (JFIF) file handling\n#\n# See \"Digital Compression and Coding of Continuous-Tone Still Images,\n# Part 1, Requirements and Guidelines\" (CCITT T.81 / ISO 10918-1)\n#\n# History:\n# 1995-09-09 fl Created\n# 1995-09-13 fl Added full parser\n# 1996-03-25 fl Added hack to use the IJG command line utilities\n# 1996-05-05 fl Workaround Photoshop 2.5 CMYK polarity bug\n# 1996-05-28 fl Added draft support, JFIF version (0.1)\n# 1996-12-30 fl Added encoder options, added progression property (0.2)\n# 1997-08-27 fl Save mode 1 images as BW (0.3)\n# 1998-07-12 fl Added YCbCr to draft and save methods (0.4)\n# 1998-10-19 fl Don't hang on files using 16-bit DQT's (0.4.1)\n# 2001-04-16 fl Extract DPI settings from JFIF files (0.4.2)\n# 2002-07-01 fl Skip pad bytes before markers; identify Exif files (0.4.3)\n# 2003-04-25 fl Added experimental EXIF decoder (0.5)\n# 2003-06-06 fl Added experimental EXIF GPSinfo decoder\n# 2003-09-13 fl Extract COM markers\n# 2009-09-06 fl Added icc_profile support (from Florian Hoech)\n# 2009-03-06 fl Changed CMYK handling; always use Adobe polarity (0.6)\n# 2009-03-08 fl Added subsampling support (from Justin Huff).\n#\n# Copyright (c) 1997-2003 by Secret Labs AB.\n# Copyright (c) 1995-1996 by Fredrik Lundh.\n#\n# See the README file for information on usage and redistribution.\n#\nimport array\nimport io\nimport os\nimport struct\nimport subprocess\nimport tempfile\nimport warnings\n\nfrom . import Image, ImageFile, TiffImagePlugin\nfrom ._binary import i8, i16be as i16, i32be as i32, o8\nfrom .JpegPresets import presets\n\n#\n# Parser\n\n\ndef Skip(self, marker):\n n = i16(self.fp.read(2)) - 2\n ImageFile._safe_read(self.fp, n)\n\n\ndef APP(self, marker):\n #\n # Application marker. Store these in the APP dictionary.\n # Also look for well-known application markers.\n\n n = i16(self.fp.read(2)) - 2\n s = ImageFile._safe_read(self.fp, n)\n\n app = \"APP%d\" % (marker & 15)\n\n self.app[app] = s # compatibility\n self.applist.append((app, s))\n\n if marker == 0xFFE0 and s[:4] == b\"JFIF\":\n # extract JFIF information\n self.info[\"jfif\"] = version = i16(s, 5) # version\n self.info[\"jfif_version\"] = divmod(version, 256)\n # extract JFIF properties\n try:\n jfif_unit = i8(s[7])\n jfif_density = i16(s, 8), i16(s, 10)\n except Exception:\n pass\n else:\n if jfif_unit == 1:\n self.info[\"dpi\"] = jfif_density\n self.info[\"jfif_unit\"] = jfif_unit\n self.info[\"jfif_density\"] = jfif_density\n elif marker == 0xFFE1 and s[:5] == b\"Exif\\0\":\n if \"exif\" not in self.info:\n # extract EXIF information (incomplete)\n self.info[\"exif\"] = s # FIXME: value will change\n elif marker == 0xFFE2 and s[:5] == b\"FPXR\\0\":\n # extract FlashPix information (incomplete)\n self.info[\"flashpix\"] = s # FIXME: value will change\n elif marker == 0xFFE2 and s[:12] == b\"ICC_PROFILE\\0\":\n # Since an ICC profile can be larger than the maximum size of\n # a JPEG marker (64K), we need provisions to split it into\n # multiple markers. 
The format defined by the ICC specifies\n # one or more APP2 markers containing the following data:\n # Identifying string ASCII \"ICC_PROFILE\\0\" (12 bytes)\n # Marker sequence number 1, 2, etc (1 byte)\n # Number of markers Total of APP2's used (1 byte)\n # Profile data (remainder of APP2 data)\n # Decoders should use the marker sequence numbers to\n # reassemble the profile, rather than assuming that the APP2\n # markers appear in the correct sequence.\n self.icclist.append(s)\n elif marker == 0xFFED and s[:14] == b\"Photoshop 3.0\\x00\":\n # parse the image resource block\n offset = 14\n photoshop = self.info.setdefault(\"photoshop\", {})\n while s[offset : offset + 4] == b\"8BIM\":\n try:\n offset += 4\n # resource code\n code = i16(s, offset)\n offset += 2\n # resource name (usually empty)\n name_len = i8(s[offset])\n # name = s[offset+1:offset+1+name_len]\n offset += 1 + name_len\n offset += offset & 1 # align\n # resource data block\n size = i32(s, offset)\n offset += 4\n data = s[offset : offset + size]\n if code == 0x03ED: # ResolutionInfo\n data = {\n \"XResolution\": i32(data[:4]) / 65536,\n \"DisplayedUnitsX\": i16(data[4:8]),\n \"YResolution\": i32(data[8:12]) / 65536,\n \"DisplayedUnitsY\": i16(data[12:]),\n }\n photoshop[code] = data\n offset += size\n offset += offset & 1 # align\n except struct.error:\n break # insufficient data\n\n elif marker == 0xFFEE and s[:5] == b\"Adobe\":\n self.info[\"adobe\"] = i16(s, 5)\n # extract Adobe custom properties\n try:\n adobe_transform = i8(s[1])\n except Exception:\n pass\n else:\n self.info[\"adobe_transform\"] = adobe_transform\n elif marker == 0xFFE2 and s[:4] == b\"MPF\\0\":\n # extract MPO information\n self.info[\"mp\"] = s[4:]\n # offset is current location minus buffer size\n # plus constant header size\n self.info[\"mpoffset\"] = self.fp.tell() - n + 4\n\n # If DPI isn't in JPEG header, fetch from EXIF\n if \"dpi\" not in self.info and \"exif\" in self.info:\n try:\n exif = self.getexif()\n resolution_unit = exif[0x0128]\n x_resolution = exif[0x011A]\n try:\n dpi = float(x_resolution[0]) / x_resolution[1]\n except TypeError:\n dpi = x_resolution\n if resolution_unit == 3: # cm\n # 1 dpcm = 2.54 dpi\n dpi *= 2.54\n self.info[\"dpi\"] = int(dpi + 0.5), int(dpi + 0.5)\n except (KeyError, SyntaxError, ValueError, ZeroDivisionError):\n # SyntaxError for invalid/unreadable EXIF\n # KeyError for dpi not included\n # ZeroDivisionError for invalid dpi rational value\n # ValueError for x_resolution[0] being an invalid float\n self.info[\"dpi\"] = 72, 72\n\n\ndef COM(self, marker):\n #\n # Comment marker. Store these in the APP dictionary.\n n = i16(self.fp.read(2)) - 2\n s = ImageFile._safe_read(self.fp, n)\n\n self.app[\"COM\"] = s # compatibility\n self.applist.append((\"COM\", s))\n\n\ndef SOF(self, marker):\n #\n # Start of frame marker. Defines the size and mode of the\n # image. JPEG is colour blind, so we use some simple\n # heuristics to map the number of layers to an appropriate\n # mode. 
Note that this could be made a bit brighter, by\n # looking for JFIF and Adobe APP markers.\n\n n = i16(self.fp.read(2)) - 2\n s = ImageFile._safe_read(self.fp, n)\n self._size = i16(s[3:]), i16(s[1:])\n\n self.bits = i8(s[0])\n if self.bits != 8:\n raise SyntaxError(\"cannot handle %d-bit layers\" % self.bits)\n\n self.layers = i8(s[5])\n if self.layers == 1:\n self.mode = \"L\"\n elif self.layers == 3:\n self.mode = \"RGB\"\n elif self.layers == 4:\n self.mode = \"CMYK\"\n else:\n raise SyntaxError(\"cannot handle %d-layer images\" % self.layers)\n\n if marker in [0xFFC2, 0xFFC6, 0xFFCA, 0xFFCE]:\n self.info[\"progressive\"] = self.info[\"progression\"] = 1\n\n if self.icclist:\n # fixup icc profile\n self.icclist.sort() # sort by sequence number\n if i8(self.icclist[0][13]) == len(self.icclist):\n profile = []\n for p in self.icclist:\n profile.append(p[14:])\n icc_profile = b\"\".join(profile)\n else:\n icc_profile = None # wrong number of fragments\n self.info[\"icc_profile\"] = icc_profile\n self.icclist = None\n\n for i in range(6, len(s), 3):\n t = s[i : i + 3]\n # 4-tuples: id, vsamp, hsamp, qtable\n self.layer.append((t[0], i8(t[1]) // 16, i8(t[1]) & 15, i8(t[2])))\n\n\ndef DQT(self, marker):\n #\n # Define quantization table. Support baseline 8-bit tables\n # only. Note that there might be more than one table in\n # each marker.\n\n # FIXME: The quantization tables can be used to estimate the\n # compression quality.\n\n n = i16(self.fp.read(2)) - 2\n s = ImageFile._safe_read(self.fp, n)\n while len(s):\n if len(s) < 65:\n raise SyntaxError(\"bad quantization table marker\")\n v = i8(s[0])\n if v // 16 == 0:\n self.quantization[v & 15] = array.array(\"B\", s[1:65])\n s = s[65:]\n else:\n return # FIXME: add code to read 16-bit tables!\n # raise SyntaxError, \"bad quantization table element size\"\n\n\n#\n# JPEG marker table\n\nMARKER = {\n 0xFFC0: (\"SOF0\", \"Baseline DCT\", SOF),\n 0xFFC1: (\"SOF1\", \"Extended Sequential DCT\", SOF),\n 0xFFC2: (\"SOF2\", \"Progressive DCT\", SOF),\n 0xFFC3: (\"SOF3\", \"Spatial lossless\", SOF),\n 0xFFC4: (\"DHT\", \"Define Huffman table\", Skip),\n 0xFFC5: (\"SOF5\", \"Differential sequential DCT\", SOF),\n 0xFFC6: (\"SOF6\", \"Differential progressive DCT\", SOF),\n 0xFFC7: (\"SOF7\", \"Differential spatial\", SOF),\n 0xFFC8: (\"JPG\", \"Extension\", None),\n 0xFFC9: (\"SOF9\", \"Extended sequential DCT (AC)\", SOF),\n 0xFFCA: (\"SOF10\", \"Progressive DCT (AC)\", SOF),\n 0xFFCB: (\"SOF11\", \"Spatial lossless DCT (AC)\", SOF),\n 0xFFCC: (\"DAC\", \"Define arithmetic coding conditioning\", Skip),\n 0xFFCD: (\"SOF13\", \"Differential sequential DCT (AC)\", SOF),\n 0xFFCE: (\"SOF14\", \"Differential progressive DCT (AC)\", SOF),\n 0xFFCF: (\"SOF15\", \"Differential spatial (AC)\", SOF),\n 0xFFD0: (\"RST0\", \"Restart 0\", None),\n 0xFFD1: (\"RST1\", \"Restart 1\", None),\n 0xFFD2: (\"RST2\", \"Restart 2\", None),\n 0xFFD3: (\"RST3\", \"Restart 3\", None),\n 0xFFD4: (\"RST4\", \"Restart 4\", None),\n 0xFFD5: (\"RST5\", \"Restart 5\", None),\n 0xFFD6: (\"RST6\", \"Restart 6\", None),\n 0xFFD7: (\"RST7\", \"Restart 7\", None),\n 0xFFD8: (\"SOI\", \"Start of image\", None),\n 0xFFD9: (\"EOI\", \"End of image\", None),\n 0xFFDA: (\"SOS\", \"Start of scan\", Skip),\n 0xFFDB: (\"DQT\", \"Define quantization table\", DQT),\n 0xFFDC: (\"DNL\", \"Define number of lines\", Skip),\n 0xFFDD: (\"DRI\", \"Define restart interval\", Skip),\n 0xFFDE: (\"DHP\", \"Define hierarchical progression\", SOF),\n 0xFFDF: (\"EXP\", \"Expand reference component\", 
Skip),\n 0xFFE0: (\"APP0\", \"Application segment 0\", APP),\n 0xFFE1: (\"APP1\", \"Application segment 1\", APP),\n 0xFFE2: (\"APP2\", \"Application segment 2\", APP),\n 0xFFE3: (\"APP3\", \"Application segment 3\", APP),\n 0xFFE4: (\"APP4\", \"Application segment 4\", APP),\n 0xFFE5: (\"APP5\", \"Application segment 5\", APP),\n 0xFFE6: (\"APP6\", \"Application segment 6\", APP),\n 0xFFE7: (\"APP7\", \"Application segment 7\", APP),\n 0xFFE8: (\"APP8\", \"Application segment 8\", APP),\n 0xFFE9: (\"APP9\", \"Application segment 9\", APP),\n 0xFFEA: (\"APP10\", \"Application segment 10\", APP),\n 0xFFEB: (\"APP11\", \"Application segment 11\", APP),\n 0xFFEC: (\"APP12\", \"Application segment 12\", APP),\n 0xFFED: (\"APP13\", \"Application segment 13\", APP),\n 0xFFEE: (\"APP14\", \"Application segment 14\", APP),\n 0xFFEF: (\"APP15\", \"Application segment 15\", APP),\n 0xFFF0: (\"JPG0\", \"Extension 0\", None),\n 0xFFF1: (\"JPG1\", \"Extension 1\", None),\n 0xFFF2: (\"JPG2\", \"Extension 2\", None),\n 0xFFF3: (\"JPG3\", \"Extension 3\", None),\n 0xFFF4: (\"JPG4\", \"Extension 4\", None),\n 0xFFF5: (\"JPG5\", \"Extension 5\", None),\n 0xFFF6: (\"JPG6\", \"Extension 6\", None),\n 0xFFF7: (\"JPG7\", \"Extension 7\", None),\n 0xFFF8: (\"JPG8\", \"Extension 8\", None),\n 0xFFF9: (\"JPG9\", \"Extension 9\", None),\n 0xFFFA: (\"JPG10\", \"Extension 10\", None),\n 0xFFFB: (\"JPG11\", \"Extension 11\", None),\n 0xFFFC: (\"JPG12\", \"Extension 12\", None),\n 0xFFFD: (\"JPG13\", \"Extension 13\", None),\n 0xFFFE: (\"COM\", \"Comment\", COM),\n}\n\n\ndef _accept(prefix):\n return prefix[0:1] == b\"\\377\"\n\n\n##\n# Image plugin for JPEG and JFIF images.\n\n\nclass JpegImageFile(ImageFile.ImageFile):\n\n format = \"JPEG\"\n format_description = \"JPEG (ISO 10918)\"\n\n def _open(self):\n\n s = self.fp.read(1)\n\n if i8(s) != 255:\n raise SyntaxError(\"not a JPEG file\")\n\n # Create attributes\n self.bits = self.layers = 0\n\n # JPEG specifics (internal)\n self.layer = []\n self.huffman_dc = {}\n self.huffman_ac = {}\n self.quantization = {}\n self.app = {} # compatibility\n self.applist = []\n self.icclist = []\n\n while True:\n\n i = i8(s)\n if i == 0xFF:\n s = s + self.fp.read(1)\n i = i16(s)\n else:\n # Skip non-0xFF junk\n s = self.fp.read(1)\n continue\n\n if i in MARKER:\n name, description, handler = MARKER[i]\n if handler is not None:\n handler(self, i)\n if i == 0xFFDA: # start of scan\n rawmode = self.mode\n if self.mode == \"CMYK\":\n rawmode = \"CMYK;I\" # assume adobe conventions\n self.tile = [(\"jpeg\", (0, 0) + self.size, 0, (rawmode, \"\"))]\n # self.__offset = self.fp.tell()\n break\n s = self.fp.read(1)\n elif i == 0 or i == 0xFFFF:\n # padded marker or junk; move on\n s = b\"\\xff\"\n elif i == 0xFF00: # Skip extraneous data (escaped 0xFF)\n s = self.fp.read(1)\n else:\n raise SyntaxError(\"no marker found\")\n\n def load_read(self, read_bytes):\n \"\"\"\n internal: read more image data\n For premature EOF and LOAD_TRUNCATED_IMAGES adds EOI marker\n so libjpeg can finish decoding\n \"\"\"\n s = self.fp.read(read_bytes)\n\n if not s and ImageFile.LOAD_TRUNCATED_IMAGES:\n # Premature EOF.\n # Pretend file is finished adding EOI marker\n return b\"\\xFF\\xD9\"\n\n return s\n\n def draft(self, mode, size):\n\n if len(self.tile) != 1:\n return\n\n # Protect from second call\n if self.decoderconfig:\n return\n\n d, e, o, a = self.tile[0]\n scale = 1\n original_size = self.size\n\n if a[0] == \"RGB\" and mode in [\"L\", \"YCbCr\"]:\n self.mode = mode\n a = mode, \"\"\n\n if size:\n 
scale = min(self.size[0] // size[0], self.size[1] // size[1])\n for s in [8, 4, 2, 1]:\n if scale >= s:\n break\n e = (\n e[0],\n e[1],\n (e[2] - e[0] + s - 1) // s + e[0],\n (e[3] - e[1] + s - 1) // s + e[1],\n )\n self._size = ((self.size[0] + s - 1) // s, (self.size[1] + s - 1) // s)\n scale = s\n\n self.tile = [(d, e, o, a)]\n self.decoderconfig = (scale, 0)\n\n box = (0, 0, original_size[0] / scale, original_size[1] / scale)\n return (self.mode, box)\n\n def load_djpeg(self):\n\n # ALTERNATIVE: handle JPEGs via the IJG command line utilities\n\n f, path = tempfile.mkstemp()\n os.close(f)\n if os.path.exists(self.filename):\n subprocess.check_call([\"djpeg\", \"-outfile\", path, self.filename])\n else:\n raise ValueError(\"Invalid Filename\")\n\n try:\n with Image.open(path) as _im:\n _im.load()\n self.im = _im.im\n finally:\n try:\n os.unlink(path)\n except OSError:\n pass\n\n self.mode = self.im.mode\n self._size = self.im.size\n\n self.tile = []\n\n def _getexif(self):\n return _getexif(self)\n\n def _getmp(self):\n return _getmp(self)\n\n\ndef _fixup_dict(src_dict):\n # Helper function for _getexif()\n # returns a dict with any single item tuples/lists as individual values\n exif = Image.Exif()\n return exif._fixup_dict(src_dict)\n\n\ndef _getexif(self):\n if \"exif\" not in self.info:\n return None\n return dict(self.getexif())\n\n\ndef _getmp(self):\n # Extract MP information. This method was inspired by the \"highly\n # experimental\" _getexif version that's been in use for years now,\n # itself based on the ImageFileDirectory class in the TIFF plug-in.\n\n # The MP record essentially consists of a TIFF file embedded in a JPEG\n # application marker.\n try:\n data = self.info[\"mp\"]\n except KeyError:\n return None\n file_contents = io.BytesIO(data)\n head = file_contents.read(8)\n endianness = \">\" if head[:4] == b\"\\x4d\\x4d\\x00\\x2a\" else \"<\"\n # process dictionary\n try:\n info = TiffImagePlugin.ImageFileDirectory_v2(head)\n file_contents.seek(info.next)\n info.load(file_contents)\n mp = dict(info)\n except Exception:\n raise SyntaxError(\"malformed MP Index (unreadable directory)\")\n # it's an error not to have a number of images\n try:\n quant = mp[0xB001]\n except KeyError:\n raise SyntaxError(\"malformed MP Index (no number of images)\")\n # get MP entries\n mpentries = []\n try:\n rawmpentries = mp[0xB002]\n for entrynum in range(0, quant):\n unpackedentry = struct.unpack_from(\n \"{}LLLHH\".format(endianness), rawmpentries, entrynum * 16\n )\n labels = (\"Attribute\", \"Size\", \"DataOffset\", \"EntryNo1\", \"EntryNo2\")\n mpentry = dict(zip(labels, unpackedentry))\n mpentryattr = {\n \"DependentParentImageFlag\": bool(mpentry[\"Attribute\"] & (1 << 31)),\n \"DependentChildImageFlag\": bool(mpentry[\"Attribute\"] & (1 << 30)),\n \"RepresentativeImageFlag\": bool(mpentry[\"Attribute\"] & (1 << 29)),\n \"Reserved\": (mpentry[\"Attribute\"] & (3 << 27)) >> 27,\n \"ImageDataFormat\": (mpentry[\"Attribute\"] & (7 << 24)) >> 24,\n \"MPType\": mpentry[\"Attribute\"] & 0x00FFFFFF,\n }\n if mpentryattr[\"ImageDataFormat\"] == 0:\n mpentryattr[\"ImageDataFormat\"] = \"JPEG\"\n else:\n raise SyntaxError(\"unsupported picture format in MPO\")\n mptypemap = {\n 0x000000: \"Undefined\",\n 0x010001: \"Large Thumbnail (VGA Equivalent)\",\n 0x010002: \"Large Thumbnail (Full HD Equivalent)\",\n 0x020001: \"Multi-Frame Image (Panorama)\",\n 0x020002: \"Multi-Frame Image: (Disparity)\",\n 0x020003: \"Multi-Frame Image: (Multi-Angle)\",\n 0x030000: \"Baseline MP Primary Image\",\n 
}\n mpentryattr[\"MPType\"] = mptypemap.get(mpentryattr[\"MPType\"], \"Unknown\")\n mpentry[\"Attribute\"] = mpentryattr\n mpentries.append(mpentry)\n mp[0xB002] = mpentries\n except KeyError:\n raise SyntaxError(\"malformed MP Index (bad MP Entry)\")\n # Next we should try and parse the individual image unique ID list;\n # we don't because I've never seen this actually used in a real MPO\n # file and so can't test it.\n return mp\n\n\n# --------------------------------------------------------------------\n# stuff to save JPEG files\n\nRAWMODE = {\n \"1\": \"L\",\n \"L\": \"L\",\n \"RGB\": \"RGB\",\n \"RGBX\": \"RGB\",\n \"CMYK\": \"CMYK;I\", # assume adobe conventions\n \"YCbCr\": \"YCbCr\",\n}\n\n# fmt: off\nzigzag_index = (\n 0, 1, 5, 6, 14, 15, 27, 28,\n 2, 4, 7, 13, 16, 26, 29, 42,\n 3, 8, 12, 17, 25, 30, 41, 43,\n 9, 11, 18, 24, 31, 40, 44, 53,\n 10, 19, 23, 32, 39, 45, 52, 54,\n 20, 22, 33, 38, 46, 51, 55, 60,\n 21, 34, 37, 47, 50, 56, 59, 61,\n 35, 36, 48, 49, 57, 58, 62, 63,\n)\n\nsamplings = {\n (1, 1, 1, 1, 1, 1): 0,\n (2, 1, 1, 1, 1, 1): 1,\n (2, 2, 1, 1, 1, 1): 2,\n}\n# fmt: on\n\n\ndef convert_dict_qtables(qtables):\n qtables = [qtables[key] for key in range(len(qtables)) if key in qtables]\n for idx, table in enumerate(qtables):\n qtables[idx] = [table[i] for i in zigzag_index]\n return qtables\n\n\ndef get_sampling(im):\n # There's no subsampling when image have only 1 layer\n # (grayscale images) or when they are CMYK (4 layers),\n # so set subsampling to default value.\n #\n # NOTE: currently Pillow can't encode JPEG to YCCK format.\n # If YCCK support is added in the future, subsampling code will have\n # to be updated (here and in JpegEncode.c) to deal with 4 layers.\n if not hasattr(im, \"layers\") or im.layers in (1, 4):\n return -1\n sampling = im.layer[0][1:3] + im.layer[1][1:3] + im.layer[2][1:3]\n return samplings.get(sampling, -1)\n\n\ndef _save(im, fp, filename):\n\n try:\n rawmode = RAWMODE[im.mode]\n except KeyError:\n raise OSError(\"cannot write mode %s as JPEG\" % im.mode)\n\n info = im.encoderinfo\n\n dpi = [round(x) for x in info.get(\"dpi\", (0, 0))]\n\n quality = info.get(\"quality\", -1)\n subsampling = info.get(\"subsampling\", -1)\n qtables = info.get(\"qtables\")\n\n if quality == \"keep\":\n quality = -1\n subsampling = \"keep\"\n qtables = \"keep\"\n elif quality in presets:\n preset = presets[quality]\n quality = -1\n subsampling = preset.get(\"subsampling\", -1)\n qtables = preset.get(\"quantization\")\n elif not isinstance(quality, int):\n raise ValueError(\"Invalid quality setting\")\n else:\n if subsampling in presets:\n subsampling = presets[subsampling].get(\"subsampling\", -1)\n if isinstance(qtables, str) and qtables in presets:\n qtables = presets[qtables].get(\"quantization\")\n\n if subsampling == \"4:4:4\":\n subsampling = 0\n elif subsampling == \"4:2:2\":\n subsampling = 1\n elif subsampling == \"4:2:0\":\n subsampling = 2\n elif subsampling == \"4:1:1\":\n # For compatibility. 
Before Pillow 4.3, 4:1:1 actually meant 4:2:0.\n # Set 4:2:0 if someone is still using that value.\n subsampling = 2\n elif subsampling == \"keep\":\n if im.format != \"JPEG\":\n raise ValueError(\"Cannot use 'keep' when original image is not a JPEG\")\n subsampling = get_sampling(im)\n\n def validate_qtables(qtables):\n if qtables is None:\n return qtables\n if isinstance(qtables, str):\n try:\n lines = [\n int(num)\n for line in qtables.splitlines()\n for num in line.split(\"#\", 1)[0].split()\n ]\n except ValueError:\n raise ValueError(\"Invalid quantization table\")\n else:\n qtables = [lines[s : s + 64] for s in range(0, len(lines), 64)]\n if isinstance(qtables, (tuple, list, dict)):\n if isinstance(qtables, dict):\n qtables = convert_dict_qtables(qtables)\n elif isinstance(qtables, tuple):\n qtables = list(qtables)\n if not (0 < len(qtables) < 5):\n raise ValueError(\"None or too many quantization tables\")\n for idx, table in enumerate(qtables):\n try:\n if len(table) != 64:\n raise TypeError\n table = array.array(\"B\", table)\n except TypeError:\n raise ValueError(\"Invalid quantization table\")\n else:\n qtables[idx] = list(table)\n return qtables\n\n if qtables == \"keep\":\n if im.format != \"JPEG\":\n raise ValueError(\"Cannot use 'keep' when original image is not a JPEG\")\n qtables = getattr(im, \"quantization\", None)\n qtables = validate_qtables(qtables)\n\n extra = b\"\"\n\n icc_profile = info.get(\"icc_profile\")\n if icc_profile:\n ICC_OVERHEAD_LEN = 14\n MAX_BYTES_IN_MARKER = 65533\n MAX_DATA_BYTES_IN_MARKER = MAX_BYTES_IN_MARKER - ICC_OVERHEAD_LEN\n markers = []\n while icc_profile:\n markers.append(icc_profile[:MAX_DATA_BYTES_IN_MARKER])\n icc_profile = icc_profile[MAX_DATA_BYTES_IN_MARKER:]\n i = 1\n for marker in markers:\n size = struct.pack(\">H\", 2 + ICC_OVERHEAD_LEN + len(marker))\n extra += (\n b\"\\xFF\\xE2\"\n + size\n + b\"ICC_PROFILE\\0\"\n + o8(i)\n + o8(len(markers))\n + marker\n )\n i += 1\n\n # \"progressive\" is the official name, but older documentation\n # says \"progression\"\n # FIXME: issue a warning if the wrong form is used (post-1.1.7)\n progressive = info.get(\"progressive\", False) or info.get(\"progression\", False)\n\n optimize = info.get(\"optimize\", False)\n\n exif = info.get(\"exif\", b\"\")\n if isinstance(exif, Image.Exif):\n exif = exif.tobytes()\n\n # get keyword arguments\n im.encoderconfig = (\n quality,\n progressive,\n info.get(\"smooth\", 0),\n optimize,\n info.get(\"streamtype\", 0),\n dpi[0],\n dpi[1],\n subsampling,\n qtables,\n extra,\n exif,\n )\n\n # if we optimize, libjpeg needs a buffer big enough to hold the whole image\n # in a shot. Guessing on the size, at im.size bytes. (raw pixel size is\n # channels*size, this is a value that's been used in a django patch.\n # https://github.com/matthewwithanm/django-imagekit/issues/50\n bufsize = 0\n if optimize or progressive:\n # CMYK can be bigger\n if im.mode == \"CMYK\":\n bufsize = 4 * im.size[0] * im.size[1]\n # keep sets quality to -1, but the actual value may be high.\n elif quality >= 95 or quality == -1:\n bufsize = 2 * im.size[0] * im.size[1]\n else:\n bufsize = im.size[0] * im.size[1]\n\n # The EXIF info needs to be written as one block, + APP1, + one spare byte.\n # Ensure that our buffer is big enough. 
Same with the icc_profile block.\n bufsize = max(ImageFile.MAXBLOCK, bufsize, len(exif) + 5, len(extra) + 1)\n\n ImageFile._save(im, fp, [(\"jpeg\", (0, 0) + im.size, 0, rawmode)], bufsize)\n\n\ndef _save_cjpeg(im, fp, filename):\n # ALTERNATIVE: handle JPEGs via the IJG command line utilities.\n tempfile = im._dump()\n subprocess.check_call([\"cjpeg\", \"-outfile\", filename, tempfile])\n try:\n os.unlink(tempfile)\n except OSError:\n pass\n\n\n##\n# Factory for making JPEG and MPO instances\ndef jpeg_factory(fp=None, filename=None):\n im = JpegImageFile(fp, filename)\n try:\n mpheader = im._getmp()\n if mpheader[45057] > 1:\n # It's actually an MPO\n from .MpoImagePlugin import MpoImageFile\n\n # Don't reload everything, just convert it.\n im = MpoImageFile.adopt(im, mpheader)\n except (TypeError, IndexError):\n # It is really a JPEG\n pass\n except SyntaxError:\n warnings.warn(\n \"Image appears to be a malformed MPO file, it will be \"\n \"interpreted as a base JPEG file\"\n )\n return im\n\n\n# ---------------------------------------------------------------------\n# Registry stuff\n\nImage.register_open(JpegImageFile.format, jpeg_factory, _accept)\nImage.register_save(JpegImageFile.format, _save)\n\nImage.register_extensions(JpegImageFile.format, [\".jfif\", \".jpe\", \".jpg\", \".jpeg\"])\n\nImage.register_mime(JpegImageFile.format, \"image/jpeg\")\n",
"path": "src/PIL/JpegImagePlugin.py"
}
] | [
{
"content": "#\n# The Python Imaging Library.\n# $Id$\n#\n# JPEG (JFIF) file handling\n#\n# See \"Digital Compression and Coding of Continuous-Tone Still Images,\n# Part 1, Requirements and Guidelines\" (CCITT T.81 / ISO 10918-1)\n#\n# History:\n# 1995-09-09 fl Created\n# 1995-09-13 fl Added full parser\n# 1996-03-25 fl Added hack to use the IJG command line utilities\n# 1996-05-05 fl Workaround Photoshop 2.5 CMYK polarity bug\n# 1996-05-28 fl Added draft support, JFIF version (0.1)\n# 1996-12-30 fl Added encoder options, added progression property (0.2)\n# 1997-08-27 fl Save mode 1 images as BW (0.3)\n# 1998-07-12 fl Added YCbCr to draft and save methods (0.4)\n# 1998-10-19 fl Don't hang on files using 16-bit DQT's (0.4.1)\n# 2001-04-16 fl Extract DPI settings from JFIF files (0.4.2)\n# 2002-07-01 fl Skip pad bytes before markers; identify Exif files (0.4.3)\n# 2003-04-25 fl Added experimental EXIF decoder (0.5)\n# 2003-06-06 fl Added experimental EXIF GPSinfo decoder\n# 2003-09-13 fl Extract COM markers\n# 2009-09-06 fl Added icc_profile support (from Florian Hoech)\n# 2009-03-06 fl Changed CMYK handling; always use Adobe polarity (0.6)\n# 2009-03-08 fl Added subsampling support (from Justin Huff).\n#\n# Copyright (c) 1997-2003 by Secret Labs AB.\n# Copyright (c) 1995-1996 by Fredrik Lundh.\n#\n# See the README file for information on usage and redistribution.\n#\nimport array\nimport io\nimport os\nimport struct\nimport subprocess\nimport tempfile\nimport warnings\n\nfrom . import Image, ImageFile, TiffImagePlugin\nfrom ._binary import i8, i16be as i16, i32be as i32, o8\nfrom .JpegPresets import presets\n\n#\n# Parser\n\n\ndef Skip(self, marker):\n n = i16(self.fp.read(2)) - 2\n ImageFile._safe_read(self.fp, n)\n\n\ndef APP(self, marker):\n #\n # Application marker. Store these in the APP dictionary.\n # Also look for well-known application markers.\n\n n = i16(self.fp.read(2)) - 2\n s = ImageFile._safe_read(self.fp, n)\n\n app = \"APP%d\" % (marker & 15)\n\n self.app[app] = s # compatibility\n self.applist.append((app, s))\n\n if marker == 0xFFE0 and s[:4] == b\"JFIF\":\n # extract JFIF information\n self.info[\"jfif\"] = version = i16(s, 5) # version\n self.info[\"jfif_version\"] = divmod(version, 256)\n # extract JFIF properties\n try:\n jfif_unit = i8(s[7])\n jfif_density = i16(s, 8), i16(s, 10)\n except Exception:\n pass\n else:\n if jfif_unit == 1:\n self.info[\"dpi\"] = jfif_density\n self.info[\"jfif_unit\"] = jfif_unit\n self.info[\"jfif_density\"] = jfif_density\n elif marker == 0xFFE1 and s[:5] == b\"Exif\\0\":\n if \"exif\" not in self.info:\n # extract EXIF information (incomplete)\n self.info[\"exif\"] = s # FIXME: value will change\n elif marker == 0xFFE2 and s[:5] == b\"FPXR\\0\":\n # extract FlashPix information (incomplete)\n self.info[\"flashpix\"] = s # FIXME: value will change\n elif marker == 0xFFE2 and s[:12] == b\"ICC_PROFILE\\0\":\n # Since an ICC profile can be larger than the maximum size of\n # a JPEG marker (64K), we need provisions to split it into\n # multiple markers. 
The format defined by the ICC specifies\n # one or more APP2 markers containing the following data:\n # Identifying string ASCII \"ICC_PROFILE\\0\" (12 bytes)\n # Marker sequence number 1, 2, etc (1 byte)\n # Number of markers Total of APP2's used (1 byte)\n # Profile data (remainder of APP2 data)\n # Decoders should use the marker sequence numbers to\n # reassemble the profile, rather than assuming that the APP2\n # markers appear in the correct sequence.\n self.icclist.append(s)\n elif marker == 0xFFED and s[:14] == b\"Photoshop 3.0\\x00\":\n # parse the image resource block\n offset = 14\n photoshop = self.info.setdefault(\"photoshop\", {})\n while s[offset : offset + 4] == b\"8BIM\":\n try:\n offset += 4\n # resource code\n code = i16(s, offset)\n offset += 2\n # resource name (usually empty)\n name_len = i8(s[offset])\n # name = s[offset+1:offset+1+name_len]\n offset += 1 + name_len\n offset += offset & 1 # align\n # resource data block\n size = i32(s, offset)\n offset += 4\n data = s[offset : offset + size]\n if code == 0x03ED: # ResolutionInfo\n data = {\n \"XResolution\": i32(data[:4]) / 65536,\n \"DisplayedUnitsX\": i16(data[4:8]),\n \"YResolution\": i32(data[8:12]) / 65536,\n \"DisplayedUnitsY\": i16(data[12:]),\n }\n photoshop[code] = data\n offset += size\n offset += offset & 1 # align\n except struct.error:\n break # insufficient data\n\n elif marker == 0xFFEE and s[:5] == b\"Adobe\":\n self.info[\"adobe\"] = i16(s, 5)\n # extract Adobe custom properties\n try:\n adobe_transform = i8(s[1])\n except Exception:\n pass\n else:\n self.info[\"adobe_transform\"] = adobe_transform\n elif marker == 0xFFE2 and s[:4] == b\"MPF\\0\":\n # extract MPO information\n self.info[\"mp\"] = s[4:]\n # offset is current location minus buffer size\n # plus constant header size\n self.info[\"mpoffset\"] = self.fp.tell() - n + 4\n\n # If DPI isn't in JPEG header, fetch from EXIF\n if \"dpi\" not in self.info and \"exif\" in self.info:\n try:\n exif = self.getexif()\n resolution_unit = exif[0x0128]\n x_resolution = exif[0x011A]\n try:\n dpi = float(x_resolution[0]) / x_resolution[1]\n except TypeError:\n dpi = x_resolution\n if resolution_unit == 3: # cm\n # 1 dpcm = 2.54 dpi\n dpi *= 2.54\n self.info[\"dpi\"] = int(dpi + 0.5), int(dpi + 0.5)\n except (KeyError, SyntaxError, ValueError, ZeroDivisionError):\n # SyntaxError for invalid/unreadable EXIF\n # KeyError for dpi not included\n # ZeroDivisionError for invalid dpi rational value\n # ValueError for x_resolution[0] being an invalid float\n self.info[\"dpi\"] = 72, 72\n\n\ndef COM(self, marker):\n #\n # Comment marker. Store these in the APP dictionary.\n n = i16(self.fp.read(2)) - 2\n s = ImageFile._safe_read(self.fp, n)\n\n self.info[\"comment\"] = s\n self.app[\"COM\"] = s # compatibility\n self.applist.append((\"COM\", s))\n\n\ndef SOF(self, marker):\n #\n # Start of frame marker. Defines the size and mode of the\n # image. JPEG is colour blind, so we use some simple\n # heuristics to map the number of layers to an appropriate\n # mode. 
Note that this could be made a bit brighter, by\n # looking for JFIF and Adobe APP markers.\n\n n = i16(self.fp.read(2)) - 2\n s = ImageFile._safe_read(self.fp, n)\n self._size = i16(s[3:]), i16(s[1:])\n\n self.bits = i8(s[0])\n if self.bits != 8:\n raise SyntaxError(\"cannot handle %d-bit layers\" % self.bits)\n\n self.layers = i8(s[5])\n if self.layers == 1:\n self.mode = \"L\"\n elif self.layers == 3:\n self.mode = \"RGB\"\n elif self.layers == 4:\n self.mode = \"CMYK\"\n else:\n raise SyntaxError(\"cannot handle %d-layer images\" % self.layers)\n\n if marker in [0xFFC2, 0xFFC6, 0xFFCA, 0xFFCE]:\n self.info[\"progressive\"] = self.info[\"progression\"] = 1\n\n if self.icclist:\n # fixup icc profile\n self.icclist.sort() # sort by sequence number\n if i8(self.icclist[0][13]) == len(self.icclist):\n profile = []\n for p in self.icclist:\n profile.append(p[14:])\n icc_profile = b\"\".join(profile)\n else:\n icc_profile = None # wrong number of fragments\n self.info[\"icc_profile\"] = icc_profile\n self.icclist = None\n\n for i in range(6, len(s), 3):\n t = s[i : i + 3]\n # 4-tuples: id, vsamp, hsamp, qtable\n self.layer.append((t[0], i8(t[1]) // 16, i8(t[1]) & 15, i8(t[2])))\n\n\ndef DQT(self, marker):\n #\n # Define quantization table. Support baseline 8-bit tables\n # only. Note that there might be more than one table in\n # each marker.\n\n # FIXME: The quantization tables can be used to estimate the\n # compression quality.\n\n n = i16(self.fp.read(2)) - 2\n s = ImageFile._safe_read(self.fp, n)\n while len(s):\n if len(s) < 65:\n raise SyntaxError(\"bad quantization table marker\")\n v = i8(s[0])\n if v // 16 == 0:\n self.quantization[v & 15] = array.array(\"B\", s[1:65])\n s = s[65:]\n else:\n return # FIXME: add code to read 16-bit tables!\n # raise SyntaxError, \"bad quantization table element size\"\n\n\n#\n# JPEG marker table\n\nMARKER = {\n 0xFFC0: (\"SOF0\", \"Baseline DCT\", SOF),\n 0xFFC1: (\"SOF1\", \"Extended Sequential DCT\", SOF),\n 0xFFC2: (\"SOF2\", \"Progressive DCT\", SOF),\n 0xFFC3: (\"SOF3\", \"Spatial lossless\", SOF),\n 0xFFC4: (\"DHT\", \"Define Huffman table\", Skip),\n 0xFFC5: (\"SOF5\", \"Differential sequential DCT\", SOF),\n 0xFFC6: (\"SOF6\", \"Differential progressive DCT\", SOF),\n 0xFFC7: (\"SOF7\", \"Differential spatial\", SOF),\n 0xFFC8: (\"JPG\", \"Extension\", None),\n 0xFFC9: (\"SOF9\", \"Extended sequential DCT (AC)\", SOF),\n 0xFFCA: (\"SOF10\", \"Progressive DCT (AC)\", SOF),\n 0xFFCB: (\"SOF11\", \"Spatial lossless DCT (AC)\", SOF),\n 0xFFCC: (\"DAC\", \"Define arithmetic coding conditioning\", Skip),\n 0xFFCD: (\"SOF13\", \"Differential sequential DCT (AC)\", SOF),\n 0xFFCE: (\"SOF14\", \"Differential progressive DCT (AC)\", SOF),\n 0xFFCF: (\"SOF15\", \"Differential spatial (AC)\", SOF),\n 0xFFD0: (\"RST0\", \"Restart 0\", None),\n 0xFFD1: (\"RST1\", \"Restart 1\", None),\n 0xFFD2: (\"RST2\", \"Restart 2\", None),\n 0xFFD3: (\"RST3\", \"Restart 3\", None),\n 0xFFD4: (\"RST4\", \"Restart 4\", None),\n 0xFFD5: (\"RST5\", \"Restart 5\", None),\n 0xFFD6: (\"RST6\", \"Restart 6\", None),\n 0xFFD7: (\"RST7\", \"Restart 7\", None),\n 0xFFD8: (\"SOI\", \"Start of image\", None),\n 0xFFD9: (\"EOI\", \"End of image\", None),\n 0xFFDA: (\"SOS\", \"Start of scan\", Skip),\n 0xFFDB: (\"DQT\", \"Define quantization table\", DQT),\n 0xFFDC: (\"DNL\", \"Define number of lines\", Skip),\n 0xFFDD: (\"DRI\", \"Define restart interval\", Skip),\n 0xFFDE: (\"DHP\", \"Define hierarchical progression\", SOF),\n 0xFFDF: (\"EXP\", \"Expand reference component\", 
Skip),\n 0xFFE0: (\"APP0\", \"Application segment 0\", APP),\n 0xFFE1: (\"APP1\", \"Application segment 1\", APP),\n 0xFFE2: (\"APP2\", \"Application segment 2\", APP),\n 0xFFE3: (\"APP3\", \"Application segment 3\", APP),\n 0xFFE4: (\"APP4\", \"Application segment 4\", APP),\n 0xFFE5: (\"APP5\", \"Application segment 5\", APP),\n 0xFFE6: (\"APP6\", \"Application segment 6\", APP),\n 0xFFE7: (\"APP7\", \"Application segment 7\", APP),\n 0xFFE8: (\"APP8\", \"Application segment 8\", APP),\n 0xFFE9: (\"APP9\", \"Application segment 9\", APP),\n 0xFFEA: (\"APP10\", \"Application segment 10\", APP),\n 0xFFEB: (\"APP11\", \"Application segment 11\", APP),\n 0xFFEC: (\"APP12\", \"Application segment 12\", APP),\n 0xFFED: (\"APP13\", \"Application segment 13\", APP),\n 0xFFEE: (\"APP14\", \"Application segment 14\", APP),\n 0xFFEF: (\"APP15\", \"Application segment 15\", APP),\n 0xFFF0: (\"JPG0\", \"Extension 0\", None),\n 0xFFF1: (\"JPG1\", \"Extension 1\", None),\n 0xFFF2: (\"JPG2\", \"Extension 2\", None),\n 0xFFF3: (\"JPG3\", \"Extension 3\", None),\n 0xFFF4: (\"JPG4\", \"Extension 4\", None),\n 0xFFF5: (\"JPG5\", \"Extension 5\", None),\n 0xFFF6: (\"JPG6\", \"Extension 6\", None),\n 0xFFF7: (\"JPG7\", \"Extension 7\", None),\n 0xFFF8: (\"JPG8\", \"Extension 8\", None),\n 0xFFF9: (\"JPG9\", \"Extension 9\", None),\n 0xFFFA: (\"JPG10\", \"Extension 10\", None),\n 0xFFFB: (\"JPG11\", \"Extension 11\", None),\n 0xFFFC: (\"JPG12\", \"Extension 12\", None),\n 0xFFFD: (\"JPG13\", \"Extension 13\", None),\n 0xFFFE: (\"COM\", \"Comment\", COM),\n}\n\n\ndef _accept(prefix):\n return prefix[0:1] == b\"\\377\"\n\n\n##\n# Image plugin for JPEG and JFIF images.\n\n\nclass JpegImageFile(ImageFile.ImageFile):\n\n format = \"JPEG\"\n format_description = \"JPEG (ISO 10918)\"\n\n def _open(self):\n\n s = self.fp.read(1)\n\n if i8(s) != 255:\n raise SyntaxError(\"not a JPEG file\")\n\n # Create attributes\n self.bits = self.layers = 0\n\n # JPEG specifics (internal)\n self.layer = []\n self.huffman_dc = {}\n self.huffman_ac = {}\n self.quantization = {}\n self.app = {} # compatibility\n self.applist = []\n self.icclist = []\n\n while True:\n\n i = i8(s)\n if i == 0xFF:\n s = s + self.fp.read(1)\n i = i16(s)\n else:\n # Skip non-0xFF junk\n s = self.fp.read(1)\n continue\n\n if i in MARKER:\n name, description, handler = MARKER[i]\n if handler is not None:\n handler(self, i)\n if i == 0xFFDA: # start of scan\n rawmode = self.mode\n if self.mode == \"CMYK\":\n rawmode = \"CMYK;I\" # assume adobe conventions\n self.tile = [(\"jpeg\", (0, 0) + self.size, 0, (rawmode, \"\"))]\n # self.__offset = self.fp.tell()\n break\n s = self.fp.read(1)\n elif i == 0 or i == 0xFFFF:\n # padded marker or junk; move on\n s = b\"\\xff\"\n elif i == 0xFF00: # Skip extraneous data (escaped 0xFF)\n s = self.fp.read(1)\n else:\n raise SyntaxError(\"no marker found\")\n\n def load_read(self, read_bytes):\n \"\"\"\n internal: read more image data\n For premature EOF and LOAD_TRUNCATED_IMAGES adds EOI marker\n so libjpeg can finish decoding\n \"\"\"\n s = self.fp.read(read_bytes)\n\n if not s and ImageFile.LOAD_TRUNCATED_IMAGES:\n # Premature EOF.\n # Pretend file is finished adding EOI marker\n return b\"\\xFF\\xD9\"\n\n return s\n\n def draft(self, mode, size):\n\n if len(self.tile) != 1:\n return\n\n # Protect from second call\n if self.decoderconfig:\n return\n\n d, e, o, a = self.tile[0]\n scale = 1\n original_size = self.size\n\n if a[0] == \"RGB\" and mode in [\"L\", \"YCbCr\"]:\n self.mode = mode\n a = mode, \"\"\n\n if size:\n 
scale = min(self.size[0] // size[0], self.size[1] // size[1])\n for s in [8, 4, 2, 1]:\n if scale >= s:\n break\n e = (\n e[0],\n e[1],\n (e[2] - e[0] + s - 1) // s + e[0],\n (e[3] - e[1] + s - 1) // s + e[1],\n )\n self._size = ((self.size[0] + s - 1) // s, (self.size[1] + s - 1) // s)\n scale = s\n\n self.tile = [(d, e, o, a)]\n self.decoderconfig = (scale, 0)\n\n box = (0, 0, original_size[0] / scale, original_size[1] / scale)\n return (self.mode, box)\n\n def load_djpeg(self):\n\n # ALTERNATIVE: handle JPEGs via the IJG command line utilities\n\n f, path = tempfile.mkstemp()\n os.close(f)\n if os.path.exists(self.filename):\n subprocess.check_call([\"djpeg\", \"-outfile\", path, self.filename])\n else:\n raise ValueError(\"Invalid Filename\")\n\n try:\n with Image.open(path) as _im:\n _im.load()\n self.im = _im.im\n finally:\n try:\n os.unlink(path)\n except OSError:\n pass\n\n self.mode = self.im.mode\n self._size = self.im.size\n\n self.tile = []\n\n def _getexif(self):\n return _getexif(self)\n\n def _getmp(self):\n return _getmp(self)\n\n\ndef _fixup_dict(src_dict):\n # Helper function for _getexif()\n # returns a dict with any single item tuples/lists as individual values\n exif = Image.Exif()\n return exif._fixup_dict(src_dict)\n\n\ndef _getexif(self):\n if \"exif\" not in self.info:\n return None\n return dict(self.getexif())\n\n\ndef _getmp(self):\n # Extract MP information. This method was inspired by the \"highly\n # experimental\" _getexif version that's been in use for years now,\n # itself based on the ImageFileDirectory class in the TIFF plug-in.\n\n # The MP record essentially consists of a TIFF file embedded in a JPEG\n # application marker.\n try:\n data = self.info[\"mp\"]\n except KeyError:\n return None\n file_contents = io.BytesIO(data)\n head = file_contents.read(8)\n endianness = \">\" if head[:4] == b\"\\x4d\\x4d\\x00\\x2a\" else \"<\"\n # process dictionary\n try:\n info = TiffImagePlugin.ImageFileDirectory_v2(head)\n file_contents.seek(info.next)\n info.load(file_contents)\n mp = dict(info)\n except Exception:\n raise SyntaxError(\"malformed MP Index (unreadable directory)\")\n # it's an error not to have a number of images\n try:\n quant = mp[0xB001]\n except KeyError:\n raise SyntaxError(\"malformed MP Index (no number of images)\")\n # get MP entries\n mpentries = []\n try:\n rawmpentries = mp[0xB002]\n for entrynum in range(0, quant):\n unpackedentry = struct.unpack_from(\n \"{}LLLHH\".format(endianness), rawmpentries, entrynum * 16\n )\n labels = (\"Attribute\", \"Size\", \"DataOffset\", \"EntryNo1\", \"EntryNo2\")\n mpentry = dict(zip(labels, unpackedentry))\n mpentryattr = {\n \"DependentParentImageFlag\": bool(mpentry[\"Attribute\"] & (1 << 31)),\n \"DependentChildImageFlag\": bool(mpentry[\"Attribute\"] & (1 << 30)),\n \"RepresentativeImageFlag\": bool(mpentry[\"Attribute\"] & (1 << 29)),\n \"Reserved\": (mpentry[\"Attribute\"] & (3 << 27)) >> 27,\n \"ImageDataFormat\": (mpentry[\"Attribute\"] & (7 << 24)) >> 24,\n \"MPType\": mpentry[\"Attribute\"] & 0x00FFFFFF,\n }\n if mpentryattr[\"ImageDataFormat\"] == 0:\n mpentryattr[\"ImageDataFormat\"] = \"JPEG\"\n else:\n raise SyntaxError(\"unsupported picture format in MPO\")\n mptypemap = {\n 0x000000: \"Undefined\",\n 0x010001: \"Large Thumbnail (VGA Equivalent)\",\n 0x010002: \"Large Thumbnail (Full HD Equivalent)\",\n 0x020001: \"Multi-Frame Image (Panorama)\",\n 0x020002: \"Multi-Frame Image: (Disparity)\",\n 0x020003: \"Multi-Frame Image: (Multi-Angle)\",\n 0x030000: \"Baseline MP Primary Image\",\n 
}\n mpentryattr[\"MPType\"] = mptypemap.get(mpentryattr[\"MPType\"], \"Unknown\")\n mpentry[\"Attribute\"] = mpentryattr\n mpentries.append(mpentry)\n mp[0xB002] = mpentries\n except KeyError:\n raise SyntaxError(\"malformed MP Index (bad MP Entry)\")\n # Next we should try and parse the individual image unique ID list;\n # we don't because I've never seen this actually used in a real MPO\n # file and so can't test it.\n return mp\n\n\n# --------------------------------------------------------------------\n# stuff to save JPEG files\n\nRAWMODE = {\n \"1\": \"L\",\n \"L\": \"L\",\n \"RGB\": \"RGB\",\n \"RGBX\": \"RGB\",\n \"CMYK\": \"CMYK;I\", # assume adobe conventions\n \"YCbCr\": \"YCbCr\",\n}\n\n# fmt: off\nzigzag_index = (\n 0, 1, 5, 6, 14, 15, 27, 28,\n 2, 4, 7, 13, 16, 26, 29, 42,\n 3, 8, 12, 17, 25, 30, 41, 43,\n 9, 11, 18, 24, 31, 40, 44, 53,\n 10, 19, 23, 32, 39, 45, 52, 54,\n 20, 22, 33, 38, 46, 51, 55, 60,\n 21, 34, 37, 47, 50, 56, 59, 61,\n 35, 36, 48, 49, 57, 58, 62, 63,\n)\n\nsamplings = {\n (1, 1, 1, 1, 1, 1): 0,\n (2, 1, 1, 1, 1, 1): 1,\n (2, 2, 1, 1, 1, 1): 2,\n}\n# fmt: on\n\n\ndef convert_dict_qtables(qtables):\n qtables = [qtables[key] for key in range(len(qtables)) if key in qtables]\n for idx, table in enumerate(qtables):\n qtables[idx] = [table[i] for i in zigzag_index]\n return qtables\n\n\ndef get_sampling(im):\n # There's no subsampling when image have only 1 layer\n # (grayscale images) or when they are CMYK (4 layers),\n # so set subsampling to default value.\n #\n # NOTE: currently Pillow can't encode JPEG to YCCK format.\n # If YCCK support is added in the future, subsampling code will have\n # to be updated (here and in JpegEncode.c) to deal with 4 layers.\n if not hasattr(im, \"layers\") or im.layers in (1, 4):\n return -1\n sampling = im.layer[0][1:3] + im.layer[1][1:3] + im.layer[2][1:3]\n return samplings.get(sampling, -1)\n\n\ndef _save(im, fp, filename):\n\n try:\n rawmode = RAWMODE[im.mode]\n except KeyError:\n raise OSError(\"cannot write mode %s as JPEG\" % im.mode)\n\n info = im.encoderinfo\n\n dpi = [round(x) for x in info.get(\"dpi\", (0, 0))]\n\n quality = info.get(\"quality\", -1)\n subsampling = info.get(\"subsampling\", -1)\n qtables = info.get(\"qtables\")\n\n if quality == \"keep\":\n quality = -1\n subsampling = \"keep\"\n qtables = \"keep\"\n elif quality in presets:\n preset = presets[quality]\n quality = -1\n subsampling = preset.get(\"subsampling\", -1)\n qtables = preset.get(\"quantization\")\n elif not isinstance(quality, int):\n raise ValueError(\"Invalid quality setting\")\n else:\n if subsampling in presets:\n subsampling = presets[subsampling].get(\"subsampling\", -1)\n if isinstance(qtables, str) and qtables in presets:\n qtables = presets[qtables].get(\"quantization\")\n\n if subsampling == \"4:4:4\":\n subsampling = 0\n elif subsampling == \"4:2:2\":\n subsampling = 1\n elif subsampling == \"4:2:0\":\n subsampling = 2\n elif subsampling == \"4:1:1\":\n # For compatibility. 
Before Pillow 4.3, 4:1:1 actually meant 4:2:0.\n # Set 4:2:0 if someone is still using that value.\n subsampling = 2\n elif subsampling == \"keep\":\n if im.format != \"JPEG\":\n raise ValueError(\"Cannot use 'keep' when original image is not a JPEG\")\n subsampling = get_sampling(im)\n\n def validate_qtables(qtables):\n if qtables is None:\n return qtables\n if isinstance(qtables, str):\n try:\n lines = [\n int(num)\n for line in qtables.splitlines()\n for num in line.split(\"#\", 1)[0].split()\n ]\n except ValueError:\n raise ValueError(\"Invalid quantization table\")\n else:\n qtables = [lines[s : s + 64] for s in range(0, len(lines), 64)]\n if isinstance(qtables, (tuple, list, dict)):\n if isinstance(qtables, dict):\n qtables = convert_dict_qtables(qtables)\n elif isinstance(qtables, tuple):\n qtables = list(qtables)\n if not (0 < len(qtables) < 5):\n raise ValueError(\"None or too many quantization tables\")\n for idx, table in enumerate(qtables):\n try:\n if len(table) != 64:\n raise TypeError\n table = array.array(\"B\", table)\n except TypeError:\n raise ValueError(\"Invalid quantization table\")\n else:\n qtables[idx] = list(table)\n return qtables\n\n if qtables == \"keep\":\n if im.format != \"JPEG\":\n raise ValueError(\"Cannot use 'keep' when original image is not a JPEG\")\n qtables = getattr(im, \"quantization\", None)\n qtables = validate_qtables(qtables)\n\n extra = b\"\"\n\n icc_profile = info.get(\"icc_profile\")\n if icc_profile:\n ICC_OVERHEAD_LEN = 14\n MAX_BYTES_IN_MARKER = 65533\n MAX_DATA_BYTES_IN_MARKER = MAX_BYTES_IN_MARKER - ICC_OVERHEAD_LEN\n markers = []\n while icc_profile:\n markers.append(icc_profile[:MAX_DATA_BYTES_IN_MARKER])\n icc_profile = icc_profile[MAX_DATA_BYTES_IN_MARKER:]\n i = 1\n for marker in markers:\n size = struct.pack(\">H\", 2 + ICC_OVERHEAD_LEN + len(marker))\n extra += (\n b\"\\xFF\\xE2\"\n + size\n + b\"ICC_PROFILE\\0\"\n + o8(i)\n + o8(len(markers))\n + marker\n )\n i += 1\n\n # \"progressive\" is the official name, but older documentation\n # says \"progression\"\n # FIXME: issue a warning if the wrong form is used (post-1.1.7)\n progressive = info.get(\"progressive\", False) or info.get(\"progression\", False)\n\n optimize = info.get(\"optimize\", False)\n\n exif = info.get(\"exif\", b\"\")\n if isinstance(exif, Image.Exif):\n exif = exif.tobytes()\n\n # get keyword arguments\n im.encoderconfig = (\n quality,\n progressive,\n info.get(\"smooth\", 0),\n optimize,\n info.get(\"streamtype\", 0),\n dpi[0],\n dpi[1],\n subsampling,\n qtables,\n extra,\n exif,\n )\n\n # if we optimize, libjpeg needs a buffer big enough to hold the whole image\n # in a shot. Guessing on the size, at im.size bytes. (raw pixel size is\n # channels*size, this is a value that's been used in a django patch.\n # https://github.com/matthewwithanm/django-imagekit/issues/50\n bufsize = 0\n if optimize or progressive:\n # CMYK can be bigger\n if im.mode == \"CMYK\":\n bufsize = 4 * im.size[0] * im.size[1]\n # keep sets quality to -1, but the actual value may be high.\n elif quality >= 95 or quality == -1:\n bufsize = 2 * im.size[0] * im.size[1]\n else:\n bufsize = im.size[0] * im.size[1]\n\n # The EXIF info needs to be written as one block, + APP1, + one spare byte.\n # Ensure that our buffer is big enough. 
Same with the icc_profile block.\n bufsize = max(ImageFile.MAXBLOCK, bufsize, len(exif) + 5, len(extra) + 1)\n\n ImageFile._save(im, fp, [(\"jpeg\", (0, 0) + im.size, 0, rawmode)], bufsize)\n\n\ndef _save_cjpeg(im, fp, filename):\n # ALTERNATIVE: handle JPEGs via the IJG command line utilities.\n tempfile = im._dump()\n subprocess.check_call([\"cjpeg\", \"-outfile\", filename, tempfile])\n try:\n os.unlink(tempfile)\n except OSError:\n pass\n\n\n##\n# Factory for making JPEG and MPO instances\ndef jpeg_factory(fp=None, filename=None):\n im = JpegImageFile(fp, filename)\n try:\n mpheader = im._getmp()\n if mpheader[45057] > 1:\n # It's actually an MPO\n from .MpoImagePlugin import MpoImageFile\n\n # Don't reload everything, just convert it.\n im = MpoImageFile.adopt(im, mpheader)\n except (TypeError, IndexError):\n # It is really a JPEG\n pass\n except SyntaxError:\n warnings.warn(\n \"Image appears to be a malformed MPO file, it will be \"\n \"interpreted as a base JPEG file\"\n )\n return im\n\n\n# ---------------------------------------------------------------------\n# Registry stuff\n\nImage.register_open(JpegImageFile.format, jpeg_factory, _accept)\nImage.register_save(JpegImageFile.format, _save)\n\nImage.register_extensions(JpegImageFile.format, [\".jfif\", \".jpe\", \".jpg\", \".jpeg\"])\n\nImage.register_mime(JpegImageFile.format, \"image/jpeg\")\n",
"path": "src/PIL/JpegImagePlugin.py"
}
] | diff --git a/Tests/test_file_jpeg.py b/Tests/test_file_jpeg.py
index f13536d5868..33045122891 100644
--- a/Tests/test_file_jpeg.py
+++ b/Tests/test_file_jpeg.py
@@ -60,6 +60,8 @@ def test_app(self):
)
assert len(im.applist) == 2
+ assert im.info["comment"] == b"File written by Adobe Photoshop\xa8 4.0\x00"
+
def test_cmyk(self):
# Test CMYK handling. Thanks to Tim and Charlie for test data,
# Michael for getting me to look one more time.
diff --git a/docs/handbook/image-file-formats.rst b/docs/handbook/image-file-formats.rst
index 7ce685ed2a3..18f547a498c 100644
--- a/docs/handbook/image-file-formats.rst
+++ b/docs/handbook/image-file-formats.rst
@@ -298,6 +298,11 @@ The :py:meth:`~PIL.Image.Image.open` method may set the following
**exif**
Raw EXIF data from the image.
+**comment**
+ A comment about the image.
+
+ .. versionadded:: 7.1.0
+
The :py:meth:`~PIL.Image.Image.save` method supports the following options:
diff --git a/docs/releasenotes/7.1.0.rst b/docs/releasenotes/7.1.0.rst
index 1369177d26e..e3bc107ddff 100644
--- a/docs/releasenotes/7.1.0.rst
+++ b/docs/releasenotes/7.1.0.rst
@@ -18,6 +18,15 @@ been resolved.
im = Image.open("hopper.jpg")
im.save("out.jpg", quality=0)
+API Additions
+=============
+
+Reading JPEG comments
+^^^^^^^^^^^^^^^^^^^^^
+
+When opening a JPEG image, the comment may now be read into
+:py:attr:`~PIL.Image.Image.info`.
+
Other Changes
=============
diff --git a/src/PIL/JpegImagePlugin.py b/src/PIL/JpegImagePlugin.py
index 229eac2141e..2aa029efbff 100644
--- a/src/PIL/JpegImagePlugin.py
+++ b/src/PIL/JpegImagePlugin.py
@@ -176,6 +176,7 @@ def COM(self, marker):
n = i16(self.fp.read(2)) - 2
s = ImageFile._safe_read(self.fp, n)
+ self.info["comment"] = s
self.app["COM"] = s # compatibility
self.applist.append(("COM", s))
|
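For context on the Pillow change above: after this patch the JPEG COM marker is exposed through the image's `info` dictionary. A minimal usage sketch (the file name is a placeholder, not from the patch) could look like:

```python
from PIL import Image

# Hypothetical usage: "photo.jpg" stands in for any JPEG that carries a COM marker
with Image.open("photo.jpg") as im:
    comment = im.info.get("comment")  # raw bytes of the JPEG comment, or None
    print(comment)
```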
feast-dev__feast-244 | Feast cli config file should be settable by an env var
**Is your feature request related to a problem? Please describe.**
If I have multiple feast instances, I want to be able to set different .feast files to configure the CLI.
**Describe the solution you'd like**
export FEAST_CONFIG=path/to/feast/configfile
it should default to ~/.feast
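A minimal sketch of one way the CLI could honor such a variable (hypothetical until implemented; it keeps the existing `~/.feast/config.toml` layout as the default) is:

```python
import os
from os.path import expanduser, join

# Hypothetical sketch: FEAST_CONFIG overrides the default ".feast" directory name
CONFIGURATION_FILE_DIR = os.environ.get("FEAST_CONFIG", ".feast")
CONFIGURATION_FILE_NAME = "config.toml"


def _get_config_file_locations():
    # Still resolved under the user's home directory, e.g. ~/.feast/config.toml by default
    user_config_file_dir = join(expanduser("~"), CONFIGURATION_FILE_DIR)
    user_config_file_path = join(user_config_file_dir, CONFIGURATION_FILE_NAME)
    return user_config_file_dir, user_config_file_path
```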
| [
{
"content": "#\n# Copyright 2019 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom os.path import expanduser, join\nimport logging\nimport os\nimport sys\nfrom typing import Dict\nfrom urllib.parse import urlparse\nfrom urllib.parse import ParseResult\n\nimport toml\n\n_logger = logging.getLogger(__name__)\n\nfeast_configuration_properties = {\"core_url\": \"URL\", \"serving_url\": \"URL\"}\n\nCONFIGURATION_FILE_DIR = \".feast\"\nCONFIGURATION_FILE_NAME = \"config.toml\"\n\n\ndef get_or_create_config() -> Dict:\n \"\"\"\n Creates or gets the Feast users active configuration\n :return: dictionary of Feast properties\n \"\"\"\n\n user_config_file_dir, user_config_file_path = _get_config_file_locations()\n\n if not os.path.exists(os.path.dirname(user_config_file_dir)):\n os.makedirs(os.path.dirname(user_config_file_dir))\n\n if not os.path.isfile(user_config_file_path):\n _save_config(user_config_file_path, _fproperties_to_dict())\n\n try:\n return toml.load(user_config_file_path)\n except FileNotFoundError:\n _logger.error(\n \"Could not find Feast configuration file \" + user_config_file_path\n )\n sys.exit(1)\n except toml.decoder.TomlDecodeError:\n _logger.error(\n \"Could not decode Feast configuration file \" + user_config_file_path\n )\n sys.exit(1)\n except Exception as e:\n _logger.error(e)\n sys.exit(1)\n\n\ndef set_property(fproperty: str, value: str):\n \"\"\"\n Sets a single property in the Feast users local configuration file\n :param fproperty: Feast property name\n :param value: Feast property value\n \"\"\"\n\n if _is_valid_property(fproperty, value):\n active_feast_config = get_or_create_config()\n active_feast_config[fproperty] = value\n _, user_config_file_path = _get_config_file_locations()\n _save_config(user_config_file_path, active_feast_config)\n print(\"Updated property [%s]\" % fproperty)\n else:\n _logger.error(\"Invalid property selected\")\n sys.exit(1)\n\n\ndef get_config_property_or_fail(fproperty):\n active_feast_config = get_or_create_config()\n if _is_valid_property(fproperty, active_feast_config[fproperty]):\n return active_feast_config[fproperty]\n _logger.error(\"Could not load Feast property from configuration: %s\" % fproperty)\n sys.exit(1)\n\n\ndef _fproperties_to_dict() -> Dict[str, str]:\n prop_dict = {}\n for fproperty in feast_configuration_properties:\n prop_dict[fproperty] = \"\"\n return prop_dict\n\n\ndef _is_valid_property(fproperty: str, value: str) -> bool:\n \"\"\"\n Validates both a Feast property as well as value\n :param fproperty: Feast property name\n :param value: Feast property value\n :return: Returns True if property and value are valid\n \"\"\"\n\n if fproperty not in feast_configuration_properties:\n _logger.error(\"You are trying to set an invalid property\")\n sys.exit(1)\n\n fprop_type = feast_configuration_properties[fproperty]\n\n if fprop_type == \"URL\":\n if \"//\" not in value:\n value = \"%s%s\" % (\"grpc://\", value)\n parsed_value = urlparse(value) # type: ParseResult\n if 
parsed_value.netloc:\n return True\n\n _logger.error(\"The property you are trying to set could not be identified\")\n sys.exit(1)\n\n\ndef _save_config(user_config_file_path: str, config_string: Dict[str, str]):\n \"\"\"\n Saves Feast configuration\n :param user_config_file_path: Local file system path to save configuration\n :param config_string: Contents in dictionary format to save to path\n \"\"\"\n\n try:\n with open(user_config_file_path, \"w+\") as f:\n toml.dump(config_string, f)\n except Exception as e:\n _logger.error(\"Could not update configuration file for Feast\")\n print(e)\n sys.exit(1)\n\n\ndef _get_config_file_locations() -> (str, str):\n user_config_file_dir = join(expanduser(\"~\"), CONFIGURATION_FILE_DIR)\n user_config_file_path = join(user_config_file_dir, CONFIGURATION_FILE_NAME)\n return user_config_file_dir, user_config_file_path\n",
"path": "sdk/python/feast/config.py"
}
] | [
{
"content": "#\n# Copyright 2019 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom os.path import expanduser, join\nimport logging\nimport os\nimport sys\nfrom typing import Dict\nfrom urllib.parse import urlparse\nfrom urllib.parse import ParseResult\n\nimport toml\n\n_logger = logging.getLogger(__name__)\n\nfeast_configuration_properties = {\"core_url\": \"URL\", \"serving_url\": \"URL\"}\n\nCONFIGURATION_FILE_DIR = os.environ.get(\"FEAST_CONFIG\", \".feast\")\nCONFIGURATION_FILE_NAME = \"config.toml\"\n\n\ndef get_or_create_config() -> Dict:\n \"\"\"\n Creates or gets the Feast users active configuration\n :return: dictionary of Feast properties\n \"\"\"\n\n user_config_file_dir, user_config_file_path = _get_config_file_locations()\n\n if not os.path.exists(os.path.dirname(user_config_file_dir)):\n os.makedirs(os.path.dirname(user_config_file_dir))\n\n if not os.path.isfile(user_config_file_path):\n _save_config(user_config_file_path, _fproperties_to_dict())\n\n try:\n return toml.load(user_config_file_path)\n except FileNotFoundError:\n _logger.error(\n \"Could not find Feast configuration file \" + user_config_file_path\n )\n sys.exit(1)\n except toml.decoder.TomlDecodeError:\n _logger.error(\n \"Could not decode Feast configuration file \" + user_config_file_path\n )\n sys.exit(1)\n except Exception as e:\n _logger.error(e)\n sys.exit(1)\n\n\ndef set_property(fproperty: str, value: str):\n \"\"\"\n Sets a single property in the Feast users local configuration file\n :param fproperty: Feast property name\n :param value: Feast property value\n \"\"\"\n\n if _is_valid_property(fproperty, value):\n active_feast_config = get_or_create_config()\n active_feast_config[fproperty] = value\n _, user_config_file_path = _get_config_file_locations()\n _save_config(user_config_file_path, active_feast_config)\n print(\"Updated property [%s]\" % fproperty)\n else:\n _logger.error(\"Invalid property selected\")\n sys.exit(1)\n\n\ndef get_config_property_or_fail(fproperty):\n active_feast_config = get_or_create_config()\n if _is_valid_property(fproperty, active_feast_config[fproperty]):\n return active_feast_config[fproperty]\n _logger.error(\"Could not load Feast property from configuration: %s\" % fproperty)\n sys.exit(1)\n\n\ndef _fproperties_to_dict() -> Dict[str, str]:\n prop_dict = {}\n for fproperty in feast_configuration_properties:\n prop_dict[fproperty] = \"\"\n return prop_dict\n\n\ndef _is_valid_property(fproperty: str, value: str) -> bool:\n \"\"\"\n Validates both a Feast property as well as value\n :param fproperty: Feast property name\n :param value: Feast property value\n :return: Returns True if property and value are valid\n \"\"\"\n\n if fproperty not in feast_configuration_properties:\n _logger.error(\"You are trying to set an invalid property\")\n sys.exit(1)\n\n fprop_type = feast_configuration_properties[fproperty]\n\n if fprop_type == \"URL\":\n if \"//\" not in value:\n value = \"%s%s\" % (\"grpc://\", value)\n parsed_value = urlparse(value) # type: 
ParseResult\n if parsed_value.netloc:\n return True\n\n _logger.error(\"The property you are trying to set could not be identified\")\n sys.exit(1)\n\n\ndef _save_config(user_config_file_path: str, config_string: Dict[str, str]):\n \"\"\"\n Saves Feast configuration\n :param user_config_file_path: Local file system path to save configuration\n :param config_string: Contents in dictionary format to save to path\n \"\"\"\n\n try:\n with open(user_config_file_path, \"w+\") as f:\n toml.dump(config_string, f)\n except Exception as e:\n _logger.error(\"Could not update configuration file for Feast\")\n print(e)\n sys.exit(1)\n\n\ndef _get_config_file_locations() -> (str, str):\n user_config_file_dir = join(expanduser(\"~\"), CONFIGURATION_FILE_DIR)\n user_config_file_path = join(user_config_file_dir, CONFIGURATION_FILE_NAME)\n return user_config_file_dir, user_config_file_path\n",
"path": "sdk/python/feast/config.py"
}
] | diff --git a/sdk/python/feast/config.py b/sdk/python/feast/config.py
index 77867cbaa4d..9b6a6dd4d83 100644
--- a/sdk/python/feast/config.py
+++ b/sdk/python/feast/config.py
@@ -28,7 +28,7 @@
feast_configuration_properties = {"core_url": "URL", "serving_url": "URL"}
-CONFIGURATION_FILE_DIR = ".feast"
+CONFIGURATION_FILE_DIR = os.environ.get("FEAST_CONFIG", ".feast")
CONFIGURATION_FILE_NAME = "config.toml"
|
chainer__chainer-5586 | Docstring of `functions.forget` is incorrect as `+` doesn't retain inputs anymore
The docstring says that `(x + y) + x` retains the intermediate variable holding `x + y`.
```
Let ``f`` be a function defined as:
>>> def f(a, b):
... return a + b + a
and, ``x`` and ``y`` be :class:`~chainer.Variable`\\ s:
>>> x = chainer.Variable(np.random.uniform(-1, 1, 5).astype(np.float32))
>>> y = chainer.Variable(np.random.uniform(-1, 1, 5).astype(np.float32))
When ``z`` is calculated as ``z = f(x, y)``, its intermediate result
``x + y`` is stored in memory. Instead, if you call ``f`` with
``F.forget``:
>>> z = F.forget(f, x, y)
intermediate ``x + y`` is forgotten.
```
But this isn't true for the new-style function of `+`, because addition doesn't require its inputs to be retained for backpropagation.
I checked the behavior with the following script, which traverses the retained variables.
```python
import chainer
import chainer.functions as F
import numpy as np
def f(a, b):
return (a + b) + a
def recur_check_vars(v, x, y):
creator = v.creator_node
if creator is None:
return
for pnode in creator.inputs:
p = pnode.get_variable()
assert p.data is None or p is x or p is y
print(p)
recur_check_vars(p, x, y)
def main():
x = chainer.Variable(np.random.uniform(-1, 1, 5).astype(np.float32))
y = chainer.Variable(np.random.uniform(-1, 1, 5).astype(np.float32))
print(x)
print(y)
print()
z = f(x, y)
recur_check_vars(z, x, y)
if __name__ == '__main__':
main()
```
The script doesn't fail, and the output is as follows. We can see that `x + y` is discarded. The living variables `x` and `y` are still retrieved, as each `VariableNode` instance holds a weakref to the corresponding variable.
```
variable([-0.7699733 -0.50523347 -0.20869003 -0.7912116 0.92058474])
variable([ 0.58832335 -0.06183117 0.1939743 0.9021316 -0.19973369])
variable(None)
variable([-0.7699733 -0.50523347 -0.20869003 -0.7912116 0.92058474])
variable([ 0.58832335 -0.06183117 0.1939743 0.9021316 -0.19973369])
variable([-0.7699733 -0.50523347 -0.20869003 -0.7912116 0.92058474])
```
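If the docstring's example is meant to demonstrate an intermediate that would otherwise be kept, one option (a suggestion, not the only fix; multiplication does retain its inputs for the backward pass) is to use an operation that actually keeps `x + y` alive:

```python
def f(a, b):
    # (a + b) is retained here, because multiplication needs both of its
    # inputs on the backward pass
    return (a + b) * a
```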
| [
{
"content": "import chainer\nfrom chainer import function\nfrom chainer import function_node\nfrom chainer import variable\n\n\ndef _call_func(func, xs):\n outs = func(*xs)\n\n if isinstance(outs, tuple):\n for i, out in enumerate(outs):\n if isinstance(out, variable.Variable):\n continue\n n = i + 1\n suffix = {1: 'st', 2: 'nd', 3: 'rd'}.get(\n n if n < 20 else n % 10, 'th')\n msg = ('{}{} element of a returned tuple is not Variable, '\n 'but is {}').format(n, suffix, type(out))\n raise RuntimeError(msg)\n elif isinstance(outs, variable.Variable):\n outs = (outs,)\n else:\n msg = ('A tuple of Variables or a Variable are expected, but {} '\n 'is returned.'.format(type(outs)))\n raise RuntimeError(msg)\n\n return outs\n\n\nclass Forget(function_node.FunctionNode):\n\n def __init__(self, func):\n if not callable(func):\n raise TypeError('func must be callable')\n self.func = func\n\n def forward(self, inputs):\n self.retain_inputs(tuple(range(len(inputs))))\n with function.no_backprop_mode():\n xs = [variable.Variable(x) for x in inputs]\n outs = _call_func(self.func, xs)\n return tuple(out.data for out in outs)\n\n def backward(self, indexes, grad_outputs):\n # Double backprop is not allowed\n if chainer.config.enable_backprop:\n raise RuntimeError('double backpropagation in functions.forget is '\n 'not allowed.')\n\n inputs = self.get_retained_inputs()\n # Create new variables that have no creators\n dummy_inputs = tuple([variable.Variable(inp.array) for inp in inputs])\n\n with function.force_backprop_mode():\n outs = _call_func(self.func, dummy_inputs)\n assert len(outs) == len(grad_outputs)\n if len(outs) > 1:\n # Avoid doing backward multiple times when `outs` is a tuple\n outs = chainer.functions.identity(*outs)\n\n for out, grad_output in zip(outs, grad_outputs):\n out.grad_var = grad_output\n outs[0].backward()\n\n return tuple([inp.grad_var for inp in dummy_inputs])\n\n\ndef forget(func, *xs):\n \"\"\"Calls a function without storing intermediate results.\n\n On a forward propagation, Chainer normally stores all intermediate results\n of :class:`~chainer.variable.VariableNode`\\\\ s on a computational graph as\n they are required on backward propagation.\n Sometimes these results consume too much memory.\n ``F.forget`` *forgets* such intermediate results on forward propagation,\n and still supports backpropagation with recalculation.\n\n On a forward propagation, ``F.forget`` calls a given function with given\n variables without creating a computational graph. That means, no\n intermediate results are stored.\n On a backward propagation, ``F.forget`` calls the given function again to\n create a computational graph for backpropagation.\n\n ``F.forget`` reduces internal memory usage, whereas it requires more\n calculation time as it calls the function twice.\n\n .. admonition:: Example\n\n Let ``f`` be a function defined as:\n\n >>> def f(a, b):\n ... return a + b + a\n\n and, ``x`` and ``y`` be :class:`~chainer.Variable`\\\\ s:\n\n >>> x = chainer.Variable(np.random.uniform(-1, 1, 5).astype(np.float32))\n >>> y = chainer.Variable(np.random.uniform(-1, 1, 5).astype(np.float32))\n\n When ``z`` is calculated as ``z = f(x, y)``, its intermediate result\n ``x + y`` is stored in memory. Instead, if you call ``f`` with\n ``F.forget``:\n\n >>> z = F.forget(f, x, y)\n\n intermediate ``x + y`` is forgotten.\n\n .. 
note::\n\n ``F.forget`` does not support functions which behave differently in\n multiple calls with the same inputs, such as\n :meth:`F.dropout() <chainer.functions.dropout>` and\n :meth:`F.negative_sampling() <chainer.functions.negative_sampling>`.\n\n .. note::\n\n In case input argument variables are of class :class:`numpy.ndarray` or\n :class:`cupy.ndarray` objects, arguments will automatically be\n converted to :class:`~chainer.Variable`\\\\ s.\n This conversion takes place to ensure that this function is included\n in the computational graph to enable backward computations.\n\n .. note::\n\n ``F.forget`` does not support double backpropagation.\n\n Args:\n func (callable): A function to call. It needs to be called with\n :class:`~chainer.Variable` object(s) and to return a\n :class:`~chainer.Variable` object or a tuple of\n :class:`~chainer.Variable` objects.\n xs (~chainer.Variable): Argument variables of the function.\n\n Returns:\n ~chainer.Variable: A variable ``func`` returns. If it returns a tuple,\n the method returns a tuple too.\n\n \"\"\"\n xs = tuple(x if isinstance(x, variable.Variable) else\n variable.Variable(x, requires_grad=True) for x in xs)\n y = Forget(func).apply(xs)\n if len(y) == 1:\n y, = y\n return y\n",
"path": "chainer/functions/util/forget.py"
}
] | [
{
"content": "import chainer\nfrom chainer import function\nfrom chainer import function_node\nfrom chainer import variable\n\n\ndef _call_func(func, xs):\n outs = func(*xs)\n\n if isinstance(outs, tuple):\n for i, out in enumerate(outs):\n if isinstance(out, variable.Variable):\n continue\n n = i + 1\n suffix = {1: 'st', 2: 'nd', 3: 'rd'}.get(\n n if n < 20 else n % 10, 'th')\n msg = ('{}{} element of a returned tuple is not Variable, '\n 'but is {}').format(n, suffix, type(out))\n raise RuntimeError(msg)\n elif isinstance(outs, variable.Variable):\n outs = (outs,)\n else:\n msg = ('A tuple of Variables or a Variable are expected, but {} '\n 'is returned.'.format(type(outs)))\n raise RuntimeError(msg)\n\n return outs\n\n\nclass Forget(function_node.FunctionNode):\n\n def __init__(self, func):\n if not callable(func):\n raise TypeError('func must be callable')\n self.func = func\n\n def forward(self, inputs):\n self.retain_inputs(tuple(range(len(inputs))))\n with function.no_backprop_mode():\n xs = [variable.Variable(x) for x in inputs]\n outs = _call_func(self.func, xs)\n return tuple(out.data for out in outs)\n\n def backward(self, indexes, grad_outputs):\n # Double backprop is not allowed\n if chainer.config.enable_backprop:\n raise RuntimeError('double backpropagation in functions.forget is '\n 'not allowed.')\n\n inputs = self.get_retained_inputs()\n # Create new variables that have no creators\n dummy_inputs = tuple([variable.Variable(inp.array) for inp in inputs])\n\n with function.force_backprop_mode():\n outs = _call_func(self.func, dummy_inputs)\n assert len(outs) == len(grad_outputs)\n if len(outs) > 1:\n # Avoid doing backward multiple times when `outs` is a tuple\n outs = chainer.functions.identity(*outs)\n\n for out, grad_output in zip(outs, grad_outputs):\n out.grad_var = grad_output\n outs[0].backward()\n\n return tuple([inp.grad_var for inp in dummy_inputs])\n\n\ndef forget(func, *xs):\n \"\"\"Calls a function without storing intermediate results.\n\n On a forward propagation, Chainer normally stores all intermediate results\n of :class:`~chainer.variable.VariableNode`\\\\ s on a computational graph as\n they are required on backward propagation.\n Sometimes these results consume too much memory.\n ``F.forget`` *forgets* such intermediate results on forward propagation,\n and still supports backpropagation with recalculation.\n\n On a forward propagation, ``F.forget`` calls a given function with given\n variables without creating a computational graph. That means, no\n intermediate results are stored.\n On a backward propagation, ``F.forget`` calls the given function again to\n create a computational graph for backpropagation.\n\n ``F.forget`` reduces internal memory usage, whereas it requires more\n calculation time as it calls the function twice.\n\n .. admonition:: Example\n\n Let ``f`` be a function defined as:\n\n >>> def f(a, b):\n ... return (a + b) * a\n\n and, ``x`` and ``y`` be :class:`~chainer.Variable`\\\\ s:\n\n >>> x = chainer.Variable(np.random.uniform(-1, 1, 5).astype(np.float32))\n >>> y = chainer.Variable(np.random.uniform(-1, 1, 5).astype(np.float32))\n\n When ``z`` is calculated as ``z = f(x, y)``, its intermediate result\n ``x + y`` is stored in memory. Instead, if you call ``f`` with\n ``F.forget``:\n\n >>> z = F.forget(f, x, y)\n\n intermediate ``x + y`` is forgotten.\n\n .. 
note::\n\n ``F.forget`` does not support functions which behave differently in\n multiple calls with the same inputs, such as\n :meth:`F.dropout() <chainer.functions.dropout>` and\n :meth:`F.negative_sampling() <chainer.functions.negative_sampling>`.\n\n .. note::\n\n In case input argument variables are of class :class:`numpy.ndarray` or\n :class:`cupy.ndarray` objects, arguments will automatically be\n converted to :class:`~chainer.Variable`\\\\ s.\n This conversion takes place to ensure that this function is included\n in the computational graph to enable backward computations.\n\n .. note::\n\n ``F.forget`` does not support double backpropagation.\n\n Args:\n func (callable): A function to call. It needs to be called with\n :class:`~chainer.Variable` object(s) and to return a\n :class:`~chainer.Variable` object or a tuple of\n :class:`~chainer.Variable` objects.\n xs (~chainer.Variable): Argument variables of the function.\n\n Returns:\n ~chainer.Variable: A variable ``func`` returns. If it returns a tuple,\n the method returns a tuple too.\n\n \"\"\"\n xs = tuple(x if isinstance(x, variable.Variable) else\n variable.Variable(x, requires_grad=True) for x in xs)\n y = Forget(func).apply(xs)\n if len(y) == 1:\n y, = y\n return y\n",
"path": "chainer/functions/util/forget.py"
}
] | diff --git a/chainer/functions/util/forget.py b/chainer/functions/util/forget.py
index 14e62093af8c..44c1c2271d57 100644
--- a/chainer/functions/util/forget.py
+++ b/chainer/functions/util/forget.py
@@ -89,7 +89,7 @@ def forget(func, *xs):
Let ``f`` be a function defined as:
>>> def f(a, b):
- ... return a + b + a
+ ... return (a + b) * a
and, ``x`` and ``y`` be :class:`~chainer.Variable`\\ s:
|
ray-project__ray-9297 | [tune] Parameters from `tune.choice()` do not get logged to TensorBoard when they are integers
### What is the problem?
When providing parameters via `tune.choice()` that include integers, the values are not logged to TensorBoard's HPARAMS section.
The issue is that `numpy.random.choice([1, 2, 3])` (for example) returns `numpy.int32`/`numpy.int64` and those types are not included in the `VALID_HPARAMS = (str, bool, int, float, list)` tuple (python/ray/tune/logger.py).
Since TensorBoard has no issues with logging `numpy.int32/64`, one simple solution would be to just include those types in the tuple above. Happy to provide a PR if you think this is the way to go.
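For illustration, a minimal sketch of what such a change could look like in `python/ray/tune/logger.py` (this assumes `numpy` is already imported there as `np`, and uses `np.integer` as a catch-all for `np.int32`/`np.int64`; it is not necessarily the exact fix that was merged):

```python
import numpy as np

# Sketch only: widen the accepted hparam types so NumPy integer scalars pass
# the isinstance() filter used when scrubbing hyperparameters for TensorBoard.
VALID_HPARAMS = (str, bool, int, np.integer, float, list)

# np.random.choice([1, 2, 3]) returns a NumPy integer scalar, which is an
# instance of np.integer and would now be kept instead of being scrubbed.
assert isinstance(np.random.choice([1, 2, 3]), VALID_HPARAMS)
```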
*Ray version and other system information (Python version, TensorFlow version, OS):*
ray: 0.8.6
python: 3.7.7
tensorboard: 2.2.2
ubuntu: 20.04
### Reproduction (REQUIRED)
```python
from ray import tune
def trainable(config):
tune.report(score=config["a"])
config_dict = {"a": tune.choice([1, 2, 3])}
tune.run(trainable, config=config_dict, num_samples=1)
```
- [x] I have verified my script runs in a clean environment and reproduces the issue.
- [x] I have verified the issue also occurs with the [latest wheels](https://docs.ray.io/en/latest/installation.html).
| [
{
"content": "import csv\nimport json\nimport logging\nimport os\nimport yaml\nimport numbers\nimport numpy as np\n\nimport ray.cloudpickle as cloudpickle\nfrom ray.util.debug import log_once\nfrom ray.tune.result import (NODE_IP, TRAINING_ITERATION, TIME_TOTAL_S,\n TIMESTEPS_TOTAL, EXPR_PARAM_FILE,\n EXPR_PARAM_PICKLE_FILE, EXPR_PROGRESS_FILE,\n EXPR_RESULT_FILE)\nfrom ray.tune.syncer import get_node_syncer\nfrom ray.tune.utils import flatten_dict\n\nlogger = logging.getLogger(__name__)\n\ntf = None\nVALID_SUMMARY_TYPES = [int, float, np.float32, np.float64, np.int32, np.int64]\n\n\nclass Logger:\n \"\"\"Logging interface for ray.tune.\n\n By default, the UnifiedLogger implementation is used which logs results in\n multiple formats (TensorBoard, rllab/viskit, plain json, custom loggers)\n at once.\n\n Arguments:\n config: Configuration passed to all logger creators.\n logdir: Directory for all logger creators to log to.\n trial (Trial): Trial object for the logger to access.\n \"\"\"\n\n def __init__(self, config, logdir, trial=None):\n self.config = config\n self.logdir = logdir\n self.trial = trial\n self._init()\n\n def _init(self):\n pass\n\n def on_result(self, result):\n \"\"\"Given a result, appends it to the existing log.\"\"\"\n\n raise NotImplementedError\n\n def update_config(self, config):\n \"\"\"Updates the config for logger.\"\"\"\n\n pass\n\n def close(self):\n \"\"\"Releases all resources used by this logger.\"\"\"\n\n pass\n\n def flush(self):\n \"\"\"Flushes all disk writes to storage.\"\"\"\n\n pass\n\n\nclass NoopLogger(Logger):\n def on_result(self, result):\n pass\n\n\nclass MLFLowLogger(Logger):\n \"\"\"MLFlow logger.\n\n Requires the experiment configuration to have a MLFlow Experiment ID\n or manually set the proper environment variables.\n\n \"\"\"\n\n def _init(self):\n from mlflow.tracking import MlflowClient\n client = MlflowClient()\n run = client.create_run(self.config.get(\"mlflow_experiment_id\"))\n self._run_id = run.info.run_id\n for key, value in self.config.items():\n client.log_param(self._run_id, key, value)\n self.client = client\n\n def on_result(self, result):\n for key, value in result.items():\n if not isinstance(value, float):\n continue\n self.client.log_metric(\n self._run_id, key, value, step=result.get(TRAINING_ITERATION))\n\n def close(self):\n self.client.set_terminated(self._run_id)\n\n\nclass JsonLogger(Logger):\n \"\"\"Logs trial results in json format.\n\n Also writes to a results file and param.json file when results or\n configurations are updated. 
Experiments must be executed with the\n JsonLogger to be compatible with the ExperimentAnalysis tool.\n \"\"\"\n\n def _init(self):\n self.update_config(self.config)\n local_file = os.path.join(self.logdir, EXPR_RESULT_FILE)\n self.local_out = open(local_file, \"a\")\n\n def on_result(self, result):\n json.dump(result, self, cls=_SafeFallbackEncoder)\n self.write(\"\\n\")\n self.local_out.flush()\n\n def write(self, b):\n self.local_out.write(b)\n\n def flush(self):\n self.local_out.flush()\n\n def close(self):\n self.local_out.close()\n\n def update_config(self, config):\n self.config = config\n config_out = os.path.join(self.logdir, EXPR_PARAM_FILE)\n with open(config_out, \"w\") as f:\n json.dump(\n self.config,\n f,\n indent=2,\n sort_keys=True,\n cls=_SafeFallbackEncoder)\n config_pkl = os.path.join(self.logdir, EXPR_PARAM_PICKLE_FILE)\n with open(config_pkl, \"wb\") as f:\n cloudpickle.dump(self.config, f)\n\n\nclass CSVLogger(Logger):\n \"\"\"Logs results to progress.csv under the trial directory.\n\n Automatically flattens nested dicts in the result dict before writing\n to csv:\n\n {\"a\": {\"b\": 1, \"c\": 2}} -> {\"a/b\": 1, \"a/c\": 2}\n\n \"\"\"\n\n def _init(self):\n \"\"\"CSV outputted with Headers as first set of results.\"\"\"\n progress_file = os.path.join(self.logdir, EXPR_PROGRESS_FILE)\n self._continuing = os.path.exists(progress_file)\n self._file = open(progress_file, \"a\")\n self._csv_out = None\n\n def on_result(self, result):\n tmp = result.copy()\n if \"config\" in tmp:\n del tmp[\"config\"]\n result = flatten_dict(tmp, delimiter=\"/\")\n if self._csv_out is None:\n self._csv_out = csv.DictWriter(self._file, result.keys())\n if not self._continuing:\n self._csv_out.writeheader()\n self._csv_out.writerow(\n {k: v\n for k, v in result.items() if k in self._csv_out.fieldnames})\n self._file.flush()\n\n def flush(self):\n self._file.flush()\n\n def close(self):\n self._file.close()\n\n\nclass TBXLogger(Logger):\n \"\"\"TensorBoardX Logger.\n\n Note that hparams will be written only after a trial has terminated.\n This logger automatically flattens nested dicts to show on TensorBoard:\n\n {\"a\": {\"b\": 1, \"c\": 2}} -> {\"a/b\": 1, \"a/c\": 2}\n \"\"\"\n\n # NoneType is not supported on the last TBX release yet.\n VALID_HPARAMS = (str, bool, int, float, list)\n\n def _init(self):\n try:\n from tensorboardX import SummaryWriter\n except ImportError:\n logger.error(\"pip install 'ray[tune]' to see TensorBoard files.\")\n raise\n self._file_writer = SummaryWriter(self.logdir, flush_secs=30)\n self.last_result = None\n\n def on_result(self, result):\n step = result.get(TIMESTEPS_TOTAL) or result[TRAINING_ITERATION]\n\n tmp = result.copy()\n for k in [\n \"config\", \"pid\", \"timestamp\", TIME_TOTAL_S, TRAINING_ITERATION\n ]:\n if k in tmp:\n del tmp[k] # not useful to log these\n\n flat_result = flatten_dict(tmp, delimiter=\"/\")\n path = [\"ray\", \"tune\"]\n valid_result = {}\n\n for attr, value in flat_result.items():\n full_attr = \"/\".join(path + [attr])\n if type(value) in VALID_SUMMARY_TYPES and not np.isnan(value):\n valid_result[full_attr] = value\n self._file_writer.add_scalar(\n full_attr, value, global_step=step)\n elif (type(value) == list\n and len(value) > 0) or (type(value) == np.ndarray\n and value.size > 0):\n valid_result[full_attr] = value\n try:\n self._file_writer.add_histogram(\n full_attr, value, global_step=step)\n # In case TensorboardX still doesn't think it's a valid value\n # (e.g. 
`[[]]`), warn and move on.\n except (ValueError, TypeError):\n if log_once(\"invalid_tbx_value\"):\n logger.warning(\n \"You are trying to log an invalid value ({}={}) \"\n \"via {}!\".format(full_attr, value,\n type(self).__name__))\n\n self.last_result = valid_result\n self._file_writer.flush()\n\n def flush(self):\n if self._file_writer is not None:\n self._file_writer.flush()\n\n def close(self):\n if self._file_writer is not None:\n if self.trial and self.trial.evaluated_params and self.last_result:\n flat_result = flatten_dict(self.last_result, delimiter=\"/\")\n scrubbed_result = {\n k: value\n for k, value in flat_result.items()\n if type(value) in VALID_SUMMARY_TYPES\n }\n self._try_log_hparams(scrubbed_result)\n self._file_writer.close()\n\n def _try_log_hparams(self, result):\n # TBX currently errors if the hparams value is None.\n flat_params = flatten_dict(self.trial.evaluated_params)\n scrubbed_params = {\n k: v\n for k, v in flat_params.items()\n if isinstance(v, self.VALID_HPARAMS)\n }\n\n removed = {\n k: v\n for k, v in flat_params.items()\n if not isinstance(v, self.VALID_HPARAMS)\n }\n if removed:\n logger.info(\n \"Removed the following hyperparameter values when \"\n \"logging to tensorboard: %s\", str(removed))\n\n from tensorboardX.summary import hparams\n try:\n experiment_tag, session_start_tag, session_end_tag = hparams(\n hparam_dict=scrubbed_params, metric_dict=result)\n self._file_writer.file_writer.add_summary(experiment_tag)\n self._file_writer.file_writer.add_summary(session_start_tag)\n self._file_writer.file_writer.add_summary(session_end_tag)\n except Exception:\n logger.exception(\"TensorboardX failed to log hparams. \"\n \"This may be due to an unsupported type \"\n \"in the hyperparameter values.\")\n\n\nDEFAULT_LOGGERS = (JsonLogger, CSVLogger, TBXLogger)\n\n\nclass UnifiedLogger(Logger):\n \"\"\"Unified result logger for TensorBoard, rllab/viskit, plain json.\n\n Arguments:\n config: Configuration passed to all logger creators.\n logdir: Directory for all logger creators to log to.\n loggers (list): List of logger creators. Defaults to CSV, Tensorboard,\n and JSON loggers.\n sync_function (func|str): Optional function for syncer to run.\n See ray/python/ray/tune/syncer.py\n \"\"\"\n\n def __init__(self,\n config,\n logdir,\n trial=None,\n loggers=None,\n sync_function=None):\n if loggers is None:\n self._logger_cls_list = DEFAULT_LOGGERS\n else:\n self._logger_cls_list = loggers\n if JsonLogger not in self._logger_cls_list:\n if log_once(\"JsonLogger\"):\n logger.warning(\n \"JsonLogger not provided. 
The ExperimentAnalysis tool is \"\n \"disabled.\")\n self._sync_function = sync_function\n self._log_syncer = None\n\n super(UnifiedLogger, self).__init__(config, logdir, trial)\n\n def _init(self):\n self._loggers = []\n for cls in self._logger_cls_list:\n try:\n self._loggers.append(cls(self.config, self.logdir, self.trial))\n except Exception as exc:\n logger.warning(\"Could not instantiate %s: %s.\", cls.__name__,\n str(exc))\n self._log_syncer = get_node_syncer(\n self.logdir,\n remote_dir=self.logdir,\n sync_function=self._sync_function)\n\n def on_result(self, result):\n for _logger in self._loggers:\n _logger.on_result(result)\n self._log_syncer.set_worker_ip(result.get(NODE_IP))\n self._log_syncer.sync_down_if_needed()\n\n def update_config(self, config):\n for _logger in self._loggers:\n _logger.update_config(config)\n\n def close(self):\n for _logger in self._loggers:\n _logger.close()\n\n def flush(self, sync_down=True):\n for _logger in self._loggers:\n _logger.flush()\n if sync_down:\n if not self._log_syncer.sync_down():\n logger.warning(\"Trial %s: Post-flush sync skipped.\",\n self.trial)\n\n def sync_up(self):\n return self._log_syncer.sync_up()\n\n def sync_down(self):\n return self._log_syncer.sync_down()\n\n def wait(self):\n self._log_syncer.wait()\n\n def sync_results_to_new_location(self, worker_ip):\n \"\"\"Sends the current log directory to the remote node.\n\n Syncing will not occur if the cluster is not started\n with the Ray autoscaler.\n \"\"\"\n if worker_ip != self._log_syncer.worker_ip:\n logger.info(\"Trial %s: Syncing (blocking) results to %s\",\n self.trial, worker_ip)\n self._log_syncer.reset()\n self._log_syncer.set_worker_ip(worker_ip)\n if not self._log_syncer.sync_up():\n logger.error(\n \"Trial %s: Sync up to new location skipped. \"\n \"This should not occur.\", self.trial)\n self._log_syncer.wait()\n else:\n logger.error(\n \"Trial %s: Sync attempted to same IP %s. This \"\n \"should not occur.\", self.trial, worker_ip)\n\n\nclass _SafeFallbackEncoder(json.JSONEncoder):\n def __init__(self, nan_str=\"null\", **kwargs):\n super(_SafeFallbackEncoder, self).__init__(**kwargs)\n self.nan_str = nan_str\n\n def default(self, value):\n try:\n if np.isnan(value):\n return self.nan_str\n\n if (type(value).__module__ == np.__name__\n and isinstance(value, np.ndarray)):\n return value.tolist()\n\n if issubclass(type(value), numbers.Integral):\n return int(value)\n if issubclass(type(value), numbers.Number):\n return float(value)\n\n return super(_SafeFallbackEncoder, self).default(value)\n\n except Exception:\n return str(value) # give up, just stringify it (ok for logs)\n\n\ndef pretty_print(result):\n result = result.copy()\n result.update(config=None) # drop config from pretty print\n result.update(hist_stats=None) # drop hist_stats from pretty print\n out = {}\n for k, v in result.items():\n if v is not None:\n out[k] = v\n\n cleaned = json.dumps(out, cls=_SafeFallbackEncoder)\n return yaml.safe_dump(json.loads(cleaned), default_flow_style=False)\n",
"path": "python/ray/tune/logger.py"
}
] | [
{
"content": "import csv\nimport json\nimport logging\nimport os\nimport yaml\nimport numbers\nimport numpy as np\n\nimport ray.cloudpickle as cloudpickle\nfrom ray.util.debug import log_once\nfrom ray.tune.result import (NODE_IP, TRAINING_ITERATION, TIME_TOTAL_S,\n TIMESTEPS_TOTAL, EXPR_PARAM_FILE,\n EXPR_PARAM_PICKLE_FILE, EXPR_PROGRESS_FILE,\n EXPR_RESULT_FILE)\nfrom ray.tune.syncer import get_node_syncer\nfrom ray.tune.utils import flatten_dict\n\nlogger = logging.getLogger(__name__)\n\ntf = None\nVALID_SUMMARY_TYPES = [int, float, np.float32, np.float64, np.int32, np.int64]\n\n\nclass Logger:\n \"\"\"Logging interface for ray.tune.\n\n By default, the UnifiedLogger implementation is used which logs results in\n multiple formats (TensorBoard, rllab/viskit, plain json, custom loggers)\n at once.\n\n Arguments:\n config: Configuration passed to all logger creators.\n logdir: Directory for all logger creators to log to.\n trial (Trial): Trial object for the logger to access.\n \"\"\"\n\n def __init__(self, config, logdir, trial=None):\n self.config = config\n self.logdir = logdir\n self.trial = trial\n self._init()\n\n def _init(self):\n pass\n\n def on_result(self, result):\n \"\"\"Given a result, appends it to the existing log.\"\"\"\n\n raise NotImplementedError\n\n def update_config(self, config):\n \"\"\"Updates the config for logger.\"\"\"\n\n pass\n\n def close(self):\n \"\"\"Releases all resources used by this logger.\"\"\"\n\n pass\n\n def flush(self):\n \"\"\"Flushes all disk writes to storage.\"\"\"\n\n pass\n\n\nclass NoopLogger(Logger):\n def on_result(self, result):\n pass\n\n\nclass MLFLowLogger(Logger):\n \"\"\"MLFlow logger.\n\n Requires the experiment configuration to have a MLFlow Experiment ID\n or manually set the proper environment variables.\n\n \"\"\"\n\n def _init(self):\n from mlflow.tracking import MlflowClient\n client = MlflowClient()\n run = client.create_run(self.config.get(\"mlflow_experiment_id\"))\n self._run_id = run.info.run_id\n for key, value in self.config.items():\n client.log_param(self._run_id, key, value)\n self.client = client\n\n def on_result(self, result):\n for key, value in result.items():\n if not isinstance(value, float):\n continue\n self.client.log_metric(\n self._run_id, key, value, step=result.get(TRAINING_ITERATION))\n\n def close(self):\n self.client.set_terminated(self._run_id)\n\n\nclass JsonLogger(Logger):\n \"\"\"Logs trial results in json format.\n\n Also writes to a results file and param.json file when results or\n configurations are updated. 
Experiments must be executed with the\n JsonLogger to be compatible with the ExperimentAnalysis tool.\n \"\"\"\n\n def _init(self):\n self.update_config(self.config)\n local_file = os.path.join(self.logdir, EXPR_RESULT_FILE)\n self.local_out = open(local_file, \"a\")\n\n def on_result(self, result):\n json.dump(result, self, cls=_SafeFallbackEncoder)\n self.write(\"\\n\")\n self.local_out.flush()\n\n def write(self, b):\n self.local_out.write(b)\n\n def flush(self):\n self.local_out.flush()\n\n def close(self):\n self.local_out.close()\n\n def update_config(self, config):\n self.config = config\n config_out = os.path.join(self.logdir, EXPR_PARAM_FILE)\n with open(config_out, \"w\") as f:\n json.dump(\n self.config,\n f,\n indent=2,\n sort_keys=True,\n cls=_SafeFallbackEncoder)\n config_pkl = os.path.join(self.logdir, EXPR_PARAM_PICKLE_FILE)\n with open(config_pkl, \"wb\") as f:\n cloudpickle.dump(self.config, f)\n\n\nclass CSVLogger(Logger):\n \"\"\"Logs results to progress.csv under the trial directory.\n\n Automatically flattens nested dicts in the result dict before writing\n to csv:\n\n {\"a\": {\"b\": 1, \"c\": 2}} -> {\"a/b\": 1, \"a/c\": 2}\n\n \"\"\"\n\n def _init(self):\n \"\"\"CSV outputted with Headers as first set of results.\"\"\"\n progress_file = os.path.join(self.logdir, EXPR_PROGRESS_FILE)\n self._continuing = os.path.exists(progress_file)\n self._file = open(progress_file, \"a\")\n self._csv_out = None\n\n def on_result(self, result):\n tmp = result.copy()\n if \"config\" in tmp:\n del tmp[\"config\"]\n result = flatten_dict(tmp, delimiter=\"/\")\n if self._csv_out is None:\n self._csv_out = csv.DictWriter(self._file, result.keys())\n if not self._continuing:\n self._csv_out.writeheader()\n self._csv_out.writerow(\n {k: v\n for k, v in result.items() if k in self._csv_out.fieldnames})\n self._file.flush()\n\n def flush(self):\n self._file.flush()\n\n def close(self):\n self._file.close()\n\n\nclass TBXLogger(Logger):\n \"\"\"TensorBoardX Logger.\n\n Note that hparams will be written only after a trial has terminated.\n This logger automatically flattens nested dicts to show on TensorBoard:\n\n {\"a\": {\"b\": 1, \"c\": 2}} -> {\"a/b\": 1, \"a/c\": 2}\n \"\"\"\n\n # NoneType is not supported on the last TBX release yet.\n VALID_HPARAMS = (str, bool, np.bool8, int, np.integer, float, list)\n\n def _init(self):\n try:\n from tensorboardX import SummaryWriter\n except ImportError:\n logger.error(\"pip install 'ray[tune]' to see TensorBoard files.\")\n raise\n self._file_writer = SummaryWriter(self.logdir, flush_secs=30)\n self.last_result = None\n\n def on_result(self, result):\n step = result.get(TIMESTEPS_TOTAL) or result[TRAINING_ITERATION]\n\n tmp = result.copy()\n for k in [\n \"config\", \"pid\", \"timestamp\", TIME_TOTAL_S, TRAINING_ITERATION\n ]:\n if k in tmp:\n del tmp[k] # not useful to log these\n\n flat_result = flatten_dict(tmp, delimiter=\"/\")\n path = [\"ray\", \"tune\"]\n valid_result = {}\n\n for attr, value in flat_result.items():\n full_attr = \"/\".join(path + [attr])\n if type(value) in VALID_SUMMARY_TYPES and not np.isnan(value):\n valid_result[full_attr] = value\n self._file_writer.add_scalar(\n full_attr, value, global_step=step)\n elif (type(value) == list\n and len(value) > 0) or (type(value) == np.ndarray\n and value.size > 0):\n valid_result[full_attr] = value\n try:\n self._file_writer.add_histogram(\n full_attr, value, global_step=step)\n # In case TensorboardX still doesn't think it's a valid value\n # (e.g. 
`[[]]`), warn and move on.\n except (ValueError, TypeError):\n if log_once(\"invalid_tbx_value\"):\n logger.warning(\n \"You are trying to log an invalid value ({}={}) \"\n \"via {}!\".format(full_attr, value,\n type(self).__name__))\n\n self.last_result = valid_result\n self._file_writer.flush()\n\n def flush(self):\n if self._file_writer is not None:\n self._file_writer.flush()\n\n def close(self):\n if self._file_writer is not None:\n if self.trial and self.trial.evaluated_params and self.last_result:\n flat_result = flatten_dict(self.last_result, delimiter=\"/\")\n scrubbed_result = {\n k: value\n for k, value in flat_result.items()\n if type(value) in VALID_SUMMARY_TYPES\n }\n self._try_log_hparams(scrubbed_result)\n self._file_writer.close()\n\n def _try_log_hparams(self, result):\n # TBX currently errors if the hparams value is None.\n flat_params = flatten_dict(self.trial.evaluated_params)\n scrubbed_params = {\n k: v\n for k, v in flat_params.items()\n if isinstance(v, self.VALID_HPARAMS)\n }\n\n removed = {\n k: v\n for k, v in flat_params.items()\n if not isinstance(v, self.VALID_HPARAMS)\n }\n if removed:\n logger.info(\n \"Removed the following hyperparameter values when \"\n \"logging to tensorboard: %s\", str(removed))\n\n from tensorboardX.summary import hparams\n try:\n experiment_tag, session_start_tag, session_end_tag = hparams(\n hparam_dict=scrubbed_params, metric_dict=result)\n self._file_writer.file_writer.add_summary(experiment_tag)\n self._file_writer.file_writer.add_summary(session_start_tag)\n self._file_writer.file_writer.add_summary(session_end_tag)\n except Exception:\n logger.exception(\"TensorboardX failed to log hparams. \"\n \"This may be due to an unsupported type \"\n \"in the hyperparameter values.\")\n\n\nDEFAULT_LOGGERS = (JsonLogger, CSVLogger, TBXLogger)\n\n\nclass UnifiedLogger(Logger):\n \"\"\"Unified result logger for TensorBoard, rllab/viskit, plain json.\n\n Arguments:\n config: Configuration passed to all logger creators.\n logdir: Directory for all logger creators to log to.\n loggers (list): List of logger creators. Defaults to CSV, Tensorboard,\n and JSON loggers.\n sync_function (func|str): Optional function for syncer to run.\n See ray/python/ray/tune/syncer.py\n \"\"\"\n\n def __init__(self,\n config,\n logdir,\n trial=None,\n loggers=None,\n sync_function=None):\n if loggers is None:\n self._logger_cls_list = DEFAULT_LOGGERS\n else:\n self._logger_cls_list = loggers\n if JsonLogger not in self._logger_cls_list:\n if log_once(\"JsonLogger\"):\n logger.warning(\n \"JsonLogger not provided. 
The ExperimentAnalysis tool is \"\n \"disabled.\")\n self._sync_function = sync_function\n self._log_syncer = None\n\n super(UnifiedLogger, self).__init__(config, logdir, trial)\n\n def _init(self):\n self._loggers = []\n for cls in self._logger_cls_list:\n try:\n self._loggers.append(cls(self.config, self.logdir, self.trial))\n except Exception as exc:\n logger.warning(\"Could not instantiate %s: %s.\", cls.__name__,\n str(exc))\n self._log_syncer = get_node_syncer(\n self.logdir,\n remote_dir=self.logdir,\n sync_function=self._sync_function)\n\n def on_result(self, result):\n for _logger in self._loggers:\n _logger.on_result(result)\n self._log_syncer.set_worker_ip(result.get(NODE_IP))\n self._log_syncer.sync_down_if_needed()\n\n def update_config(self, config):\n for _logger in self._loggers:\n _logger.update_config(config)\n\n def close(self):\n for _logger in self._loggers:\n _logger.close()\n\n def flush(self, sync_down=True):\n for _logger in self._loggers:\n _logger.flush()\n if sync_down:\n if not self._log_syncer.sync_down():\n logger.warning(\"Trial %s: Post-flush sync skipped.\",\n self.trial)\n\n def sync_up(self):\n return self._log_syncer.sync_up()\n\n def sync_down(self):\n return self._log_syncer.sync_down()\n\n def wait(self):\n self._log_syncer.wait()\n\n def sync_results_to_new_location(self, worker_ip):\n \"\"\"Sends the current log directory to the remote node.\n\n Syncing will not occur if the cluster is not started\n with the Ray autoscaler.\n \"\"\"\n if worker_ip != self._log_syncer.worker_ip:\n logger.info(\"Trial %s: Syncing (blocking) results to %s\",\n self.trial, worker_ip)\n self._log_syncer.reset()\n self._log_syncer.set_worker_ip(worker_ip)\n if not self._log_syncer.sync_up():\n logger.error(\n \"Trial %s: Sync up to new location skipped. \"\n \"This should not occur.\", self.trial)\n self._log_syncer.wait()\n else:\n logger.error(\n \"Trial %s: Sync attempted to same IP %s. This \"\n \"should not occur.\", self.trial, worker_ip)\n\n\nclass _SafeFallbackEncoder(json.JSONEncoder):\n def __init__(self, nan_str=\"null\", **kwargs):\n super(_SafeFallbackEncoder, self).__init__(**kwargs)\n self.nan_str = nan_str\n\n def default(self, value):\n try:\n if np.isnan(value):\n return self.nan_str\n\n if (type(value).__module__ == np.__name__\n and isinstance(value, np.ndarray)):\n return value.tolist()\n\n if issubclass(type(value), numbers.Integral):\n return int(value)\n if issubclass(type(value), numbers.Number):\n return float(value)\n\n return super(_SafeFallbackEncoder, self).default(value)\n\n except Exception:\n return str(value) # give up, just stringify it (ok for logs)\n\n\ndef pretty_print(result):\n result = result.copy()\n result.update(config=None) # drop config from pretty print\n result.update(hist_stats=None) # drop hist_stats from pretty print\n out = {}\n for k, v in result.items():\n if v is not None:\n out[k] = v\n\n cleaned = json.dumps(out, cls=_SafeFallbackEncoder)\n return yaml.safe_dump(json.loads(cleaned), default_flow_style=False)\n",
"path": "python/ray/tune/logger.py"
}
] | diff --git a/python/ray/tune/logger.py b/python/ray/tune/logger.py
index 044448d47c622..d2fae3723fd0a 100644
--- a/python/ray/tune/logger.py
+++ b/python/ray/tune/logger.py
@@ -187,7 +187,7 @@ class TBXLogger(Logger):
"""
# NoneType is not supported on the last TBX release yet.
- VALID_HPARAMS = (str, bool, int, float, list)
+ VALID_HPARAMS = (str, bool, np.bool8, int, np.integer, float, list)
def _init(self):
try:
diff --git a/python/ray/tune/tests/test_logger.py b/python/ray/tune/tests/test_logger.py
index 9a52ec61c4dd0..17215b36c9fe9 100644
--- a/python/ray/tune/tests/test_logger.py
+++ b/python/ray/tune/tests/test_logger.py
@@ -2,6 +2,7 @@
import unittest
import tempfile
import shutil
+import numpy as np
from ray.tune.logger import JsonLogger, CSVLogger, TBXLogger
@@ -46,7 +47,17 @@ def testJSON(self):
logger.close()
def testTBX(self):
- config = {"a": 2, "b": [1, 2], "c": {"c": {"D": 123}}}
+ config = {
+ "a": 2,
+ "b": [1, 2],
+ "c": {
+ "c": {
+ "D": 123
+ }
+ },
+ "d": np.int64(1),
+ "e": np.bool8(True)
+ }
t = Trial(evaluated_params=config, trial_id="tbx")
logger = TBXLogger(config=config, logdir=self.test_dir, trial=t)
logger.on_result(result(0, 4))
|
pyodide__pyodide-3868 | Aborted fetch requests freeze REPL
## 🐛 Bug
Fetch requests aborted (using the [signal](https://developer.mozilla.org/en-US/docs/Web/API/AbortSignal/timeout_static#examples) option) freeze the REPL and do not raise an exception.
### To Reproduce
```python
import js

def fetch_response(url, timeout):
options = js.Object.new()
options.signal = js.AbortSignal.timeout(timeout)
return js.fetch(url, options)
response = await fetch_response('slow api', 1)
```
Dev Console shows:
```
Uncaught (in promise) PythonError: TypeError: invalid exception object
at new_error (pyodide.asm.js:9:14992)
at pyodide.asm.wasm:0x152d67
at pyodide.asm.wasm:0x152e6c
at Module.callPyObjectKwargs (pyodide.asm.js:9:75811)
at Module.callPyObject (pyodide.asm.js:9:76020)
at onRejected (pyodide.asm.js:9:59090)
```
This appears to occur because the `PyodideFuture` object, which the `fetch` call returns, expects to receive a Python `Exception` object on rejection. Instead, the returned `PyodideFuture` object gets a `JsProxy` of an `AbortError` (`DOMException`), and a `JsProxy` of a `DOMException` can't be raised in Pyodide.
```python
>>> import js
>>> raise js.DOMException.new('')
```
```
Traceback (most recent call last):
File "<console>", line 1, in <module>
TypeError: exceptions must derive from BaseException
```
### Expected behavior
Should raise a `JsException`.
Possible solution: allow `js.DOMException` objects to be raised much like `js.Error` objects:
```python
>>> from pyodide.webloop import PyodideFuture
>>> fut = PyodideFuture()
>>> fut.set_exception(js.Error('hi'))
>>> fut.exception()
Error: hi
```
### Environment
- Pyodide version: 0.23.2
```
>>> import pyodide
>>> pyodide.__version__
'0.23.2'
```
- Browser version: Chrome 113.0.5672.114
| [
{
"content": "import sys\nfrom collections.abc import (\n AsyncIterator,\n Awaitable,\n Callable,\n ItemsView,\n Iterable,\n Iterator,\n KeysView,\n Mapping,\n MutableMapping,\n Sequence,\n ValuesView,\n)\nfrom functools import reduce\nfrom types import TracebackType\nfrom typing import IO, Any, Generic, TypeVar, overload\n\nfrom .docs_argspec import docs_argspec\n\n# All docstrings for public `core` APIs should be extracted from here. We use\n# the utilities in `docstring.py` and `docstring.c` to format them\n# appropriately.\n\n# Sphinx uses __name__ to determine the paths and such. It looks better for it\n# to refer to e.g., `pyodide.JsProxy` than `_pyodide._core_docs.JsProxy`.\n#\n# Use an empty name for the module of the type variables to prevent long\n# qualified names for the type variables from appearing in the docs.\n_save_name = __name__\n__name__ = \"\"\n\nT = TypeVar(\"T\")\nKT = TypeVar(\"KT\") # Key type.\nVT = TypeVar(\"VT\") # Value type.\nTco = TypeVar(\"Tco\", covariant=True) # Any type covariant containers.\nVco = TypeVar(\"Vco\", covariant=True) # Any type covariant containers.\nVTco = TypeVar(\"VTco\", covariant=True) # Value type covariant containers.\nTcontra = TypeVar(\"Tcontra\", contravariant=True) # Ditto contravariant.\n\n__name__ = \"pyodide.ffi\"\n\n_js_flags: dict[str, int] = {}\n\n\ndef _binor_reduce(l: Iterable[int]) -> int:\n return reduce(lambda x, y: x | y, l)\n\n\ndef _process_flag_expression(e: str) -> int:\n return _binor_reduce(_js_flags[x.strip()] for x in e.split(\"|\"))\n\n\nclass _JsProxyMetaClass(type):\n def __instancecheck__(cls, instance):\n \"\"\"Override for isinstance(instance, cls).\"\"\"\n # TODO: add support for user-generated subclasses with custom instance\n # checks\n # e.g., could check for a fetch response with x.constructor.name == \"Response\"\n # or Object.prototype.toString.call(x) == \"[object Response]\".\n return cls.__subclasscheck__(type(instance))\n\n def __subclasscheck__(cls, subclass):\n # TODO: This works for now but maybe there is a better or cleaner way to\n # do this.\n if type.__subclasscheck__(cls, subclass):\n return True\n if not hasattr(subclass, \"_js_type_flags\"):\n return False\n # For the \"synthetic\" subtypes defined in this file, we define\n # _js_type_flags as a string. We look these up in the _js_flags dict to\n # convert to a number.\n cls_flags = cls._js_type_flags # type:ignore[attr-defined]\n if isinstance(cls_flags, int):\n cls_flags = [cls_flags]\n else:\n cls_flags = [_process_flag_expression(f) for f in cls_flags]\n\n subclass_flags = subclass._js_type_flags\n if not isinstance(subclass_flags, int):\n subclass_flags = _binor_reduce(_js_flags[f] for f in subclass_flags)\n\n return any(cls_flag & subclass_flags == cls_flag for cls_flag in cls_flags)\n\n\n# We want to raise an error if someone tries to instantiate JsProxy directly\n# since it doesn't mean anything. But we have a few reasons to do so internally.\n# So we raise an error unless this private token is passed as an argument.\n_instantiate_token = object()\n\n\nclass JsProxy(metaclass=_JsProxyMetaClass):\n \"\"\"A proxy to make a JavaScript object behave like a Python object\n\n For more information see the :ref:`type-translations` documentation. 
In\n particular, see\n :ref:`the list of __dunder__ methods <type-translations-jsproxy>`\n that are (conditionally) implemented on :py:class:`JsProxy`.\n \"\"\"\n\n _js_type_flags: Any = 0\n\n def __new__(cls, arg=None, *args, **kwargs):\n if arg is _instantiate_token:\n return super().__new__(cls)\n raise TypeError(f\"{cls.__name__} cannot be instantiated.\")\n\n @property\n def js_id(self) -> int:\n \"\"\"An id number which can be used as a dictionary/set key if you want to\n key on JavaScript object identity.\n\n If two ``JsProxy`` are made with the same backing JavaScript object, they\n will have the same ``js_id``.\n \"\"\"\n return 0\n\n @property\n def typeof(self) -> str:\n \"\"\"Returns the JavaScript type of the ``JsProxy``.\n\n Corresponds to `typeof obj;` in JavaScript. You may also be interested\n in the `constuctor` attribute which returns the type as an object.\n \"\"\"\n return \"object\"\n\n def object_entries(self) -> \"JsProxy\":\n \"The JavaScript API ``Object.entries(object)``\"\n raise NotImplementedError\n\n def object_keys(self) -> \"JsProxy\":\n \"The JavaScript API ``Object.keys(object)``\"\n raise NotImplementedError\n\n def object_values(self) -> \"JsProxy\":\n \"The JavaScript API ``Object.values(object)``\"\n raise NotImplementedError\n\n def as_object_map(self, *, hereditary: bool = False) -> \"JsMutableMap[str, Any]\":\n \"\"\"Returns a new JsProxy that treats the object as a map.\n\n The methods :py:func:`~operator.__getitem__`,\n :py:func:`~operator.__setitem__`, :py:func:`~operator.__contains__`,\n :py:meth:`~object.__len__`, etc will perform lookups via ``object[key]``\n or similar.\n\n Note that ``len(x.as_object_map())`` evaluates in O(n) time (it iterates\n over the object and counts how many :js:func:`~Reflect.ownKeys` it has).\n If you need to compute the length in O(1) time, use a real\n :js:class:`Map` instead.\n\n Parameters\n ----------\n hereditary:\n If ``True``, any \"plain old objects\" stored as values in the object\n will be wrapped in `as_object_map` themselves.\n\n Examples\n --------\n\n .. code-block:: python\n\n from pyodide.code import run_js\n\n o = run_js(\"({x : {y: 2}})\")\n # You have to access the properties of o as attributes\n assert o.x.y == 2\n with pytest.raises(TypeError):\n o[\"x\"] # is not subscriptable\n\n # as_object_map allows us to access the property with getitem\n assert o.as_object_map()[\"x\"].y == 2\n\n with pytest.raises(TypeError):\n # The inner object is not subscriptable because hereditary is False.\n o.as_object_map()[\"x\"][\"y\"]\n\n # When hereditary is True, the inner object is also subscriptable\n assert o.as_object_map(hereditary=True)[\"x\"][\"y\"] == 2\n\n \"\"\"\n raise NotImplementedError\n\n def new(self, *args: Any, **kwargs: Any) -> \"JsProxy\":\n \"\"\"Construct a new instance of the JavaScript object\"\"\"\n raise NotImplementedError\n\n def to_py(\n self,\n *,\n depth: int = -1,\n default_converter: Callable[\n [\"JsProxy\", Callable[[\"JsProxy\"], Any], Callable[[\"JsProxy\", Any], None]],\n Any,\n ]\n | None = None,\n ) -> Any:\n \"\"\"Convert the :class:`JsProxy` to a native Python object as best as\n possible.\n\n See :ref:`type-translations-jsproxy-to-py` for more information.\n\n Parameters\n ----------\n depth:\n Limit the depth of the conversion. If a shallow conversion is\n desired, set ``depth`` to 1.\n\n default_converter:\n\n If present, this will be invoked whenever Pyodide does not have some\n built in conversion for the object. 
If ``default_converter`` raises\n an error, the error will be allowed to propagate. Otherwise, the\n object returned will be used as the conversion.\n ``default_converter`` takes three arguments. The first argument is\n the value to be converted.\n\n Examples\n --------\n\n Here are a couple examples of converter functions. In addition to the\n normal conversions, convert :js:class:`Date`` to :py:class:`~datetime.datetime`:\n\n .. code-block:: python\n\n from datetime import datetime\n def default_converter(value, _ignored1, _ignored2):\n if value.constructor.name == \"Date\":\n return datetime.fromtimestamp(d.valueOf()/1000)\n return value\n\n Don't create any JsProxies, require a complete conversion or raise an error:\n\n .. code-block:: python\n\n def default_converter(_value, _ignored1, _ignored2):\n raise Exception(\"Failed to completely convert object\")\n\n The second and third arguments are only needed for converting\n containers. The second argument is a conversion function which is used\n to convert the elements of the container with the same settings. The\n third argument is a \"cache\" function which is needed to handle self\n referential containers. Consider the following example. Suppose we have\n a Javascript ``Pair`` class:\n\n .. code-block:: javascript\n\n class Pair {\n constructor(first, second){\n this.first = first;\n this.second = second;\n }\n }\n\n We can use the following ``default_converter`` to convert ``Pair`` to :py:class:`list`:\n\n .. code-block:: python\n\n def default_converter(value, convert, cache):\n if value.constructor.name != \"Pair\":\n return value\n result = []\n cache(value, result);\n result.append(convert(value.first))\n result.append(convert(value.second))\n return result\n\n Note that we have to cache the conversion of ``value`` before converting\n ``value.first`` and ``value.second``. To see why, consider a self\n referential pair:\n\n .. code-block:: javascript\n\n let p = new Pair(0, 0);\n p.first = p;\n\n Without ``cache(value, result);``, converting ``p`` would lead to an\n infinite recurse. 
With it, we can successfully convert ``p`` to a list\n such that ``l[0] is l``.\n \"\"\"\n raise NotImplementedError\n\n\nclass JsDoubleProxy(JsProxy):\n \"\"\"A double proxy created with :py:func:`create_proxy`.\"\"\"\n\n _js_type_flags = [\"IS_DOUBLE_PROXY\"]\n\n def destroy(self) -> None:\n \"\"\"Destroy the proxy.\"\"\"\n pass\n\n def unwrap(self) -> Any:\n \"\"\"Unwrap a double proxy created with :py:func:`create_proxy` into the\n wrapped Python object.\n \"\"\"\n raise NotImplementedError\n\n\nclass JsPromise(JsProxy):\n \"\"\"A :py:class:`~pyodide.ffi.JsProxy` of a :js:class:`Promise` or some other `thenable\n <https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Promise#thenables>`_\n JavaScript object.\n\n A JavaScript object is considered to be a :js:class:`Promise` if it has a ``then`` method.\n \"\"\"\n\n _js_type_flags = [\"IS_AWAITABLE\"]\n\n def then(\n self, onfulfilled: Callable[[Any], Any], onrejected: Callable[[Any], Any]\n ) -> \"JsPromise\":\n \"\"\"The :js:meth:`Promise.then` API, wrapped to manage the lifetimes of the\n handlers.\n\n Pyodide will automatically release the references to the handlers\n when the promise resolves.\n \"\"\"\n raise NotImplementedError\n\n def catch(self, onrejected: Callable[[Any], Any], /) -> \"JsPromise\":\n \"\"\"The :js:meth:`Promise.catch` API, wrapped to manage the lifetimes of the\n handler.\n\n Pyodide will automatically release the references to the handler\n when the promise resolves.\n \"\"\"\n raise NotImplementedError\n\n def finally_(self, onfinally: Callable[[], Any], /) -> \"JsPromise\":\n \"\"\"The :js:meth:`Promise.finally` API, wrapped to manage the lifetimes of\n the handler.\n\n Pyodide will automatically release the references to the handler\n when the promise resolves. Note the trailing underscore in the name;\n this is needed because ``finally`` is a reserved keyword in Python.\n \"\"\"\n raise NotImplementedError\n\n\nclass JsBuffer(JsProxy):\n \"\"\"A JsProxy of an array buffer or array buffer view\"\"\"\n\n _js_type_flags = [\"IS_BUFFER\"]\n # There are no types for buffers:\n # https://github.com/python/typing/issues/593\n # https://bugs.python.org/issue27501\n # This is just for docs so lets just make something up?\n\n # Argument should be a buffer.\n # See https://github.com/python/typing/issues/593\n def assign(self, rhs: Any, /) -> None:\n \"\"\"Assign from a Python buffer into the JavaScript buffer.\"\"\"\n\n # Argument should be a buffer.\n # See https://github.com/python/typing/issues/593\n def assign_to(self, to: Any, /) -> None:\n \"\"\"Assign to a Python buffer from the JavaScript buffer.\"\"\"\n\n def to_memoryview(self) -> memoryview:\n \"\"\"Convert a buffer to a memoryview.\n\n Copies the data once. This currently has the same effect as\n :py:meth:`~JsArray.to_py`.\n \"\"\"\n raise NotImplementedError\n\n def to_bytes(self) -> bytes:\n \"\"\"Convert a buffer to a bytes object.\n\n Copies the data once.\n \"\"\"\n raise NotImplementedError\n\n def to_file(self, file: IO[bytes] | IO[str], /) -> None:\n \"\"\"Writes a buffer to a file.\n\n Will write the entire contents of the buffer to the current position of\n the file.\n\n Example\n -------\n >>> import pytest; pytest.skip()\n >>> from js import Uint8Array\n >>> x = Uint8Array.new(range(10))\n >>> with open('file.bin', 'wb') as fh:\n ... x.to_file(fh)\n which is equivalent to,\n >>> with open('file.bin', 'wb') as fh:\n ... data = x.to_bytes()\n ... 
fh.write(data)\n but the latter copies the data twice whereas the former only copies the\n data once.\n \"\"\"\n\n def from_file(self, file: IO[bytes] | IO[str], /) -> None:\n \"\"\"Reads from a file into a buffer.\n\n Will try to read a chunk of data the same size as the buffer from\n the current position of the file.\n\n Example\n -------\n >>> import pytest; pytest.skip()\n >>> from js import Uint8Array\n >>> # the JsProxy need to be pre-allocated\n >>> x = Uint8Array.new(range(10))\n >>> with open('file.bin', 'rb') as fh:\n ... x.read_file(fh)\n which is equivalent to\n >>> x = Uint8Array.new(range(10))\n >>> with open('file.bin', 'rb') as fh:\n ... chunk = fh.read(size=x.byteLength)\n ... x.assign(chunk)\n but the latter copies the data twice whereas the former only copies the\n data once.\n \"\"\"\n\n def _into_file(self, file: IO[bytes] | IO[str], /) -> None:\n \"\"\"Will write the entire contents of a buffer into a file using\n ``canOwn : true`` without any copy. After this, the buffer cannot be\n used again.\n\n If ``file`` is not empty, its contents will be overwritten!\n\n Only ``MEMFS`` cares about the ``canOwn`` flag, other file systems will\n just ignore it.\n\n\n Example\n -------\n >>> import pytest; pytest.skip()\n >>> from js import Uint8Array\n >>> x = Uint8Array.new(range(10))\n >>> with open('file.bin', 'wb') as fh:\n ... x._into_file(fh)\n which is similar to\n >>> with open('file.bin', 'wb') as fh:\n ... data = x.to_bytes()\n ... fh.write(data)\n but the latter copies the data once whereas the former doesn't copy the\n data.\n \"\"\"\n\n def to_string(self, encoding: str | None = None) -> str:\n \"\"\"Convert a buffer to a string object.\n\n Copies the data twice.\n\n The encoding argument will be passed to the :js:class:`TextDecoder`\n constructor. It should be one of the encodings listed in `the table here\n <https://encoding.spec.whatwg.org/#names-and-labels>`_. 
The default\n encoding is utf8.\n \"\"\"\n raise NotImplementedError\n\n\nclass JsArray(JsProxy, Generic[T]):\n \"\"\"A JsProxy of an :js:class:`Array`, :js:class:`NodeList`, or :js:class:`TypedArray`\"\"\"\n\n _js_type_flags = [\"IS_ARRAY\", \"IS_NODE_LIST\", \"IS_TYPEDARRAY\"]\n\n def __getitem__(self, idx: int | slice) -> T:\n raise NotImplementedError\n\n def __setitem__(self, idx: int | slice, value: T) -> None:\n pass\n\n def __delitem__(self, idx: int | slice) -> None:\n pass\n\n def __len__(self) -> int:\n return 0\n\n def extend(self, other: Iterable[T], /) -> None:\n \"\"\"Extend array by appending elements from the iterable.\"\"\"\n\n def __reversed__(self) -> Iterator[T]:\n \"\"\"Return a reverse iterator over the :js:class:`Array`.\"\"\"\n raise NotImplementedError\n\n def pop(self, /, index: int = -1) -> T:\n \"\"\"Remove and return the ``item`` at ``index`` (default last).\n\n Raises :py:exc:`IndexError` if list is empty or index is out of range.\n \"\"\"\n raise NotImplementedError\n\n def push(self, /, object: T) -> None:\n pass\n\n def append(self, /, object: T) -> None:\n \"\"\"Append object to the end of the list.\"\"\"\n\n def index(self, /, value: T, start: int = 0, stop: int = sys.maxsize) -> int:\n \"\"\"Return first ``index`` at which ``value`` appears in the ``Array``.\n\n Raises :py:exc:`ValueError` if the value is not present.\n \"\"\"\n raise NotImplementedError\n\n def count(self, /, x: T) -> int:\n \"\"\"Return the number of times x appears in the list.\"\"\"\n raise NotImplementedError\n\n def reverse(self) -> None:\n \"\"\"Reverse the array in place.\n\n Present only if the wrapped Javascript object is an array.\n \"\"\"\n\n def to_py(\n self,\n *,\n depth: int = -1,\n default_converter: Callable[\n [\"JsProxy\", Callable[[\"JsProxy\"], Any], Callable[[\"JsProxy\", Any], None]],\n Any,\n ]\n | None = None,\n ) -> list[Any]:\n raise NotImplementedError\n\n\nclass JsTypedArray(JsBuffer, JsArray[int]):\n _js_type_flags = [\"IS_TYPEDARRAY\"]\n BYTES_PER_ELEMENT: int\n\n def subarray(\n self, start: int | None = None, stop: int | None = None\n ) -> \"JsTypedArray\":\n raise NotImplementedError\n\n buffer: JsBuffer\n\n\[email protected]\nclass JsMap(JsProxy, Generic[KT, VTco]):\n \"\"\"A JavaScript Map\n\n To be considered a map, a JavaScript object must have a ``get`` method, it\n must have a ``size`` or a ``length`` property which is a number\n (idiomatically it should be called ``size``) and it must be iterable.\n \"\"\"\n\n _js_type_flags = [\"HAS_GET | HAS_LENGTH | IS_ITERABLE\", \"IS_OBJECT_MAP\"]\n\n def __getitem__(self, idx: KT) -> VTco:\n raise NotImplementedError\n\n def __len__(self) -> int:\n return 0\n\n def __iter__(self) -> KT:\n raise NotImplementedError\n\n def __contains__(self, idx: KT) -> bool:\n raise NotImplementedError\n\n def keys(self) -> KeysView[KT]:\n \"\"\"Return a :py:class:`~collections.abc.KeysView` for the map.\"\"\"\n raise NotImplementedError\n\n def items(self) -> ItemsView[KT, VTco]:\n \"\"\"Return a :py:class:`~collections.abc.ItemsView` for the map.\"\"\"\n raise NotImplementedError\n\n def values(self) -> ValuesView[VTco]:\n \"\"\"Return a :py:class:`~collections.abc.ValuesView` for the map.\"\"\"\n raise NotImplementedError\n\n @overload\n def get(self, key: KT, /) -> VTco | None:\n ...\n\n @overload\n def get(self, key: KT, default: VTco | T, /) -> VTco | T:\n ...\n\n @docs_argspec(\"(self, key: KT, default: VTco | None, /) -> VTco\")\n def get(self, key: KT, default: Any = None, /) -> VTco:\n r\"\"\"If ``key in self``, 
returns ``self[key]``. Otherwise returns ``default``.\"\"\"\n raise NotImplementedError\n\n\[email protected]\nclass JsMutableMap(JsMap[KT, VT], Generic[KT, VT]):\n \"\"\"A JavaScript mutable map\n\n To be considered a mutable map, a JavaScript object must have a ``get``\n method, a ``has`` method, a ``size`` or a ``length`` property which is a\n number (idiomatically it should be called ``size``) and it must be iterable.\n\n Instances of the JavaScript builtin ``Map`` class are ``JsMutableMap`` s.\n Also proxies returned by :py:meth:`JsProxy.as_object_map` are instances of\n ``JsMap`` .\n \"\"\"\n\n _js_type_flags = [\"HAS_GET | HAS_SET | HAS_LENGTH | IS_ITERABLE\", \"IS_OBJECT_MAP\"]\n\n @overload\n def pop(self, key: KT, /) -> VT:\n ...\n\n @overload\n def pop(self, key: KT, default: VT | T = ..., /) -> VT | T:\n ...\n\n @docs_argspec(\"(self, key: KT, default: VT | None = None, /) -> VT\")\n def pop(self, key: KT, default: Any = None, /) -> Any:\n r\"\"\"If ``key in self``, return ``self[key]`` and remove key from ``self``. Otherwise\n returns ``default``.\n \"\"\"\n raise NotImplementedError\n\n def setdefault(self, key: KT, default: VT | None = None) -> VT:\n \"\"\"If ``key in self``, return ``self[key]``. Otherwise\n sets ``self[key] = default`` and returns ``default``.\n \"\"\"\n raise NotImplementedError\n\n def popitem(self) -> tuple[KT, VT]:\n \"\"\"Remove some arbitrary ``key, value`` pair from the map and returns the\n ``(key, value)`` tuple.\n \"\"\"\n raise NotImplementedError\n\n def clear(self) -> None:\n \"\"\"Empty out the map entirely.\"\"\"\n\n @overload\n def update(self, __m: Mapping[KT, VT], **kwargs: VT) -> None:\n ...\n\n @overload\n def update(self, __m: Iterable[tuple[KT, VT]], **kwargs: VT) -> None:\n ...\n\n @overload\n def update(self, **kwargs: VT) -> None:\n ...\n\n @docs_argspec(\n \"(self, other : Mapping[KT, VT] | Iterable[tuple[KT, VT]] = None , /, **kwargs) -> None\"\n )\n def update(self, *args: Any, **kwargs: Any) -> None:\n r\"\"\"Updates ``self`` from ``other`` and ``kwargs``.\n\n Parameters\n ----------\n other:\n\n Either a mapping or an iterable of pairs. This can be left out.\n\n kwargs: ``VT``\n\n Extra key-values pairs to insert into the map. Only usable for\n inserting extra strings.\n\n If ``other`` is present and is a :py:class:`~collections.abc.Mapping` or has a ``keys``\n method, does\n\n .. code-block:: python\n\n for k in other:\n self[k] = other[k]\n\n If ``other`` is present and lacks a ``keys`` method, does\n\n .. code-block:: python\n\n for (k, v) in other:\n self[k] = v\n\n In all cases this is followed by:\n\n .. 
code-block:: python\n\n for (k, v) in kwargs.items():\n self[k] = v\n\n \"\"\"\n\n def __setitem__(self, idx: KT, value: VT) -> None:\n pass\n\n def __delitem__(self, idx: KT) -> None:\n return None\n\n\nclass JsIterator(JsProxy, Generic[Tco]):\n \"\"\"A JsProxy of a JavaScript iterator.\n\n An object is a :py:class:`JsAsyncIterator` if it has a :js:meth:`~Iterator.next` method and either has a\n :js:data:`Symbol.iterator` or has no :js:data:`Symbol.asyncIterator`.\n \"\"\"\n\n _js_type_flags = [\"IS_ITERATOR\"]\n\n def __next__(self) -> Tco:\n raise NotImplementedError\n\n def __iter__(self) -> Iterator[Tco]:\n raise NotImplementedError\n\n\nclass JsAsyncIterator(JsProxy, Generic[Tco]):\n \"\"\"A JsProxy of a JavaScript async iterator.\n\n An object is a :py:class:`JsAsyncIterator` if it has a\n :js:meth:`~AsyncIterator.next` method and either has a\n :js:data:`Symbol.asyncIterator` or has no :js:data:`Symbol.iterator`\n \"\"\"\n\n _js_type_flags = [\"IS_ASYNC_ITERATOR\"]\n\n def __anext__(self) -> Awaitable[Tco]:\n raise NotImplementedError\n\n def __aiter__(self) -> AsyncIterator[Tco]:\n raise NotImplementedError\n\n\nclass JsIterable(JsProxy, Generic[Tco]):\n \"\"\"A JavaScript iterable object\n\n A JavaScript object is iterable if it has a :js:data:`Symbol.iterator` method.\n \"\"\"\n\n _js_type_flags = [\"IS_ITERABLE\"]\n\n def __iter__(self) -> Iterator[Tco]:\n raise NotImplementedError\n\n\nclass JsAsyncIterable(JsProxy, Generic[Tco]):\n \"\"\"A JavaScript async iterable object\n\n A JavaScript object is async iterable if it has a :js:data:`Symbol.asyncIterator`\n method.\n \"\"\"\n\n _js_type_flags = [\"IS_ASYNC_ITERABLE\"]\n\n def __aiter__(self) -> AsyncIterator[Tco]:\n raise NotImplementedError\n\n\nclass JsGenerator(JsIterable[Tco], Generic[Tco, Tcontra, Vco]):\n \"\"\"A JavaScript generator\n\n A JavaScript object is treated as a generator if its\n :js:data:`Symbol.toStringTag` is ``\"Generator\"``. Most likely this will be\n because it is a true :js:class:`Generator` produced by the JavaScript\n runtime, but it may be a custom object trying hard to pretend to be a\n generator. It should have :js:meth:`~Generator.next`,\n :js:meth:`~Generator.return` and :js:meth:`~Generator.throw` methods.\n \"\"\"\n\n _js_type_flags = [\"IS_GENERATOR\"]\n\n def send(self, value: Tcontra) -> Tco:\n \"\"\"\n Resumes the execution and \"sends\" a value into the generator function.\n\n The ``value`` argument becomes the result of the current yield\n expression. The ``send()`` method returns the next value yielded by the\n generator, or raises :py:exc:`StopIteration` if the generator exits without\n yielding another value. When ``send()`` is called to start the\n generator, the argument will be ignored. 
Unlike in Python, we cannot\n detect that the generator hasn't started yet, and no error will be\n thrown if the argument of a not-started generator is not ``None``.\n \"\"\"\n raise NotImplementedError\n\n @overload\n def throw(\n self,\n typ: type[BaseException],\n val: BaseException | object = ...,\n tb: TracebackType | None = ...,\n /,\n ) -> Tco:\n ...\n\n @overload\n def throw(\n self,\n typ: BaseException,\n val: None = ...,\n tb: TracebackType | None = ...,\n /,\n ) -> Tco:\n ...\n\n @docs_argspec(\"(self, error: BaseException, /) -> Tco\")\n def throw(\n self,\n *args: Any,\n ) -> Tco:\n \"\"\"\n Raises an exception at the point where the generator was paused, and\n returns the next value yielded by the generator function.\n\n If the generator exits without yielding another value, a\n :py:exc:`StopIteration` exception is raised. If the generator function does\n not catch the passed-in exception, or raises a different exception, then\n that exception propagates to the caller.\n\n In typical use, this is called with a single exception instance similar\n to the way the raise keyword is used.\n\n For backwards compatibility, however, a second signature is supported,\n following a convention from older versions of Python. The type argument\n should be an exception class, and value should be an exception instance.\n If the value is not provided, the type constructor is called to get an\n instance. If traceback is provided, it is set on the exception,\n otherwise any existing ``__traceback__`` attribute stored in value may\n be cleared.\n \"\"\"\n raise NotImplementedError\n\n def close(self) -> None:\n \"\"\"Raises a :py:exc:`GeneratorExit` at the point where the generator\n function was paused.\n\n If the generator function then exits gracefully, is already closed, or\n raises :py:exc:`GeneratorExit` (by not catching the exception), ``close()``\n returns to its caller. If the generator yields a value, a\n :py:exc:`RuntimeError` is raised. If the generator raises any other\n exception, it is propagated to the caller. ``close()`` does nothing if\n the generator has already exited due to an exception or normal exit.\n \"\"\"\n\n def __next__(self) -> Tco:\n raise NotImplementedError\n\n def __iter__(self) -> \"JsGenerator[Tco, Tcontra, Vco]\":\n raise NotImplementedError\n\n\nclass JsFetchResponse(JsProxy):\n \"\"\"A :py:class:`JsFetchResponse` object represents a :js:data:`Response` to a\n :js:func:`fetch` request.\n \"\"\"\n\n bodyUsed: bool\n ok: bool\n redirected: bool\n status: int\n statusText: str\n type: str\n url: str\n headers: Any\n\n def clone(self) -> \"JsFetchResponse\":\n raise NotImplementedError\n\n async def arrayBuffer(self) -> JsBuffer:\n raise NotImplementedError\n\n async def text(self) -> str:\n raise NotImplementedError\n\n async def json(self) -> JsProxy:\n raise NotImplementedError\n\n\nclass JsAsyncGenerator(JsAsyncIterable[Tco], Generic[Tco, Tcontra, Vco]):\n \"\"\"A JavaScript :js:class:`AsyncGenerator`\n\n A JavaScript object is treated as an async generator if it's\n :js:data:`Symbol.toStringTag` is ``\"AsyncGenerator\"``. Most likely this will\n be because it is a true async generator produced by the JavaScript runtime,\n but it may be a custom object trying hard to pretend to be an async\n generator. 
It should have :js:meth:`~AsyncGenerator.next`,\n :js:meth:`~AsyncGenerator.return`, and :js:meth:`~AsyncGenerator.throw`\n methods.\n \"\"\"\n\n _js_type_flags = [\"IS_ASYNC_GENERATOR\"]\n\n def __anext__(self) -> Awaitable[Tco]:\n raise NotImplementedError\n\n def __aiter__(self) -> \"JsAsyncGenerator[Tco, Tcontra, Vco]\":\n raise NotImplementedError\n\n def asend(self, value: Tcontra, /) -> Awaitable[Tco]:\n \"\"\"Resumes the execution and \"sends\" a value into the async generator\n function.\n\n The ``value`` argument becomes the result of the current yield\n expression. The awaitable returned by the ``asend()`` method will return\n the next value yielded by the generator or raises\n :py:exc:`StopAsyncIteration` if the asynchronous generator returns. If the\n generator returned a value, this value is discarded (because in Python\n async generators cannot return a value).\n\n When ``asend()`` is called to start the generator, the argument will be\n ignored. Unlike in Python, we cannot detect that the generator hasn't\n started yet, and no error will be thrown if the argument of a\n not-started generator is not ``None``.\n \"\"\"\n raise NotImplementedError\n\n @overload\n def athrow(\n self,\n typ: type[BaseException],\n val: BaseException | object = ...,\n tb: TracebackType | None = ...,\n /,\n ) -> Awaitable[Tco]:\n ...\n\n @overload\n def athrow(\n self,\n typ: BaseException,\n val: None = ...,\n tb: TracebackType | None = ...,\n /,\n ) -> Awaitable[Tco]:\n ...\n\n @docs_argspec(\"(self, error: BaseException, /) -> Tco\")\n def athrow(self, value: Any, *args: Any) -> Awaitable[Tco]:\n \"\"\"Resumes the execution and raises an exception at the point where the\n generator was paused.\n\n The awaitable returned by ``athrow()`` method will return the next value\n yielded by the generator or raises :py:exc:`StopAsyncIteration` if the\n asynchronous generator returns. If the generator returned a value, this\n value is discarded (because in Python async generators cannot return a\n value). If the generator function does not catch the passed-in\n exception, or raises a different exception, then that exception\n propagates to the caller.\n \"\"\"\n raise NotImplementedError\n\n def aclose(self) -> Awaitable[None]:\n \"\"\"Raises a :py:exc:`GeneratorExit` at the point where the generator\n function was paused.\n\n If the generator function then exits gracefully, is already closed, or\n raises :py:exc:`GeneratorExit` (by not catching the exception),\n ``aclose()`` returns to its caller. If the generator yields a value, a\n :py:exc:`RuntimeError` is raised. If the generator raises any other\n exception, it is propagated to the caller. ``aclose()`` does nothing if\n the generator has already exited due to an exception or normal exit.\n \"\"\"\n raise NotImplementedError\n\n\nclass JsCallable(JsProxy):\n _js_type_flags = [\"IS_CALLABLE\"]\n\n def __call__(self):\n pass\n\n\nclass JsOnceCallable(JsCallable):\n def destroy(self):\n pass\n\n\nclass JsException(JsProxy, Exception):\n \"\"\"A JavaScript Error.\n\n These are pickleable unlike other JsProxies.\n \"\"\"\n\n # Note: Unlike many of these classes, this one is never actually seen by the\n # user IN_BROWSER (it's replaced by a different JsException in\n # pyodide._core). 
We use it to unpickle errors so we need it to be\n # instantiable.\n def __new__(cls, *args, **kwargs):\n if args[0] == _instantiate_token:\n return super().__new__(cls, *args, **kwargs)\n return cls._new_exc(*args, **kwargs)\n\n @classmethod\n def _new_exc(cls, name: str, message: str = \"\", stack: str = \"\") -> \"JsException\":\n result = super().__new__(JsException, _instantiate_token)\n result.name = name\n result.message = message\n result.stack = stack\n return result\n\n def __str__(self):\n return f\"{self.name}: {self.message}\"\n\n name: str\n \"\"\"The name of the error type\"\"\"\n\n message: str\n \"\"\"The error message\"\"\"\n\n stack: str\n \"\"\"The JavaScript stack trace\"\"\"\n\n\nclass ConversionError(Exception):\n \"\"\"An error thrown when conversion between JavaScript and Python fails.\"\"\"\n\n\nclass InternalError(Exception):\n \"\"\"Thrown when a recoverable assertion error occurs in internal Pyodide code\"\"\"\n\n pass\n\n\nclass JsDomElement(JsProxy):\n @property\n def tagName(self) -> str:\n return \"\"\n\n @property\n def children(self) -> Sequence[\"JsDomElement\"]:\n return []\n\n def appendChild(self, child: \"JsDomElement\") -> None:\n pass\n\n def addEventListener(self, event: str, listener: Callable[[Any], None]) -> None:\n pass\n\n def removeEventListener(self, event: str, listener: Callable[[Any], None]) -> None:\n pass\n\n\n# from pyproxy.c\n\n\ndef create_once_callable(obj: Callable[..., Any], /) -> JsOnceCallable:\n \"\"\"Wrap a Python Callable in a JavaScript function that can be called once.\n\n After being called the proxy will decrement the reference count\n of the Callable. The JavaScript function also has a ``destroy`` API that\n can be used to release the proxy without calling it.\n \"\"\"\n return obj # type: ignore[return-value]\n\n\ndef create_proxy(\n obj: Any, /, *, capture_this: bool = False, roundtrip: bool = True\n) -> JsDoubleProxy:\n \"\"\"Create a :py:class:`JsProxy` of a :js:class:`~pyodide.ffi.PyProxy`.\n\n This allows explicit control over the lifetime of the\n :js:class:`~pyodide.ffi.PyProxy` from Python: call the\n :py:meth:`~JsDoubleProxy.destroy` API when done.\n\n Parameters\n ----------\n obj:\n The object to wrap.\n\n capture_this :\n If the object is callable, should ``this`` be passed as the first\n argument when calling it from JavaScript.\n\n roundtrip:\n When the proxy is converted back from JavaScript to Python, if this is\n ``True`` it is converted into a double proxy. If ``False``, it is\n unwrapped into a Python object. In the case that ``roundtrip`` is\n ``True`` it is possible to unwrap a double proxy with the\n :py:meth:`JsDoubleProxy.unwrap` method. This is useful to allow easier\n control of lifetimes from Python:\n\n .. 
code-block:: python\n\n from js import o\n d = {}\n o.d = create_proxy(d, roundtrip=True)\n o.d.destroy() # Destroys the proxy created with create_proxy\n\n With ``roundtrip=False`` this would be an error.\n \"\"\"\n return obj\n\n\n# from python2js\n\n\n@overload\ndef to_js(\n obj: list[Any] | tuple[Any],\n /,\n *,\n depth: int = -1,\n pyproxies: JsProxy | None = None,\n create_pyproxies: bool = True,\n dict_converter: Callable[[Iterable[JsArray[Any]]], JsProxy] | None = None,\n default_converter: Callable[\n [Any, Callable[[Any], JsProxy], Callable[[Any, JsProxy], None]], JsProxy\n ]\n | None = None,\n) -> JsArray[Any]:\n ...\n\n\n@overload\ndef to_js(\n obj: dict[Any, Any],\n /,\n *,\n depth: int = -1,\n pyproxies: JsProxy | None,\n create_pyproxies: bool,\n dict_converter: None,\n default_converter: Callable[\n [Any, Callable[[Any], JsProxy], Callable[[Any, JsProxy], None]], JsProxy\n ]\n | None = None,\n) -> JsMap[Any, Any]:\n ...\n\n\n@overload\ndef to_js(\n obj: Any,\n /,\n *,\n depth: int = -1,\n pyproxies: JsProxy | None = None,\n create_pyproxies: bool = True,\n dict_converter: Callable[[Iterable[JsArray[Any]]], JsProxy] | None = None,\n default_converter: Callable[\n [Any, Callable[[Any], JsProxy], Callable[[Any, JsProxy], None]], JsProxy\n ]\n | None = None,\n) -> Any:\n ...\n\n\ndef to_js(\n obj: Any,\n /,\n *,\n depth: int = -1,\n pyproxies: JsProxy | None = None,\n create_pyproxies: bool = True,\n dict_converter: Callable[[Iterable[JsArray[Any]]], JsProxy] | None = None,\n default_converter: Callable[\n [Any, Callable[[Any], JsProxy], Callable[[Any, JsProxy], None]], JsProxy\n ]\n | None = None,\n) -> Any:\n \"\"\"Convert the object to JavaScript.\n\n This is similar to :js:meth:`~pyodide.ffi.PyProxy.toJs`, but for use from Python. If the\n object can be implicitly translated to JavaScript, it will be returned\n unchanged. If the object cannot be converted into JavaScript, this method\n will return a :py:class:`JsProxy` of a :js:class:`~pyodide.ffi.PyProxy`, as if you had used\n :func:`~pyodide.ffi.create_proxy`.\n\n See :ref:`type-translations-pyproxy-to-js` for more information.\n\n Parameters\n ----------\n obj :\n The Python object to convert\n\n depth :\n The maximum depth to do the conversion. Negative numbers are treated as\n infinite. Set this to 1 to do a shallow conversion.\n\n pyproxies:\n Should be a JavaScript :js:class:`Array`. If provided, any ``PyProxies``\n generated will be stored here. You can later use :py:meth:`destroy_proxies`\n if you want to destroy the proxies from Python (or from JavaScript you\n can just iterate over the :js:class:`Array` and destroy the proxies).\n\n create_pyproxies:\n If you set this to :py:data:`False`, :py:func:`to_js` will raise an error rather\n than creating any pyproxies.\n\n dict_converter:\n This converter if provided receives a (JavaScript) iterable of\n (JavaScript) pairs [key, value]. It is expected to return the desired\n result of the dict conversion. Some suggested values for this argument:\n\n * ``js.Map.new`` -- similar to the default behavior\n * ``js.Array.from`` -- convert to an array of entries\n * ``js.Object.fromEntries`` -- convert to a JavaScript object\n\n default_converter:\n If present will be invoked whenever Pyodide does not have some built in\n conversion for the object. If ``default_converter`` raises an error, the\n error will be allowed to propagate. Otherwise, the object returned will\n be used as the conversion. 
``default_converter`` takes three arguments.\n The first argument is the value to be converted.\n\n Examples\n --------\n\n Here are some examples demonstrating the usage of the ``default_converter``\n argument.\n\n\n In addition to the normal conversions, convert JavaScript :js:class:`Date`\n objects to :py:class:`~datetime.datetime` objects:\n\n .. code-block:: python\n\n from datetime import datetime\n from js import Date\n def default_converter(value, _ignored1, _ignored2):\n if isinstance(value, datetime):\n return Date.new(value.timestamp() * 1000)\n return value\n\n Don't create any PyProxies, require a complete conversion or raise an error:\n\n .. code-block:: python\n\n def default_converter(_value, _ignored1, _ignored2):\n raise Exception(\"Failed to completely convert object\")\n\n The second and third arguments are only needed for converting containers.\n The second argument is a conversion function which is used to convert the\n elements of the container with the same settings. The third argument is a\n \"cache\" function which is needed to handle self referential containers.\n Consider the following example. Suppose we have a Python ``Pair`` class:\n\n .. code-block:: python\n\n class Pair:\n def __init__(self, first, second):\n self.first = first self.second = second\n\n We can use the following ``default_converter`` to convert ``Pair`` to\n :js:class:`Array`:\n\n .. code-block:: python\n\n from js import Array\n\n def default_converter(value, convert, cache):\n if not isinstance(value, Pair):\n return value\n result = Array.new() cache(value, result);\n result.push(convert(value.first)) result.push(convert(value.second))\n return result\n\n Note that we have to cache the conversion of ``value`` before converting\n ``value.first`` and ``value.second``. To see why, consider a self\n referential pair:\n\n .. code-block:: javascript\n\n p = Pair(0, 0); p.first = p;\n\n Without ``cache(value, result);``, converting ``p`` would lead to an\n infinite recurse. With it, we can successfully convert ``p`` to an Array\n such that ``l[0] === l``.\n \"\"\"\n return obj\n\n\ndef destroy_proxies(pyproxies: JsArray[Any], /) -> None:\n \"\"\"Destroy all PyProxies in a JavaScript array.\n\n pyproxies must be a JavaScript Array of PyProxies. Intended for use\n with the arrays created from the \"pyproxies\" argument of :js:meth:`~pyodide.ffi.PyProxy.toJs`\n and :py:func:`to_js`. This method is necessary because indexing the Array from\n Python automatically unwraps the PyProxy into the wrapped Python object.\n \"\"\"\n pass\n\n\n__name__ = _save_name\ndel _save_name\n\n__all__ = [\n \"ConversionError\",\n \"InternalError\",\n \"JsArray\",\n \"JsAsyncGenerator\",\n \"JsAsyncIterable\",\n \"JsAsyncIterator\",\n \"JsBuffer\",\n \"JsDoubleProxy\",\n \"JsException\",\n \"JsFetchResponse\",\n \"JsGenerator\",\n \"JsIterable\",\n \"JsIterator\",\n \"JsMap\",\n \"JsMutableMap\",\n \"JsPromise\",\n \"JsProxy\",\n \"JsDomElement\",\n \"JsCallable\",\n \"JsTypedArray\",\n \"create_once_callable\",\n \"create_proxy\",\n \"destroy_proxies\",\n \"to_js\",\n]\n",
"path": "src/py/_pyodide/_core_docs.py"
}
] | [
{
"content": "import sys\nfrom collections.abc import (\n AsyncIterator,\n Awaitable,\n Callable,\n ItemsView,\n Iterable,\n Iterator,\n KeysView,\n Mapping,\n MutableMapping,\n Sequence,\n ValuesView,\n)\nfrom functools import reduce\nfrom types import TracebackType\nfrom typing import IO, Any, Generic, TypeVar, overload\n\nfrom .docs_argspec import docs_argspec\n\n# All docstrings for public `core` APIs should be extracted from here. We use\n# the utilities in `docstring.py` and `docstring.c` to format them\n# appropriately.\n\n# Sphinx uses __name__ to determine the paths and such. It looks better for it\n# to refer to e.g., `pyodide.JsProxy` than `_pyodide._core_docs.JsProxy`.\n#\n# Use an empty name for the module of the type variables to prevent long\n# qualified names for the type variables from appearing in the docs.\n_save_name = __name__\n__name__ = \"\"\n\nT = TypeVar(\"T\")\nKT = TypeVar(\"KT\") # Key type.\nVT = TypeVar(\"VT\") # Value type.\nTco = TypeVar(\"Tco\", covariant=True) # Any type covariant containers.\nVco = TypeVar(\"Vco\", covariant=True) # Any type covariant containers.\nVTco = TypeVar(\"VTco\", covariant=True) # Value type covariant containers.\nTcontra = TypeVar(\"Tcontra\", contravariant=True) # Ditto contravariant.\n\n__name__ = \"pyodide.ffi\"\n\n_js_flags: dict[str, int] = {}\n\n\ndef _binor_reduce(l: Iterable[int]) -> int:\n return reduce(lambda x, y: x | y, l)\n\n\ndef _process_flag_expression(e: str) -> int:\n return _binor_reduce(_js_flags[x.strip()] for x in e.split(\"|\"))\n\n\nclass _JsProxyMetaClass(type):\n def __instancecheck__(cls, instance):\n \"\"\"Override for isinstance(instance, cls).\"\"\"\n # TODO: add support for user-generated subclasses with custom instance\n # checks\n # e.g., could check for a fetch response with x.constructor.name == \"Response\"\n # or Object.prototype.toString.call(x) == \"[object Response]\".\n return cls.__subclasscheck__(type(instance))\n\n def __subclasscheck__(cls, subclass):\n # TODO: This works for now but maybe there is a better or cleaner way to\n # do this.\n if type.__subclasscheck__(cls, subclass):\n return True\n if not hasattr(subclass, \"_js_type_flags\"):\n return False\n # For the \"synthetic\" subtypes defined in this file, we define\n # _js_type_flags as a string. We look these up in the _js_flags dict to\n # convert to a number.\n cls_flags = cls._js_type_flags # type:ignore[attr-defined]\n if isinstance(cls_flags, int):\n cls_flags = [cls_flags]\n else:\n cls_flags = [_process_flag_expression(f) for f in cls_flags]\n\n subclass_flags = subclass._js_type_flags\n if not isinstance(subclass_flags, int):\n subclass_flags = _binor_reduce(_js_flags[f] for f in subclass_flags)\n\n return any(cls_flag & subclass_flags == cls_flag for cls_flag in cls_flags)\n\n\n# We want to raise an error if someone tries to instantiate JsProxy directly\n# since it doesn't mean anything. But we have a few reasons to do so internally.\n# So we raise an error unless this private token is passed as an argument.\n_instantiate_token = object()\n\n\nclass JsProxy(metaclass=_JsProxyMetaClass):\n \"\"\"A proxy to make a JavaScript object behave like a Python object\n\n For more information see the :ref:`type-translations` documentation. 
In\n particular, see\n :ref:`the list of __dunder__ methods <type-translations-jsproxy>`\n that are (conditionally) implemented on :py:class:`JsProxy`.\n \"\"\"\n\n _js_type_flags: Any = 0\n\n def __new__(cls, arg=None, *args, **kwargs):\n if arg is _instantiate_token:\n return super().__new__(cls)\n raise TypeError(f\"{cls.__name__} cannot be instantiated.\")\n\n @property\n def js_id(self) -> int:\n \"\"\"An id number which can be used as a dictionary/set key if you want to\n key on JavaScript object identity.\n\n If two ``JsProxy`` are made with the same backing JavaScript object, they\n will have the same ``js_id``.\n \"\"\"\n return 0\n\n @property\n def typeof(self) -> str:\n \"\"\"Returns the JavaScript type of the ``JsProxy``.\n\n Corresponds to `typeof obj;` in JavaScript. You may also be interested\n in the `constuctor` attribute which returns the type as an object.\n \"\"\"\n return \"object\"\n\n def object_entries(self) -> \"JsProxy\":\n \"The JavaScript API ``Object.entries(object)``\"\n raise NotImplementedError\n\n def object_keys(self) -> \"JsProxy\":\n \"The JavaScript API ``Object.keys(object)``\"\n raise NotImplementedError\n\n def object_values(self) -> \"JsProxy\":\n \"The JavaScript API ``Object.values(object)``\"\n raise NotImplementedError\n\n def as_object_map(self, *, hereditary: bool = False) -> \"JsMutableMap[str, Any]\":\n \"\"\"Returns a new JsProxy that treats the object as a map.\n\n The methods :py:func:`~operator.__getitem__`,\n :py:func:`~operator.__setitem__`, :py:func:`~operator.__contains__`,\n :py:meth:`~object.__len__`, etc will perform lookups via ``object[key]``\n or similar.\n\n Note that ``len(x.as_object_map())`` evaluates in O(n) time (it iterates\n over the object and counts how many :js:func:`~Reflect.ownKeys` it has).\n If you need to compute the length in O(1) time, use a real\n :js:class:`Map` instead.\n\n Parameters\n ----------\n hereditary:\n If ``True``, any \"plain old objects\" stored as values in the object\n will be wrapped in `as_object_map` themselves.\n\n Examples\n --------\n\n .. code-block:: python\n\n from pyodide.code import run_js\n\n o = run_js(\"({x : {y: 2}})\")\n # You have to access the properties of o as attributes\n assert o.x.y == 2\n with pytest.raises(TypeError):\n o[\"x\"] # is not subscriptable\n\n # as_object_map allows us to access the property with getitem\n assert o.as_object_map()[\"x\"].y == 2\n\n with pytest.raises(TypeError):\n # The inner object is not subscriptable because hereditary is False.\n o.as_object_map()[\"x\"][\"y\"]\n\n # When hereditary is True, the inner object is also subscriptable\n assert o.as_object_map(hereditary=True)[\"x\"][\"y\"] == 2\n\n \"\"\"\n raise NotImplementedError\n\n def new(self, *args: Any, **kwargs: Any) -> \"JsProxy\":\n \"\"\"Construct a new instance of the JavaScript object\"\"\"\n raise NotImplementedError\n\n def to_py(\n self,\n *,\n depth: int = -1,\n default_converter: Callable[\n [\"JsProxy\", Callable[[\"JsProxy\"], Any], Callable[[\"JsProxy\", Any], None]],\n Any,\n ]\n | None = None,\n ) -> Any:\n \"\"\"Convert the :class:`JsProxy` to a native Python object as best as\n possible.\n\n See :ref:`type-translations-jsproxy-to-py` for more information.\n\n Parameters\n ----------\n depth:\n Limit the depth of the conversion. If a shallow conversion is\n desired, set ``depth`` to 1.\n\n default_converter:\n\n If present, this will be invoked whenever Pyodide does not have some\n built in conversion for the object. 
If ``default_converter`` raises\n an error, the error will be allowed to propagate. Otherwise, the\n object returned will be used as the conversion.\n ``default_converter`` takes three arguments. The first argument is\n the value to be converted.\n\n Examples\n --------\n\n Here are a couple examples of converter functions. In addition to the\n normal conversions, convert :js:class:`Date`` to :py:class:`~datetime.datetime`:\n\n .. code-block:: python\n\n from datetime import datetime\n def default_converter(value, _ignored1, _ignored2):\n if value.constructor.name == \"Date\":\n return datetime.fromtimestamp(d.valueOf()/1000)\n return value\n\n Don't create any JsProxies, require a complete conversion or raise an error:\n\n .. code-block:: python\n\n def default_converter(_value, _ignored1, _ignored2):\n raise Exception(\"Failed to completely convert object\")\n\n The second and third arguments are only needed for converting\n containers. The second argument is a conversion function which is used\n to convert the elements of the container with the same settings. The\n third argument is a \"cache\" function which is needed to handle self\n referential containers. Consider the following example. Suppose we have\n a Javascript ``Pair`` class:\n\n .. code-block:: javascript\n\n class Pair {\n constructor(first, second){\n this.first = first;\n this.second = second;\n }\n }\n\n We can use the following ``default_converter`` to convert ``Pair`` to :py:class:`list`:\n\n .. code-block:: python\n\n def default_converter(value, convert, cache):\n if value.constructor.name != \"Pair\":\n return value\n result = []\n cache(value, result);\n result.append(convert(value.first))\n result.append(convert(value.second))\n return result\n\n Note that we have to cache the conversion of ``value`` before converting\n ``value.first`` and ``value.second``. To see why, consider a self\n referential pair:\n\n .. code-block:: javascript\n\n let p = new Pair(0, 0);\n p.first = p;\n\n Without ``cache(value, result);``, converting ``p`` would lead to an\n infinite recurse. 
With it, we can successfully convert ``p`` to a list\n such that ``l[0] is l``.\n \"\"\"\n raise NotImplementedError\n\n\nclass JsDoubleProxy(JsProxy):\n \"\"\"A double proxy created with :py:func:`create_proxy`.\"\"\"\n\n _js_type_flags = [\"IS_DOUBLE_PROXY\"]\n\n def destroy(self) -> None:\n \"\"\"Destroy the proxy.\"\"\"\n pass\n\n def unwrap(self) -> Any:\n \"\"\"Unwrap a double proxy created with :py:func:`create_proxy` into the\n wrapped Python object.\n \"\"\"\n raise NotImplementedError\n\n\nclass JsPromise(JsProxy):\n \"\"\"A :py:class:`~pyodide.ffi.JsProxy` of a :js:class:`Promise` or some other `thenable\n <https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Promise#thenables>`_\n JavaScript object.\n\n A JavaScript object is considered to be a :js:class:`Promise` if it has a ``then`` method.\n \"\"\"\n\n _js_type_flags = [\"IS_AWAITABLE\"]\n\n def then(\n self, onfulfilled: Callable[[Any], Any], onrejected: Callable[[Any], Any]\n ) -> \"JsPromise\":\n \"\"\"The :js:meth:`Promise.then` API, wrapped to manage the lifetimes of the\n handlers.\n\n Pyodide will automatically release the references to the handlers\n when the promise resolves.\n \"\"\"\n raise NotImplementedError\n\n def catch(self, onrejected: Callable[[Any], Any], /) -> \"JsPromise\":\n \"\"\"The :js:meth:`Promise.catch` API, wrapped to manage the lifetimes of the\n handler.\n\n Pyodide will automatically release the references to the handler\n when the promise resolves.\n \"\"\"\n raise NotImplementedError\n\n def finally_(self, onfinally: Callable[[], Any], /) -> \"JsPromise\":\n \"\"\"The :js:meth:`Promise.finally` API, wrapped to manage the lifetimes of\n the handler.\n\n Pyodide will automatically release the references to the handler\n when the promise resolves. Note the trailing underscore in the name;\n this is needed because ``finally`` is a reserved keyword in Python.\n \"\"\"\n raise NotImplementedError\n\n\nclass JsBuffer(JsProxy):\n \"\"\"A JsProxy of an array buffer or array buffer view\"\"\"\n\n _js_type_flags = [\"IS_BUFFER\"]\n # There are no types for buffers:\n # https://github.com/python/typing/issues/593\n # https://bugs.python.org/issue27501\n # This is just for docs so lets just make something up?\n\n # Argument should be a buffer.\n # See https://github.com/python/typing/issues/593\n def assign(self, rhs: Any, /) -> None:\n \"\"\"Assign from a Python buffer into the JavaScript buffer.\"\"\"\n\n # Argument should be a buffer.\n # See https://github.com/python/typing/issues/593\n def assign_to(self, to: Any, /) -> None:\n \"\"\"Assign to a Python buffer from the JavaScript buffer.\"\"\"\n\n def to_memoryview(self) -> memoryview:\n \"\"\"Convert a buffer to a memoryview.\n\n Copies the data once. This currently has the same effect as\n :py:meth:`~JsArray.to_py`.\n \"\"\"\n raise NotImplementedError\n\n def to_bytes(self) -> bytes:\n \"\"\"Convert a buffer to a bytes object.\n\n Copies the data once.\n \"\"\"\n raise NotImplementedError\n\n def to_file(self, file: IO[bytes] | IO[str], /) -> None:\n \"\"\"Writes a buffer to a file.\n\n Will write the entire contents of the buffer to the current position of\n the file.\n\n Example\n -------\n >>> import pytest; pytest.skip()\n >>> from js import Uint8Array\n >>> x = Uint8Array.new(range(10))\n >>> with open('file.bin', 'wb') as fh:\n ... x.to_file(fh)\n which is equivalent to,\n >>> with open('file.bin', 'wb') as fh:\n ... data = x.to_bytes()\n ... 
fh.write(data)\n but the latter copies the data twice whereas the former only copies the\n data once.\n \"\"\"\n\n def from_file(self, file: IO[bytes] | IO[str], /) -> None:\n \"\"\"Reads from a file into a buffer.\n\n Will try to read a chunk of data the same size as the buffer from\n the current position of the file.\n\n Example\n -------\n >>> import pytest; pytest.skip()\n >>> from js import Uint8Array\n >>> # the JsProxy need to be pre-allocated\n >>> x = Uint8Array.new(range(10))\n >>> with open('file.bin', 'rb') as fh:\n ... x.read_file(fh)\n which is equivalent to\n >>> x = Uint8Array.new(range(10))\n >>> with open('file.bin', 'rb') as fh:\n ... chunk = fh.read(size=x.byteLength)\n ... x.assign(chunk)\n but the latter copies the data twice whereas the former only copies the\n data once.\n \"\"\"\n\n def _into_file(self, file: IO[bytes] | IO[str], /) -> None:\n \"\"\"Will write the entire contents of a buffer into a file using\n ``canOwn : true`` without any copy. After this, the buffer cannot be\n used again.\n\n If ``file`` is not empty, its contents will be overwritten!\n\n Only ``MEMFS`` cares about the ``canOwn`` flag, other file systems will\n just ignore it.\n\n\n Example\n -------\n >>> import pytest; pytest.skip()\n >>> from js import Uint8Array\n >>> x = Uint8Array.new(range(10))\n >>> with open('file.bin', 'wb') as fh:\n ... x._into_file(fh)\n which is similar to\n >>> with open('file.bin', 'wb') as fh:\n ... data = x.to_bytes()\n ... fh.write(data)\n but the latter copies the data once whereas the former doesn't copy the\n data.\n \"\"\"\n\n def to_string(self, encoding: str | None = None) -> str:\n \"\"\"Convert a buffer to a string object.\n\n Copies the data twice.\n\n The encoding argument will be passed to the :js:class:`TextDecoder`\n constructor. It should be one of the encodings listed in `the table here\n <https://encoding.spec.whatwg.org/#names-and-labels>`_. 
The default\n encoding is utf8.\n \"\"\"\n raise NotImplementedError\n\n\nclass JsArray(JsProxy, Generic[T]):\n \"\"\"A JsProxy of an :js:class:`Array`, :js:class:`NodeList`, or :js:class:`TypedArray`\"\"\"\n\n _js_type_flags = [\"IS_ARRAY\", \"IS_NODE_LIST\", \"IS_TYPEDARRAY\"]\n\n def __getitem__(self, idx: int | slice) -> T:\n raise NotImplementedError\n\n def __setitem__(self, idx: int | slice, value: T) -> None:\n pass\n\n def __delitem__(self, idx: int | slice) -> None:\n pass\n\n def __len__(self) -> int:\n return 0\n\n def extend(self, other: Iterable[T], /) -> None:\n \"\"\"Extend array by appending elements from the iterable.\"\"\"\n\n def __reversed__(self) -> Iterator[T]:\n \"\"\"Return a reverse iterator over the :js:class:`Array`.\"\"\"\n raise NotImplementedError\n\n def pop(self, /, index: int = -1) -> T:\n \"\"\"Remove and return the ``item`` at ``index`` (default last).\n\n Raises :py:exc:`IndexError` if list is empty or index is out of range.\n \"\"\"\n raise NotImplementedError\n\n def push(self, /, object: T) -> None:\n pass\n\n def append(self, /, object: T) -> None:\n \"\"\"Append object to the end of the list.\"\"\"\n\n def index(self, /, value: T, start: int = 0, stop: int = sys.maxsize) -> int:\n \"\"\"Return first ``index`` at which ``value`` appears in the ``Array``.\n\n Raises :py:exc:`ValueError` if the value is not present.\n \"\"\"\n raise NotImplementedError\n\n def count(self, /, x: T) -> int:\n \"\"\"Return the number of times x appears in the list.\"\"\"\n raise NotImplementedError\n\n def reverse(self) -> None:\n \"\"\"Reverse the array in place.\n\n Present only if the wrapped Javascript object is an array.\n \"\"\"\n\n def to_py(\n self,\n *,\n depth: int = -1,\n default_converter: Callable[\n [\"JsProxy\", Callable[[\"JsProxy\"], Any], Callable[[\"JsProxy\", Any], None]],\n Any,\n ]\n | None = None,\n ) -> list[Any]:\n raise NotImplementedError\n\n\nclass JsTypedArray(JsBuffer, JsArray[int]):\n _js_type_flags = [\"IS_TYPEDARRAY\"]\n BYTES_PER_ELEMENT: int\n\n def subarray(\n self, start: int | None = None, stop: int | None = None\n ) -> \"JsTypedArray\":\n raise NotImplementedError\n\n buffer: JsBuffer\n\n\[email protected]\nclass JsMap(JsProxy, Generic[KT, VTco]):\n \"\"\"A JavaScript Map\n\n To be considered a map, a JavaScript object must have a ``get`` method, it\n must have a ``size`` or a ``length`` property which is a number\n (idiomatically it should be called ``size``) and it must be iterable.\n \"\"\"\n\n _js_type_flags = [\"HAS_GET | HAS_LENGTH | IS_ITERABLE\", \"IS_OBJECT_MAP\"]\n\n def __getitem__(self, idx: KT) -> VTco:\n raise NotImplementedError\n\n def __len__(self) -> int:\n return 0\n\n def __iter__(self) -> KT:\n raise NotImplementedError\n\n def __contains__(self, idx: KT) -> bool:\n raise NotImplementedError\n\n def keys(self) -> KeysView[KT]:\n \"\"\"Return a :py:class:`~collections.abc.KeysView` for the map.\"\"\"\n raise NotImplementedError\n\n def items(self) -> ItemsView[KT, VTco]:\n \"\"\"Return a :py:class:`~collections.abc.ItemsView` for the map.\"\"\"\n raise NotImplementedError\n\n def values(self) -> ValuesView[VTco]:\n \"\"\"Return a :py:class:`~collections.abc.ValuesView` for the map.\"\"\"\n raise NotImplementedError\n\n @overload\n def get(self, key: KT, /) -> VTco | None:\n ...\n\n @overload\n def get(self, key: KT, default: VTco | T, /) -> VTco | T:\n ...\n\n @docs_argspec(\"(self, key: KT, default: VTco | None, /) -> VTco\")\n def get(self, key: KT, default: Any = None, /) -> VTco:\n r\"\"\"If ``key in self``, 
returns ``self[key]``. Otherwise returns ``default``.\"\"\"\n raise NotImplementedError\n\n\[email protected]\nclass JsMutableMap(JsMap[KT, VT], Generic[KT, VT]):\n \"\"\"A JavaScript mutable map\n\n To be considered a mutable map, a JavaScript object must have a ``get``\n method, a ``has`` method, a ``size`` or a ``length`` property which is a\n number (idiomatically it should be called ``size``) and it must be iterable.\n\n Instances of the JavaScript builtin ``Map`` class are ``JsMutableMap`` s.\n Also proxies returned by :py:meth:`JsProxy.as_object_map` are instances of\n ``JsMap`` .\n \"\"\"\n\n _js_type_flags = [\"HAS_GET | HAS_SET | HAS_LENGTH | IS_ITERABLE\", \"IS_OBJECT_MAP\"]\n\n @overload\n def pop(self, key: KT, /) -> VT:\n ...\n\n @overload\n def pop(self, key: KT, default: VT | T = ..., /) -> VT | T:\n ...\n\n @docs_argspec(\"(self, key: KT, default: VT | None = None, /) -> VT\")\n def pop(self, key: KT, default: Any = None, /) -> Any:\n r\"\"\"If ``key in self``, return ``self[key]`` and remove key from ``self``. Otherwise\n returns ``default``.\n \"\"\"\n raise NotImplementedError\n\n def setdefault(self, key: KT, default: VT | None = None) -> VT:\n \"\"\"If ``key in self``, return ``self[key]``. Otherwise\n sets ``self[key] = default`` and returns ``default``.\n \"\"\"\n raise NotImplementedError\n\n def popitem(self) -> tuple[KT, VT]:\n \"\"\"Remove some arbitrary ``key, value`` pair from the map and returns the\n ``(key, value)`` tuple.\n \"\"\"\n raise NotImplementedError\n\n def clear(self) -> None:\n \"\"\"Empty out the map entirely.\"\"\"\n\n @overload\n def update(self, __m: Mapping[KT, VT], **kwargs: VT) -> None:\n ...\n\n @overload\n def update(self, __m: Iterable[tuple[KT, VT]], **kwargs: VT) -> None:\n ...\n\n @overload\n def update(self, **kwargs: VT) -> None:\n ...\n\n @docs_argspec(\n \"(self, other : Mapping[KT, VT] | Iterable[tuple[KT, VT]] = None , /, **kwargs) -> None\"\n )\n def update(self, *args: Any, **kwargs: Any) -> None:\n r\"\"\"Updates ``self`` from ``other`` and ``kwargs``.\n\n Parameters\n ----------\n other:\n\n Either a mapping or an iterable of pairs. This can be left out.\n\n kwargs: ``VT``\n\n Extra key-values pairs to insert into the map. Only usable for\n inserting extra strings.\n\n If ``other`` is present and is a :py:class:`~collections.abc.Mapping` or has a ``keys``\n method, does\n\n .. code-block:: python\n\n for k in other:\n self[k] = other[k]\n\n If ``other`` is present and lacks a ``keys`` method, does\n\n .. code-block:: python\n\n for (k, v) in other:\n self[k] = v\n\n In all cases this is followed by:\n\n .. 
code-block:: python\n\n for (k, v) in kwargs.items():\n self[k] = v\n\n \"\"\"\n\n def __setitem__(self, idx: KT, value: VT) -> None:\n pass\n\n def __delitem__(self, idx: KT) -> None:\n return None\n\n\nclass JsIterator(JsProxy, Generic[Tco]):\n \"\"\"A JsProxy of a JavaScript iterator.\n\n An object is a :py:class:`JsAsyncIterator` if it has a :js:meth:`~Iterator.next` method and either has a\n :js:data:`Symbol.iterator` or has no :js:data:`Symbol.asyncIterator`.\n \"\"\"\n\n _js_type_flags = [\"IS_ITERATOR\"]\n\n def __next__(self) -> Tco:\n raise NotImplementedError\n\n def __iter__(self) -> Iterator[Tco]:\n raise NotImplementedError\n\n\nclass JsAsyncIterator(JsProxy, Generic[Tco]):\n \"\"\"A JsProxy of a JavaScript async iterator.\n\n An object is a :py:class:`JsAsyncIterator` if it has a\n :js:meth:`~AsyncIterator.next` method and either has a\n :js:data:`Symbol.asyncIterator` or has no :js:data:`Symbol.iterator`\n \"\"\"\n\n _js_type_flags = [\"IS_ASYNC_ITERATOR\"]\n\n def __anext__(self) -> Awaitable[Tco]:\n raise NotImplementedError\n\n def __aiter__(self) -> AsyncIterator[Tco]:\n raise NotImplementedError\n\n\nclass JsIterable(JsProxy, Generic[Tco]):\n \"\"\"A JavaScript iterable object\n\n A JavaScript object is iterable if it has a :js:data:`Symbol.iterator` method.\n \"\"\"\n\n _js_type_flags = [\"IS_ITERABLE\"]\n\n def __iter__(self) -> Iterator[Tco]:\n raise NotImplementedError\n\n\nclass JsAsyncIterable(JsProxy, Generic[Tco]):\n \"\"\"A JavaScript async iterable object\n\n A JavaScript object is async iterable if it has a :js:data:`Symbol.asyncIterator`\n method.\n \"\"\"\n\n _js_type_flags = [\"IS_ASYNC_ITERABLE\"]\n\n def __aiter__(self) -> AsyncIterator[Tco]:\n raise NotImplementedError\n\n\nclass JsGenerator(JsIterable[Tco], Generic[Tco, Tcontra, Vco]):\n \"\"\"A JavaScript generator\n\n A JavaScript object is treated as a generator if its\n :js:data:`Symbol.toStringTag` is ``\"Generator\"``. Most likely this will be\n because it is a true :js:class:`Generator` produced by the JavaScript\n runtime, but it may be a custom object trying hard to pretend to be a\n generator. It should have :js:meth:`~Generator.next`,\n :js:meth:`~Generator.return` and :js:meth:`~Generator.throw` methods.\n \"\"\"\n\n _js_type_flags = [\"IS_GENERATOR\"]\n\n def send(self, value: Tcontra) -> Tco:\n \"\"\"\n Resumes the execution and \"sends\" a value into the generator function.\n\n The ``value`` argument becomes the result of the current yield\n expression. The ``send()`` method returns the next value yielded by the\n generator, or raises :py:exc:`StopIteration` if the generator exits without\n yielding another value. When ``send()`` is called to start the\n generator, the argument will be ignored. 
Unlike in Python, we cannot\n detect that the generator hasn't started yet, and no error will be\n thrown if the argument of a not-started generator is not ``None``.\n \"\"\"\n raise NotImplementedError\n\n @overload\n def throw(\n self,\n typ: type[BaseException],\n val: BaseException | object = ...,\n tb: TracebackType | None = ...,\n /,\n ) -> Tco:\n ...\n\n @overload\n def throw(\n self,\n typ: BaseException,\n val: None = ...,\n tb: TracebackType | None = ...,\n /,\n ) -> Tco:\n ...\n\n @docs_argspec(\"(self, error: BaseException, /) -> Tco\")\n def throw(\n self,\n *args: Any,\n ) -> Tco:\n \"\"\"\n Raises an exception at the point where the generator was paused, and\n returns the next value yielded by the generator function.\n\n If the generator exits without yielding another value, a\n :py:exc:`StopIteration` exception is raised. If the generator function does\n not catch the passed-in exception, or raises a different exception, then\n that exception propagates to the caller.\n\n In typical use, this is called with a single exception instance similar\n to the way the raise keyword is used.\n\n For backwards compatibility, however, a second signature is supported,\n following a convention from older versions of Python. The type argument\n should be an exception class, and value should be an exception instance.\n If the value is not provided, the type constructor is called to get an\n instance. If traceback is provided, it is set on the exception,\n otherwise any existing ``__traceback__`` attribute stored in value may\n be cleared.\n \"\"\"\n raise NotImplementedError\n\n def close(self) -> None:\n \"\"\"Raises a :py:exc:`GeneratorExit` at the point where the generator\n function was paused.\n\n If the generator function then exits gracefully, is already closed, or\n raises :py:exc:`GeneratorExit` (by not catching the exception), ``close()``\n returns to its caller. If the generator yields a value, a\n :py:exc:`RuntimeError` is raised. If the generator raises any other\n exception, it is propagated to the caller. ``close()`` does nothing if\n the generator has already exited due to an exception or normal exit.\n \"\"\"\n\n def __next__(self) -> Tco:\n raise NotImplementedError\n\n def __iter__(self) -> \"JsGenerator[Tco, Tcontra, Vco]\":\n raise NotImplementedError\n\n\nclass JsFetchResponse(JsProxy):\n \"\"\"A :py:class:`JsFetchResponse` object represents a :js:data:`Response` to a\n :js:func:`fetch` request.\n \"\"\"\n\n bodyUsed: bool\n ok: bool\n redirected: bool\n status: int\n statusText: str\n type: str\n url: str\n headers: Any\n\n def clone(self) -> \"JsFetchResponse\":\n raise NotImplementedError\n\n async def arrayBuffer(self) -> JsBuffer:\n raise NotImplementedError\n\n async def text(self) -> str:\n raise NotImplementedError\n\n async def json(self) -> JsProxy:\n raise NotImplementedError\n\n\nclass JsAsyncGenerator(JsAsyncIterable[Tco], Generic[Tco, Tcontra, Vco]):\n \"\"\"A JavaScript :js:class:`AsyncGenerator`\n\n A JavaScript object is treated as an async generator if it's\n :js:data:`Symbol.toStringTag` is ``\"AsyncGenerator\"``. Most likely this will\n be because it is a true async generator produced by the JavaScript runtime,\n but it may be a custom object trying hard to pretend to be an async\n generator. 
It should have :js:meth:`~AsyncGenerator.next`,\n :js:meth:`~AsyncGenerator.return`, and :js:meth:`~AsyncGenerator.throw`\n methods.\n \"\"\"\n\n _js_type_flags = [\"IS_ASYNC_GENERATOR\"]\n\n def __anext__(self) -> Awaitable[Tco]:\n raise NotImplementedError\n\n def __aiter__(self) -> \"JsAsyncGenerator[Tco, Tcontra, Vco]\":\n raise NotImplementedError\n\n def asend(self, value: Tcontra, /) -> Awaitable[Tco]:\n \"\"\"Resumes the execution and \"sends\" a value into the async generator\n function.\n\n The ``value`` argument becomes the result of the current yield\n expression. The awaitable returned by the ``asend()`` method will return\n the next value yielded by the generator or raises\n :py:exc:`StopAsyncIteration` if the asynchronous generator returns. If the\n generator returned a value, this value is discarded (because in Python\n async generators cannot return a value).\n\n When ``asend()`` is called to start the generator, the argument will be\n ignored. Unlike in Python, we cannot detect that the generator hasn't\n started yet, and no error will be thrown if the argument of a\n not-started generator is not ``None``.\n \"\"\"\n raise NotImplementedError\n\n @overload\n def athrow(\n self,\n typ: type[BaseException],\n val: BaseException | object = ...,\n tb: TracebackType | None = ...,\n /,\n ) -> Awaitable[Tco]:\n ...\n\n @overload\n def athrow(\n self,\n typ: BaseException,\n val: None = ...,\n tb: TracebackType | None = ...,\n /,\n ) -> Awaitable[Tco]:\n ...\n\n @docs_argspec(\"(self, error: BaseException, /) -> Tco\")\n def athrow(self, value: Any, *args: Any) -> Awaitable[Tco]:\n \"\"\"Resumes the execution and raises an exception at the point where the\n generator was paused.\n\n The awaitable returned by ``athrow()`` method will return the next value\n yielded by the generator or raises :py:exc:`StopAsyncIteration` if the\n asynchronous generator returns. If the generator returned a value, this\n value is discarded (because in Python async generators cannot return a\n value). If the generator function does not catch the passed-in\n exception, or raises a different exception, then that exception\n propagates to the caller.\n \"\"\"\n raise NotImplementedError\n\n def aclose(self) -> Awaitable[None]:\n \"\"\"Raises a :py:exc:`GeneratorExit` at the point where the generator\n function was paused.\n\n If the generator function then exits gracefully, is already closed, or\n raises :py:exc:`GeneratorExit` (by not catching the exception),\n ``aclose()`` returns to its caller. If the generator yields a value, a\n :py:exc:`RuntimeError` is raised. If the generator raises any other\n exception, it is propagated to the caller. ``aclose()`` does nothing if\n the generator has already exited due to an exception or normal exit.\n \"\"\"\n raise NotImplementedError\n\n\nclass JsCallable(JsProxy):\n _js_type_flags = [\"IS_CALLABLE\"]\n\n def __call__(self):\n pass\n\n\nclass JsOnceCallable(JsCallable):\n def destroy(self):\n pass\n\n\nclass JsException(JsProxy, Exception):\n \"\"\"A JavaScript Error.\n\n These are pickleable unlike other JsProxies.\n \"\"\"\n\n # Note: Unlike many of these classes, this one is never actually seen by the\n # user IN_BROWSER (it's replaced by a different JsException in\n # pyodide._core). 
We use it to unpickle errors so we need it to be\n # instantiable.\n def __new__(cls, *args, **kwargs):\n if args[0] == _instantiate_token:\n return super().__new__(cls, *args, **kwargs)\n return cls._new_exc(*args, **kwargs)\n\n @classmethod\n def _new_exc(cls, name: str, message: str = \"\", stack: str = \"\") -> \"JsException\":\n result = super().__new__(JsException, _instantiate_token)\n result.name = name\n result.message = message\n result.stack = stack\n return result\n\n @classmethod\n def new(cls, *args: Any) -> \"JsException\":\n return cls()\n\n def __str__(self):\n return f\"{self.name}: {self.message}\"\n\n name: str\n \"\"\"The name of the error type\"\"\"\n\n message: str\n \"\"\"The error message\"\"\"\n\n stack: str\n \"\"\"The JavaScript stack trace\"\"\"\n\n\nclass ConversionError(Exception):\n \"\"\"An error thrown when conversion between JavaScript and Python fails.\"\"\"\n\n\nclass InternalError(Exception):\n \"\"\"Thrown when a recoverable assertion error occurs in internal Pyodide code\"\"\"\n\n pass\n\n\nclass JsDomElement(JsProxy):\n @property\n def tagName(self) -> str:\n return \"\"\n\n @property\n def children(self) -> Sequence[\"JsDomElement\"]:\n return []\n\n def appendChild(self, child: \"JsDomElement\") -> None:\n pass\n\n def addEventListener(self, event: str, listener: Callable[[Any], None]) -> None:\n pass\n\n def removeEventListener(self, event: str, listener: Callable[[Any], None]) -> None:\n pass\n\n\n# from pyproxy.c\n\n\ndef create_once_callable(obj: Callable[..., Any], /) -> JsOnceCallable:\n \"\"\"Wrap a Python Callable in a JavaScript function that can be called once.\n\n After being called the proxy will decrement the reference count\n of the Callable. The JavaScript function also has a ``destroy`` API that\n can be used to release the proxy without calling it.\n \"\"\"\n return obj # type: ignore[return-value]\n\n\ndef create_proxy(\n obj: Any, /, *, capture_this: bool = False, roundtrip: bool = True\n) -> JsDoubleProxy:\n \"\"\"Create a :py:class:`JsProxy` of a :js:class:`~pyodide.ffi.PyProxy`.\n\n This allows explicit control over the lifetime of the\n :js:class:`~pyodide.ffi.PyProxy` from Python: call the\n :py:meth:`~JsDoubleProxy.destroy` API when done.\n\n Parameters\n ----------\n obj:\n The object to wrap.\n\n capture_this :\n If the object is callable, should ``this`` be passed as the first\n argument when calling it from JavaScript.\n\n roundtrip:\n When the proxy is converted back from JavaScript to Python, if this is\n ``True`` it is converted into a double proxy. If ``False``, it is\n unwrapped into a Python object. In the case that ``roundtrip`` is\n ``True`` it is possible to unwrap a double proxy with the\n :py:meth:`JsDoubleProxy.unwrap` method. This is useful to allow easier\n control of lifetimes from Python:\n\n .. 
code-block:: python\n\n from js import o\n d = {}\n o.d = create_proxy(d, roundtrip=True)\n o.d.destroy() # Destroys the proxy created with create_proxy\n\n With ``roundtrip=False`` this would be an error.\n \"\"\"\n return obj\n\n\n# from python2js\n\n\n@overload\ndef to_js(\n obj: list[Any] | tuple[Any],\n /,\n *,\n depth: int = -1,\n pyproxies: JsProxy | None = None,\n create_pyproxies: bool = True,\n dict_converter: Callable[[Iterable[JsArray[Any]]], JsProxy] | None = None,\n default_converter: Callable[\n [Any, Callable[[Any], JsProxy], Callable[[Any, JsProxy], None]], JsProxy\n ]\n | None = None,\n) -> JsArray[Any]:\n ...\n\n\n@overload\ndef to_js(\n obj: dict[Any, Any],\n /,\n *,\n depth: int = -1,\n pyproxies: JsProxy | None,\n create_pyproxies: bool,\n dict_converter: None,\n default_converter: Callable[\n [Any, Callable[[Any], JsProxy], Callable[[Any, JsProxy], None]], JsProxy\n ]\n | None = None,\n) -> JsMap[Any, Any]:\n ...\n\n\n@overload\ndef to_js(\n obj: Any,\n /,\n *,\n depth: int = -1,\n pyproxies: JsProxy | None = None,\n create_pyproxies: bool = True,\n dict_converter: Callable[[Iterable[JsArray[Any]]], JsProxy] | None = None,\n default_converter: Callable[\n [Any, Callable[[Any], JsProxy], Callable[[Any, JsProxy], None]], JsProxy\n ]\n | None = None,\n) -> Any:\n ...\n\n\ndef to_js(\n obj: Any,\n /,\n *,\n depth: int = -1,\n pyproxies: JsProxy | None = None,\n create_pyproxies: bool = True,\n dict_converter: Callable[[Iterable[JsArray[Any]]], JsProxy] | None = None,\n default_converter: Callable[\n [Any, Callable[[Any], JsProxy], Callable[[Any, JsProxy], None]], JsProxy\n ]\n | None = None,\n) -> Any:\n \"\"\"Convert the object to JavaScript.\n\n This is similar to :js:meth:`~pyodide.ffi.PyProxy.toJs`, but for use from Python. If the\n object can be implicitly translated to JavaScript, it will be returned\n unchanged. If the object cannot be converted into JavaScript, this method\n will return a :py:class:`JsProxy` of a :js:class:`~pyodide.ffi.PyProxy`, as if you had used\n :func:`~pyodide.ffi.create_proxy`.\n\n See :ref:`type-translations-pyproxy-to-js` for more information.\n\n Parameters\n ----------\n obj :\n The Python object to convert\n\n depth :\n The maximum depth to do the conversion. Negative numbers are treated as\n infinite. Set this to 1 to do a shallow conversion.\n\n pyproxies:\n Should be a JavaScript :js:class:`Array`. If provided, any ``PyProxies``\n generated will be stored here. You can later use :py:meth:`destroy_proxies`\n if you want to destroy the proxies from Python (or from JavaScript you\n can just iterate over the :js:class:`Array` and destroy the proxies).\n\n create_pyproxies:\n If you set this to :py:data:`False`, :py:func:`to_js` will raise an error rather\n than creating any pyproxies.\n\n dict_converter:\n This converter if provided receives a (JavaScript) iterable of\n (JavaScript) pairs [key, value]. It is expected to return the desired\n result of the dict conversion. Some suggested values for this argument:\n\n * ``js.Map.new`` -- similar to the default behavior\n * ``js.Array.from`` -- convert to an array of entries\n * ``js.Object.fromEntries`` -- convert to a JavaScript object\n\n default_converter:\n If present will be invoked whenever Pyodide does not have some built in\n conversion for the object. If ``default_converter`` raises an error, the\n error will be allowed to propagate. Otherwise, the object returned will\n be used as the conversion. 
``default_converter`` takes three arguments.\n The first argument is the value to be converted.\n\n Examples\n --------\n\n Here are some examples demonstrating the usage of the ``default_converter``\n argument.\n\n\n In addition to the normal conversions, convert JavaScript :js:class:`Date`\n objects to :py:class:`~datetime.datetime` objects:\n\n .. code-block:: python\n\n from datetime import datetime\n from js import Date\n def default_converter(value, _ignored1, _ignored2):\n if isinstance(value, datetime):\n return Date.new(value.timestamp() * 1000)\n return value\n\n Don't create any PyProxies, require a complete conversion or raise an error:\n\n .. code-block:: python\n\n def default_converter(_value, _ignored1, _ignored2):\n raise Exception(\"Failed to completely convert object\")\n\n The second and third arguments are only needed for converting containers.\n The second argument is a conversion function which is used to convert the\n elements of the container with the same settings. The third argument is a\n \"cache\" function which is needed to handle self referential containers.\n Consider the following example. Suppose we have a Python ``Pair`` class:\n\n .. code-block:: python\n\n class Pair:\n def __init__(self, first, second):\n self.first = first self.second = second\n\n We can use the following ``default_converter`` to convert ``Pair`` to\n :js:class:`Array`:\n\n .. code-block:: python\n\n from js import Array\n\n def default_converter(value, convert, cache):\n if not isinstance(value, Pair):\n return value\n result = Array.new() cache(value, result);\n result.push(convert(value.first)) result.push(convert(value.second))\n return result\n\n Note that we have to cache the conversion of ``value`` before converting\n ``value.first`` and ``value.second``. To see why, consider a self\n referential pair:\n\n .. code-block:: javascript\n\n p = Pair(0, 0); p.first = p;\n\n Without ``cache(value, result);``, converting ``p`` would lead to an\n infinite recurse. With it, we can successfully convert ``p`` to an Array\n such that ``l[0] === l``.\n \"\"\"\n return obj\n\n\ndef destroy_proxies(pyproxies: JsArray[Any], /) -> None:\n \"\"\"Destroy all PyProxies in a JavaScript array.\n\n pyproxies must be a JavaScript Array of PyProxies. Intended for use\n with the arrays created from the \"pyproxies\" argument of :js:meth:`~pyodide.ffi.PyProxy.toJs`\n and :py:func:`to_js`. This method is necessary because indexing the Array from\n Python automatically unwraps the PyProxy into the wrapped Python object.\n \"\"\"\n pass\n\n\n__name__ = _save_name\ndel _save_name\n\n__all__ = [\n \"ConversionError\",\n \"InternalError\",\n \"JsArray\",\n \"JsAsyncGenerator\",\n \"JsAsyncIterable\",\n \"JsAsyncIterator\",\n \"JsBuffer\",\n \"JsDoubleProxy\",\n \"JsException\",\n \"JsFetchResponse\",\n \"JsGenerator\",\n \"JsIterable\",\n \"JsIterator\",\n \"JsMap\",\n \"JsMutableMap\",\n \"JsPromise\",\n \"JsProxy\",\n \"JsDomElement\",\n \"JsCallable\",\n \"JsTypedArray\",\n \"create_once_callable\",\n \"create_proxy\",\n \"destroy_proxies\",\n \"to_js\",\n]\n",
"path": "src/py/_pyodide/_core_docs.py"
}
] | diff --git a/docs/project/changelog.md b/docs/project/changelog.md
index a56412e88ec..bf1866a1654 100644
--- a/docs/project/changelog.md
+++ b/docs/project/changelog.md
@@ -31,6 +31,10 @@ myst:
- {{ Enhancement }} Added `headers` property to `pyodide.http.FetchResponse`.
{pr}`2078`
+- {{ Fix }} A `JSProxy` of a `DOMException` will now inherit from exception so
+ it can be raised in Python.
+ {pr}`3868`
+
### Packages
- OpenBLAS has been added and scipy now uses OpenBLAS rather than CLAPACK
diff --git a/src/core/jsproxy.c b/src/core/jsproxy.c
index a558b4ed75e..56b34a67456 100644
--- a/src/core/jsproxy.c
+++ b/src/core/jsproxy.c
@@ -4011,6 +4011,7 @@ EM_JS_NUM(int, JsProxy_compute_typeflags, (JsRef idobj), {
}
const isBufferView = safeBool(() => ArrayBuffer.isView(obj));
const isArray = safeBool(() => Array.isArray(obj));
+ const constructorName = safeBool(() => obj.constructor.name) || "";
// If we somehow set more than one of IS_CALLABLE, IS_BUFFER, and IS_ERROR,
// we'll run into trouble. I think that for this to happen, someone would have
@@ -4040,7 +4041,28 @@ EM_JS_NUM(int, JsProxy_compute_typeflags, (JsRef idobj), {
isBufferView && typeTag !== '[object DataView]');
SET_FLAG_IF(IS_GENERATOR, typeTag === "[object Generator]");
SET_FLAG_IF(IS_ASYNC_GENERATOR, typeTag === "[object AsyncGenerator]");
- SET_FLAG_IF(IS_ERROR, (hasProperty(obj, "name") && hasProperty(obj, "message") && hasProperty(obj, "stack")) && !(type_flags & (IS_CALLABLE | IS_BUFFER)));
+
+ /**
+ * DOMException is a weird special case. According to WHATWG, there are two
+ * types of Exception objects, simple exceptions and DOMExceptions. The spec
+ * says:
+ *
+ * > if an implementation gives native Error objects special powers or
+ * > nonstandard properties (such as a stack property), it should also expose
+ * > those on DOMException objects
+ *
+ * Firefox respects this and has DOMException.stack. But Safari and Chrome do
+ * not. Hence the special check here for DOMException.
+ */
+ SET_FLAG_IF(IS_ERROR,
+ (
+ hasProperty(obj, "name")
+ && hasProperty(obj, "message")
+ && (
+ hasProperty(obj, "stack")
+ || constructorName === "DOMException"
+ )
+ ) && !(type_flags & (IS_CALLABLE | IS_BUFFER)));
// clang-format on
return type_flags;
});
diff --git a/src/py/_pyodide/_core_docs.py b/src/py/_pyodide/_core_docs.py
index da86e3c713d..74b696e5f86 100644
--- a/src/py/_pyodide/_core_docs.py
+++ b/src/py/_pyodide/_core_docs.py
@@ -988,6 +988,10 @@ def _new_exc(cls, name: str, message: str = "", stack: str = "") -> "JsException
result.stack = stack
return result
+ @classmethod
+ def new(cls, *args: Any) -> "JsException":
+ return cls()
+
def __str__(self):
return f"{self.name}: {self.message}"
diff --git a/src/py/js.pyi b/src/py/js.pyi
index f5c365ad634..0ea16163fb2 100644
--- a/src/py/js.pyi
+++ b/src/py/js.pyi
@@ -5,6 +5,7 @@ from _pyodide._core_docs import _JsProxyMetaClass
from pyodide.ffi import (
JsArray,
JsDomElement,
+ JsException,
JsFetchResponse,
JsProxy,
JsTypedArray,
@@ -90,3 +91,6 @@ class document(_JsObject):
def createElement(tagName: str) -> JsDomElement: ...
@staticmethod
def appendChild(child: JsDomElement) -> None: ...
+
+class DOMException(JsException):
+ pass
diff --git a/src/tests/test_pyodide.py b/src/tests/test_pyodide.py
index ab3bd278635..ddb4836b456 100644
--- a/src/tests/test_pyodide.py
+++ b/src/tests/test_pyodide.py
@@ -583,6 +583,18 @@ def test_run_python_js_error(selenium):
)
+@pytest.mark.xfail_browsers(node="No DOMException in node")
+@run_in_pyodide
+def test_run_python_dom_error(selenium):
+ import pytest
+
+ from js import DOMException
+ from pyodide.ffi import JsException
+
+ with pytest.raises(JsException, match="oops"):
+ raise DOMException.new("oops")
+
+
def test_run_python_locals(selenium):
selenium.run_js(
"""
diff --git a/src/tests/test_typeconversions.py b/src/tests/test_typeconversions.py
index 5e5f6265c8e..cfbfa1fe47d 100644
--- a/src/tests/test_typeconversions.py
+++ b/src/tests/test_typeconversions.py
@@ -951,6 +951,10 @@ def test_dict_js2py2js(selenium):
def test_error_js2py2js(selenium):
selenium.run_js("self.err = new Error('hello there?');")
assert_js_to_py_to_js(selenium, "err")
+ if selenium.browser == "node":
+ return
+ selenium.run_js("self.err = new DOMException('hello there?');")
+ assert_js_to_py_to_js(selenium, "err")
def test_error_py2js2py(selenium):
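For context, here is a minimal usage sketch of what this diff enables. It simply mirrors the `test_run_python_dom_error` test added above; `DOMException.new` and `pyodide.ffi.JsException` are taken from the diff itself, and this only works in a browser (Node has no `DOMException`):

```python
# Runs inside Pyodide in a browser; Node has no DOMException.
from js import DOMException
from pyodide.ffi import JsException

try:
    # After this change, the DOMException proxy is detected as an error type
    # even without a `stack` property, so it inherits from Exception and can
    # be raised from Python.
    raise DOMException.new("oops")
except JsException as err:
    print(err.name, err.message)  # the proxied error's JS name and message
```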
|
kivy__kivy-6128 | 🙏 emoji in README.md breaks return fileh.read() in pip install from master
When running with the latest pip 19:
```
pip3 install https://github.com/kivy/kivy/archive/master.zip
```
I get:
```
Collecting https://github.com/kivy/kivy/archive/master.zip
Downloading https://github.com/kivy/kivy/archive/master.zip
/ 41.6MB 24.9MB/s
Complete output from command python setup.py egg_info:
fatal: not a git repository (or any of the parent directories): .git
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/tmp/pip-req-build-qbahrg18/setup.py", line 1007, in <module>
long_description=get_description(),
File "/tmp/pip-req-build-qbahrg18/setup.py", line 44, in get_description
return fileh.read()
File "/usr/lib/python3.6/encodings/ascii.py", line 26, in decode
return codecs.ascii_decode(input, self.errors)[0]
UnicodeDecodeError: 'ascii' codec can't decode byte 0xf0 in position 4973: ordinal not in range(128)
Using distutil
```
Position 4973 is the 🙏 emoji.
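The root cause is that `open()` falls back to the interpreter's default text encoding, which is ASCII in this environment, so the multi-byte 🙏 character cannot be decoded. A minimal sketch of the fix (read the README as bytes and decode explicitly as UTF-8; this mirrors the patched `get_description()` shown in the files below):

```python
from os.path import dirname, join


def get_description():
    # Binary mode plus an explicit decode keeps the locale's default
    # codec (ASCII here) out of the picture entirely.
    with open(join(dirname(__file__), 'README.md'), 'rb') as fileh:
        return fileh.read().decode('utf8')
```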
| [
{
"content": "#\n# Kivy - Cross-platform UI framework\n# https://kivy.org/\n#\nfrom __future__ import print_function\n\nimport sys\nbuild_examples = False\nif \"--build_examples\" in sys.argv:\n build_examples = True\n sys.argv.remove(\"--build_examples\")\n\nfrom copy import deepcopy\nimport os\nfrom os.path import join, dirname, sep, exists, basename, isdir\nfrom os import walk, environ, makedirs\nfrom distutils.version import LooseVersion\nfrom distutils.sysconfig import get_python_inc\nfrom collections import OrderedDict\nfrom time import sleep, time\nfrom subprocess import check_output, CalledProcessError\nfrom datetime import datetime\n\nif environ.get('KIVY_USE_SETUPTOOLS'):\n from setuptools import setup, Extension\n print('Using setuptools')\nelse:\n from distutils.core import setup\n from distutils.extension import Extension\n print('Using distutils')\n\n\nPY3 = sys.version > '3'\n\nif PY3: # fix error with py3's LooseVersion comparisons\n def ver_equal(self, other):\n return self.version == other\n\n LooseVersion.__eq__ = ver_equal\n\n\ndef get_description():\n with open(join(dirname(__file__), 'README.md')) as fileh:\n return fileh.read()\n\n\ndef get_version(filename='kivy/version.py'):\n VERSION = kivy.__version__\n epoch = int(environ.get('SOURCE_DATE_EPOCH', time()))\n DATE = datetime.utcfromtimestamp(epoch).strftime('%Y%m%d')\n try:\n GIT_REVISION = check_output(\n ['git', 'rev-parse', 'HEAD']\n ).strip().decode('ascii')\n except (CalledProcessError, OSError, IOError) as e:\n # CalledProcessError has no errno\n errno = getattr(e, 'errno', None)\n if errno != 2 and 'CalledProcessError' not in repr(e):\n raise\n GIT_REVISION = \"Unknown\"\n\n cnt = (\n \"# THIS FILE IS GENERATED FROM KIVY SETUP.PY\\n\"\n \"__version__ = '%(version)s'\\n\"\n \"__hash__ = '%(hash)s'\\n\"\n \"__date__ = '%(date)s'\\n\"\n )\n\n with open(filename, 'w') as f:\n f.write(cnt % {\n 'version': VERSION,\n 'hash': GIT_REVISION,\n 'date': DATE\n })\n return VERSION\n\n\nMIN_CYTHON_STRING = '0.24'\nMIN_CYTHON_VERSION = LooseVersion(MIN_CYTHON_STRING)\nMAX_CYTHON_STRING = '0.28.5'\nMAX_CYTHON_VERSION = LooseVersion(MAX_CYTHON_STRING)\nCYTHON_UNSUPPORTED = (\n # ref https://github.com/cython/cython/issues/1968\n '0.27', '0.27.2'\n)\n\n\ndef getoutput(cmd, env=None):\n import subprocess\n p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, env=env)\n p.wait()\n if p.returncode: # if not returncode == 0\n print('WARNING: A problem occurred while running {0} (code {1})\\n'\n .format(cmd, p.returncode))\n stderr_content = p.stderr.read()\n if stderr_content:\n print('{0}\\n'.format(stderr_content))\n return \"\"\n return p.stdout.read()\n\n\ndef pkgconfig(*packages, **kw):\n flag_map = {'-I': 'include_dirs', '-L': 'library_dirs', '-l': 'libraries'}\n lenviron = None\n pconfig = join(sys.prefix, 'libs', 'pkgconfig')\n\n if isdir(pconfig):\n lenviron = environ.copy()\n lenviron['PKG_CONFIG_PATH'] = '{};{}'.format(\n environ.get('PKG_CONFIG_PATH', ''), pconfig)\n cmd = 'pkg-config --libs --cflags {}'.format(' '.join(packages))\n results = getoutput(cmd, lenviron).split()\n for token in results:\n ext = token[:2].decode('utf-8')\n flag = flag_map.get(ext)\n if not flag:\n continue\n kw.setdefault(flag, []).append(token[2:].decode('utf-8'))\n return kw\n\n\n# -----------------------------------------------------------------------------\n# Determine on which platform we are\n\nplatform = sys.platform\n\n# Detect 32/64bit for OSX (http://stackoverflow.com/a/1405971/798575)\nif 
sys.platform == 'darwin':\n if sys.maxsize > 2 ** 32:\n osx_arch = 'x86_64'\n else:\n osx_arch = 'i386'\n\n# Detect Python for android project (http://github.com/kivy/python-for-android)\nndkplatform = environ.get('NDKPLATFORM')\nif ndkplatform is not None and environ.get('LIBLINK'):\n platform = 'android'\nkivy_ios_root = environ.get('KIVYIOSROOT', None)\nif kivy_ios_root is not None:\n platform = 'ios'\n# proprietary broadcom video core drivers\nif exists('/opt/vc/include/bcm_host.h'):\n platform = 'rpi'\n# use mesa video core drivers\nif environ.get('VIDEOCOREMESA', None):\n platform = 'vc'\nif exists('/usr/lib/arm-linux-gnueabihf/libMali.so'):\n platform = 'mali'\n\n# -----------------------------------------------------------------------------\n# Detect options\n#\nc_options = OrderedDict()\nc_options['use_rpi'] = platform == 'rpi'\nc_options['use_egl'] = False\nc_options['use_opengl_es2'] = None\nc_options['use_opengl_mock'] = environ.get('READTHEDOCS', None) == 'True'\nc_options['use_sdl2'] = None\nc_options['use_pangoft2'] = None\nc_options['use_ios'] = False\nc_options['use_android'] = False\nc_options['use_mesagl'] = False\nc_options['use_x11'] = False\nc_options['use_wayland'] = False\nc_options['use_gstreamer'] = None\nc_options['use_avfoundation'] = platform == 'darwin'\nc_options['use_osx_frameworks'] = platform == 'darwin'\nc_options['debug_gl'] = False\n\n# now check if environ is changing the default values\nfor key in list(c_options.keys()):\n ukey = key.upper()\n if ukey in environ:\n value = bool(int(environ[ukey]))\n print('Environ change {0} -> {1}'.format(key, value))\n c_options[key] = value\n\n\n# -----------------------------------------------------------------------------\n# Cython check\n# on python-for-android and kivy-ios, cython usage is external\n\ncython_unsupported_append = '''\n\n Please note that the following versions of Cython are not supported\n at all: {}\n'''.format(', '.join(map(str, CYTHON_UNSUPPORTED)))\n\ncython_min = '''\\\n This version of Cython is not compatible with Kivy. Please upgrade to\n at least version {0}, preferably the newest supported version {1}.\n\n If your platform provides a Cython package, make sure you have upgraded\n to the newest version. If the newest version available is still too low,\n please remove it and install the newest supported Cython via pip:\n\n pip install -I Cython=={1}{2}\\\n'''.format(MIN_CYTHON_STRING, MAX_CYTHON_STRING,\n cython_unsupported_append if CYTHON_UNSUPPORTED else '')\n\ncython_max = '''\\\n This version of Cython is untested with Kivy. While this version may\n work perfectly fine, it is possible that you may experience issues. If\n you do have issues, please downgrade to a supported version. It is\n best to use the newest supported version, {1}, but the minimum\n supported version is {0}.\n\n If your platform provides a Cython package, check if you can downgrade\n to a supported version. Otherwise, uninstall the platform package and\n install Cython via pip:\n\n pip install -I Cython=={1}{2}\\\n'''.format(MIN_CYTHON_STRING, MAX_CYTHON_STRING,\n cython_unsupported_append if CYTHON_UNSUPPORTED else '')\n\ncython_unsupported = '''\\\n This version of Cython suffers from known bugs and is unsupported.\n Please install the newest supported version, {1}, if possible, but\n the minimum supported version is {0}.\n\n If your platform provides a Cython package, check if you can install\n a supported version. 
Otherwise, uninstall the platform package and\n install Cython via pip:\n\n pip install -I Cython=={1}{2}\\\n'''.format(MIN_CYTHON_STRING, MAX_CYTHON_STRING,\n cython_unsupported_append)\n\nhave_cython = False\nskip_cython = False\nif platform in ('ios', 'android'):\n print('\\nCython check avoided.')\n skip_cython = True\nelse:\n try:\n # check for cython\n from Cython.Distutils import build_ext\n have_cython = True\n import Cython\n cy_version_str = Cython.__version__\n cy_ver = LooseVersion(cy_version_str)\n print('\\nDetected Cython version {}'.format(cy_version_str))\n if cy_ver < MIN_CYTHON_VERSION:\n print(cython_min)\n raise ImportError('Incompatible Cython Version')\n if cy_ver in CYTHON_UNSUPPORTED:\n print(cython_unsupported)\n raise ImportError('Incompatible Cython Version')\n if cy_ver > MAX_CYTHON_VERSION:\n print(cython_max)\n sleep(1)\n except ImportError:\n print(\"\\nCython is missing, it's required for compiling kivy !\\n\\n\")\n raise\n\nif not have_cython:\n from distutils.command.build_ext import build_ext\n\n# -----------------------------------------------------------------------------\n# Setup classes\n\n# the build path where kivy is being compiled\nsrc_path = build_path = dirname(__file__)\n\n\nclass KivyBuildExt(build_ext):\n\n def finalize_options(self):\n retval = build_ext.finalize_options(self)\n global build_path\n if (self.build_lib is not None and exists(self.build_lib) and\n not self.inplace):\n build_path = self.build_lib\n return retval\n\n def build_extensions(self):\n # build files\n config_h_fn = ('include', 'config.h')\n config_pxi_fn = ('include', 'config.pxi')\n config_py_fn = ('setupconfig.py', )\n\n # generate headers\n config_h = '// Autogenerated file for Kivy C configuration\\n'\n config_h += '#define __PY3 {0}\\n'.format(int(PY3))\n config_pxi = '# Autogenerated file for Kivy Cython configuration\\n'\n config_pxi += 'DEF PY3 = {0}\\n'.format(int(PY3))\n config_py = '# Autogenerated file for Kivy configuration\\n'\n config_py += 'PY3 = {0}\\n'.format(int(PY3))\n config_py += 'CYTHON_MIN = {0}\\nCYTHON_MAX = {1}\\n'.format(\n repr(MIN_CYTHON_STRING), repr(MAX_CYTHON_STRING))\n config_py += 'CYTHON_BAD = {0}\\n'.format(repr(', '.join(map(\n str, CYTHON_UNSUPPORTED))))\n\n # generate content\n print('Build configuration is:')\n for opt, value in c_options.items():\n value = int(bool(value))\n print(' * {0} = {1}'.format(opt, value))\n opt = opt.upper()\n config_h += '#define __{0} {1}\\n'.format(opt, value)\n config_pxi += 'DEF {0} = {1}\\n'.format(opt, value)\n config_py += '{0} = {1}\\n'.format(opt, value)\n debug = bool(self.debug)\n print(' * debug = {0}'.format(debug))\n\n config_pxi += 'DEF DEBUG = {0}\\n'.format(debug)\n config_py += 'DEBUG = {0}\\n'.format(debug)\n config_pxi += 'DEF PLATFORM = \"{0}\"\\n'.format(platform)\n config_py += 'PLATFORM = \"{0}\"\\n'.format(platform)\n for fn, content in (\n (config_h_fn, config_h), (config_pxi_fn, config_pxi),\n (config_py_fn, config_py)):\n build_fn = expand(build_path, *fn)\n if self.update_if_changed(build_fn, content):\n print('Updated {}'.format(build_fn))\n src_fn = expand(src_path, *fn)\n if src_fn != build_fn and self.update_if_changed(src_fn, content):\n print('Updated {}'.format(src_fn))\n\n c = self.compiler.compiler_type\n print('Detected compiler is {}'.format(c))\n if c != 'msvc':\n for e in self.extensions:\n e.extra_link_args += ['-lm']\n\n build_ext.build_extensions(self)\n\n def update_if_changed(self, fn, content):\n need_update = True\n if exists(fn):\n with open(fn) as 
fd:\n need_update = fd.read() != content\n if need_update:\n directory_name = dirname(fn)\n if not exists(directory_name):\n makedirs(directory_name)\n with open(fn, 'w') as fd:\n fd.write(content)\n return need_update\n\n\ndef _check_and_fix_sdl2_mixer(f_path):\n print(\"Check if SDL2_mixer smpeg2 have an @executable_path\")\n rpath_from = (\"@executable_path/../Frameworks/SDL2.framework\"\n \"/Versions/A/SDL2\")\n rpath_to = \"@rpath/../../../../SDL2.framework/Versions/A/SDL2\"\n smpeg2_path = (\"{}/Versions/A/Frameworks/smpeg2.framework\"\n \"/Versions/A/smpeg2\").format(f_path)\n output = getoutput((\"otool -L '{}'\").format(smpeg2_path)).decode('utf-8')\n if \"@executable_path\" not in output:\n return\n\n print(\"WARNING: Your SDL2_mixer version is invalid\")\n print(\"WARNING: The smpeg2 framework embedded in SDL2_mixer contains a\")\n print(\"WARNING: reference to @executable_path that will fail the\")\n print(\"WARNING: execution of your application.\")\n print(\"WARNING: We are going to change:\")\n print(\"WARNING: from: {}\".format(rpath_from))\n print(\"WARNING: to: {}\".format(rpath_to))\n getoutput(\"install_name_tool -change {} {} {}\".format(\n rpath_from, rpath_to, smpeg2_path))\n\n output = getoutput((\"otool -L '{}'\").format(smpeg2_path))\n if b\"@executable_path\" not in output:\n print(\"WARNING: Change successfully applied!\")\n print(\"WARNING: You'll never see this message again.\")\n else:\n print(\"WARNING: Unable to apply the changes, sorry.\")\n\n\n# -----------------------------------------------------------------------------\n# extract version (simulate doc generation, kivy will be not imported)\nenviron['KIVY_DOC_INCLUDE'] = '1'\nimport kivy\n\n# extra build commands go in the cmdclass dict {'command-name': CommandClass}\n# see tools.packaging.{platform}.build.py for custom build commands for\n# portable packages. Also e.g. 
we use build_ext command from cython if its\n# installed for c extensions.\nfrom kivy.tools.packaging.factory import FactoryBuild\ncmdclass = {\n 'build_factory': FactoryBuild,\n 'build_ext': KivyBuildExt}\n\ntry:\n # add build rules for portable packages to cmdclass\n if platform == 'win32':\n from kivy.tools.packaging.win32.build import WindowsPortableBuild\n cmdclass['build_portable'] = WindowsPortableBuild\n elif platform == 'darwin':\n from kivy.tools.packaging.osx.build import OSXPortableBuild\n cmdclass['build_portable'] = OSXPortableBuild\nexcept ImportError:\n print('User distribution detected, avoid portable command.')\n\n# Detect which opengl version headers to use\nif platform in ('android', 'darwin', 'ios', 'rpi', 'mali', 'vc'):\n c_options['use_opengl_es2'] = True\nelif c_options['use_opengl_es2'] is None:\n c_options['use_opengl_es2'] = \\\n environ.get('KIVY_GRAPHICS', '').lower() == 'gles'\n\nprint('Using this graphics system: {}'.format(\n ['OpenGL', 'OpenGL ES 2'][int(c_options['use_opengl_es2'] or False)]))\n\n# check if we are in a kivy-ios build\nif platform == 'ios':\n print('Kivy-IOS project environment detect, use it.')\n print('Kivy-IOS project located at {0}'.format(kivy_ios_root))\n c_options['use_ios'] = True\n c_options['use_sdl2'] = True\n\nelif platform == 'android':\n c_options['use_android'] = True\n\nelif platform == 'darwin':\n if c_options['use_osx_frameworks']:\n if osx_arch == \"i386\":\n print(\"Warning: building with frameworks fail on i386\")\n else:\n print(\"OSX framework used, force to x86_64 only\")\n environ[\"ARCHFLAGS\"] = environ.get(\"ARCHFLAGS\", \"-arch x86_64\")\n print(\"OSX ARCHFLAGS are: {}\".format(environ[\"ARCHFLAGS\"]))\n\n# detect gstreamer, only on desktop\n# works if we forced the options or in autodetection\nif platform not in ('ios', 'android') and (c_options['use_gstreamer']\n in (None, True)):\n gstreamer_valid = False\n if c_options['use_osx_frameworks'] and platform == 'darwin':\n # check the existence of frameworks\n f_path = '/Library/Frameworks/GStreamer.framework'\n if not exists(f_path):\n c_options['use_gstreamer'] = False\n print('GStreamer framework not found, fallback on pkg-config')\n else:\n print('GStreamer framework found')\n gstreamer_valid = True\n c_options['use_gstreamer'] = True\n gst_flags = {\n 'extra_link_args': [\n '-F/Library/Frameworks',\n '-Xlinker', '-rpath',\n '-Xlinker', '/Library/Frameworks',\n '-Xlinker', '-headerpad',\n '-Xlinker', '190',\n '-framework', 'GStreamer'],\n 'include_dirs': [join(f_path, 'Headers')]}\n\n if not gstreamer_valid:\n # use pkg-config approach instead\n gst_flags = pkgconfig('gstreamer-1.0')\n if 'libraries' in gst_flags:\n print('GStreamer found via pkg-config')\n c_options['use_gstreamer'] = True\n\n\n# detect SDL2, only on desktop and iOS, or android if explicitly enabled\n# works if we forced the options or in autodetection\nsdl2_flags = {}\nif c_options['use_sdl2'] or (\n platform not in ('android',) and c_options['use_sdl2'] is None):\n\n sdl2_valid = False\n if c_options['use_osx_frameworks'] and platform == 'darwin':\n # check the existence of frameworks\n sdl2_valid = True\n sdl2_flags = {\n 'extra_link_args': [\n '-F/Library/Frameworks',\n '-Xlinker', '-rpath',\n '-Xlinker', '/Library/Frameworks',\n '-Xlinker', '-headerpad',\n '-Xlinker', '190'],\n 'include_dirs': [],\n 'extra_compile_args': ['-F/Library/Frameworks']\n }\n for name in ('SDL2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer'):\n f_path = '/Library/Frameworks/{}.framework'.format(name)\n if not 
exists(f_path):\n print('Missing framework {}'.format(f_path))\n sdl2_valid = False\n continue\n sdl2_flags['extra_link_args'] += ['-framework', name]\n sdl2_flags['include_dirs'] += [join(f_path, 'Headers')]\n print('Found sdl2 frameworks: {}'.format(f_path))\n if name == 'SDL2_mixer':\n _check_and_fix_sdl2_mixer(f_path)\n\n if not sdl2_valid:\n c_options['use_sdl2'] = False\n print('SDL2 frameworks not found, fallback on pkg-config')\n else:\n c_options['use_sdl2'] = True\n print('Activate SDL2 compilation')\n\n if not sdl2_valid and platform != \"ios\":\n # use pkg-config approach instead\n sdl2_flags = pkgconfig('sdl2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer')\n if 'libraries' in sdl2_flags:\n print('SDL2 found via pkg-config')\n c_options['use_sdl2'] = True\n\n\n# -----------------------------------------------------------------------------\n# declare flags\n\n\ndef get_modulename_from_file(filename):\n filename = filename.replace(sep, '/')\n pyx = '.'.join(filename.split('.')[:-1])\n pyxl = pyx.split('/')\n while pyxl[0] != 'kivy':\n pyxl.pop(0)\n if pyxl[1] == 'kivy':\n pyxl.pop(0)\n return '.'.join(pyxl)\n\n\ndef expand(root, *args):\n return join(root, 'kivy', *args)\n\n\nclass CythonExtension(Extension):\n\n def __init__(self, *args, **kwargs):\n Extension.__init__(self, *args, **kwargs)\n self.cython_directives = {\n 'c_string_encoding': 'utf-8',\n 'profile': 'USE_PROFILE' in environ,\n 'embedsignature': 'USE_EMBEDSIGNATURE' in environ}\n # XXX with pip, setuptools is imported before distutils, and change\n # our pyx to c, then, cythonize doesn't happen. So force again our\n # sources\n self.sources = args[1]\n\n\ndef merge(d1, *args):\n d1 = deepcopy(d1)\n for d2 in args:\n for key, value in d2.items():\n value = deepcopy(value)\n if key in d1:\n d1[key].extend(value)\n else:\n d1[key] = value\n return d1\n\n\ndef determine_base_flags():\n flags = {\n 'libraries': [],\n 'include_dirs': [join(src_path, 'kivy', 'include')],\n 'library_dirs': [],\n 'extra_link_args': [],\n 'extra_compile_args': []}\n if c_options['use_ios']:\n sysroot = environ.get('IOSSDKROOT', environ.get('SDKROOT'))\n if not sysroot:\n raise Exception('IOSSDKROOT is not set')\n flags['include_dirs'] += [sysroot]\n flags['extra_compile_args'] += ['-isysroot', sysroot]\n flags['extra_link_args'] += ['-isysroot', sysroot]\n elif platform.startswith('freebsd'):\n flags['include_dirs'] += [join(\n environ.get('LOCALBASE', '/usr/local'), 'include')]\n flags['library_dirs'] += [join(\n environ.get('LOCALBASE', '/usr/local'), 'lib')]\n elif platform == 'darwin':\n v = os.uname()\n if v[2] >= '13.0.0':\n # use xcode-select to search on the right Xcode path\n # XXX use the best SDK available instead of a specific one\n import platform as _platform\n xcode_dev = getoutput('xcode-select -p').splitlines()[0]\n sdk_mac_ver = '.'.join(_platform.mac_ver()[0].split('.')[:2])\n print('Xcode detected at {}, and using OS X{} sdk'.format(\n xcode_dev, sdk_mac_ver))\n sysroot = join(\n xcode_dev.decode('utf-8'),\n 'Platforms/MacOSX.platform/Developer/SDKs',\n 'MacOSX{}.sdk'.format(sdk_mac_ver),\n 'System/Library/Frameworks')\n else:\n sysroot = ('/System/Library/Frameworks/'\n 'ApplicationServices.framework/Frameworks')\n flags['extra_compile_args'] += ['-F%s' % sysroot]\n flags['extra_link_args'] += ['-F%s' % sysroot]\n elif platform == 'win32':\n flags['include_dirs'] += [get_python_inc(prefix=sys.prefix)]\n flags['library_dirs'] += [join(sys.prefix, \"libs\")]\n return flags\n\n\ndef determine_gl_flags():\n kivy_graphics_include = 
join(src_path, 'kivy', 'include')\n flags = {'include_dirs': [kivy_graphics_include], 'libraries': []}\n base_flags = {'include_dirs': [kivy_graphics_include], 'libraries': []}\n if c_options['use_opengl_mock']:\n return flags, base_flags\n if platform == 'win32':\n flags['libraries'] = ['opengl32', 'glew32']\n elif platform == 'ios':\n flags['libraries'] = ['GLESv2']\n flags['extra_link_args'] = ['-framework', 'OpenGLES']\n elif platform == 'darwin':\n flags['extra_link_args'] = ['-framework', 'OpenGL', '-arch', osx_arch]\n flags['extra_compile_args'] = ['-arch', osx_arch]\n elif platform.startswith('freebsd'):\n flags['libraries'] = ['GL']\n elif platform.startswith('openbsd'):\n flags['include_dirs'] = ['/usr/X11R6/include']\n flags['library_dirs'] = ['/usr/X11R6/lib']\n flags['libraries'] = ['GL']\n elif platform == 'android':\n flags['include_dirs'] = [join(ndkplatform, 'usr', 'include')]\n flags['library_dirs'] = [join(ndkplatform, 'usr', 'lib')]\n flags['libraries'] = ['GLESv2']\n elif platform == 'rpi':\n flags['include_dirs'] = [\n '/opt/vc/include',\n '/opt/vc/include/interface/vcos/pthreads',\n '/opt/vc/include/interface/vmcs_host/linux']\n flags['library_dirs'] = ['/opt/vc/lib']\n brcm_lib_files = (\n '/opt/vc/lib/libbrcmEGL.so',\n '/opt/vc/lib/libbrcmGLESv2.so')\n if all((exists(lib) for lib in brcm_lib_files)):\n print(\n 'Found brcmEGL and brcmGLES library files'\n 'for rpi platform at /opt/vc/lib/')\n gl_libs = ['brcmEGL', 'brcmGLESv2']\n else:\n print(\n 'Failed to find brcmEGL and brcmGLESv2 library files'\n 'for rpi platform, falling back to EGL and GLESv2.')\n gl_libs = ['EGL', 'GLESv2']\n flags['libraries'] = ['bcm_host'] + gl_libs\n elif platform in ['mali', 'vc']:\n flags['include_dirs'] = ['/usr/include/']\n flags['library_dirs'] = ['/usr/lib/arm-linux-gnueabihf']\n flags['libraries'] = ['GLESv2']\n c_options['use_x11'] = True\n c_options['use_egl'] = True\n else:\n flags['libraries'] = ['GL']\n return flags, base_flags\n\n\ndef determine_sdl2():\n flags = {}\n if not c_options['use_sdl2']:\n return flags\n\n sdl2_path = environ.get('KIVY_SDL2_PATH', None)\n\n if sdl2_flags and not sdl2_path and platform == 'darwin':\n return sdl2_flags\n\n # no pkgconfig info, or we want to use a specific sdl2 path, so perform\n # manual configuration\n flags['libraries'] = ['SDL2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer']\n split_chr = ';' if platform == 'win32' else ':'\n sdl2_paths = sdl2_path.split(split_chr) if sdl2_path else []\n\n if not sdl2_paths:\n sdl_inc = join(sys.prefix, 'include', 'SDL2')\n if isdir(sdl_inc):\n sdl2_paths = [sdl_inc]\n sdl2_paths.extend(['/usr/local/include/SDL2', '/usr/include/SDL2'])\n\n flags['include_dirs'] = sdl2_paths\n flags['extra_link_args'] = []\n flags['extra_compile_args'] = []\n flags['library_dirs'] = (\n sdl2_paths if sdl2_paths else\n ['/usr/local/lib/'])\n\n if sdl2_flags:\n flags = merge(flags, sdl2_flags)\n\n # ensure headers for all the SDL2 and sub libraries are available\n libs_to_check = ['SDL', 'SDL_mixer', 'SDL_ttf', 'SDL_image']\n can_compile = True\n for lib in libs_to_check:\n found = False\n for d in flags['include_dirs']:\n fn = join(d, '{}.h'.format(lib))\n if exists(fn):\n found = True\n print('SDL2: found {} header at {}'.format(lib, fn))\n break\n\n if not found:\n print('SDL2: missing sub library {}'.format(lib))\n can_compile = False\n\n if not can_compile:\n c_options['use_sdl2'] = False\n return {}\n\n return flags\n\n\nbase_flags = determine_base_flags()\ngl_flags, gl_flags_base = determine_gl_flags()\n\n# 
-----------------------------------------------------------------------------\n# sources to compile\n# all the dependencies have been found manually with:\n# grep -inr -E '(cimport|include)' kivy/graphics/context_instructions.{pxd,pyx}\ngraphics_dependencies = {\n 'buffer.pyx': ['common.pxi'],\n 'context.pxd': ['instructions.pxd', 'texture.pxd', 'vbo.pxd', 'cgl.pxd'],\n 'cgl.pxd': ['common.pxi', 'config.pxi', 'gl_redirect.h'],\n 'compiler.pxd': ['instructions.pxd'],\n 'compiler.pyx': ['context_instructions.pxd'],\n 'cgl.pyx': ['cgl.pxd'],\n 'cgl_mock.pyx': ['cgl.pxd'],\n 'cgl_sdl2.pyx': ['cgl.pxd'],\n 'cgl_gl.pyx': ['cgl.pxd'],\n 'cgl_glew.pyx': ['cgl.pxd'],\n 'context_instructions.pxd': [\n 'transformation.pxd', 'instructions.pxd', 'texture.pxd'],\n 'fbo.pxd': ['cgl.pxd', 'instructions.pxd', 'texture.pxd'],\n 'fbo.pyx': [\n 'config.pxi', 'opcodes.pxi', 'transformation.pxd', 'context.pxd'],\n 'gl_instructions.pyx': [\n 'config.pxi', 'opcodes.pxi', 'cgl.pxd', 'instructions.pxd'],\n 'instructions.pxd': [\n 'vbo.pxd', 'context_instructions.pxd', 'compiler.pxd', 'shader.pxd',\n 'texture.pxd', '../_event.pxd'],\n 'instructions.pyx': [\n 'config.pxi', 'opcodes.pxi', 'cgl.pxd',\n 'context.pxd', 'common.pxi', 'vertex.pxd', 'transformation.pxd'],\n 'opengl.pyx': [\n 'config.pxi', 'common.pxi', 'cgl.pxd', 'gl_redirect.h'],\n 'opengl_utils.pyx': [\n 'opengl_utils_def.pxi', 'cgl.pxd', ],\n 'shader.pxd': ['cgl.pxd', 'transformation.pxd', 'vertex.pxd'],\n 'shader.pyx': [\n 'config.pxi', 'common.pxi', 'cgl.pxd',\n 'vertex.pxd', 'transformation.pxd', 'context.pxd',\n 'gl_debug_logger.pxi'],\n 'stencil_instructions.pxd': ['instructions.pxd'],\n 'stencil_instructions.pyx': [\n 'config.pxi', 'opcodes.pxi', 'cgl.pxd',\n 'gl_debug_logger.pxi'],\n 'scissor_instructions.pyx': [\n 'config.pxi', 'opcodes.pxi', 'cgl.pxd'],\n 'svg.pyx': ['config.pxi', 'common.pxi', 'texture.pxd', 'instructions.pxd',\n 'vertex_instructions.pxd', 'tesselator.pxd'],\n 'texture.pxd': ['cgl.pxd'],\n 'texture.pyx': [\n 'config.pxi', 'common.pxi', 'opengl_utils_def.pxi', 'context.pxd',\n 'cgl.pxd', 'opengl_utils.pxd',\n 'img_tools.pxi', 'gl_debug_logger.pxi'],\n 'vbo.pxd': ['buffer.pxd', 'cgl.pxd', 'vertex.pxd'],\n 'vbo.pyx': [\n 'config.pxi', 'common.pxi', 'context.pxd',\n 'instructions.pxd', 'shader.pxd', 'gl_debug_logger.pxi'],\n 'vertex.pxd': ['cgl.pxd'],\n 'vertex.pyx': ['config.pxi', 'common.pxi'],\n 'vertex_instructions.pyx': [\n 'config.pxi', 'common.pxi', 'vbo.pxd', 'vertex.pxd',\n 'instructions.pxd', 'vertex_instructions.pxd',\n 'cgl.pxd', 'texture.pxd', 'vertex_instructions_line.pxi'],\n 'vertex_instructions_line.pxi': ['stencil_instructions.pxd']}\n\nsources = {\n '_event.pyx': merge(base_flags, {'depends': ['properties.pxd']}),\n '_clock.pyx': {},\n 'weakproxy.pyx': {},\n 'properties.pyx': merge(base_flags, {'depends': ['_event.pxd']}),\n 'graphics/buffer.pyx': merge(base_flags, gl_flags_base),\n 'graphics/context.pyx': merge(base_flags, gl_flags_base),\n 'graphics/compiler.pyx': merge(base_flags, gl_flags_base),\n 'graphics/context_instructions.pyx': merge(base_flags, gl_flags_base),\n 'graphics/fbo.pyx': merge(base_flags, gl_flags_base),\n 'graphics/gl_instructions.pyx': merge(base_flags, gl_flags_base),\n 'graphics/instructions.pyx': merge(base_flags, gl_flags_base),\n 'graphics/opengl.pyx': merge(base_flags, gl_flags_base),\n 'graphics/opengl_utils.pyx': merge(base_flags, gl_flags_base),\n 'graphics/shader.pyx': merge(base_flags, gl_flags_base),\n 'graphics/stencil_instructions.pyx': merge(base_flags, gl_flags_base),\n 
'graphics/scissor_instructions.pyx': merge(base_flags, gl_flags_base),\n 'graphics/texture.pyx': merge(base_flags, gl_flags_base),\n 'graphics/transformation.pyx': merge(base_flags, gl_flags_base),\n 'graphics/vbo.pyx': merge(base_flags, gl_flags_base),\n 'graphics/vertex.pyx': merge(base_flags, gl_flags_base),\n 'graphics/vertex_instructions.pyx': merge(base_flags, gl_flags_base),\n 'graphics/cgl.pyx': merge(base_flags, gl_flags_base),\n 'graphics/cgl_backend/cgl_mock.pyx': merge(base_flags, gl_flags_base),\n 'graphics/cgl_backend/cgl_gl.pyx': merge(base_flags, gl_flags),\n 'graphics/cgl_backend/cgl_glew.pyx': merge(base_flags, gl_flags),\n 'graphics/cgl_backend/cgl_sdl2.pyx': merge(base_flags, gl_flags_base),\n 'graphics/cgl_backend/cgl_debug.pyx': merge(base_flags, gl_flags_base),\n 'core/text/text_layout.pyx': base_flags,\n 'core/window/window_info.pyx': base_flags,\n 'graphics/tesselator.pyx': merge(base_flags, {\n 'include_dirs': ['kivy/lib/libtess2/Include'],\n 'c_depends': [\n 'lib/libtess2/Source/bucketalloc.c',\n 'lib/libtess2/Source/dict.c',\n 'lib/libtess2/Source/geom.c',\n 'lib/libtess2/Source/mesh.c',\n 'lib/libtess2/Source/priorityq.c',\n 'lib/libtess2/Source/sweep.c',\n 'lib/libtess2/Source/tess.c'\n ]\n }),\n 'graphics/svg.pyx': merge(base_flags, gl_flags_base)\n}\n\nif c_options[\"use_sdl2\"]:\n sdl2_flags = determine_sdl2()\n\nif c_options['use_sdl2'] and sdl2_flags:\n sources['graphics/cgl_backend/cgl_sdl2.pyx'] = merge(\n sources['graphics/cgl_backend/cgl_sdl2.pyx'], sdl2_flags)\n sdl2_depends = {'depends': ['lib/sdl2.pxi']}\n for source_file in ('core/window/_window_sdl2.pyx',\n 'core/image/_img_sdl2.pyx',\n 'core/text/_text_sdl2.pyx',\n 'core/audio/audio_sdl2.pyx',\n 'core/clipboard/_clipboard_sdl2.pyx'):\n sources[source_file] = merge(\n base_flags, sdl2_flags, sdl2_depends)\n\nif c_options['use_pangoft2'] in (None, True) and platform not in (\n 'android', 'ios', 'windows'):\n pango_flags = pkgconfig('pangoft2')\n if pango_flags and 'libraries' in pango_flags:\n print('Pango: pangoft2 found via pkg-config')\n c_options['use_pangoft2'] = True\n pango_depends = {'depends': ['lib/pangoft2.pxi',\n 'lib/pangoft2.h']}\n sources['core/text/_text_pango.pyx'] = merge(\n base_flags, pango_flags, pango_depends)\n\nif platform in ('darwin', 'ios'):\n # activate ImageIO provider for our core image\n if platform == 'ios':\n osx_flags = {'extra_link_args': [\n '-framework', 'Foundation',\n '-framework', 'UIKit',\n '-framework', 'AudioToolbox',\n '-framework', 'CoreGraphics',\n '-framework', 'QuartzCore',\n '-framework', 'ImageIO',\n '-framework', 'Accelerate']}\n else:\n osx_flags = {'extra_link_args': [\n '-framework', 'ApplicationServices']}\n sources['core/image/img_imageio.pyx'] = merge(\n base_flags, osx_flags)\n\nif c_options['use_avfoundation']:\n import platform as _platform\n mac_ver = [int(x) for x in _platform.mac_ver()[0].split('.')[:2]]\n if mac_ver >= [10, 7]:\n osx_flags = {\n 'extra_link_args': ['-framework', 'AVFoundation'],\n 'extra_compile_args': ['-ObjC++'],\n 'depends': ['core/camera/camera_avfoundation_implem.m']}\n sources['core/camera/camera_avfoundation.pyx'] = merge(\n base_flags, osx_flags)\n else:\n print('AVFoundation cannot be used, OSX >= 10.7 is required')\n\nif c_options['use_rpi']:\n sources['lib/vidcore_lite/egl.pyx'] = merge(\n base_flags, gl_flags)\n sources['lib/vidcore_lite/bcm.pyx'] = merge(\n base_flags, gl_flags)\n\nif c_options['use_x11']:\n libs = ['Xrender', 'X11']\n if c_options['use_egl']:\n libs += ['EGL']\n else:\n libs += ['GL']\n 
sources['core/window/window_x11.pyx'] = merge(\n base_flags, gl_flags, {\n # FIXME add an option to depend on them but not compile them\n # cause keytab is included in core, and core is included in\n # window_x11\n #\n # 'depends': [\n # 'core/window/window_x11_keytab.c',\n # 'core/window/window_x11_core.c'],\n 'libraries': libs})\n\nif c_options['use_gstreamer']:\n sources['lib/gstplayer/_gstplayer.pyx'] = merge(\n base_flags, gst_flags, {\n 'depends': ['lib/gstplayer/_gstplayer.h']})\n\n\n# -----------------------------------------------------------------------------\n# extension modules\n\ndef get_dependencies(name, deps=None):\n if deps is None:\n deps = []\n for dep in graphics_dependencies.get(name, []):\n if dep not in deps:\n deps.append(dep)\n get_dependencies(dep, deps)\n return deps\n\n\ndef resolve_dependencies(fn, depends):\n fn = basename(fn)\n deps = []\n get_dependencies(fn, deps)\n get_dependencies(fn.replace('.pyx', '.pxd'), deps)\n\n deps_final = []\n paths_to_test = ['graphics', 'include']\n for dep in deps:\n found = False\n for path in paths_to_test:\n filename = expand(src_path, path, dep)\n if exists(filename):\n deps_final.append(filename)\n found = True\n break\n if not found:\n print('ERROR: Dependency for {} not resolved: {}'.format(\n fn, dep\n ))\n\n return deps_final\n\n\ndef get_extensions_from_sources(sources):\n ext_modules = []\n if environ.get('KIVY_FAKE_BUILDEXT'):\n print('Fake build_ext asked, will generate only .h/.c')\n return ext_modules\n for pyx, flags in sources.items():\n is_graphics = pyx.startswith('graphics')\n pyx = expand(src_path, pyx)\n depends = [expand(src_path, x) for x in flags.pop('depends', [])]\n c_depends = [expand(src_path, x) for x in flags.pop('c_depends', [])]\n if not have_cython:\n pyx = '%s.c' % pyx[:-4]\n if is_graphics:\n depends = resolve_dependencies(pyx, depends)\n f_depends = [x for x in depends if x.rsplit('.', 1)[-1] in (\n 'c', 'cpp', 'm')]\n module_name = get_modulename_from_file(pyx)\n flags_clean = {'depends': depends}\n for key, value in flags.items():\n if len(value):\n flags_clean[key] = value\n ext_modules.append(CythonExtension(\n module_name, [pyx] + f_depends + c_depends, **flags_clean))\n return ext_modules\n\n\next_modules = get_extensions_from_sources(sources)\n\n\n# -----------------------------------------------------------------------------\n# automatically detect data files\nsplit_examples = int(environ.get('KIVY_SPLIT_EXAMPLES', '0'))\ndata_file_prefix = 'share/kivy-'\nexamples = {}\nexamples_allowed_ext = ('readme', 'py', 'wav', 'png', 'jpg', 'svg', 'json',\n 'avi', 'gif', 'txt', 'ttf', 'obj', 'mtl', 'kv', 'mpg',\n 'glsl', 'zip')\nfor root, subFolders, files in walk('examples'):\n for fn in files:\n ext = fn.split('.')[-1].lower()\n if ext not in examples_allowed_ext:\n continue\n filename = join(root, fn)\n directory = '%s%s' % (data_file_prefix, dirname(filename))\n if directory not in examples:\n examples[directory] = []\n examples[directory].append(filename)\n\nbinary_deps = []\nbinary_deps_path = join(src_path, 'kivy', 'binary_deps')\nif isdir(binary_deps_path):\n for root, dirnames, filenames in walk(binary_deps_path):\n for fname in filenames:\n binary_deps.append(\n join(root.replace(binary_deps_path, 'binary_deps'), fname))\n\n# -----------------------------------------------------------------------------\n# setup !\nif not build_examples:\n setup(\n name='Kivy',\n version=get_version(),\n author='Kivy Team and other contributors',\n author_email='[email protected]',\n 
url='http://kivy.org',\n license='MIT',\n description=(\n 'A software library for rapid development of '\n 'hardware-accelerated multitouch applications.'),\n long_description=get_description(),\n ext_modules=ext_modules,\n cmdclass=cmdclass,\n packages=[\n 'kivy',\n 'kivy.core',\n 'kivy.core.audio',\n 'kivy.core.camera',\n 'kivy.core.clipboard',\n 'kivy.core.image',\n 'kivy.core.gl',\n 'kivy.core.spelling',\n 'kivy.core.text',\n 'kivy.core.video',\n 'kivy.core.window',\n 'kivy.deps',\n 'kivy.effects',\n 'kivy.graphics',\n 'kivy.graphics.cgl_backend',\n 'kivy.garden',\n 'kivy.input',\n 'kivy.input.postproc',\n 'kivy.input.providers',\n 'kivy.lang',\n 'kivy.lib',\n 'kivy.lib.gstplayer',\n 'kivy.lib.vidcore_lite',\n 'kivy.modules',\n 'kivy.network',\n 'kivy.storage',\n 'kivy.tests',\n 'kivy.tools',\n 'kivy.tools.packaging',\n 'kivy.tools.packaging.pyinstaller_hooks',\n 'kivy.tools.highlight',\n 'kivy.extras',\n 'kivy.uix',\n 'kivy.uix.behaviors',\n 'kivy.uix.recycleview',\n ],\n package_dir={'kivy': 'kivy'},\n package_data={'kivy': [\n 'setupconfig.py',\n '*.pxd',\n '*.pxi',\n 'core/text/*.pxd',\n 'core/text/*.pxi',\n 'core/window/*.pxi',\n 'core/window/*.pxd',\n 'graphics/*.pxd',\n 'graphics/*.pxi',\n 'graphics/*.h',\n 'include/*',\n 'lib/vidcore_lite/*.pxd',\n 'lib/vidcore_lite/*.pxi',\n 'data/*.kv',\n 'data/*.json',\n 'data/fonts/*.ttf',\n 'data/images/*.png',\n 'data/images/*.jpg',\n 'data/images/*.gif',\n 'data/images/*.atlas',\n 'data/keyboards/*.json',\n 'data/logo/*.png',\n 'data/glsl/*.png',\n 'data/glsl/*.vs',\n 'data/glsl/*.fs',\n 'tests/*.zip',\n 'tests/*.kv',\n 'tests/*.png',\n 'tests/*.ttf',\n 'tests/*.ogg',\n 'tools/gles_compat/*',\n 'tools/highlight/*',\n 'tools/packaging/README.txt',\n 'tools/packaging/win32/kivy.bat',\n 'tools/packaging/win32/kivyenv.sh',\n 'tools/packaging/win32/README.txt',\n 'tools/packaging/osx/Info.plist',\n 'tools/packaging/osx/InfoPlist.strings',\n 'tools/packaging/osx/kivy.sh',\n 'tools/pep8checker/*',\n 'tools/theming/defaulttheme/*',\n ] + binary_deps},\n data_files=[] if split_examples else list(examples.items()),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: MacOS X',\n 'Environment :: Win32 (MS Windows)',\n 'Environment :: X11 Applications',\n 'Intended Audience :: Developers',\n 'Intended Audience :: End Users/Desktop',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX :: BSD :: FreeBSD',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Artistic Software',\n 'Topic :: Games/Entertainment',\n 'Topic :: Multimedia :: Graphics :: 3D Rendering',\n 'Topic :: Multimedia :: Graphics :: Capture :: Digital Camera',\n 'Topic :: Multimedia :: Graphics :: Presentation',\n 'Topic :: Multimedia :: Graphics :: Viewers',\n 'Topic :: Multimedia :: Sound/Audio :: Players :: MP3',\n 'Topic :: Multimedia :: Video :: Display',\n 'Topic :: Scientific/Engineering :: Human Machine Interfaces',\n 'Topic :: Scientific/Engineering :: Visualization',\n ('Topic :: Software Development :: Libraries :: '\n 'Application Frameworks'),\n 'Topic :: Software Development :: User Interfaces'],\n dependency_links=[\n 
'https://github.com/kivy-garden/garden/archive/master.zip'],\n install_requires=[\n 'Kivy-Garden>=0.1.4', 'docutils', 'pygments'\n ],\n extra_requires={\n 'tuio': ['oscpy']\n },\n setup_requires=[\n 'cython>=' + MIN_CYTHON_STRING\n ] if not skip_cython else [])\nelse:\n setup(\n name='Kivy-examples',\n version=get_version(),\n author='Kivy Team and other contributors',\n author_email='[email protected]',\n url='http://kivy.org',\n license='MIT',\n description=('Kivy examples.'),\n long_description=get_description(),\n data_files=list(examples.items()))\n",
"path": "setup.py"
}
] | [
{
"content": "#\n# Kivy - Cross-platform UI framework\n# https://kivy.org/\n#\nfrom __future__ import print_function\n\nimport sys\nbuild_examples = False\nif \"--build_examples\" in sys.argv:\n build_examples = True\n sys.argv.remove(\"--build_examples\")\n\nfrom copy import deepcopy\nimport os\nfrom os.path import join, dirname, sep, exists, basename, isdir\nfrom os import walk, environ, makedirs\nfrom distutils.version import LooseVersion\nfrom distutils.sysconfig import get_python_inc\nfrom collections import OrderedDict\nfrom time import sleep, time\nfrom subprocess import check_output, CalledProcessError\nfrom datetime import datetime\n\nif environ.get('KIVY_USE_SETUPTOOLS'):\n from setuptools import setup, Extension\n print('Using setuptools')\nelse:\n from distutils.core import setup\n from distutils.extension import Extension\n print('Using distutils')\n\n\nPY3 = sys.version > '3'\n\nif PY3: # fix error with py3's LooseVersion comparisons\n def ver_equal(self, other):\n return self.version == other\n\n LooseVersion.__eq__ = ver_equal\n\n\ndef get_description():\n with open(join(dirname(__file__), 'README.md'), 'rb') as fileh:\n return fileh.read().decode(\"utf8\")\n\n\ndef get_version(filename='kivy/version.py'):\n VERSION = kivy.__version__\n epoch = int(environ.get('SOURCE_DATE_EPOCH', time()))\n DATE = datetime.utcfromtimestamp(epoch).strftime('%Y%m%d')\n try:\n GIT_REVISION = check_output(\n ['git', 'rev-parse', 'HEAD']\n ).strip().decode('ascii')\n except (CalledProcessError, OSError, IOError) as e:\n # CalledProcessError has no errno\n errno = getattr(e, 'errno', None)\n if errno != 2 and 'CalledProcessError' not in repr(e):\n raise\n GIT_REVISION = \"Unknown\"\n\n cnt = (\n \"# THIS FILE IS GENERATED FROM KIVY SETUP.PY\\n\"\n \"__version__ = '%(version)s'\\n\"\n \"__hash__ = '%(hash)s'\\n\"\n \"__date__ = '%(date)s'\\n\"\n )\n\n with open(filename, 'w') as f:\n f.write(cnt % {\n 'version': VERSION,\n 'hash': GIT_REVISION,\n 'date': DATE\n })\n return VERSION\n\n\nMIN_CYTHON_STRING = '0.24'\nMIN_CYTHON_VERSION = LooseVersion(MIN_CYTHON_STRING)\nMAX_CYTHON_STRING = '0.28.5'\nMAX_CYTHON_VERSION = LooseVersion(MAX_CYTHON_STRING)\nCYTHON_UNSUPPORTED = (\n # ref https://github.com/cython/cython/issues/1968\n '0.27', '0.27.2'\n)\n\n\ndef getoutput(cmd, env=None):\n import subprocess\n p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, env=env)\n p.wait()\n if p.returncode: # if not returncode == 0\n print('WARNING: A problem occurred while running {0} (code {1})\\n'\n .format(cmd, p.returncode))\n stderr_content = p.stderr.read()\n if stderr_content:\n print('{0}\\n'.format(stderr_content))\n return \"\"\n return p.stdout.read()\n\n\ndef pkgconfig(*packages, **kw):\n flag_map = {'-I': 'include_dirs', '-L': 'library_dirs', '-l': 'libraries'}\n lenviron = None\n pconfig = join(sys.prefix, 'libs', 'pkgconfig')\n\n if isdir(pconfig):\n lenviron = environ.copy()\n lenviron['PKG_CONFIG_PATH'] = '{};{}'.format(\n environ.get('PKG_CONFIG_PATH', ''), pconfig)\n cmd = 'pkg-config --libs --cflags {}'.format(' '.join(packages))\n results = getoutput(cmd, lenviron).split()\n for token in results:\n ext = token[:2].decode('utf-8')\n flag = flag_map.get(ext)\n if not flag:\n continue\n kw.setdefault(flag, []).append(token[2:].decode('utf-8'))\n return kw\n\n\n# -----------------------------------------------------------------------------\n# Determine on which platform we are\n\nplatform = sys.platform\n\n# Detect 32/64bit for OSX 
(http://stackoverflow.com/a/1405971/798575)\nif sys.platform == 'darwin':\n if sys.maxsize > 2 ** 32:\n osx_arch = 'x86_64'\n else:\n osx_arch = 'i386'\n\n# Detect Python for android project (http://github.com/kivy/python-for-android)\nndkplatform = environ.get('NDKPLATFORM')\nif ndkplatform is not None and environ.get('LIBLINK'):\n platform = 'android'\nkivy_ios_root = environ.get('KIVYIOSROOT', None)\nif kivy_ios_root is not None:\n platform = 'ios'\n# proprietary broadcom video core drivers\nif exists('/opt/vc/include/bcm_host.h'):\n platform = 'rpi'\n# use mesa video core drivers\nif environ.get('VIDEOCOREMESA', None):\n platform = 'vc'\nif exists('/usr/lib/arm-linux-gnueabihf/libMali.so'):\n platform = 'mali'\n\n# -----------------------------------------------------------------------------\n# Detect options\n#\nc_options = OrderedDict()\nc_options['use_rpi'] = platform == 'rpi'\nc_options['use_egl'] = False\nc_options['use_opengl_es2'] = None\nc_options['use_opengl_mock'] = environ.get('READTHEDOCS', None) == 'True'\nc_options['use_sdl2'] = None\nc_options['use_pangoft2'] = None\nc_options['use_ios'] = False\nc_options['use_android'] = False\nc_options['use_mesagl'] = False\nc_options['use_x11'] = False\nc_options['use_wayland'] = False\nc_options['use_gstreamer'] = None\nc_options['use_avfoundation'] = platform == 'darwin'\nc_options['use_osx_frameworks'] = platform == 'darwin'\nc_options['debug_gl'] = False\n\n# now check if environ is changing the default values\nfor key in list(c_options.keys()):\n ukey = key.upper()\n if ukey in environ:\n value = bool(int(environ[ukey]))\n print('Environ change {0} -> {1}'.format(key, value))\n c_options[key] = value\n\n\n# -----------------------------------------------------------------------------\n# Cython check\n# on python-for-android and kivy-ios, cython usage is external\n\ncython_unsupported_append = '''\n\n Please note that the following versions of Cython are not supported\n at all: {}\n'''.format(', '.join(map(str, CYTHON_UNSUPPORTED)))\n\ncython_min = '''\\\n This version of Cython is not compatible with Kivy. Please upgrade to\n at least version {0}, preferably the newest supported version {1}.\n\n If your platform provides a Cython package, make sure you have upgraded\n to the newest version. If the newest version available is still too low,\n please remove it and install the newest supported Cython via pip:\n\n pip install -I Cython=={1}{2}\\\n'''.format(MIN_CYTHON_STRING, MAX_CYTHON_STRING,\n cython_unsupported_append if CYTHON_UNSUPPORTED else '')\n\ncython_max = '''\\\n This version of Cython is untested with Kivy. While this version may\n work perfectly fine, it is possible that you may experience issues. If\n you do have issues, please downgrade to a supported version. It is\n best to use the newest supported version, {1}, but the minimum\n supported version is {0}.\n\n If your platform provides a Cython package, check if you can downgrade\n to a supported version. Otherwise, uninstall the platform package and\n install Cython via pip:\n\n pip install -I Cython=={1}{2}\\\n'''.format(MIN_CYTHON_STRING, MAX_CYTHON_STRING,\n cython_unsupported_append if CYTHON_UNSUPPORTED else '')\n\ncython_unsupported = '''\\\n This version of Cython suffers from known bugs and is unsupported.\n Please install the newest supported version, {1}, if possible, but\n the minimum supported version is {0}.\n\n If your platform provides a Cython package, check if you can install\n a supported version. 
Otherwise, uninstall the platform package and\n install Cython via pip:\n\n pip install -I Cython=={1}{2}\\\n'''.format(MIN_CYTHON_STRING, MAX_CYTHON_STRING,\n cython_unsupported_append)\n\nhave_cython = False\nskip_cython = False\nif platform in ('ios', 'android'):\n print('\\nCython check avoided.')\n skip_cython = True\nelse:\n try:\n # check for cython\n from Cython.Distutils import build_ext\n have_cython = True\n import Cython\n cy_version_str = Cython.__version__\n cy_ver = LooseVersion(cy_version_str)\n print('\\nDetected Cython version {}'.format(cy_version_str))\n if cy_ver < MIN_CYTHON_VERSION:\n print(cython_min)\n raise ImportError('Incompatible Cython Version')\n if cy_ver in CYTHON_UNSUPPORTED:\n print(cython_unsupported)\n raise ImportError('Incompatible Cython Version')\n if cy_ver > MAX_CYTHON_VERSION:\n print(cython_max)\n sleep(1)\n except ImportError:\n print(\"\\nCython is missing, it's required for compiling kivy !\\n\\n\")\n raise\n\nif not have_cython:\n from distutils.command.build_ext import build_ext\n\n# -----------------------------------------------------------------------------\n# Setup classes\n\n# the build path where kivy is being compiled\nsrc_path = build_path = dirname(__file__)\n\n\nclass KivyBuildExt(build_ext):\n\n def finalize_options(self):\n retval = build_ext.finalize_options(self)\n global build_path\n if (self.build_lib is not None and exists(self.build_lib) and\n not self.inplace):\n build_path = self.build_lib\n return retval\n\n def build_extensions(self):\n # build files\n config_h_fn = ('include', 'config.h')\n config_pxi_fn = ('include', 'config.pxi')\n config_py_fn = ('setupconfig.py', )\n\n # generate headers\n config_h = '// Autogenerated file for Kivy C configuration\\n'\n config_h += '#define __PY3 {0}\\n'.format(int(PY3))\n config_pxi = '# Autogenerated file for Kivy Cython configuration\\n'\n config_pxi += 'DEF PY3 = {0}\\n'.format(int(PY3))\n config_py = '# Autogenerated file for Kivy configuration\\n'\n config_py += 'PY3 = {0}\\n'.format(int(PY3))\n config_py += 'CYTHON_MIN = {0}\\nCYTHON_MAX = {1}\\n'.format(\n repr(MIN_CYTHON_STRING), repr(MAX_CYTHON_STRING))\n config_py += 'CYTHON_BAD = {0}\\n'.format(repr(', '.join(map(\n str, CYTHON_UNSUPPORTED))))\n\n # generate content\n print('Build configuration is:')\n for opt, value in c_options.items():\n value = int(bool(value))\n print(' * {0} = {1}'.format(opt, value))\n opt = opt.upper()\n config_h += '#define __{0} {1}\\n'.format(opt, value)\n config_pxi += 'DEF {0} = {1}\\n'.format(opt, value)\n config_py += '{0} = {1}\\n'.format(opt, value)\n debug = bool(self.debug)\n print(' * debug = {0}'.format(debug))\n\n config_pxi += 'DEF DEBUG = {0}\\n'.format(debug)\n config_py += 'DEBUG = {0}\\n'.format(debug)\n config_pxi += 'DEF PLATFORM = \"{0}\"\\n'.format(platform)\n config_py += 'PLATFORM = \"{0}\"\\n'.format(platform)\n for fn, content in (\n (config_h_fn, config_h), (config_pxi_fn, config_pxi),\n (config_py_fn, config_py)):\n build_fn = expand(build_path, *fn)\n if self.update_if_changed(build_fn, content):\n print('Updated {}'.format(build_fn))\n src_fn = expand(src_path, *fn)\n if src_fn != build_fn and self.update_if_changed(src_fn, content):\n print('Updated {}'.format(src_fn))\n\n c = self.compiler.compiler_type\n print('Detected compiler is {}'.format(c))\n if c != 'msvc':\n for e in self.extensions:\n e.extra_link_args += ['-lm']\n\n build_ext.build_extensions(self)\n\n def update_if_changed(self, fn, content):\n need_update = True\n if exists(fn):\n with open(fn) as 
fd:\n need_update = fd.read() != content\n if need_update:\n directory_name = dirname(fn)\n if not exists(directory_name):\n makedirs(directory_name)\n with open(fn, 'w') as fd:\n fd.write(content)\n return need_update\n\n\ndef _check_and_fix_sdl2_mixer(f_path):\n print(\"Check if SDL2_mixer smpeg2 have an @executable_path\")\n rpath_from = (\"@executable_path/../Frameworks/SDL2.framework\"\n \"/Versions/A/SDL2\")\n rpath_to = \"@rpath/../../../../SDL2.framework/Versions/A/SDL2\"\n smpeg2_path = (\"{}/Versions/A/Frameworks/smpeg2.framework\"\n \"/Versions/A/smpeg2\").format(f_path)\n output = getoutput((\"otool -L '{}'\").format(smpeg2_path)).decode('utf-8')\n if \"@executable_path\" not in output:\n return\n\n print(\"WARNING: Your SDL2_mixer version is invalid\")\n print(\"WARNING: The smpeg2 framework embedded in SDL2_mixer contains a\")\n print(\"WARNING: reference to @executable_path that will fail the\")\n print(\"WARNING: execution of your application.\")\n print(\"WARNING: We are going to change:\")\n print(\"WARNING: from: {}\".format(rpath_from))\n print(\"WARNING: to: {}\".format(rpath_to))\n getoutput(\"install_name_tool -change {} {} {}\".format(\n rpath_from, rpath_to, smpeg2_path))\n\n output = getoutput((\"otool -L '{}'\").format(smpeg2_path))\n if b\"@executable_path\" not in output:\n print(\"WARNING: Change successfully applied!\")\n print(\"WARNING: You'll never see this message again.\")\n else:\n print(\"WARNING: Unable to apply the changes, sorry.\")\n\n\n# -----------------------------------------------------------------------------\n# extract version (simulate doc generation, kivy will be not imported)\nenviron['KIVY_DOC_INCLUDE'] = '1'\nimport kivy\n\n# extra build commands go in the cmdclass dict {'command-name': CommandClass}\n# see tools.packaging.{platform}.build.py for custom build commands for\n# portable packages. Also e.g. 
we use build_ext command from cython if its\n# installed for c extensions.\nfrom kivy.tools.packaging.factory import FactoryBuild\ncmdclass = {\n 'build_factory': FactoryBuild,\n 'build_ext': KivyBuildExt}\n\ntry:\n # add build rules for portable packages to cmdclass\n if platform == 'win32':\n from kivy.tools.packaging.win32.build import WindowsPortableBuild\n cmdclass['build_portable'] = WindowsPortableBuild\n elif platform == 'darwin':\n from kivy.tools.packaging.osx.build import OSXPortableBuild\n cmdclass['build_portable'] = OSXPortableBuild\nexcept ImportError:\n print('User distribution detected, avoid portable command.')\n\n# Detect which opengl version headers to use\nif platform in ('android', 'darwin', 'ios', 'rpi', 'mali', 'vc'):\n c_options['use_opengl_es2'] = True\nelif c_options['use_opengl_es2'] is None:\n c_options['use_opengl_es2'] = \\\n environ.get('KIVY_GRAPHICS', '').lower() == 'gles'\n\nprint('Using this graphics system: {}'.format(\n ['OpenGL', 'OpenGL ES 2'][int(c_options['use_opengl_es2'] or False)]))\n\n# check if we are in a kivy-ios build\nif platform == 'ios':\n print('Kivy-IOS project environment detect, use it.')\n print('Kivy-IOS project located at {0}'.format(kivy_ios_root))\n c_options['use_ios'] = True\n c_options['use_sdl2'] = True\n\nelif platform == 'android':\n c_options['use_android'] = True\n\nelif platform == 'darwin':\n if c_options['use_osx_frameworks']:\n if osx_arch == \"i386\":\n print(\"Warning: building with frameworks fail on i386\")\n else:\n print(\"OSX framework used, force to x86_64 only\")\n environ[\"ARCHFLAGS\"] = environ.get(\"ARCHFLAGS\", \"-arch x86_64\")\n print(\"OSX ARCHFLAGS are: {}\".format(environ[\"ARCHFLAGS\"]))\n\n# detect gstreamer, only on desktop\n# works if we forced the options or in autodetection\nif platform not in ('ios', 'android') and (c_options['use_gstreamer']\n in (None, True)):\n gstreamer_valid = False\n if c_options['use_osx_frameworks'] and platform == 'darwin':\n # check the existence of frameworks\n f_path = '/Library/Frameworks/GStreamer.framework'\n if not exists(f_path):\n c_options['use_gstreamer'] = False\n print('GStreamer framework not found, fallback on pkg-config')\n else:\n print('GStreamer framework found')\n gstreamer_valid = True\n c_options['use_gstreamer'] = True\n gst_flags = {\n 'extra_link_args': [\n '-F/Library/Frameworks',\n '-Xlinker', '-rpath',\n '-Xlinker', '/Library/Frameworks',\n '-Xlinker', '-headerpad',\n '-Xlinker', '190',\n '-framework', 'GStreamer'],\n 'include_dirs': [join(f_path, 'Headers')]}\n\n if not gstreamer_valid:\n # use pkg-config approach instead\n gst_flags = pkgconfig('gstreamer-1.0')\n if 'libraries' in gst_flags:\n print('GStreamer found via pkg-config')\n c_options['use_gstreamer'] = True\n\n\n# detect SDL2, only on desktop and iOS, or android if explicitly enabled\n# works if we forced the options or in autodetection\nsdl2_flags = {}\nif c_options['use_sdl2'] or (\n platform not in ('android',) and c_options['use_sdl2'] is None):\n\n sdl2_valid = False\n if c_options['use_osx_frameworks'] and platform == 'darwin':\n # check the existence of frameworks\n sdl2_valid = True\n sdl2_flags = {\n 'extra_link_args': [\n '-F/Library/Frameworks',\n '-Xlinker', '-rpath',\n '-Xlinker', '/Library/Frameworks',\n '-Xlinker', '-headerpad',\n '-Xlinker', '190'],\n 'include_dirs': [],\n 'extra_compile_args': ['-F/Library/Frameworks']\n }\n for name in ('SDL2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer'):\n f_path = '/Library/Frameworks/{}.framework'.format(name)\n if not 
exists(f_path):\n print('Missing framework {}'.format(f_path))\n sdl2_valid = False\n continue\n sdl2_flags['extra_link_args'] += ['-framework', name]\n sdl2_flags['include_dirs'] += [join(f_path, 'Headers')]\n print('Found sdl2 frameworks: {}'.format(f_path))\n if name == 'SDL2_mixer':\n _check_and_fix_sdl2_mixer(f_path)\n\n if not sdl2_valid:\n c_options['use_sdl2'] = False\n print('SDL2 frameworks not found, fallback on pkg-config')\n else:\n c_options['use_sdl2'] = True\n print('Activate SDL2 compilation')\n\n if not sdl2_valid and platform != \"ios\":\n # use pkg-config approach instead\n sdl2_flags = pkgconfig('sdl2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer')\n if 'libraries' in sdl2_flags:\n print('SDL2 found via pkg-config')\n c_options['use_sdl2'] = True\n\n\n# -----------------------------------------------------------------------------\n# declare flags\n\n\ndef get_modulename_from_file(filename):\n filename = filename.replace(sep, '/')\n pyx = '.'.join(filename.split('.')[:-1])\n pyxl = pyx.split('/')\n while pyxl[0] != 'kivy':\n pyxl.pop(0)\n if pyxl[1] == 'kivy':\n pyxl.pop(0)\n return '.'.join(pyxl)\n\n\ndef expand(root, *args):\n return join(root, 'kivy', *args)\n\n\nclass CythonExtension(Extension):\n\n def __init__(self, *args, **kwargs):\n Extension.__init__(self, *args, **kwargs)\n self.cython_directives = {\n 'c_string_encoding': 'utf-8',\n 'profile': 'USE_PROFILE' in environ,\n 'embedsignature': 'USE_EMBEDSIGNATURE' in environ}\n # XXX with pip, setuptools is imported before distutils, and change\n # our pyx to c, then, cythonize doesn't happen. So force again our\n # sources\n self.sources = args[1]\n\n\ndef merge(d1, *args):\n d1 = deepcopy(d1)\n for d2 in args:\n for key, value in d2.items():\n value = deepcopy(value)\n if key in d1:\n d1[key].extend(value)\n else:\n d1[key] = value\n return d1\n\n\ndef determine_base_flags():\n flags = {\n 'libraries': [],\n 'include_dirs': [join(src_path, 'kivy', 'include')],\n 'library_dirs': [],\n 'extra_link_args': [],\n 'extra_compile_args': []}\n if c_options['use_ios']:\n sysroot = environ.get('IOSSDKROOT', environ.get('SDKROOT'))\n if not sysroot:\n raise Exception('IOSSDKROOT is not set')\n flags['include_dirs'] += [sysroot]\n flags['extra_compile_args'] += ['-isysroot', sysroot]\n flags['extra_link_args'] += ['-isysroot', sysroot]\n elif platform.startswith('freebsd'):\n flags['include_dirs'] += [join(\n environ.get('LOCALBASE', '/usr/local'), 'include')]\n flags['library_dirs'] += [join(\n environ.get('LOCALBASE', '/usr/local'), 'lib')]\n elif platform == 'darwin':\n v = os.uname()\n if v[2] >= '13.0.0':\n # use xcode-select to search on the right Xcode path\n # XXX use the best SDK available instead of a specific one\n import platform as _platform\n xcode_dev = getoutput('xcode-select -p').splitlines()[0]\n sdk_mac_ver = '.'.join(_platform.mac_ver()[0].split('.')[:2])\n print('Xcode detected at {}, and using OS X{} sdk'.format(\n xcode_dev, sdk_mac_ver))\n sysroot = join(\n xcode_dev.decode('utf-8'),\n 'Platforms/MacOSX.platform/Developer/SDKs',\n 'MacOSX{}.sdk'.format(sdk_mac_ver),\n 'System/Library/Frameworks')\n else:\n sysroot = ('/System/Library/Frameworks/'\n 'ApplicationServices.framework/Frameworks')\n flags['extra_compile_args'] += ['-F%s' % sysroot]\n flags['extra_link_args'] += ['-F%s' % sysroot]\n elif platform == 'win32':\n flags['include_dirs'] += [get_python_inc(prefix=sys.prefix)]\n flags['library_dirs'] += [join(sys.prefix, \"libs\")]\n return flags\n\n\ndef determine_gl_flags():\n kivy_graphics_include = 
join(src_path, 'kivy', 'include')\n flags = {'include_dirs': [kivy_graphics_include], 'libraries': []}\n base_flags = {'include_dirs': [kivy_graphics_include], 'libraries': []}\n if c_options['use_opengl_mock']:\n return flags, base_flags\n if platform == 'win32':\n flags['libraries'] = ['opengl32', 'glew32']\n elif platform == 'ios':\n flags['libraries'] = ['GLESv2']\n flags['extra_link_args'] = ['-framework', 'OpenGLES']\n elif platform == 'darwin':\n flags['extra_link_args'] = ['-framework', 'OpenGL', '-arch', osx_arch]\n flags['extra_compile_args'] = ['-arch', osx_arch]\n elif platform.startswith('freebsd'):\n flags['libraries'] = ['GL']\n elif platform.startswith('openbsd'):\n flags['include_dirs'] = ['/usr/X11R6/include']\n flags['library_dirs'] = ['/usr/X11R6/lib']\n flags['libraries'] = ['GL']\n elif platform == 'android':\n flags['include_dirs'] = [join(ndkplatform, 'usr', 'include')]\n flags['library_dirs'] = [join(ndkplatform, 'usr', 'lib')]\n flags['libraries'] = ['GLESv2']\n elif platform == 'rpi':\n flags['include_dirs'] = [\n '/opt/vc/include',\n '/opt/vc/include/interface/vcos/pthreads',\n '/opt/vc/include/interface/vmcs_host/linux']\n flags['library_dirs'] = ['/opt/vc/lib']\n brcm_lib_files = (\n '/opt/vc/lib/libbrcmEGL.so',\n '/opt/vc/lib/libbrcmGLESv2.so')\n if all((exists(lib) for lib in brcm_lib_files)):\n print(\n 'Found brcmEGL and brcmGLES library files'\n 'for rpi platform at /opt/vc/lib/')\n gl_libs = ['brcmEGL', 'brcmGLESv2']\n else:\n print(\n 'Failed to find brcmEGL and brcmGLESv2 library files'\n 'for rpi platform, falling back to EGL and GLESv2.')\n gl_libs = ['EGL', 'GLESv2']\n flags['libraries'] = ['bcm_host'] + gl_libs\n elif platform in ['mali', 'vc']:\n flags['include_dirs'] = ['/usr/include/']\n flags['library_dirs'] = ['/usr/lib/arm-linux-gnueabihf']\n flags['libraries'] = ['GLESv2']\n c_options['use_x11'] = True\n c_options['use_egl'] = True\n else:\n flags['libraries'] = ['GL']\n return flags, base_flags\n\n\ndef determine_sdl2():\n flags = {}\n if not c_options['use_sdl2']:\n return flags\n\n sdl2_path = environ.get('KIVY_SDL2_PATH', None)\n\n if sdl2_flags and not sdl2_path and platform == 'darwin':\n return sdl2_flags\n\n # no pkgconfig info, or we want to use a specific sdl2 path, so perform\n # manual configuration\n flags['libraries'] = ['SDL2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer']\n split_chr = ';' if platform == 'win32' else ':'\n sdl2_paths = sdl2_path.split(split_chr) if sdl2_path else []\n\n if not sdl2_paths:\n sdl_inc = join(sys.prefix, 'include', 'SDL2')\n if isdir(sdl_inc):\n sdl2_paths = [sdl_inc]\n sdl2_paths.extend(['/usr/local/include/SDL2', '/usr/include/SDL2'])\n\n flags['include_dirs'] = sdl2_paths\n flags['extra_link_args'] = []\n flags['extra_compile_args'] = []\n flags['library_dirs'] = (\n sdl2_paths if sdl2_paths else\n ['/usr/local/lib/'])\n\n if sdl2_flags:\n flags = merge(flags, sdl2_flags)\n\n # ensure headers for all the SDL2 and sub libraries are available\n libs_to_check = ['SDL', 'SDL_mixer', 'SDL_ttf', 'SDL_image']\n can_compile = True\n for lib in libs_to_check:\n found = False\n for d in flags['include_dirs']:\n fn = join(d, '{}.h'.format(lib))\n if exists(fn):\n found = True\n print('SDL2: found {} header at {}'.format(lib, fn))\n break\n\n if not found:\n print('SDL2: missing sub library {}'.format(lib))\n can_compile = False\n\n if not can_compile:\n c_options['use_sdl2'] = False\n return {}\n\n return flags\n\n\nbase_flags = determine_base_flags()\ngl_flags, gl_flags_base = determine_gl_flags()\n\n# 
-----------------------------------------------------------------------------\n# sources to compile\n# all the dependencies have been found manually with:\n# grep -inr -E '(cimport|include)' kivy/graphics/context_instructions.{pxd,pyx}\ngraphics_dependencies = {\n 'buffer.pyx': ['common.pxi'],\n 'context.pxd': ['instructions.pxd', 'texture.pxd', 'vbo.pxd', 'cgl.pxd'],\n 'cgl.pxd': ['common.pxi', 'config.pxi', 'gl_redirect.h'],\n 'compiler.pxd': ['instructions.pxd'],\n 'compiler.pyx': ['context_instructions.pxd'],\n 'cgl.pyx': ['cgl.pxd'],\n 'cgl_mock.pyx': ['cgl.pxd'],\n 'cgl_sdl2.pyx': ['cgl.pxd'],\n 'cgl_gl.pyx': ['cgl.pxd'],\n 'cgl_glew.pyx': ['cgl.pxd'],\n 'context_instructions.pxd': [\n 'transformation.pxd', 'instructions.pxd', 'texture.pxd'],\n 'fbo.pxd': ['cgl.pxd', 'instructions.pxd', 'texture.pxd'],\n 'fbo.pyx': [\n 'config.pxi', 'opcodes.pxi', 'transformation.pxd', 'context.pxd'],\n 'gl_instructions.pyx': [\n 'config.pxi', 'opcodes.pxi', 'cgl.pxd', 'instructions.pxd'],\n 'instructions.pxd': [\n 'vbo.pxd', 'context_instructions.pxd', 'compiler.pxd', 'shader.pxd',\n 'texture.pxd', '../_event.pxd'],\n 'instructions.pyx': [\n 'config.pxi', 'opcodes.pxi', 'cgl.pxd',\n 'context.pxd', 'common.pxi', 'vertex.pxd', 'transformation.pxd'],\n 'opengl.pyx': [\n 'config.pxi', 'common.pxi', 'cgl.pxd', 'gl_redirect.h'],\n 'opengl_utils.pyx': [\n 'opengl_utils_def.pxi', 'cgl.pxd', ],\n 'shader.pxd': ['cgl.pxd', 'transformation.pxd', 'vertex.pxd'],\n 'shader.pyx': [\n 'config.pxi', 'common.pxi', 'cgl.pxd',\n 'vertex.pxd', 'transformation.pxd', 'context.pxd',\n 'gl_debug_logger.pxi'],\n 'stencil_instructions.pxd': ['instructions.pxd'],\n 'stencil_instructions.pyx': [\n 'config.pxi', 'opcodes.pxi', 'cgl.pxd',\n 'gl_debug_logger.pxi'],\n 'scissor_instructions.pyx': [\n 'config.pxi', 'opcodes.pxi', 'cgl.pxd'],\n 'svg.pyx': ['config.pxi', 'common.pxi', 'texture.pxd', 'instructions.pxd',\n 'vertex_instructions.pxd', 'tesselator.pxd'],\n 'texture.pxd': ['cgl.pxd'],\n 'texture.pyx': [\n 'config.pxi', 'common.pxi', 'opengl_utils_def.pxi', 'context.pxd',\n 'cgl.pxd', 'opengl_utils.pxd',\n 'img_tools.pxi', 'gl_debug_logger.pxi'],\n 'vbo.pxd': ['buffer.pxd', 'cgl.pxd', 'vertex.pxd'],\n 'vbo.pyx': [\n 'config.pxi', 'common.pxi', 'context.pxd',\n 'instructions.pxd', 'shader.pxd', 'gl_debug_logger.pxi'],\n 'vertex.pxd': ['cgl.pxd'],\n 'vertex.pyx': ['config.pxi', 'common.pxi'],\n 'vertex_instructions.pyx': [\n 'config.pxi', 'common.pxi', 'vbo.pxd', 'vertex.pxd',\n 'instructions.pxd', 'vertex_instructions.pxd',\n 'cgl.pxd', 'texture.pxd', 'vertex_instructions_line.pxi'],\n 'vertex_instructions_line.pxi': ['stencil_instructions.pxd']}\n\nsources = {\n '_event.pyx': merge(base_flags, {'depends': ['properties.pxd']}),\n '_clock.pyx': {},\n 'weakproxy.pyx': {},\n 'properties.pyx': merge(base_flags, {'depends': ['_event.pxd']}),\n 'graphics/buffer.pyx': merge(base_flags, gl_flags_base),\n 'graphics/context.pyx': merge(base_flags, gl_flags_base),\n 'graphics/compiler.pyx': merge(base_flags, gl_flags_base),\n 'graphics/context_instructions.pyx': merge(base_flags, gl_flags_base),\n 'graphics/fbo.pyx': merge(base_flags, gl_flags_base),\n 'graphics/gl_instructions.pyx': merge(base_flags, gl_flags_base),\n 'graphics/instructions.pyx': merge(base_flags, gl_flags_base),\n 'graphics/opengl.pyx': merge(base_flags, gl_flags_base),\n 'graphics/opengl_utils.pyx': merge(base_flags, gl_flags_base),\n 'graphics/shader.pyx': merge(base_flags, gl_flags_base),\n 'graphics/stencil_instructions.pyx': merge(base_flags, gl_flags_base),\n 
'graphics/scissor_instructions.pyx': merge(base_flags, gl_flags_base),\n 'graphics/texture.pyx': merge(base_flags, gl_flags_base),\n 'graphics/transformation.pyx': merge(base_flags, gl_flags_base),\n 'graphics/vbo.pyx': merge(base_flags, gl_flags_base),\n 'graphics/vertex.pyx': merge(base_flags, gl_flags_base),\n 'graphics/vertex_instructions.pyx': merge(base_flags, gl_flags_base),\n 'graphics/cgl.pyx': merge(base_flags, gl_flags_base),\n 'graphics/cgl_backend/cgl_mock.pyx': merge(base_flags, gl_flags_base),\n 'graphics/cgl_backend/cgl_gl.pyx': merge(base_flags, gl_flags),\n 'graphics/cgl_backend/cgl_glew.pyx': merge(base_flags, gl_flags),\n 'graphics/cgl_backend/cgl_sdl2.pyx': merge(base_flags, gl_flags_base),\n 'graphics/cgl_backend/cgl_debug.pyx': merge(base_flags, gl_flags_base),\n 'core/text/text_layout.pyx': base_flags,\n 'core/window/window_info.pyx': base_flags,\n 'graphics/tesselator.pyx': merge(base_flags, {\n 'include_dirs': ['kivy/lib/libtess2/Include'],\n 'c_depends': [\n 'lib/libtess2/Source/bucketalloc.c',\n 'lib/libtess2/Source/dict.c',\n 'lib/libtess2/Source/geom.c',\n 'lib/libtess2/Source/mesh.c',\n 'lib/libtess2/Source/priorityq.c',\n 'lib/libtess2/Source/sweep.c',\n 'lib/libtess2/Source/tess.c'\n ]\n }),\n 'graphics/svg.pyx': merge(base_flags, gl_flags_base)\n}\n\nif c_options[\"use_sdl2\"]:\n sdl2_flags = determine_sdl2()\n\nif c_options['use_sdl2'] and sdl2_flags:\n sources['graphics/cgl_backend/cgl_sdl2.pyx'] = merge(\n sources['graphics/cgl_backend/cgl_sdl2.pyx'], sdl2_flags)\n sdl2_depends = {'depends': ['lib/sdl2.pxi']}\n for source_file in ('core/window/_window_sdl2.pyx',\n 'core/image/_img_sdl2.pyx',\n 'core/text/_text_sdl2.pyx',\n 'core/audio/audio_sdl2.pyx',\n 'core/clipboard/_clipboard_sdl2.pyx'):\n sources[source_file] = merge(\n base_flags, sdl2_flags, sdl2_depends)\n\nif c_options['use_pangoft2'] in (None, True) and platform not in (\n 'android', 'ios', 'windows'):\n pango_flags = pkgconfig('pangoft2')\n if pango_flags and 'libraries' in pango_flags:\n print('Pango: pangoft2 found via pkg-config')\n c_options['use_pangoft2'] = True\n pango_depends = {'depends': ['lib/pangoft2.pxi',\n 'lib/pangoft2.h']}\n sources['core/text/_text_pango.pyx'] = merge(\n base_flags, pango_flags, pango_depends)\n\nif platform in ('darwin', 'ios'):\n # activate ImageIO provider for our core image\n if platform == 'ios':\n osx_flags = {'extra_link_args': [\n '-framework', 'Foundation',\n '-framework', 'UIKit',\n '-framework', 'AudioToolbox',\n '-framework', 'CoreGraphics',\n '-framework', 'QuartzCore',\n '-framework', 'ImageIO',\n '-framework', 'Accelerate']}\n else:\n osx_flags = {'extra_link_args': [\n '-framework', 'ApplicationServices']}\n sources['core/image/img_imageio.pyx'] = merge(\n base_flags, osx_flags)\n\nif c_options['use_avfoundation']:\n import platform as _platform\n mac_ver = [int(x) for x in _platform.mac_ver()[0].split('.')[:2]]\n if mac_ver >= [10, 7]:\n osx_flags = {\n 'extra_link_args': ['-framework', 'AVFoundation'],\n 'extra_compile_args': ['-ObjC++'],\n 'depends': ['core/camera/camera_avfoundation_implem.m']}\n sources['core/camera/camera_avfoundation.pyx'] = merge(\n base_flags, osx_flags)\n else:\n print('AVFoundation cannot be used, OSX >= 10.7 is required')\n\nif c_options['use_rpi']:\n sources['lib/vidcore_lite/egl.pyx'] = merge(\n base_flags, gl_flags)\n sources['lib/vidcore_lite/bcm.pyx'] = merge(\n base_flags, gl_flags)\n\nif c_options['use_x11']:\n libs = ['Xrender', 'X11']\n if c_options['use_egl']:\n libs += ['EGL']\n else:\n libs += ['GL']\n 
sources['core/window/window_x11.pyx'] = merge(\n base_flags, gl_flags, {\n # FIXME add an option to depend on them but not compile them\n # cause keytab is included in core, and core is included in\n # window_x11\n #\n # 'depends': [\n # 'core/window/window_x11_keytab.c',\n # 'core/window/window_x11_core.c'],\n 'libraries': libs})\n\nif c_options['use_gstreamer']:\n sources['lib/gstplayer/_gstplayer.pyx'] = merge(\n base_flags, gst_flags, {\n 'depends': ['lib/gstplayer/_gstplayer.h']})\n\n\n# -----------------------------------------------------------------------------\n# extension modules\n\ndef get_dependencies(name, deps=None):\n if deps is None:\n deps = []\n for dep in graphics_dependencies.get(name, []):\n if dep not in deps:\n deps.append(dep)\n get_dependencies(dep, deps)\n return deps\n\n\ndef resolve_dependencies(fn, depends):\n fn = basename(fn)\n deps = []\n get_dependencies(fn, deps)\n get_dependencies(fn.replace('.pyx', '.pxd'), deps)\n\n deps_final = []\n paths_to_test = ['graphics', 'include']\n for dep in deps:\n found = False\n for path in paths_to_test:\n filename = expand(src_path, path, dep)\n if exists(filename):\n deps_final.append(filename)\n found = True\n break\n if not found:\n print('ERROR: Dependency for {} not resolved: {}'.format(\n fn, dep\n ))\n\n return deps_final\n\n\ndef get_extensions_from_sources(sources):\n ext_modules = []\n if environ.get('KIVY_FAKE_BUILDEXT'):\n print('Fake build_ext asked, will generate only .h/.c')\n return ext_modules\n for pyx, flags in sources.items():\n is_graphics = pyx.startswith('graphics')\n pyx = expand(src_path, pyx)\n depends = [expand(src_path, x) for x in flags.pop('depends', [])]\n c_depends = [expand(src_path, x) for x in flags.pop('c_depends', [])]\n if not have_cython:\n pyx = '%s.c' % pyx[:-4]\n if is_graphics:\n depends = resolve_dependencies(pyx, depends)\n f_depends = [x for x in depends if x.rsplit('.', 1)[-1] in (\n 'c', 'cpp', 'm')]\n module_name = get_modulename_from_file(pyx)\n flags_clean = {'depends': depends}\n for key, value in flags.items():\n if len(value):\n flags_clean[key] = value\n ext_modules.append(CythonExtension(\n module_name, [pyx] + f_depends + c_depends, **flags_clean))\n return ext_modules\n\n\next_modules = get_extensions_from_sources(sources)\n\n\n# -----------------------------------------------------------------------------\n# automatically detect data files\nsplit_examples = int(environ.get('KIVY_SPLIT_EXAMPLES', '0'))\ndata_file_prefix = 'share/kivy-'\nexamples = {}\nexamples_allowed_ext = ('readme', 'py', 'wav', 'png', 'jpg', 'svg', 'json',\n 'avi', 'gif', 'txt', 'ttf', 'obj', 'mtl', 'kv', 'mpg',\n 'glsl', 'zip')\nfor root, subFolders, files in walk('examples'):\n for fn in files:\n ext = fn.split('.')[-1].lower()\n if ext not in examples_allowed_ext:\n continue\n filename = join(root, fn)\n directory = '%s%s' % (data_file_prefix, dirname(filename))\n if directory not in examples:\n examples[directory] = []\n examples[directory].append(filename)\n\nbinary_deps = []\nbinary_deps_path = join(src_path, 'kivy', 'binary_deps')\nif isdir(binary_deps_path):\n for root, dirnames, filenames in walk(binary_deps_path):\n for fname in filenames:\n binary_deps.append(\n join(root.replace(binary_deps_path, 'binary_deps'), fname))\n\n# -----------------------------------------------------------------------------\n# setup !\nif not build_examples:\n setup(\n name='Kivy',\n version=get_version(),\n author='Kivy Team and other contributors',\n author_email='[email protected]',\n 
url='http://kivy.org',\n license='MIT',\n description=(\n 'A software library for rapid development of '\n 'hardware-accelerated multitouch applications.'),\n long_description=get_description(),\n ext_modules=ext_modules,\n cmdclass=cmdclass,\n packages=[\n 'kivy',\n 'kivy.core',\n 'kivy.core.audio',\n 'kivy.core.camera',\n 'kivy.core.clipboard',\n 'kivy.core.image',\n 'kivy.core.gl',\n 'kivy.core.spelling',\n 'kivy.core.text',\n 'kivy.core.video',\n 'kivy.core.window',\n 'kivy.deps',\n 'kivy.effects',\n 'kivy.graphics',\n 'kivy.graphics.cgl_backend',\n 'kivy.garden',\n 'kivy.input',\n 'kivy.input.postproc',\n 'kivy.input.providers',\n 'kivy.lang',\n 'kivy.lib',\n 'kivy.lib.gstplayer',\n 'kivy.lib.vidcore_lite',\n 'kivy.modules',\n 'kivy.network',\n 'kivy.storage',\n 'kivy.tests',\n 'kivy.tools',\n 'kivy.tools.packaging',\n 'kivy.tools.packaging.pyinstaller_hooks',\n 'kivy.tools.highlight',\n 'kivy.extras',\n 'kivy.uix',\n 'kivy.uix.behaviors',\n 'kivy.uix.recycleview',\n ],\n package_dir={'kivy': 'kivy'},\n package_data={'kivy': [\n 'setupconfig.py',\n '*.pxd',\n '*.pxi',\n 'core/text/*.pxd',\n 'core/text/*.pxi',\n 'core/window/*.pxi',\n 'core/window/*.pxd',\n 'graphics/*.pxd',\n 'graphics/*.pxi',\n 'graphics/*.h',\n 'include/*',\n 'lib/vidcore_lite/*.pxd',\n 'lib/vidcore_lite/*.pxi',\n 'data/*.kv',\n 'data/*.json',\n 'data/fonts/*.ttf',\n 'data/images/*.png',\n 'data/images/*.jpg',\n 'data/images/*.gif',\n 'data/images/*.atlas',\n 'data/keyboards/*.json',\n 'data/logo/*.png',\n 'data/glsl/*.png',\n 'data/glsl/*.vs',\n 'data/glsl/*.fs',\n 'tests/*.zip',\n 'tests/*.kv',\n 'tests/*.png',\n 'tests/*.ttf',\n 'tests/*.ogg',\n 'tools/gles_compat/*',\n 'tools/highlight/*',\n 'tools/packaging/README.txt',\n 'tools/packaging/win32/kivy.bat',\n 'tools/packaging/win32/kivyenv.sh',\n 'tools/packaging/win32/README.txt',\n 'tools/packaging/osx/Info.plist',\n 'tools/packaging/osx/InfoPlist.strings',\n 'tools/packaging/osx/kivy.sh',\n 'tools/pep8checker/*',\n 'tools/theming/defaulttheme/*',\n ] + binary_deps},\n data_files=[] if split_examples else list(examples.items()),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: MacOS X',\n 'Environment :: Win32 (MS Windows)',\n 'Environment :: X11 Applications',\n 'Intended Audience :: Developers',\n 'Intended Audience :: End Users/Desktop',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX :: BSD :: FreeBSD',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Artistic Software',\n 'Topic :: Games/Entertainment',\n 'Topic :: Multimedia :: Graphics :: 3D Rendering',\n 'Topic :: Multimedia :: Graphics :: Capture :: Digital Camera',\n 'Topic :: Multimedia :: Graphics :: Presentation',\n 'Topic :: Multimedia :: Graphics :: Viewers',\n 'Topic :: Multimedia :: Sound/Audio :: Players :: MP3',\n 'Topic :: Multimedia :: Video :: Display',\n 'Topic :: Scientific/Engineering :: Human Machine Interfaces',\n 'Topic :: Scientific/Engineering :: Visualization',\n ('Topic :: Software Development :: Libraries :: '\n 'Application Frameworks'),\n 'Topic :: Software Development :: User Interfaces'],\n dependency_links=[\n 
'https://github.com/kivy-garden/garden/archive/master.zip'],\n install_requires=[\n 'Kivy-Garden>=0.1.4', 'docutils', 'pygments'\n ],\n extra_requires={\n 'tuio': ['oscpy']\n },\n setup_requires=[\n 'cython>=' + MIN_CYTHON_STRING\n ] if not skip_cython else [])\nelse:\n setup(\n name='Kivy-examples',\n version=get_version(),\n author='Kivy Team and other contributors',\n author_email='[email protected]',\n url='http://kivy.org',\n license='MIT',\n description=('Kivy examples.'),\n long_description=get_description(),\n data_files=list(examples.items()))\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index eac026084d..4eebf8fdf9 100644
--- a/setup.py
+++ b/setup.py
@@ -40,8 +40,8 @@ def ver_equal(self, other):
def get_description():
- with open(join(dirname(__file__), 'README.md')) as fileh:
- return fileh.read()
+ with open(join(dirname(__file__), 'README.md'), 'rb') as fileh:
+ return fileh.read().decode("utf8")
def get_version(filename='kivy/version.py'):
|
scikit-hep__pyhf-2220 | Menu on mobile page not accessible for pyhf v0.7.1 docs
### Summary
On the [`pyhf` `v0.7.1` docs](https://pyhf.readthedocs.io/en/v0.7.1/) and on the `main` docs build, the drop-down menu (circled in the screenshot below) fails to open when clicked.

Things work fine on desktop and, confusingly, @alexander-held has pointed out that the [`v0.5.2` `cabinetry` docs](https://cabinetry.readthedocs.io/en/stable/) (which were [released](https://github.com/scikit-hep/cabinetry/releases/tag/v0.5.2) very close in time to the `pyhf` `v0.7.1` docs) have a menu that works fine on mobile.
### Documentation Page Link
https://pyhf.readthedocs.io/en/v0.7.1/
### Code of Conduct
- [X] I agree to follow the Code of Conduct
| [
{
"content": "#\n# pyhf documentation build configuration file, created by\n# sphinx-quickstart on Fri Feb 9 11:58:49 2018.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use Path('../relative_path_to_dir').resolve() to make it absolute, like shown here.\n\nimport sys\nfrom pathlib import Path\n\nimport jupytext\nfrom pkg_resources import get_distribution\n\nsys.path.insert(0, str(Path('./exts').resolve()))\n\n# Convert jupyterlite example to ipynb\ndocs_dir = Path(__file__).resolve().parent\npy_percent_as_notebook = jupytext.read(docs_dir / \"lite\" / \"jupyterlite.py\")\njupytext.write(\n py_percent_as_notebook, docs_dir / \"lite\" / \"jupyterlite.ipynb\", fmt=\"ipynb\"\n)\n\n\ndef setup(app):\n app.add_css_file(\n 'https://cdnjs.cloudflare.com/ajax/libs/github-fork-ribbon-css/0.2.2/gh-fork-ribbon.min.css'\n )\n\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.coverage',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.ifconfig',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.githubpages',\n 'sphinx.ext.intersphinx',\n 'sphinxcontrib.bibtex',\n 'sphinx.ext.napoleon',\n 'sphinx_click.ext',\n 'nbsphinx',\n 'sphinx_issues',\n 'sphinx_copybutton',\n 'xref',\n 'jupyterlite_sphinx',\n]\nbibtex_bibfiles = [\n \"bib/docs.bib\",\n \"bib/HEPData_likelihoods.bib\",\n \"bib/media.bib\",\n \"bib/posters.bib\",\n \"bib/preferred.bib\",\n \"bib/talks.bib\",\n \"bib/tutorials.bib\",\n \"bib/use_citations.bib\",\n \"bib/general_citations.bib\",\n]\nbibtex_default_style = \"unsrt\"\n\n# external links\nxref_links = {\"arXiv:1007.1727\": (\"[1007.1727]\", \"https://arxiv.org/abs/1007.1727\")}\n\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3', None),\n 'numpy': ('https://numpy.org/doc/stable/', None),\n 'scipy': ('https://docs.scipy.org/doc/scipy/', None),\n 'matplotlib': ('https://matplotlib.org/stable/', None),\n 'iminuit': ('https://iminuit.readthedocs.io/en/stable/', None),\n 'uproot': ('https://uproot.readthedocs.io/en/latest/', None),\n 'jsonpatch': ('https://python-json-patch.readthedocs.io/en/latest/', None),\n}\n\n# GitHub repo\nissues_github_path = 'scikit-hep/pyhf'\n\n# Generate the API documentation when building\nautosummary_generate = True\nnumpydoc_show_class_members = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\nsource_suffix = ['.rst', '.md']\n# source_suffix = '.rst'\n\n# The encoding of source files.\n#\n# source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'pyhf'\ncopyright = '2018, Lukas Heinrich, Matthew Feickert, Giordon Stark'\nauthor = 'Lukas 
Heinrich, Matthew Feickert, Giordon Stark'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n# The full version, including alpha/beta/rc tags.\nrelease = get_distribution('pyhf').version\n# for example take major/minor/patch\nversion = '.'.join(release.split('.')[:3])\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = \"en\"\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#\n# today = ''\n#\n# Else, today_fmt is used as the format for a strftime call.\n#\n# today_fmt = '%B %d, %Y'\n\nautodoc_mock_imports = [\n 'tensorflow',\n 'torch',\n 'jax',\n 'iminuit',\n 'tensorflow_probability',\n]\n\n\n_type_aliases_inverted = {\n 'pyhf.typing': [\n 'PathOrStr',\n 'ParameterBase',\n 'Parameter',\n 'Measurement',\n 'ModifierBase',\n 'NormSys',\n 'NormFactor',\n 'HistoSys',\n 'StatError',\n 'ShapeSys',\n 'ShapeFactor',\n 'LumiSys',\n 'Modifier',\n 'Sample',\n 'Channel',\n 'Observation',\n 'Workspace',\n 'Literal',\n ],\n 'numpy.typing': ['ArrayLike', 'DTypeLike', 'NBitBase', 'NDArray'],\n}\nautodoc_type_aliases = {\n item: f'{k}.{item}' for k, v in _type_aliases_inverted.items() for item in v\n}\n\nautodoc_typehints_format = 'fully-qualified'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = [\n '_build',\n 'JOSS',\n 'lite',\n '**.ipynb_checkpoints',\n 'examples/experiments/edwardpyhf.ipynb',\n 'examples/notebooks/ImpactPlot.ipynb',\n 'examples/notebooks/Recast.ipynb',\n 'examples/notebooks/StatError.ipynb',\n 'examples/notebooks/example-tensorflow.ipynb',\n 'examples/notebooks/histogrammar.ipynb',\n 'examples/notebooks/histosys.ipynb',\n 'examples/notebooks/histosys-pytorch.ipynb',\n 'examples/notebooks/importxml.ipynb',\n 'examples/notebooks/multichannel-coupled-normsys.ipynb',\n 'examples/notebooks/multichannel-normsys.ipynb',\n 'examples/notebooks/normsys.ipynb',\n 'examples/notebooks/pullplot.ipynb',\n 'examples/notebooks/pytorch_tests_onoff.ipynb',\n 'examples/notebooks/tensorflow-limit.ipynb',\n]\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#\n# default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#\n# add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#\n# add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. 
They are ignored by default.\n#\n# show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n# modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n# keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\nhtml_theme_path = []\n\n# The name for this set of Sphinx documents.\n# \"<project> v<release> documentation\" by default.\n#\n# html_title = u'pyhf v0.3.0'\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#\n# html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#\n# html_logo = None\n\n# The name of an image file (relative to this directory) to use as a favicon of\n# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#\n# html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\nhtml_css_files = [\n 'css/custom.css',\n]\n\nhtml_js_files = [\n 'js/custom.js',\n (\n 'https://views.scientific-python.org/js/plausible.js',\n {\"data-domain\": \"pyhf.readthedocs.io\", \"defer\": \"defer\"},\n ),\n]\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n#\nhtml_extra_path = ['_extras']\n\n# If not None, a 'Last updated on:' timestamp is inserted at every page\n# bottom, using the given strftime format.\n# The empty string is equivalent to '%b %d, %Y'.\n#\n# html_last_updated_fmt = None\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#\n# html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#\n# html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#\n# html_additional_pages = {}\n\n# If false, no module index is generated.\n#\n# html_domain_indices = True\n\n# If false, no index is generated.\n#\n# html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#\n# html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#\n# html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#\n# html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#\n# html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. 
The value of this option must be the\n# base URL from which the finished HTML is served.\n#\n# html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n# html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'\n#\n# html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# 'ja' uses this config value.\n# 'zh' user can custom change `jieba` dictionary path.\n#\n# html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. If empty, the default will be used.\n#\n# html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'pyhfdoc'\n\n# sphinx-copybutton configuration\ncopybutton_prompt_text = r\">>> |\\.\\.\\. |\\$ \"\ncopybutton_prompt_is_regexp = True\ncopybutton_here_doc_delimiter = \"EOF\"\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (\n master_doc,\n 'pyhf.tex',\n 'pyhf Documentation',\n 'Lukas Heinrich, Matthew Feickert, Giordon Stark',\n 'manual',\n )\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#\n# latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#\n# latex_use_parts = False\n\n# If true, show page references after internal links.\n#\n# latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#\n# latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#\n# latex_appendices = []\n\n# It false, will not define \\strong, \\code, \titleref, \\crossref ... but only\n# \\sphinxstrong, ..., \\sphinxtitleref, ... To help avoid clash with user added\n# packages.\n#\n# latex_keep_old_macro_names = True\n\n# If false, no module index is generated.\n#\n# latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, 'pyhf', 'pyhf Documentation', [author], 1)]\n\n# If true, show URL addresses after external links.\n#\n# man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n 'pyhf',\n 'pyhf Documentation',\n author,\n 'pyhf',\n 'One line description of project.',\n 'Miscellaneous',\n )\n]\n\n# Documents to append as an appendix to all manuals.\n#\n# texinfo_appendices = []\n\n# If false, no module index is generated.\n#\n# texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#\n# texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#\n# texinfo_no_detailmenu = False\n\nmathjax3_config = {\n 'tex2jax': {'inlineMath': [['$', '$'], ['\\\\(', '\\\\)']]},\n 'tex': {\n 'macros': {\n 'bm': [\"\\\\boldsymbol{#1}\", 1], # \\usepackage{bm}, see mathjax/MathJax#1219\n 'HiFa': r'\\texttt{HistFactory}',\n 'Root': r'\\texttt{ROOT}',\n 'RooStats': r'\\texttt{RooStats}',\n 'RooFit': r'\\texttt{RooFit}',\n 'pyhf': r'\\texttt{pyhf}',\n 'CLs': r'\\mathrm{CL}_{s}',\n 'freeset': r'\\bm{\\eta}',\n 'constrset': r'\\bm{\\chi}',\n 'singleconstr': r'\\chi',\n 'channelcounts': r'\\bm{n}',\n 'auxdata': r'\\bm{a}',\n 'poiset': r'\\bm{\\psi}',\n 'nuisset': r'\\bm{\\theta}',\n 'fullset': r'\\bm{\\phi}',\n 'singlefull': r'\\phi',\n 'TeV': r'\\textrm{TeV}',\n }\n },\n}\n\n# c.f. https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-the-linkcheck-builder\nlinkcheck_ignore = [\n 'cli.html#pyhf-xml2json',\n # https://doi.org/10.31526/lhep.2020.158 is causing linkcheck connection timeouts in CI\n r'https://doi\\.org/10\\.31526/.*',\n # https://doi.org/10.1051/epjconf/x DOI URLs will periodically generate 500 Server Error\n r'https://doi\\.org/10\\.1051/epjconf/.*',\n # tags for a release won't exist until it is made, but the release notes\n # and ReadTheDocs need to reference them\n r'https://github.com/scikit-hep/pyhf/releases/tag/.*',\n r'https://pyhf.readthedocs.io/en/.*',\n]\nlinkcheck_retries = 50\n\n# JupyterLite configuration\njupyterlite_dir = \"lite\"\n",
"path": "docs/conf.py"
}
] | [
{
"content": "#\n# pyhf documentation build configuration file, created by\n# sphinx-quickstart on Fri Feb 9 11:58:49 2018.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use Path('../relative_path_to_dir').resolve() to make it absolute, like shown here.\n\nimport sys\nfrom pathlib import Path\n\nimport jupytext\nfrom pkg_resources import get_distribution\n\nsys.path.insert(0, str(Path('./exts').resolve()))\n\n# Convert jupyterlite example to ipynb\ndocs_dir = Path(__file__).resolve().parent\npy_percent_as_notebook = jupytext.read(docs_dir / \"lite\" / \"jupyterlite.py\")\njupytext.write(\n py_percent_as_notebook, docs_dir / \"lite\" / \"jupyterlite.ipynb\", fmt=\"ipynb\"\n)\n\n\ndef setup(app):\n app.add_css_file(\n 'https://cdnjs.cloudflare.com/ajax/libs/github-fork-ribbon-css/0.2.2/gh-fork-ribbon.min.css'\n )\n\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.coverage',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.ifconfig',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.githubpages',\n 'sphinx.ext.intersphinx',\n 'sphinx_rtd_theme',\n 'sphinxcontrib.bibtex',\n 'sphinx.ext.napoleon',\n 'sphinx_click.ext',\n 'nbsphinx',\n 'sphinx_issues',\n 'sphinx_copybutton',\n 'xref',\n 'jupyterlite_sphinx',\n]\nbibtex_bibfiles = [\n \"bib/docs.bib\",\n \"bib/HEPData_likelihoods.bib\",\n \"bib/media.bib\",\n \"bib/posters.bib\",\n \"bib/preferred.bib\",\n \"bib/talks.bib\",\n \"bib/tutorials.bib\",\n \"bib/use_citations.bib\",\n \"bib/general_citations.bib\",\n]\nbibtex_default_style = \"unsrt\"\n\n# external links\nxref_links = {\"arXiv:1007.1727\": (\"[1007.1727]\", \"https://arxiv.org/abs/1007.1727\")}\n\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3', None),\n 'numpy': ('https://numpy.org/doc/stable/', None),\n 'scipy': ('https://docs.scipy.org/doc/scipy/', None),\n 'matplotlib': ('https://matplotlib.org/stable/', None),\n 'iminuit': ('https://iminuit.readthedocs.io/en/stable/', None),\n 'uproot': ('https://uproot.readthedocs.io/en/latest/', None),\n 'jsonpatch': ('https://python-json-patch.readthedocs.io/en/latest/', None),\n}\n\n# GitHub repo\nissues_github_path = 'scikit-hep/pyhf'\n\n# Generate the API documentation when building\nautosummary_generate = True\nnumpydoc_show_class_members = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\nsource_suffix = ['.rst', '.md']\n# source_suffix = '.rst'\n\n# The encoding of source files.\n#\n# source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'pyhf'\ncopyright = '2018, Lukas Heinrich, Matthew Feickert, Giordon 
Stark'\nauthor = 'Lukas Heinrich, Matthew Feickert, Giordon Stark'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n# The full version, including alpha/beta/rc tags.\nrelease = get_distribution('pyhf').version\n# for example take major/minor/patch\nversion = '.'.join(release.split('.')[:3])\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = \"en\"\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#\n# today = ''\n#\n# Else, today_fmt is used as the format for a strftime call.\n#\n# today_fmt = '%B %d, %Y'\n\nautodoc_mock_imports = [\n 'tensorflow',\n 'torch',\n 'jax',\n 'iminuit',\n 'tensorflow_probability',\n]\n\n\n_type_aliases_inverted = {\n 'pyhf.typing': [\n 'PathOrStr',\n 'ParameterBase',\n 'Parameter',\n 'Measurement',\n 'ModifierBase',\n 'NormSys',\n 'NormFactor',\n 'HistoSys',\n 'StatError',\n 'ShapeSys',\n 'ShapeFactor',\n 'LumiSys',\n 'Modifier',\n 'Sample',\n 'Channel',\n 'Observation',\n 'Workspace',\n 'Literal',\n ],\n 'numpy.typing': ['ArrayLike', 'DTypeLike', 'NBitBase', 'NDArray'],\n}\nautodoc_type_aliases = {\n item: f'{k}.{item}' for k, v in _type_aliases_inverted.items() for item in v\n}\n\nautodoc_typehints_format = 'fully-qualified'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = [\n '_build',\n 'JOSS',\n 'lite',\n '**.ipynb_checkpoints',\n 'examples/experiments/edwardpyhf.ipynb',\n 'examples/notebooks/ImpactPlot.ipynb',\n 'examples/notebooks/Recast.ipynb',\n 'examples/notebooks/StatError.ipynb',\n 'examples/notebooks/example-tensorflow.ipynb',\n 'examples/notebooks/histogrammar.ipynb',\n 'examples/notebooks/histosys.ipynb',\n 'examples/notebooks/histosys-pytorch.ipynb',\n 'examples/notebooks/importxml.ipynb',\n 'examples/notebooks/multichannel-coupled-normsys.ipynb',\n 'examples/notebooks/multichannel-normsys.ipynb',\n 'examples/notebooks/normsys.ipynb',\n 'examples/notebooks/pullplot.ipynb',\n 'examples/notebooks/pytorch_tests_onoff.ipynb',\n 'examples/notebooks/tensorflow-limit.ipynb',\n]\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#\n# default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#\n# add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#\n# add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. 
They are ignored by default.\n#\n# show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n# modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n# keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\nhtml_theme_path = []\n\n# The name for this set of Sphinx documents.\n# \"<project> v<release> documentation\" by default.\n#\n# html_title = u'pyhf v0.3.0'\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#\n# html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#\n# html_logo = None\n\n# The name of an image file (relative to this directory) to use as a favicon of\n# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#\n# html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\nhtml_css_files = [\n 'css/custom.css',\n]\n\nhtml_js_files = [\n 'js/custom.js',\n (\n 'https://views.scientific-python.org/js/plausible.js',\n {\"data-domain\": \"pyhf.readthedocs.io\", \"defer\": \"defer\"},\n ),\n]\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n#\nhtml_extra_path = ['_extras']\n\n# If not None, a 'Last updated on:' timestamp is inserted at every page\n# bottom, using the given strftime format.\n# The empty string is equivalent to '%b %d, %Y'.\n#\n# html_last_updated_fmt = None\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#\n# html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#\n# html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#\n# html_additional_pages = {}\n\n# If false, no module index is generated.\n#\n# html_domain_indices = True\n\n# If false, no index is generated.\n#\n# html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#\n# html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#\n# html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#\n# html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#\n# html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. 
The value of this option must be the\n# base URL from which the finished HTML is served.\n#\n# html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n# html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'\n#\n# html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# 'ja' uses this config value.\n# 'zh' user can custom change `jieba` dictionary path.\n#\n# html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. If empty, the default will be used.\n#\n# html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'pyhfdoc'\n\n# sphinx-copybutton configuration\ncopybutton_prompt_text = r\">>> |\\.\\.\\. |\\$ \"\ncopybutton_prompt_is_regexp = True\ncopybutton_here_doc_delimiter = \"EOF\"\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (\n master_doc,\n 'pyhf.tex',\n 'pyhf Documentation',\n 'Lukas Heinrich, Matthew Feickert, Giordon Stark',\n 'manual',\n )\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#\n# latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#\n# latex_use_parts = False\n\n# If true, show page references after internal links.\n#\n# latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#\n# latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#\n# latex_appendices = []\n\n# It false, will not define \\strong, \\code, \titleref, \\crossref ... but only\n# \\sphinxstrong, ..., \\sphinxtitleref, ... To help avoid clash with user added\n# packages.\n#\n# latex_keep_old_macro_names = True\n\n# If false, no module index is generated.\n#\n# latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, 'pyhf', 'pyhf Documentation', [author], 1)]\n\n# If true, show URL addresses after external links.\n#\n# man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n 'pyhf',\n 'pyhf Documentation',\n author,\n 'pyhf',\n 'One line description of project.',\n 'Miscellaneous',\n )\n]\n\n# Documents to append as an appendix to all manuals.\n#\n# texinfo_appendices = []\n\n# If false, no module index is generated.\n#\n# texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#\n# texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#\n# texinfo_no_detailmenu = False\n\nmathjax3_config = {\n 'tex2jax': {'inlineMath': [['$', '$'], ['\\\\(', '\\\\)']]},\n 'tex': {\n 'macros': {\n 'bm': [\"\\\\boldsymbol{#1}\", 1], # \\usepackage{bm}, see mathjax/MathJax#1219\n 'HiFa': r'\\texttt{HistFactory}',\n 'Root': r'\\texttt{ROOT}',\n 'RooStats': r'\\texttt{RooStats}',\n 'RooFit': r'\\texttt{RooFit}',\n 'pyhf': r'\\texttt{pyhf}',\n 'CLs': r'\\mathrm{CL}_{s}',\n 'freeset': r'\\bm{\\eta}',\n 'constrset': r'\\bm{\\chi}',\n 'singleconstr': r'\\chi',\n 'channelcounts': r'\\bm{n}',\n 'auxdata': r'\\bm{a}',\n 'poiset': r'\\bm{\\psi}',\n 'nuisset': r'\\bm{\\theta}',\n 'fullset': r'\\bm{\\phi}',\n 'singlefull': r'\\phi',\n 'TeV': r'\\textrm{TeV}',\n }\n },\n}\n\n# c.f. https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-the-linkcheck-builder\nlinkcheck_ignore = [\n 'cli.html#pyhf-xml2json',\n # https://doi.org/10.31526/lhep.2020.158 is causing linkcheck connection timeouts in CI\n r'https://doi\\.org/10\\.31526/.*',\n # https://doi.org/10.1051/epjconf/x DOI URLs will periodically generate 500 Server Error\n r'https://doi\\.org/10\\.1051/epjconf/.*',\n # tags for a release won't exist until it is made, but the release notes\n # and ReadTheDocs need to reference them\n r'https://github.com/scikit-hep/pyhf/releases/tag/.*',\n r'https://pyhf.readthedocs.io/en/.*',\n]\nlinkcheck_retries = 50\n\n# JupyterLite configuration\njupyterlite_dir = \"lite\"\n",
"path": "docs/conf.py"
}
] | diff --git a/docs/conf.py b/docs/conf.py
index 30b9f2c6aa..cda874ee89 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -55,6 +55,7 @@ def setup(app):
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.intersphinx',
+ 'sphinx_rtd_theme',
'sphinxcontrib.bibtex',
'sphinx.ext.napoleon',
'sphinx_click.ext',
|
mindsdb__lightwood-603 | :wrench: Add default logging level environment variable
## Task
Add a `LIGHTWOOD_LOG` environment variable that controls the default logging level for lightwood. It should be possible to set values for it so that `DEBUG`, `INFO`, `WARNING`, `ERROR` and `CRITICAL` are all possible options. The logger lightwood uses is declared and exported [here](https://github.com/mindsdb/lightwood/blob/stable/lightwood/helpers/log.py).
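One way the change could look in `lightwood/helpers/log.py` — a minimal sketch, assuming the logger keeps its current `lightwood-{pid}` name and that `DEBUG` remains the default when the variable is unset:

```python
import logging
import os


def initialize_log():
    pid = os.getpid()
    logging.basicConfig()
    log = logging.getLogger(f'lightwood-{pid}')
    # LIGHTWOOD_LOG accepts DEBUG, INFO, WARNING, ERROR or CRITICAL;
    # logging.Logger.setLevel() understands these level names directly.
    log.setLevel(os.environ.get('LIGHTWOOD_LOG', 'DEBUG'))
    return log


log = initialize_log()
```

With something like this in place, setting `LIGHTWOOD_LOG=WARNING` before running one of the integration tests should suppress the `DEBUG`/`INFO` messages.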
## Steps :male_detective: :female_detective:
- Fork the Lightwood repository, check out the `staging` branch, and create a new branch from it.
- Implement the necessary changes.
- Check that only the appropriate logs are getting through. For this, you can run any of the integration tests, like [`test_boston_housing`](https://github.com/mindsdb/lightwood/blob/stable/tests/integration/basic/test_boston_housing.py), and analyze the output.
- Make the PR and address any comments that reviewers might make.
## Additional rewards :1st_place_medal:
Each documentation PR brings :one: point for entry into the draw for a :computer: Deep Learning Laptop powered by the NVIDIA RTX 3080 Max-Q GPU or other swag :shirt: :bear:. For more info, check out https://mindsdb.com/hacktoberfest/
| [
{
"content": "import logging\nimport os\n\n\ndef initialize_log():\n pid = os.getpid()\n logging.basicConfig()\n log = logging.getLogger(f'lightwood-{pid}')\n log.setLevel(logging.DEBUG)\n return log\n\n\nlog = initialize_log()\n",
"path": "lightwood/helpers/log.py"
}
] | [
{
"content": "import logging\nimport os\n\n\ndef initialize_log():\n pid = os.getpid()\n logging.basicConfig()\n log = logging.getLogger(f'lightwood-{pid}')\n log_level = os.environ.get('LIGHTWOOD_LOG', 'DEBUG')\n log.setLevel(log_level)\n return log\n\n\nlog = initialize_log()\n",
"path": "lightwood/helpers/log.py"
}
] | diff --git a/lightwood/helpers/log.py b/lightwood/helpers/log.py
index d25dc4c01..96f893b3e 100644
--- a/lightwood/helpers/log.py
+++ b/lightwood/helpers/log.py
@@ -6,7 +6,8 @@ def initialize_log():
pid = os.getpid()
logging.basicConfig()
log = logging.getLogger(f'lightwood-{pid}')
- log.setLevel(logging.DEBUG)
+ log_level = os.environ.get('LIGHTWOOD_LOG', 'DEBUG')
+ log.setLevel(log_level)
return log
|
crytic__slither-1108 | [Bug]: Infinite loop in RTLO detector
### What happened?
Slither hangs on this code indefinitely.
### Can you share code with us to reproduce this bug?
https://github.com/ethereum/solidity/blob/develop/test/libsolidity/syntaxTests/comments/multiline_unicode_direction_override_5.sol
### Version
0.8.2
### Relevant log output
_No response_
| [
{
"content": "import re\nfrom slither.detectors.abstract_detector import AbstractDetector, DetectorClassification\n\n\nclass RightToLeftOverride(AbstractDetector):\n \"\"\"\n Detect the usage of a Right-To-Left-Override (U+202E) character\n \"\"\"\n\n ARGUMENT = \"rtlo\"\n HELP = \"Right-To-Left-Override control character is used\"\n IMPACT = DetectorClassification.HIGH\n CONFIDENCE = DetectorClassification.HIGH\n\n WIKI = \"https://github.com/crytic/slither/wiki/Detector-Documentation#right-to-left-override-character\"\n WIKI_TITLE = \"Right-to-Left-Override character\"\n WIKI_DESCRIPTION = \"An attacker can manipulate the logic of the contract by using a right-to-left-override character (`U+202E)`.\"\n\n # region wiki_exploit_scenario\n WIKI_EXPLOIT_SCENARIO = \"\"\"\n```solidity\ncontract Token\n{\n\n address payable o; // owner\n mapping(address => uint) tokens;\n\n function withdraw() external returns(uint)\n {\n uint amount = tokens[msg.sender];\n address payable d = msg.sender;\n tokens[msg.sender] = 0;\n _withdraw(/*owner/*noitanitsed*/ d, o/*\n\t\t /*value */, amount);\n }\n\n function _withdraw(address payable fee_receiver, address payable destination, uint value) internal\n {\n\t\tfee_receiver.transfer(1);\n\t\tdestination.transfer(value);\n }\n}\n```\n\n`Token` uses the right-to-left-override character when calling `_withdraw`. As a result, the fee is incorrectly sent to `msg.sender`, and the token balance is sent to the owner.\n\n\"\"\"\n # endregion wiki_exploit_scenario\n\n WIKI_RECOMMENDATION = \"Special control characters must not be allowed.\"\n\n RTLO_CHARACTER_ENCODED = \"\\u202e\".encode(\"utf-8\")\n STANDARD_JSON = False\n\n def _detect(self):\n results = []\n pattern = re.compile(\".*\\u202e.*\".encode(\"utf-8\"))\n\n for filename, source in self.slither.source_code.items():\n # Attempt to find all RTLO characters in this source file.\n original_source_encoded = source.encode(\"utf-8\")\n start_index = 0\n\n # Keep searching all file contents for the character.\n while True:\n source_encoded = original_source_encoded[start_index:]\n result_index = source_encoded.find(self.RTLO_CHARACTER_ENCODED)\n\n # If we couldn't find the character in the remainder of source, stop.\n if result_index == -1:\n break\n\n # We found another instance of the character, define our output\n idx = start_index + result_index\n\n relative = self.slither.crytic_compile.filename_lookup(filename).relative\n info = f\"{relative} contains a unicode right-to-left-override character at byte offset {idx}:\\n\"\n\n # We have a patch, so pattern.find will return at least one result\n\n info += f\"\\t- {pattern.findall(source_encoded)[0]}\\n\"\n res = self.generate_result(info)\n res.add_other(\n \"rtlo-character\",\n (filename, idx, len(self.RTLO_CHARACTER_ENCODED)),\n self.compilation_unit,\n )\n results.append(res)\n\n # Advance the start index for the next iteration\n start_index = result_index + 1\n\n return results\n",
"path": "slither/detectors/source/rtlo.py"
}
] | [
{
"content": "import re\nfrom slither.detectors.abstract_detector import AbstractDetector, DetectorClassification\n\n\nclass RightToLeftOverride(AbstractDetector):\n \"\"\"\n Detect the usage of a Right-To-Left-Override (U+202E) character\n \"\"\"\n\n ARGUMENT = \"rtlo\"\n HELP = \"Right-To-Left-Override control character is used\"\n IMPACT = DetectorClassification.HIGH\n CONFIDENCE = DetectorClassification.HIGH\n\n WIKI = \"https://github.com/crytic/slither/wiki/Detector-Documentation#right-to-left-override-character\"\n WIKI_TITLE = \"Right-to-Left-Override character\"\n WIKI_DESCRIPTION = \"An attacker can manipulate the logic of the contract by using a right-to-left-override character (`U+202E)`.\"\n\n # region wiki_exploit_scenario\n WIKI_EXPLOIT_SCENARIO = \"\"\"\n```solidity\ncontract Token\n{\n\n address payable o; // owner\n mapping(address => uint) tokens;\n\n function withdraw() external returns(uint)\n {\n uint amount = tokens[msg.sender];\n address payable d = msg.sender;\n tokens[msg.sender] = 0;\n _withdraw(/*owner/*noitanitsed*/ d, o/*\n\t\t /*value */, amount);\n }\n\n function _withdraw(address payable fee_receiver, address payable destination, uint value) internal\n {\n\t\tfee_receiver.transfer(1);\n\t\tdestination.transfer(value);\n }\n}\n```\n\n`Token` uses the right-to-left-override character when calling `_withdraw`. As a result, the fee is incorrectly sent to `msg.sender`, and the token balance is sent to the owner.\n\n\"\"\"\n # endregion wiki_exploit_scenario\n\n WIKI_RECOMMENDATION = \"Special control characters must not be allowed.\"\n\n RTLO_CHARACTER_ENCODED = \"\\u202e\".encode(\"utf-8\")\n STANDARD_JSON = False\n\n def _detect(self):\n results = []\n pattern = re.compile(\".*\\u202e.*\".encode(\"utf-8\"))\n\n for filename, source in self.slither.source_code.items():\n # Attempt to find all RTLO characters in this source file.\n original_source_encoded = source.encode(\"utf-8\")\n start_index = 0\n\n # Keep searching all file contents for the character.\n while True:\n source_encoded = original_source_encoded[start_index:]\n result_index = source_encoded.find(self.RTLO_CHARACTER_ENCODED)\n\n # If we couldn't find the character in the remainder of source, stop.\n if result_index == -1:\n break\n\n # We found another instance of the character, define our output\n idx = start_index + result_index\n\n relative = self.slither.crytic_compile.filename_lookup(filename).relative\n info = f\"{relative} contains a unicode right-to-left-override character at byte offset {idx}:\\n\"\n\n # We have a patch, so pattern.find will return at least one result\n\n info += f\"\\t- {pattern.findall(source_encoded)[0]}\\n\"\n res = self.generate_result(info)\n res.add_other(\n \"rtlo-character\",\n (filename, idx, len(self.RTLO_CHARACTER_ENCODED)),\n self.compilation_unit,\n )\n results.append(res)\n\n # Advance the start index for the next iteration\n start_index = idx + 1\n\n return results\n",
"path": "slither/detectors/source/rtlo.py"
}
] | diff --git a/slither/detectors/source/rtlo.py b/slither/detectors/source/rtlo.py
index 904f2d2e39..df1f265952 100644
--- a/slither/detectors/source/rtlo.py
+++ b/slither/detectors/source/rtlo.py
@@ -88,6 +88,6 @@ def _detect(self):
results.append(res)
# Advance the start index for the next iteration
- start_index = result_index + 1
+ start_index = idx + 1
return results
diff --git a/tests/detectors/rtlo/0.8.0/unicode_direction_override.sol b/tests/detectors/rtlo/0.8.0/unicode_direction_override.sol
new file mode 100644
index 0000000000..80f312986d
--- /dev/null
+++ b/tests/detectors/rtlo/0.8.0/unicode_direction_override.sol
@@ -0,0 +1,11 @@
+pragma solidity ^0.8.0;
+contract my_contract {
+ function empty_func() external pure
+ {
+ // The string below contains 3 RLO and 3 PDF unicode characters
+ // RLO is U+202E and changes the print direction to right-to-left
+ // PDF is U+202C and restores the print direction to what it was before RLO
+ /*ok aaabbbcccdddeee*/
+ }
+}
+// ----
\ No newline at end of file
diff --git a/tests/detectors/rtlo/0.8.0/unicode_direction_override.sol.0.8.0.RightToLeftOverride.json b/tests/detectors/rtlo/0.8.0/unicode_direction_override.sol.0.8.0.RightToLeftOverride.json
new file mode 100644
index 0000000000..97160fb1f5
--- /dev/null
+++ b/tests/detectors/rtlo/0.8.0/unicode_direction_override.sol.0.8.0.RightToLeftOverride.json
@@ -0,0 +1,91 @@
+[
+ [
+ {
+ "elements": [
+ {
+ "type": "other",
+ "name": "rtlo-character",
+ "source_mapping": {
+ "start": 336,
+ "length": 3,
+ "filename_used": "/GENERIC_PATH",
+ "filename_relative": "tests/detectors/rtlo/0.8.0/unicode_direction_override.sol",
+ "filename_absolute": "/GENERIC_PATH",
+ "filename_short": "tests/detectors/rtlo/0.8.0/unicode_direction_override.sol",
+ "is_dependency": false,
+ "lines": [
+ 8
+ ],
+ "starting_column": 14,
+ "ending_column": 17
+ }
+ }
+ ],
+ "description": "tests/detectors/rtlo/0.8.0/unicode_direction_override.sol contains a unicode right-to-left-override character at byte offset 336:\n\t- b' /*ok \\xe2\\x80\\xaeaaa\\xe2\\x80\\xaebbb\\xe2\\x80\\xaeccc\\xe2\\x80\\xacddd\\xe2\\x80\\xaceee\\xe2\\x80\\xac*/'\n",
+ "markdown": "tests/detectors/rtlo/0.8.0/unicode_direction_override.sol contains a unicode right-to-left-override character at byte offset 336:\n\t- b' /*ok \\xe2\\x80\\xaeaaa\\xe2\\x80\\xaebbb\\xe2\\x80\\xaeccc\\xe2\\x80\\xacddd\\xe2\\x80\\xaceee\\xe2\\x80\\xac*/'\n",
+ "first_markdown_element": "",
+ "id": "2407672dea557be27d0c488ba9c714e6a7f21dd3f7759058e718c1984e142f95",
+ "check": "rtlo",
+ "impact": "High",
+ "confidence": "High"
+ },
+ {
+ "elements": [
+ {
+ "type": "other",
+ "name": "rtlo-character",
+ "source_mapping": {
+ "start": 348,
+ "length": 3,
+ "filename_used": "/GENERIC_PATH",
+ "filename_relative": "tests/detectors/rtlo/0.8.0/unicode_direction_override.sol",
+ "filename_absolute": "/GENERIC_PATH",
+ "filename_short": "tests/detectors/rtlo/0.8.0/unicode_direction_override.sol",
+ "is_dependency": false,
+ "lines": [
+ 8
+ ],
+ "starting_column": 26,
+ "ending_column": 29
+ }
+ }
+ ],
+ "description": "tests/detectors/rtlo/0.8.0/unicode_direction_override.sol contains a unicode right-to-left-override character at byte offset 348:\n\t- b'\\x80\\xaebbb\\xe2\\x80\\xaeccc\\xe2\\x80\\xacddd\\xe2\\x80\\xaceee\\xe2\\x80\\xac*/'\n",
+ "markdown": "tests/detectors/rtlo/0.8.0/unicode_direction_override.sol contains a unicode right-to-left-override character at byte offset 348:\n\t- b'\\x80\\xaebbb\\xe2\\x80\\xaeccc\\xe2\\x80\\xacddd\\xe2\\x80\\xaceee\\xe2\\x80\\xac*/'\n",
+ "first_markdown_element": "",
+ "id": "477e54031d4d30d485b9cdc2d7ef3e9ae3de52640364505df8eb9619c2bcde6b",
+ "check": "rtlo",
+ "impact": "High",
+ "confidence": "High"
+ },
+ {
+ "elements": [
+ {
+ "type": "other",
+ "name": "rtlo-character",
+ "source_mapping": {
+ "start": 342,
+ "length": 3,
+ "filename_used": "/GENERIC_PATH",
+ "filename_relative": "tests/detectors/rtlo/0.8.0/unicode_direction_override.sol",
+ "filename_absolute": "/GENERIC_PATH",
+ "filename_short": "tests/detectors/rtlo/0.8.0/unicode_direction_override.sol",
+ "is_dependency": false,
+ "lines": [
+ 8
+ ],
+ "starting_column": 20,
+ "ending_column": 23
+ }
+ }
+ ],
+ "description": "tests/detectors/rtlo/0.8.0/unicode_direction_override.sol contains a unicode right-to-left-override character at byte offset 342:\n\t- b'\\x80\\xaeaaa\\xe2\\x80\\xaebbb\\xe2\\x80\\xaeccc\\xe2\\x80\\xacddd\\xe2\\x80\\xaceee\\xe2\\x80\\xac*/'\n",
+ "markdown": "tests/detectors/rtlo/0.8.0/unicode_direction_override.sol contains a unicode right-to-left-override character at byte offset 342:\n\t- b'\\x80\\xaeaaa\\xe2\\x80\\xaebbb\\xe2\\x80\\xaeccc\\xe2\\x80\\xacddd\\xe2\\x80\\xaceee\\xe2\\x80\\xac*/'\n",
+ "first_markdown_element": "",
+ "id": "9dd23585bb0ff1f244f749281b27f62978e0bb5b0ae58c8c9cb6d3f9c7e82253",
+ "check": "rtlo",
+ "impact": "High",
+ "confidence": "High"
+ }
+ ]
+]
\ No newline at end of file
diff --git a/tests/test_detectors.py b/tests/test_detectors.py
index 7b5fd993c6..f7884d68f0 100644
--- a/tests/test_detectors.py
+++ b/tests/test_detectors.py
@@ -724,6 +724,11 @@ def id_test(test_item: Test):
"right_to_left_override.sol",
"0.6.11",
),
+ Test(
+ all_detectors.RightToLeftOverride,
+ "unicode_direction_override.sol",
+ "0.8.0",
+ ),
Test(all_detectors.VoidConstructor, "void-cst.sol", "0.4.25"),
Test(all_detectors.VoidConstructor, "void-cst.sol", "0.5.16"),
Test(all_detectors.VoidConstructor, "void-cst.sol", "0.6.11"),
|
python-discord__site-402 | Replace Allauth anti-email monkey-patch with proper settings.
We use the allauth extension for Discord login and for connecting a GitHub account to any of our site accounts.
We have a [monkeypatch](https://github.com/python-discord/site/blob/master/pydis_site/apps/home/apps.py#L17-L38) to avoid saving in our database any email details that we may get from OAuth authorisations. This does not stop the email request from appearing on the auth request page, though, as the scope is still requested.
Instead, we can define in settings.py the appropriate provider settings, particularly any required scopes to be requested. If we only provide `identify` for the discord one, it won't add the `email` scope in the auth request page in future, making it a cleaner and more appropriate solution.
The setting would look like:
```py
SOCIALACCOUNT_PROVIDERS = {
'discord': {
'SCOPE': [
'identify',
],
}
}
```
The relevant scope setting for GitHub can be given as well; we just need to look up which scopes to restrict it to in order to avoid storing unnecessary sensitive data.
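For illustration, a matching entry for the GitHub provider could sit alongside the Discord one; the `read:user` scope below is only an assumption and would need to be confirmed as the minimal scope the site actually needs:

```py
SOCIALACCOUNT_PROVIDERS = {
    'discord': {
        'SCOPE': [
            'identify',
        ],
    },
    # Hypothetical GitHub entry -- the exact scope list still needs to be decided.
    'github': {
        'SCOPE': [
            'read:user',
        ],
    },
}
```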
| [
{
"content": "\"\"\"\nDjango settings for pydis_site project.\n\nGenerated by 'django-admin startproject' using Django 2.1.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/2.1/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/2.1/ref/settings/\n\"\"\"\n\nimport os\nimport secrets\nimport sys\nimport typing\n\nimport environ\nimport sentry_sdk\nfrom django.contrib.messages import constants as messages\nfrom sentry_sdk.integrations.django import DjangoIntegration\n\nfrom pydis_site.constants import GIT_SHA\n\nif typing.TYPE_CHECKING:\n from django.contrib.auth.models import User\n from wiki.models import Article\n\nenv = environ.Env(\n DEBUG=(bool, False),\n SITE_SENTRY_DSN=(str, \"\")\n)\n\nsentry_sdk.init(\n dsn=env('SITE_SENTRY_DSN'),\n integrations=[DjangoIntegration()],\n send_default_pii=True,\n release=f\"pydis-site@{GIT_SHA}\"\n)\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nDEBUG = env('DEBUG')\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nif DEBUG:\n ALLOWED_HOSTS = env.list(\n 'ALLOWED_HOSTS',\n default=[\n 'pythondiscord.local',\n 'api.pythondiscord.local',\n 'admin.pythondiscord.local',\n 'staff.pythondiscord.local',\n '0.0.0.0', # noqa: S104\n 'localhost',\n 'web',\n 'api.web',\n 'admin.web',\n 'staff.web'\n ]\n )\n SECRET_KEY = \"yellow polkadot bikini\" # noqa: S105\n\nelif 'CI' in os.environ:\n ALLOWED_HOSTS = ['*']\n SECRET_KEY = secrets.token_urlsafe(32)\n\nelse:\n ALLOWED_HOSTS = env.list(\n 'ALLOWED_HOSTS',\n default=[\n 'pythondiscord.com',\n 'admin.pythondiscord.com',\n 'api.pythondiscord.com',\n 'staff.pythondiscord.com',\n 'pydis.com',\n 'api.pydis.com',\n 'admin.pydis.com',\n 'staff.pydis.com',\n ]\n )\n SECRET_KEY = env('SECRET_KEY')\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'pydis_site.apps.api',\n 'pydis_site.apps.home',\n 'pydis_site.apps.staff',\n\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.humanize.apps.HumanizeConfig',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.sites.apps.SitesConfig',\n 'django.contrib.staticfiles',\n\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n\n 'allauth.socialaccount.providers.discord',\n 'allauth.socialaccount.providers.github',\n\n 'django_hosts',\n 'django_filters',\n 'django_nyt.apps.DjangoNytConfig',\n 'django_simple_bulma',\n 'mptt',\n 'rest_framework',\n 'rest_framework.authtoken',\n 'sekizai',\n 'sorl.thumbnail',\n\n 'wiki.apps.WikiConfig',\n\n 'wiki.plugins.images.apps.ImagesConfig',\n 'wiki.plugins.links.apps.LinksConfig',\n 'wiki.plugins.redlinks.apps.RedlinksConfig',\n 'wiki.plugins.notifications.apps.NotificationsConfig', # Required for migrations\n]\n\nMIDDLEWARE = [\n 'django_hosts.middleware.HostsRequestMiddleware',\n\n 'django.middleware.security.SecurityMiddleware',\n 'whitenoise.middleware.WhiteNoiseMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n\n 
'django_hosts.middleware.HostsResponseMiddleware',\n]\nROOT_URLCONF = 'pydis_site.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 'pydis_site', 'templates')],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'builtins': [\n 'django_hosts.templatetags.hosts_override',\n ],\n\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.media',\n 'django.template.context_processors.request',\n 'django.template.context_processors.static',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n \"sekizai.context_processors.sekizai\",\n \"pydis_site.context_processors.git_sha_processor\"\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'pydis_site.wsgi.application'\n\n# Database\n# https://docs.djangoproject.com/en/2.1/ref/settings/#databases\n\nDATABASES = {\n 'default': env.db()\n}\n\n# Password validation\n# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/2.1/topics/i18n/\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/2.1/howto/static-files/\n\nSTATIC_URL = '/static/'\nSTATICFILES_DIRS = [os.path.join(BASE_DIR, 'pydis_site', 'static')]\nSTATIC_ROOT = env('STATIC_ROOT', default='/app/staticfiles')\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = env('MEDIA_ROOT', default='/site/media')\n\nSTATICFILES_FINDERS = [\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n\n 'django_simple_bulma.finders.SimpleBulmaFinder',\n]\n\n# django-hosts\n# https://django-hosts.readthedocs.io/en/latest/\nROOT_HOSTCONF = 'pydis_site.hosts'\nDEFAULT_HOST = 'home'\n\nif DEBUG:\n PARENT_HOST = env('PARENT_HOST', default='pythondiscord.local:8000')\n\n if \":\" in PARENT_HOST:\n ALLOWED_HOSTS.append(PARENT_HOST.split(\":\", 1)[0])\n else:\n ALLOWED_HOSTS.append(PARENT_HOST)\nelse:\n PARENT_HOST = env('PARENT_HOST', default='pythondiscord.com')\n\n# Django REST framework\n# http://www.django-rest-framework.org\nREST_FRAMEWORK = {\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework.authentication.TokenAuthentication',\n ),\n 'DEFAULT_PERMISSION_CLASSES': (\n 'rest_framework.permissions.DjangoModelPermissions',\n ),\n 'TEST_REQUEST_DEFAULT_FORMAT': 'json'\n}\n\n# Logging\n# https://docs.djangoproject.com/en/2.1/topics/logging/\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'verbose': {\n 'format': (\n '%(asctime)s | %(process)d:%(thread)d | %(module)s | %(levelname)-8s | %(message)s'\n )\n }\n },\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler'\n },\n 'database': {\n 'class': 'pydis_site.apps.api.dblogger.DatabaseLogHandler'\n }\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console', 'database'],\n 'propagate': True,\n 'level': env(\n 'LOG_LEVEL',\n default=(\n # If there is no explicit `LOG_LEVEL` set,\n # use `DEBUG` if we're running in debug mode but 
not\n # testing. Use `ERROR` if we're running tests, else\n # default to using `WARN`.\n 'INFO'\n if DEBUG and 'test' not in sys.argv\n else (\n 'ERROR'\n if 'test' in sys.argv\n else 'WARN'\n )\n )\n )\n }\n }\n}\n\n# Django Messages framework config\nMESSAGE_TAGS = {\n messages.DEBUG: 'primary',\n messages.INFO: 'info',\n messages.SUCCESS: 'success',\n messages.WARNING: 'warning',\n messages.ERROR: 'danger',\n}\n\n# Custom settings for django-simple-bulma\nBULMA_SETTINGS = {\n \"variables\": { # If you update these colours, please update the notification.css file\n \"primary\": \"#7289DA\", # Discord blurple\n\n # \"orange\": \"\", # Apparently unused, but the default is fine\n # \"yellow\": \"\", # The default yellow looks pretty good\n \"green\": \"#32ac66\", # Colour picked after Discord discussion\n \"turquoise\": \"#7289DA\", # Blurple, because Bulma uses this regardless of `primary` above\n \"blue\": \"#2482c1\", # Colour picked after Discord discussion\n \"cyan\": \"#2482c1\", # Colour picked after Discord discussion (matches the blue)\n \"purple\": \"#aa55e4\", # Apparently unused, but changed for consistency\n \"red\": \"#d63852\", # Colour picked after Discord discussion\n\n \"link\": \"$primary\",\n\n \"dimensions\": \"16 24 32 48 64 96 128 256 512\", # Possible image dimensions\n \"navbar-height\": \"4.75rem\",\n \"footer-padding\": \"1rem 1.5rem 1rem\",\n }\n}\n\n# Required for the wiki\nLOGIN_URL = \"/admin/login\" # Update this when the real login system is in place\nSITE_ID = 1\n\nWIKI_ACCOUNT_HANDLING = False\nWIKI_ACCOUNT_SIGNUP_ALLOWED = False\n\nWIKI_ANONYMOUS = True\nWIKI_ANONYMOUS_WRITE = False\n\nWIKI_MARKDOWN_KWARGS = {\n \"extension_configs\": {\n \"wiki.plugins.macros.mdx.toc\": {\n \"anchorlink\": True,\n \"baselevel\": 2\n }\n }, \"extensions\": [\n \"markdown.extensions.abbr\",\n \"markdown.extensions.attr_list\",\n \"markdown.extensions.extra\",\n \"markdown.extensions.footnotes\",\n \"markdown.extensions.nl2br\",\n \"markdown.extensions.sane_lists\",\n\n \"wiki.core.markdown.mdx.codehilite\",\n \"wiki.core.markdown.mdx.previewlinks\",\n \"wiki.core.markdown.mdx.responsivetable\",\n \"wiki.plugins.macros.mdx.toc\",\n \"wiki.plugins.macros.mdx.wikilinks\",\n ]\n}\n\nWIKI_MESSAGE_TAG_CSS_CLASS = {\n messages.DEBUG: \"\", # is-info isn't distinctive enough from blurple\n messages.ERROR: \"is-danger\",\n messages.INFO: \"is-primary\",\n messages.SUCCESS: \"is-success\",\n messages.WARNING: \"is-warning\",\n}\n\nWIKI_MARKDOWN_SANITIZE_HTML = False\n\n\n# Wiki permissions\n\n\ndef WIKI_CAN_DELETE(article: \"Article\", user: \"User\") -> bool: # noqa: N802\n \"\"\"Check whether a user may delete an article.\"\"\"\n return user.has_perm('wiki.delete_article')\n\n\ndef WIKI_CAN_MODERATE(article: \"Article\", user: \"User\") -> bool: # noqa: N802\n \"\"\"Check whether a user may moderate an article.\"\"\"\n return user.has_perm('wiki.moderate')\n\n\ndef WIKI_CAN_WRITE(article: \"Article\", user: \"User\") -> bool: # noqa: N802\n \"\"\"Check whether a user may create or edit an article.\"\"\"\n return user.has_perm('wiki.change_article')\n\n\n# Django Allauth stuff\n\nAUTHENTICATION_BACKENDS = (\n # Needed to login by username in Django admin, regardless of `allauth`\n 'django.contrib.auth.backends.ModelBackend',\n\n # `allauth` specific authentication methods, such as login by e-mail\n 'allauth.account.auth_backends.AuthenticationBackend',\n)\n\nACCOUNT_ADAPTER = \"pydis_site.utils.account.AccountAdapter\"\nACCOUNT_EMAIL_REQUIRED = False # Undocumented allauth 
setting; don't require emails\nACCOUNT_EMAIL_VERIFICATION = \"none\" # No verification required; we don't use emails for anything\n\n# We use this validator because Allauth won't let us actually supply a list with no validators\n# in it, and we can't just give it a lambda - that'd be too easy, I suppose.\nACCOUNT_USERNAME_VALIDATORS = \"pydis_site.VALIDATORS\"\n\nLOGIN_REDIRECT_URL = \"home\"\nSOCIALACCOUNT_ADAPTER = \"pydis_site.utils.account.SocialAccountAdapter\"\n",
"path": "pydis_site/settings.py"
}
] | [
{
"content": "\"\"\"\nDjango settings for pydis_site project.\n\nGenerated by 'django-admin startproject' using Django 2.1.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/2.1/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/2.1/ref/settings/\n\"\"\"\n\nimport os\nimport secrets\nimport sys\nimport typing\n\nimport environ\nimport sentry_sdk\nfrom django.contrib.messages import constants as messages\nfrom sentry_sdk.integrations.django import DjangoIntegration\n\nfrom pydis_site.constants import GIT_SHA\n\nif typing.TYPE_CHECKING:\n from django.contrib.auth.models import User\n from wiki.models import Article\n\nenv = environ.Env(\n DEBUG=(bool, False),\n SITE_SENTRY_DSN=(str, \"\")\n)\n\nsentry_sdk.init(\n dsn=env('SITE_SENTRY_DSN'),\n integrations=[DjangoIntegration()],\n send_default_pii=True,\n release=f\"pydis-site@{GIT_SHA}\"\n)\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nDEBUG = env('DEBUG')\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nif DEBUG:\n ALLOWED_HOSTS = env.list(\n 'ALLOWED_HOSTS',\n default=[\n 'pythondiscord.local',\n 'api.pythondiscord.local',\n 'admin.pythondiscord.local',\n 'staff.pythondiscord.local',\n '0.0.0.0', # noqa: S104\n 'localhost',\n 'web',\n 'api.web',\n 'admin.web',\n 'staff.web'\n ]\n )\n SECRET_KEY = \"yellow polkadot bikini\" # noqa: S105\n\nelif 'CI' in os.environ:\n ALLOWED_HOSTS = ['*']\n SECRET_KEY = secrets.token_urlsafe(32)\n\nelse:\n ALLOWED_HOSTS = env.list(\n 'ALLOWED_HOSTS',\n default=[\n 'pythondiscord.com',\n 'admin.pythondiscord.com',\n 'api.pythondiscord.com',\n 'staff.pythondiscord.com',\n 'pydis.com',\n 'api.pydis.com',\n 'admin.pydis.com',\n 'staff.pydis.com',\n ]\n )\n SECRET_KEY = env('SECRET_KEY')\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'pydis_site.apps.api',\n 'pydis_site.apps.home',\n 'pydis_site.apps.staff',\n\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.humanize.apps.HumanizeConfig',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.sites.apps.SitesConfig',\n 'django.contrib.staticfiles',\n\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n\n 'allauth.socialaccount.providers.discord',\n 'allauth.socialaccount.providers.github',\n\n 'django_hosts',\n 'django_filters',\n 'django_nyt.apps.DjangoNytConfig',\n 'django_simple_bulma',\n 'mptt',\n 'rest_framework',\n 'rest_framework.authtoken',\n 'sekizai',\n 'sorl.thumbnail',\n\n 'wiki.apps.WikiConfig',\n\n 'wiki.plugins.images.apps.ImagesConfig',\n 'wiki.plugins.links.apps.LinksConfig',\n 'wiki.plugins.redlinks.apps.RedlinksConfig',\n 'wiki.plugins.notifications.apps.NotificationsConfig', # Required for migrations\n]\n\nMIDDLEWARE = [\n 'django_hosts.middleware.HostsRequestMiddleware',\n\n 'django.middleware.security.SecurityMiddleware',\n 'whitenoise.middleware.WhiteNoiseMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n\n 
'django_hosts.middleware.HostsResponseMiddleware',\n]\nROOT_URLCONF = 'pydis_site.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 'pydis_site', 'templates')],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'builtins': [\n 'django_hosts.templatetags.hosts_override',\n ],\n\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.media',\n 'django.template.context_processors.request',\n 'django.template.context_processors.static',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n \"sekizai.context_processors.sekizai\",\n \"pydis_site.context_processors.git_sha_processor\"\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'pydis_site.wsgi.application'\n\n# Database\n# https://docs.djangoproject.com/en/2.1/ref/settings/#databases\n\nDATABASES = {\n 'default': env.db()\n}\n\n# Password validation\n# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/2.1/topics/i18n/\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/2.1/howto/static-files/\n\nSTATIC_URL = '/static/'\nSTATICFILES_DIRS = [os.path.join(BASE_DIR, 'pydis_site', 'static')]\nSTATIC_ROOT = env('STATIC_ROOT', default='/app/staticfiles')\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = env('MEDIA_ROOT', default='/site/media')\n\nSTATICFILES_FINDERS = [\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n\n 'django_simple_bulma.finders.SimpleBulmaFinder',\n]\n\n# django-hosts\n# https://django-hosts.readthedocs.io/en/latest/\nROOT_HOSTCONF = 'pydis_site.hosts'\nDEFAULT_HOST = 'home'\n\nif DEBUG:\n PARENT_HOST = env('PARENT_HOST', default='pythondiscord.local:8000')\n\n if \":\" in PARENT_HOST:\n ALLOWED_HOSTS.append(PARENT_HOST.split(\":\", 1)[0])\n else:\n ALLOWED_HOSTS.append(PARENT_HOST)\nelse:\n PARENT_HOST = env('PARENT_HOST', default='pythondiscord.com')\n\n# Django REST framework\n# http://www.django-rest-framework.org\nREST_FRAMEWORK = {\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework.authentication.TokenAuthentication',\n ),\n 'DEFAULT_PERMISSION_CLASSES': (\n 'rest_framework.permissions.DjangoModelPermissions',\n ),\n 'TEST_REQUEST_DEFAULT_FORMAT': 'json'\n}\n\n# Logging\n# https://docs.djangoproject.com/en/2.1/topics/logging/\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'verbose': {\n 'format': (\n '%(asctime)s | %(process)d:%(thread)d | %(module)s | %(levelname)-8s | %(message)s'\n )\n }\n },\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler'\n },\n 'database': {\n 'class': 'pydis_site.apps.api.dblogger.DatabaseLogHandler'\n }\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console', 'database'],\n 'propagate': True,\n 'level': env(\n 'LOG_LEVEL',\n default=(\n # If there is no explicit `LOG_LEVEL` set,\n # use `DEBUG` if we're running in debug mode but 
not\n # testing. Use `ERROR` if we're running tests, else\n # default to using `WARN`.\n 'INFO'\n if DEBUG and 'test' not in sys.argv\n else (\n 'ERROR'\n if 'test' in sys.argv\n else 'WARN'\n )\n )\n )\n }\n }\n}\n\n# Django Messages framework config\nMESSAGE_TAGS = {\n messages.DEBUG: 'primary',\n messages.INFO: 'info',\n messages.SUCCESS: 'success',\n messages.WARNING: 'warning',\n messages.ERROR: 'danger',\n}\n\n# Custom settings for django-simple-bulma\nBULMA_SETTINGS = {\n \"variables\": { # If you update these colours, please update the notification.css file\n \"primary\": \"#7289DA\", # Discord blurple\n\n # \"orange\": \"\", # Apparently unused, but the default is fine\n # \"yellow\": \"\", # The default yellow looks pretty good\n \"green\": \"#32ac66\", # Colour picked after Discord discussion\n \"turquoise\": \"#7289DA\", # Blurple, because Bulma uses this regardless of `primary` above\n \"blue\": \"#2482c1\", # Colour picked after Discord discussion\n \"cyan\": \"#2482c1\", # Colour picked after Discord discussion (matches the blue)\n \"purple\": \"#aa55e4\", # Apparently unused, but changed for consistency\n \"red\": \"#d63852\", # Colour picked after Discord discussion\n\n \"link\": \"$primary\",\n\n \"dimensions\": \"16 24 32 48 64 96 128 256 512\", # Possible image dimensions\n \"navbar-height\": \"4.75rem\",\n \"footer-padding\": \"1rem 1.5rem 1rem\",\n }\n}\n\n# Required for the wiki\nLOGIN_URL = \"/admin/login\" # Update this when the real login system is in place\nSITE_ID = 1\n\nWIKI_ACCOUNT_HANDLING = False\nWIKI_ACCOUNT_SIGNUP_ALLOWED = False\n\nWIKI_ANONYMOUS = True\nWIKI_ANONYMOUS_WRITE = False\n\nWIKI_MARKDOWN_KWARGS = {\n \"extension_configs\": {\n \"wiki.plugins.macros.mdx.toc\": {\n \"anchorlink\": True,\n \"baselevel\": 2\n }\n }, \"extensions\": [\n \"markdown.extensions.abbr\",\n \"markdown.extensions.attr_list\",\n \"markdown.extensions.extra\",\n \"markdown.extensions.footnotes\",\n \"markdown.extensions.nl2br\",\n \"markdown.extensions.sane_lists\",\n\n \"wiki.core.markdown.mdx.codehilite\",\n \"wiki.core.markdown.mdx.previewlinks\",\n \"wiki.core.markdown.mdx.responsivetable\",\n \"wiki.plugins.macros.mdx.toc\",\n \"wiki.plugins.macros.mdx.wikilinks\",\n ]\n}\n\nWIKI_MESSAGE_TAG_CSS_CLASS = {\n messages.DEBUG: \"\", # is-info isn't distinctive enough from blurple\n messages.ERROR: \"is-danger\",\n messages.INFO: \"is-primary\",\n messages.SUCCESS: \"is-success\",\n messages.WARNING: \"is-warning\",\n}\n\nWIKI_MARKDOWN_SANITIZE_HTML = False\n\n\n# Wiki permissions\n\n\ndef WIKI_CAN_DELETE(article: \"Article\", user: \"User\") -> bool: # noqa: N802\n \"\"\"Check whether a user may delete an article.\"\"\"\n return user.has_perm('wiki.delete_article')\n\n\ndef WIKI_CAN_MODERATE(article: \"Article\", user: \"User\") -> bool: # noqa: N802\n \"\"\"Check whether a user may moderate an article.\"\"\"\n return user.has_perm('wiki.moderate')\n\n\ndef WIKI_CAN_WRITE(article: \"Article\", user: \"User\") -> bool: # noqa: N802\n \"\"\"Check whether a user may create or edit an article.\"\"\"\n return user.has_perm('wiki.change_article')\n\n\n# Django Allauth stuff\n\nAUTHENTICATION_BACKENDS = (\n # Needed to login by username in Django admin, regardless of `allauth`\n 'django.contrib.auth.backends.ModelBackend',\n\n # `allauth` specific authentication methods, such as login by e-mail\n 'allauth.account.auth_backends.AuthenticationBackend',\n)\n\nACCOUNT_ADAPTER = \"pydis_site.utils.account.AccountAdapter\"\nACCOUNT_EMAIL_REQUIRED = False # Undocumented allauth 
setting; don't require emails\nACCOUNT_EMAIL_VERIFICATION = \"none\" # No verification required; we don't use emails for anything\n\n# We use this validator because Allauth won't let us actually supply a list with no validators\n# in it, and we can't just give it a lambda - that'd be too easy, I suppose.\nACCOUNT_USERNAME_VALIDATORS = \"pydis_site.VALIDATORS\"\n\nLOGIN_REDIRECT_URL = \"home\"\nSOCIALACCOUNT_ADAPTER = \"pydis_site.utils.account.SocialAccountAdapter\"\nSOCIALACCOUNT_PROVIDERS = {\n \"discord\": {\n \"SCOPE\": [\n \"identify\",\n ],\n \"AUTH_PARAMS\": {\"prompt\": \"none\"}\n }\n}\n",
"path": "pydis_site/settings.py"
}
] | diff --git a/pydis_site/settings.py b/pydis_site/settings.py
index 1f042c1bb..3769fa253 100644
--- a/pydis_site/settings.py
+++ b/pydis_site/settings.py
@@ -401,3 +401,11 @@ def WIKI_CAN_WRITE(article: "Article", user: "User") -> bool: # noqa: N802
LOGIN_REDIRECT_URL = "home"
SOCIALACCOUNT_ADAPTER = "pydis_site.utils.account.SocialAccountAdapter"
+SOCIALACCOUNT_PROVIDERS = {
+ "discord": {
+ "SCOPE": [
+ "identify",
+ ],
+ "AUTH_PARAMS": {"prompt": "none"}
+ }
+}
|
iterative__dvc-3828 | End of file fixer
I am using an [end-of-file fixer in the pre-commit hook](https://pre-commit.com/hooks.html). It checks that each file ends with a newline.
It looks like files
```
modified: .dvc/plots/confusion.json
modified: .dvc/plots/default.json
modified: .dvc/plots/scatter.json
```
that are automatically created by `dvc init` do not have a newline at the end of the file.
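A minimal sketch of the kind of fix (the diff below applies the same idea inside the plot template `dump()` method) is to write an explicit newline after `json.dump`:

```python
import json

content = {"example": True}  # placeholder content, not DVC's actual template

with open("default.json", "w") as fobj:
    json.dump(content, fobj, indent=4, separators=(",", ": "))
    fobj.write("\n")  # make sure the file ends with a newline for end-of-file checks
```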
| [
{
"content": "import json\nimport logging\nimport os\nimport re\n\nfrom funcy import cached_property\n\nfrom dvc.exceptions import DvcException\nfrom dvc.utils.fs import makedirs\n\nlogger = logging.getLogger(__name__)\n\n\nclass TemplateNotFoundError(DvcException):\n def __init__(self, path):\n super().__init__(f\"Template '{path}' not found.\")\n\n\nclass NoDataForTemplateError(DvcException):\n def __init__(self, template_path):\n super().__init__(\n \"No data provided for '{}'.\".format(os.path.relpath(template_path))\n )\n\n\nclass NoFieldInDataError(DvcException):\n def __init__(self, field_name):\n super().__init__(\n f\"Field '{field_name}' does not exist in provided data.\"\n )\n\n\nclass Template:\n INDENT = 4\n SEPARATORS = (\",\", \": \")\n EXTENSION = \".json\"\n METRIC_DATA_ANCHOR = \"<DVC_METRIC_DATA>\"\n X_ANCHOR = \"<DVC_METRIC_X>\"\n Y_ANCHOR = \"<DVC_METRIC_Y>\"\n TITLE_ANCHOR = \"<DVC_METRIC_TITLE>\"\n X_TITLE_ANCHOR = \"<DVC_METRIC_X_TITLE>\"\n Y_TITLE_ANCHOR = \"<DVC_METRIC_Y_TITLE>\"\n\n def __init__(self, templates_dir):\n self.plot_templates_dir = templates_dir\n\n def dump(self):\n makedirs(self.plot_templates_dir, exist_ok=True)\n\n with open(\n os.path.join(\n self.plot_templates_dir, self.TEMPLATE_NAME + self.EXTENSION\n ),\n \"w\",\n ) as fobj:\n json.dump(\n self.DEFAULT_CONTENT,\n fobj,\n indent=self.INDENT,\n separators=self.SEPARATORS,\n )\n\n @staticmethod\n def get_data_anchor(template_content):\n regex = re.compile('\"<DVC_METRIC_DATA[^>\"]*>\"')\n return regex.findall(template_content)\n\n @staticmethod\n def parse_data_anchors(template_content):\n data_files = {\n Template.get_datafile(m)\n for m in Template.get_data_anchor(template_content)\n }\n return {df for df in data_files if df}\n\n @staticmethod\n def get_datafile(anchor_string):\n return (\n anchor_string.replace(\"<\", \"\")\n .replace(\">\", \"\")\n .replace('\"', \"\")\n .replace(\"DVC_METRIC_DATA\", \"\")\n .replace(\",\", \"\")\n )\n\n @staticmethod\n def fill(\n template_path,\n data,\n priority_datafile=None,\n x_field=None,\n y_field=None,\n title=None,\n x_title=None,\n y_title=None,\n ):\n with open(template_path) as fobj:\n result_content = fobj.read()\n\n if x_field:\n Template._check_field_exists(data, x_field)\n if y_field:\n Template._check_field_exists(data, y_field)\n\n result_content = Template._replace_data_anchors(\n result_content, data, priority_datafile\n )\n\n result_content = Template._replace_metadata_anchors(\n result_content, title, x_field, x_title, y_field, y_title\n )\n\n return result_content\n\n @staticmethod\n def _check_field_exists(data, field):\n for file, data_points in data.items():\n if not any(\n field in data_point.keys() for data_point in data_points\n ):\n raise NoFieldInDataError(field)\n\n @staticmethod\n def _replace_metadata_anchors(\n result_content, title, x_field, x_title, y_field, y_title\n ):\n if Template.TITLE_ANCHOR in result_content:\n if title:\n result_content = result_content.replace(\n Template.TITLE_ANCHOR, title\n )\n else:\n result_content = result_content.replace(\n Template.TITLE_ANCHOR, \"\"\n )\n if Template.X_ANCHOR in result_content and x_field:\n result_content = result_content.replace(Template.X_ANCHOR, x_field)\n if Template.Y_ANCHOR in result_content and y_field:\n result_content = result_content.replace(Template.Y_ANCHOR, y_field)\n if Template.X_TITLE_ANCHOR in result_content:\n if not x_title and x_field:\n x_title = x_field\n result_content = result_content.replace(\n Template.X_TITLE_ANCHOR, x_title\n )\n if 
Template.Y_TITLE_ANCHOR in result_content:\n if not y_title and y_field:\n y_title = y_field\n result_content = result_content.replace(\n Template.Y_TITLE_ANCHOR, y_title\n )\n return result_content\n\n @staticmethod\n def _replace_data_anchors(result_content, data, priority_datafile):\n for anchor in Template.get_data_anchor(result_content):\n file = Template.get_datafile(anchor)\n\n if not file or priority_datafile:\n key = priority_datafile\n else:\n key = file\n\n result_content = result_content.replace(\n anchor,\n json.dumps(\n data[key],\n indent=Template.INDENT,\n separators=Template.SEPARATORS,\n sort_keys=True,\n ),\n )\n return result_content\n\n\nclass DefaultLinearTemplate(Template):\n TEMPLATE_NAME = \"default\"\n\n DEFAULT_CONTENT = {\n \"$schema\": \"https://vega.github.io/schema/vega-lite/v4.json\",\n \"data\": {\"values\": Template.METRIC_DATA_ANCHOR},\n \"title\": Template.TITLE_ANCHOR,\n \"mark\": {\"type\": \"line\"},\n \"encoding\": {\n \"x\": {\n \"field\": Template.X_ANCHOR,\n \"type\": \"quantitative\",\n \"title\": Template.X_TITLE_ANCHOR,\n },\n \"y\": {\n \"field\": Template.Y_ANCHOR,\n \"type\": \"quantitative\",\n \"title\": Template.Y_TITLE_ANCHOR,\n \"scale\": {\"zero\": False},\n },\n \"color\": {\"field\": \"rev\", \"type\": \"nominal\"},\n },\n }\n\n\nclass DefaultConfusionTemplate(Template):\n TEMPLATE_NAME = \"confusion\"\n DEFAULT_CONTENT = {\n \"$schema\": \"https://vega.github.io/schema/vega-lite/v4.json\",\n \"data\": {\"values\": Template.METRIC_DATA_ANCHOR},\n \"title\": Template.TITLE_ANCHOR,\n \"mark\": \"rect\",\n \"encoding\": {\n \"x\": {\n \"field\": Template.X_ANCHOR,\n \"type\": \"nominal\",\n \"sort\": \"ascending\",\n \"title\": Template.X_TITLE_ANCHOR,\n },\n \"y\": {\n \"field\": Template.Y_ANCHOR,\n \"type\": \"nominal\",\n \"sort\": \"ascending\",\n \"title\": Template.Y_TITLE_ANCHOR,\n },\n \"color\": {\"aggregate\": \"count\", \"type\": \"quantitative\"},\n \"facet\": {\"field\": \"rev\", \"type\": \"nominal\"},\n },\n }\n\n\nclass DefaultScatterTemplate(Template):\n TEMPLATE_NAME = \"scatter\"\n DEFAULT_CONTENT = {\n \"$schema\": \"https://vega.github.io/schema/vega-lite/v4.json\",\n \"data\": {\"values\": Template.METRIC_DATA_ANCHOR},\n \"title\": Template.TITLE_ANCHOR,\n \"mark\": \"point\",\n \"encoding\": {\n \"x\": {\n \"field\": Template.X_ANCHOR,\n \"type\": \"quantitative\",\n \"title\": Template.X_TITLE_ANCHOR,\n },\n \"y\": {\n \"field\": Template.Y_ANCHOR,\n \"type\": \"quantitative\",\n \"title\": Template.Y_TITLE_ANCHOR,\n \"scale\": {\"zero\": False},\n },\n \"color\": {\"field\": \"rev\", \"type\": \"nominal\"},\n },\n }\n\n\nclass PlotTemplates:\n TEMPLATES_DIR = \"plots\"\n TEMPLATES = [\n DefaultLinearTemplate,\n DefaultConfusionTemplate,\n DefaultScatterTemplate,\n ]\n\n @cached_property\n def templates_dir(self):\n return os.path.join(self.dvc_dir, self.TEMPLATES_DIR)\n\n @cached_property\n def default_template(self):\n default_plot_path = os.path.join(self.templates_dir, \"default.json\")\n if not os.path.exists(default_plot_path):\n raise TemplateNotFoundError(os.path.relpath(default_plot_path))\n return default_plot_path\n\n def get_template(self, path):\n t_path = os.path.join(self.templates_dir, path)\n if os.path.exists(t_path):\n return t_path\n\n all_templates = [\n os.path.join(root, file)\n for root, _, files in os.walk(self.templates_dir)\n for file in files\n ]\n matches = [\n template\n for template in all_templates\n if os.path.splitext(template)[0] == t_path\n ]\n if matches:\n assert 
len(matches) == 1\n return matches[0]\n\n raise TemplateNotFoundError(path)\n\n def __init__(self, dvc_dir):\n self.dvc_dir = dvc_dir\n\n if not os.path.exists(self.templates_dir):\n makedirs(self.templates_dir, exist_ok=True)\n for t in self.TEMPLATES:\n t(self.templates_dir).dump()\n",
"path": "dvc/repo/plots/template.py"
}
] | [
{
"content": "import json\nimport logging\nimport os\nimport re\n\nfrom funcy import cached_property\n\nfrom dvc.exceptions import DvcException\nfrom dvc.utils.fs import makedirs\n\nlogger = logging.getLogger(__name__)\n\n\nclass TemplateNotFoundError(DvcException):\n def __init__(self, path):\n super().__init__(f\"Template '{path}' not found.\")\n\n\nclass NoDataForTemplateError(DvcException):\n def __init__(self, template_path):\n super().__init__(\n \"No data provided for '{}'.\".format(os.path.relpath(template_path))\n )\n\n\nclass NoFieldInDataError(DvcException):\n def __init__(self, field_name):\n super().__init__(\n f\"Field '{field_name}' does not exist in provided data.\"\n )\n\n\nclass Template:\n INDENT = 4\n SEPARATORS = (\",\", \": \")\n EXTENSION = \".json\"\n METRIC_DATA_ANCHOR = \"<DVC_METRIC_DATA>\"\n X_ANCHOR = \"<DVC_METRIC_X>\"\n Y_ANCHOR = \"<DVC_METRIC_Y>\"\n TITLE_ANCHOR = \"<DVC_METRIC_TITLE>\"\n X_TITLE_ANCHOR = \"<DVC_METRIC_X_TITLE>\"\n Y_TITLE_ANCHOR = \"<DVC_METRIC_Y_TITLE>\"\n\n def __init__(self, templates_dir):\n self.plot_templates_dir = templates_dir\n\n def dump(self):\n makedirs(self.plot_templates_dir, exist_ok=True)\n\n with open(\n os.path.join(\n self.plot_templates_dir, self.TEMPLATE_NAME + self.EXTENSION\n ),\n \"w\",\n ) as fobj:\n json.dump(\n self.DEFAULT_CONTENT,\n fobj,\n indent=self.INDENT,\n separators=self.SEPARATORS,\n )\n fobj.write(\"\\n\")\n\n @staticmethod\n def get_data_anchor(template_content):\n regex = re.compile('\"<DVC_METRIC_DATA[^>\"]*>\"')\n return regex.findall(template_content)\n\n @staticmethod\n def parse_data_anchors(template_content):\n data_files = {\n Template.get_datafile(m)\n for m in Template.get_data_anchor(template_content)\n }\n return {df for df in data_files if df}\n\n @staticmethod\n def get_datafile(anchor_string):\n return (\n anchor_string.replace(\"<\", \"\")\n .replace(\">\", \"\")\n .replace('\"', \"\")\n .replace(\"DVC_METRIC_DATA\", \"\")\n .replace(\",\", \"\")\n )\n\n @staticmethod\n def fill(\n template_path,\n data,\n priority_datafile=None,\n x_field=None,\n y_field=None,\n title=None,\n x_title=None,\n y_title=None,\n ):\n with open(template_path) as fobj:\n result_content = fobj.read()\n\n if x_field:\n Template._check_field_exists(data, x_field)\n if y_field:\n Template._check_field_exists(data, y_field)\n\n result_content = Template._replace_data_anchors(\n result_content, data, priority_datafile\n )\n\n result_content = Template._replace_metadata_anchors(\n result_content, title, x_field, x_title, y_field, y_title\n )\n\n return result_content\n\n @staticmethod\n def _check_field_exists(data, field):\n for file, data_points in data.items():\n if not any(\n field in data_point.keys() for data_point in data_points\n ):\n raise NoFieldInDataError(field)\n\n @staticmethod\n def _replace_metadata_anchors(\n result_content, title, x_field, x_title, y_field, y_title\n ):\n if Template.TITLE_ANCHOR in result_content:\n if title:\n result_content = result_content.replace(\n Template.TITLE_ANCHOR, title\n )\n else:\n result_content = result_content.replace(\n Template.TITLE_ANCHOR, \"\"\n )\n if Template.X_ANCHOR in result_content and x_field:\n result_content = result_content.replace(Template.X_ANCHOR, x_field)\n if Template.Y_ANCHOR in result_content and y_field:\n result_content = result_content.replace(Template.Y_ANCHOR, y_field)\n if Template.X_TITLE_ANCHOR in result_content:\n if not x_title and x_field:\n x_title = x_field\n result_content = result_content.replace(\n Template.X_TITLE_ANCHOR, 
x_title\n )\n if Template.Y_TITLE_ANCHOR in result_content:\n if not y_title and y_field:\n y_title = y_field\n result_content = result_content.replace(\n Template.Y_TITLE_ANCHOR, y_title\n )\n return result_content\n\n @staticmethod\n def _replace_data_anchors(result_content, data, priority_datafile):\n for anchor in Template.get_data_anchor(result_content):\n file = Template.get_datafile(anchor)\n\n if not file or priority_datafile:\n key = priority_datafile\n else:\n key = file\n\n result_content = result_content.replace(\n anchor,\n json.dumps(\n data[key],\n indent=Template.INDENT,\n separators=Template.SEPARATORS,\n sort_keys=True,\n ),\n )\n return result_content\n\n\nclass DefaultLinearTemplate(Template):\n TEMPLATE_NAME = \"default\"\n\n DEFAULT_CONTENT = {\n \"$schema\": \"https://vega.github.io/schema/vega-lite/v4.json\",\n \"data\": {\"values\": Template.METRIC_DATA_ANCHOR},\n \"title\": Template.TITLE_ANCHOR,\n \"mark\": {\"type\": \"line\"},\n \"encoding\": {\n \"x\": {\n \"field\": Template.X_ANCHOR,\n \"type\": \"quantitative\",\n \"title\": Template.X_TITLE_ANCHOR,\n },\n \"y\": {\n \"field\": Template.Y_ANCHOR,\n \"type\": \"quantitative\",\n \"title\": Template.Y_TITLE_ANCHOR,\n \"scale\": {\"zero\": False},\n },\n \"color\": {\"field\": \"rev\", \"type\": \"nominal\"},\n },\n }\n\n\nclass DefaultConfusionTemplate(Template):\n TEMPLATE_NAME = \"confusion\"\n DEFAULT_CONTENT = {\n \"$schema\": \"https://vega.github.io/schema/vega-lite/v4.json\",\n \"data\": {\"values\": Template.METRIC_DATA_ANCHOR},\n \"title\": Template.TITLE_ANCHOR,\n \"mark\": \"rect\",\n \"encoding\": {\n \"x\": {\n \"field\": Template.X_ANCHOR,\n \"type\": \"nominal\",\n \"sort\": \"ascending\",\n \"title\": Template.X_TITLE_ANCHOR,\n },\n \"y\": {\n \"field\": Template.Y_ANCHOR,\n \"type\": \"nominal\",\n \"sort\": \"ascending\",\n \"title\": Template.Y_TITLE_ANCHOR,\n },\n \"color\": {\"aggregate\": \"count\", \"type\": \"quantitative\"},\n \"facet\": {\"field\": \"rev\", \"type\": \"nominal\"},\n },\n }\n\n\nclass DefaultScatterTemplate(Template):\n TEMPLATE_NAME = \"scatter\"\n DEFAULT_CONTENT = {\n \"$schema\": \"https://vega.github.io/schema/vega-lite/v4.json\",\n \"data\": {\"values\": Template.METRIC_DATA_ANCHOR},\n \"title\": Template.TITLE_ANCHOR,\n \"mark\": \"point\",\n \"encoding\": {\n \"x\": {\n \"field\": Template.X_ANCHOR,\n \"type\": \"quantitative\",\n \"title\": Template.X_TITLE_ANCHOR,\n },\n \"y\": {\n \"field\": Template.Y_ANCHOR,\n \"type\": \"quantitative\",\n \"title\": Template.Y_TITLE_ANCHOR,\n \"scale\": {\"zero\": False},\n },\n \"color\": {\"field\": \"rev\", \"type\": \"nominal\"},\n },\n }\n\n\nclass PlotTemplates:\n TEMPLATES_DIR = \"plots\"\n TEMPLATES = [\n DefaultLinearTemplate,\n DefaultConfusionTemplate,\n DefaultScatterTemplate,\n ]\n\n @cached_property\n def templates_dir(self):\n return os.path.join(self.dvc_dir, self.TEMPLATES_DIR)\n\n @cached_property\n def default_template(self):\n default_plot_path = os.path.join(self.templates_dir, \"default.json\")\n if not os.path.exists(default_plot_path):\n raise TemplateNotFoundError(os.path.relpath(default_plot_path))\n return default_plot_path\n\n def get_template(self, path):\n t_path = os.path.join(self.templates_dir, path)\n if os.path.exists(t_path):\n return t_path\n\n all_templates = [\n os.path.join(root, file)\n for root, _, files in os.walk(self.templates_dir)\n for file in files\n ]\n matches = [\n template\n for template in all_templates\n if os.path.splitext(template)[0] == t_path\n ]\n if matches:\n 
assert len(matches) == 1\n return matches[0]\n\n raise TemplateNotFoundError(path)\n\n def __init__(self, dvc_dir):\n self.dvc_dir = dvc_dir\n\n if not os.path.exists(self.templates_dir):\n makedirs(self.templates_dir, exist_ok=True)\n for t in self.TEMPLATES:\n t(self.templates_dir).dump()\n",
"path": "dvc/repo/plots/template.py"
}
] | diff --git a/dvc/repo/plots/template.py b/dvc/repo/plots/template.py
index 0bfe6b415a..731897f5d2 100644
--- a/dvc/repo/plots/template.py
+++ b/dvc/repo/plots/template.py
@@ -59,6 +59,7 @@ def dump(self):
indent=self.INDENT,
separators=self.SEPARATORS,
)
+ fobj.write("\n")
@staticmethod
def get_data_anchor(template_content):
|
pypa__pipenv-2111 | Newly added / changed sources not used
When an environment variable source (possibly any source) is updated in the Pipfile, the new source isn't used for resolution when `pipenv install` or `pipenv lock` is next run.
See for example:
<details><summary>Pipfile</summary>
```
[[source]]
url = "https://pypi.python.org/${ENV_VAR}"
verify_ssl = true
[dev-packages]
pytest = "==3.4.0"
[packages]
requests = "==2.18.0"
```
</details>
<details><summary>Pipfile.lock</summary>
```
{
"_meta": {
"hash": {
"sha256": "5f70d907b20123fa92bd105fff99886abbf573b68009a4eb8dfd3e18144ab001"
},
"pipfile-spec": 6,
"requires": {},
"sources": [
{
"url": "https://pypi.python.org/${ENV_VAR}",
"verify_ssl": true
}
]
},
"default": {
"certifi": {
"hashes": [
"sha256:13e698f54293db9f89122b0581843a782ad0934a4fe0172d2a980ba77fc61bb7",
"sha256:9fa520c1bacfb634fa7af20a76bcbd3d5fb390481724c597da32c719a7dca4b0"
],
"version": "==2018.4.16"
},
"chardet": {
"hashes": [
"sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae",
"sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691"
],
"version": "==3.0.4"
},
"idna": {
"hashes": [
"sha256:3cb5ce08046c4e3a560fc02f138d0ac63e00f8ce5901a56b32ec8b7994082aab",
"sha256:cc19709fd6d0cbfed39ea875d29ba6d4e22c0cebc510a76d6302a28385e8bb70"
],
"version": "==2.5"
},
"requests": {
"hashes": [
"sha256:5e88d64aa56ac0fda54e77fb9762ebc65879e171b746d5479a33c4082519d6c6",
"sha256:cd0189f962787284bff715fddaad478eb4d9c15aa167bd64e52ea0f661e7ea5c"
],
"version": "==2.18.0"
},
"urllib3": {
"hashes": [
"sha256:8ed6d5c1ff9d6ba84677310060d6a3a78ca3072ce0684cb3c645023009c114b1",
"sha256:b14486978518ca0901a76ba973d7821047409d7f726f22156b24e83fd71382a5"
],
"version": "==1.21.1"
}
},
"develop": {
"attrs": {
"hashes": [
"sha256:1c7960ccfd6a005cd9f7ba884e6316b5e430a3f1a6c37c5f87d8b43f83b54ec9",
"sha256:a17a9573a6f475c99b551c0e0a812707ddda1ec9653bed04c13841404ed6f450"
],
"version": "==17.4.0"
},
"funcsigs": {
"hashes": [
"sha256:330cc27ccbf7f1e992e69fef78261dc7c6569012cf397db8d3de0234e6c937ca",
"sha256:a7bb0f2cf3a3fd1ab2732cb49eba4252c2af4240442415b4abce3b87022a8f50"
],
"markers": "python_version < '3.0'",
"version": "==1.0.2"
},
"pluggy": {
"hashes": [
"sha256:7f8ae7f5bdf75671a718d2daf0a64b7885f74510bcd98b1a0bb420eb9a9d0cff",
"sha256:d345c8fe681115900d6da8d048ba67c25df42973bda370783cd58826442dcd7c",
"sha256:e160a7fcf25762bb60efc7e171d4497ff1d8d2d75a3d0df7a21b76821ecbf5c5"
],
"version": "==0.6.0"
},
"py": {
"hashes": [
"sha256:29c9fab495d7528e80ba1e343b958684f4ace687327e6f789a94bf3d1915f881",
"sha256:983f77f3331356039fdd792e9220b7b8ee1aa6bd2b25f567a963ff1de5a64f6a"
],
"version": "==1.5.3"
},
"pytest": {
"hashes": [
"sha256:6074ea3b9c999bd6d0df5fa9d12dd95ccd23550df2a582f5f5b848331d2e82ca",
"sha256:95fa025cd6deb5d937e04e368a00552332b58cae23f63b76c8c540ff1733ab6d"
],
"version": "==3.4.0"
},
"six": {
"hashes": [
"sha256:70e8a77beed4562e7f14fe23a786b54f6296e34344c23bc42f07b15018ff98e9",
"sha256:832dc0e10feb1aa2c68dcc57dbb658f1c7e65b9b61af69048abc87a2db00a0eb"
],
"version": "==1.11.0"
}
}
}
```
</details>
<br>
Try updating the source in the Pipfile above to `"https://pypi.python.org/${ENV_VAR}"` and installation will still fail, complaining that `https://pypi.python.org/${ENV_VAR}` isn't reachable.
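For context, the `${ENV_VAR}` placeholder in a source URL is meant to be filled in from the environment before the index is contacted; a generic illustration of that kind of expansion (not pipenv's exact code path) is:

```python
import os

os.environ["ENV_VAR"] = "simple"
url = "https://pypi.python.org/${ENV_VAR}"
print(os.path.expandvars(url))  # -> https://pypi.python.org/simple
```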
<details><summary>$ python -m pipenv.help output</summary>
Pipenv version: `'11.10.1'`
Pipenv location: `'/Users/greysteil/code/pipenv/pipenv'`
Python location: `'/Users/greysteil/.pyenv/versions/3.6.5/bin/python3'`
Other Python installations in `PATH`:
- `2.6`: `/usr/bin/python2.6`
- `2.6`: `/usr/bin/python2.6`
- `2.7`: `/Users/greysteil/.pyenv/shims/python2.7`
- `2.7`: `/Users/greysteil/.pyenv/shims/python2.7`
- `2.7`: `/usr/bin/python2.7`
- `3.5`: `/Users/greysteil/.pyenv/shims/python3.5`
- `3.6`: `/Users/greysteil/.pyenv/versions/3.6.5/bin/python3.6m`
- `3.6`: `/Users/greysteil/.pyenv/versions/3.6.5/bin/python3.6`
- `3.6`: `/Users/greysteil/.pyenv/shims/python3.6`
- `3.6`: `/usr/local/bin/python3.6`
- `3.6`: `/usr/local/bin/python3.6`
- `3.6.5`: `/Users/greysteil/.pyenv/versions/3.6.5/bin/python`
- `3.6.5`: `/Users/greysteil/.pyenv/shims/python`
- `3.6.5`: `/usr/local/bin/python`
- `3.6.5`: `/usr/local/bin/python`
- `2.7.10`: `/usr/bin/python`
- `None`: `/Users/greysteil/.pyenv/shims/python2`
- `3.6.5`: `/Users/greysteil/.pyenv/versions/3.6.5/bin/python3`
- `3.6.5`: `/Users/greysteil/.pyenv/shims/python3`
- `3.6.5`: `/usr/local/bin/python3`
- `3.6.5`: `/usr/local/bin/python3`
PEP 508 Information:
```
{'implementation_name': 'cpython',
'implementation_version': '3.6.5',
'os_name': 'posix',
'platform_machine': 'x86_64',
'platform_python_implementation': 'CPython',
'platform_release': '16.7.0',
'platform_system': 'Darwin',
'platform_version': 'Darwin Kernel Version 16.7.0: Wed Oct 4 00:17:00 PDT '
'2017; root:xnu-3789.71.6~1/RELEASE_X86_64',
'python_full_version': '3.6.5',
'python_version': '3.6',
'sys_platform': 'darwin'}
```
System environment variables:
- `TERM_PROGRAM`
- `PYENV_ROOT`
- `SHELL`
- `TERM`
- `CLICOLOR`
- `TMPDIR`
- `Apple_PubSub_Socket_Render`
- `TERM_PROGRAM_VERSION`
- `TERM_SESSION_ID`
- `PYENV_VERSION`
- `USER`
- `SSH_AUTH_SOCK`
- `PYENV_DIR`
- `__CF_USER_TEXT_ENCODING`
- `LSCOLORS`
- `PATH`
- `PWD`
- `EDITOR`
- `LANG`
- `PYENV_HOOK_PATH`
- `XPC_FLAGS`
- `RBENV_SHELL`
- `XPC_SERVICE_NAME`
- `SHLVL`
- `HOME`
- `PYENV_SHELL`
- `LOGNAME`
- `SECURITYSESSIONID`
- `PYTHONDONTWRITEBYTECODE`
- `PIP_PYTHON_PATH`
Pipenv–specific environment variables:
Debug–specific environment variables:
- `PATH`: `/Users/greysteil/.pyenv/versions/3.6.5/bin:/usr/local/Cellar/pyenv/1.2.3/libexec:/Users/greysteil/.pyenv/plugins/pyenv-virtualenv/bin:/Users/greysteil/.pyenv/plugins/pyenv-update/bin:/Users/greysteil/.pyenv/plugins/pyenv-installer/bin:/Users/greysteil/.pyenv/plugins/pyenv-doctor/bin:/Users/greysteil/.pyenv/shims:/Users/greysteil/.pyenv/bin:/Users/greysteil/.cargo/bin:/usr/local/heroku/bin:/Users/greysteil/.rbenv/shims:/usr/local/bin:./node_modules/.bin:.bundle/binstubs:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/git/bin:/Library/TeX/texbin:/usr/local/sbin`
- `SHELL`: `/bin/bash`
- `EDITOR`: `subl -w`
- `LANG`: `en_GB.UTF-8`
- `PWD`: `/Users/greysteil/code/python-test`
---------------------------
Contents of `Pipfile` ('/Users/greysteil/code/python-test/Pipfile'):
```toml
[[source]]
url = "https://pypi.python.org/simple"
verify_ssl = true
[dev-packages]
pytest = "==3.4.0"
[packages]
requests = "==2.18.0"
```
Contents of `Pipfile.lock` ('/Users/greysteil/code/python-test/Pipfile.lock'):
```json
{
"_meta": {
"hash": {
"sha256": "5f70d907b20123fa92bd105fff99886abbf573b68009a4eb8dfd3e18144ab001"
},
"pipfile-spec": 6,
"requires": {},
"sources": [
{
"url": "https://pypi.python.org/${ENV_VAR}",
"verify_ssl": true
}
]
},
"default": {
"certifi": {
"hashes": [
"sha256:13e698f54293db9f89122b0581843a782ad0934a4fe0172d2a980ba77fc61bb7",
"sha256:9fa520c1bacfb634fa7af20a76bcbd3d5fb390481724c597da32c719a7dca4b0"
],
"version": "==2018.4.16"
},
"chardet": {
"hashes": [
"sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae",
"sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691"
],
"version": "==3.0.4"
},
"idna": {
"hashes": [
"sha256:3cb5ce08046c4e3a560fc02f138d0ac63e00f8ce5901a56b32ec8b7994082aab",
"sha256:cc19709fd6d0cbfed39ea875d29ba6d4e22c0cebc510a76d6302a28385e8bb70"
],
"version": "==2.5"
},
"requests": {
"hashes": [
"sha256:5e88d64aa56ac0fda54e77fb9762ebc65879e171b746d5479a33c4082519d6c6",
"sha256:cd0189f962787284bff715fddaad478eb4d9c15aa167bd64e52ea0f661e7ea5c"
],
"version": "==2.18.0"
},
"urllib3": {
"hashes": [
"sha256:8ed6d5c1ff9d6ba84677310060d6a3a78ca3072ce0684cb3c645023009c114b1",
"sha256:b14486978518ca0901a76ba973d7821047409d7f726f22156b24e83fd71382a5"
],
"version": "==1.21.1"
}
},
"develop": {
"attrs": {
"hashes": [
"sha256:1c7960ccfd6a005cd9f7ba884e6316b5e430a3f1a6c37c5f87d8b43f83b54ec9",
"sha256:a17a9573a6f475c99b551c0e0a812707ddda1ec9653bed04c13841404ed6f450"
],
"version": "==17.4.0"
},
"funcsigs": {
"hashes": [
"sha256:330cc27ccbf7f1e992e69fef78261dc7c6569012cf397db8d3de0234e6c937ca",
"sha256:a7bb0f2cf3a3fd1ab2732cb49eba4252c2af4240442415b4abce3b87022a8f50"
],
"markers": "python_version < '3.0'",
"version": "==1.0.2"
},
"pluggy": {
"hashes": [
"sha256:7f8ae7f5bdf75671a718d2daf0a64b7885f74510bcd98b1a0bb420eb9a9d0cff",
"sha256:d345c8fe681115900d6da8d048ba67c25df42973bda370783cd58826442dcd7c",
"sha256:e160a7fcf25762bb60efc7e171d4497ff1d8d2d75a3d0df7a21b76821ecbf5c5"
],
"version": "==0.6.0"
},
"py": {
"hashes": [
"sha256:29c9fab495d7528e80ba1e343b958684f4ace687327e6f789a94bf3d1915f881",
"sha256:983f77f3331356039fdd792e9220b7b8ee1aa6bd2b25f567a963ff1de5a64f6a"
],
"version": "==1.5.3"
},
"pytest": {
"hashes": [
"sha256:6074ea3b9c999bd6d0df5fa9d12dd95ccd23550df2a582f5f5b848331d2e82ca",
"sha256:95fa025cd6deb5d937e04e368a00552332b58cae23f63b76c8c540ff1733ab6d"
],
"version": "==3.4.0"
},
"six": {
"hashes": [
"sha256:70e8a77beed4562e7f14fe23a786b54f6296e34344c23bc42f07b15018ff98e9",
"sha256:832dc0e10feb1aa2c68dcc57dbb658f1c7e65b9b61af69048abc87a2db00a0eb"
],
"version": "==1.11.0"
}
}
}
```
</details>
| [
{
"content": "import os\nimport sys\nimport json\nimport logging\n\nos.environ['PIP_PYTHON_PATH'] = sys.executable\n\n\ndef _patch_path():\n pipenv_libdir = os.path.dirname(os.path.abspath(__file__))\n for _dir in ('vendor', 'patched'):\n sys.path.insert(0, os.path.join(pipenv_libdir, _dir))\n site_packages_dir = os.path.dirname(pipenv_libdir)\n if site_packages_dir not in sys.path:\n sys.path.append(site_packages_dir)\n\n\ndef which(*args, **kwargs):\n return sys.executable\n\n\ndef main():\n is_verbose = '--verbose' in ' '.join(sys.argv)\n do_pre = '--pre' in ' '.join(sys.argv)\n do_clear = '--clear' in ' '.join(sys.argv)\n is_debug = '--debug' in ' '.join(sys.argv)\n system = '--system' in ' '.join(sys.argv)\n new_sys_argv = []\n for v in sys.argv:\n if v.startswith('--'):\n continue\n\n else:\n new_sys_argv.append(v)\n sys.argv = new_sys_argv\n\n import pipenv.core\n\n if is_verbose:\n logging.getLogger('pip9').setLevel(logging.INFO)\n logging.getLogger('notpip').setLevel(logging.INFO)\n if is_debug:\n # Shit's getting real at this point.\n logging.getLogger('pip9').setLevel(logging.DEBUG)\n logging.getLogger('notpip').setLevel(logging.DEBUG)\n if 'PIPENV_PACKAGES' in os.environ:\n packages = os.environ['PIPENV_PACKAGES'].strip().split('\\n')\n else:\n packages = sys.argv[1:]\n for i, package in enumerate(packages):\n if package.startswith('--'):\n del packages[i]\n project = pipenv.core.project\n\n def resolve(packages, pre, sources, verbose, clear, system):\n import pipenv.utils\n return pipenv.utils.resolve_deps(\n packages,\n which,\n project=project,\n pre=pre,\n sources=sources,\n clear=clear,\n verbose=verbose,\n allow_global=system,\n )\n\n results = resolve(\n packages,\n pre=do_pre,\n sources=project.sources,\n verbose=is_verbose,\n clear=do_clear,\n system=system,\n )\n print('RESULTS:')\n if results:\n print(json.dumps(results))\n else:\n print(json.dumps([]))\n\n\nif __name__ == '__main__':\n _patch_path()\n main()\n",
"path": "pipenv/resolver.py"
}
] | [
{
"content": "import os\nimport sys\nimport json\nimport logging\n\nos.environ['PIP_PYTHON_PATH'] = sys.executable\n\n\ndef _patch_path():\n pipenv_libdir = os.path.dirname(os.path.abspath(__file__))\n for _dir in ('vendor', 'patched'):\n sys.path.insert(0, os.path.join(pipenv_libdir, _dir))\n site_packages_dir = os.path.dirname(pipenv_libdir)\n if site_packages_dir not in sys.path:\n sys.path.append(site_packages_dir)\n\n\ndef which(*args, **kwargs):\n return sys.executable\n\n\ndef main():\n is_verbose = '--verbose' in ' '.join(sys.argv)\n do_pre = '--pre' in ' '.join(sys.argv)\n do_clear = '--clear' in ' '.join(sys.argv)\n is_debug = '--debug' in ' '.join(sys.argv)\n system = '--system' in ' '.join(sys.argv)\n new_sys_argv = []\n for v in sys.argv:\n if v.startswith('--'):\n continue\n\n else:\n new_sys_argv.append(v)\n sys.argv = new_sys_argv\n\n import pipenv.core\n\n if is_verbose:\n logging.getLogger('pip9').setLevel(logging.INFO)\n logging.getLogger('notpip').setLevel(logging.INFO)\n if is_debug:\n # Shit's getting real at this point.\n logging.getLogger('pip9').setLevel(logging.DEBUG)\n logging.getLogger('notpip').setLevel(logging.DEBUG)\n if 'PIPENV_PACKAGES' in os.environ:\n packages = os.environ['PIPENV_PACKAGES'].strip().split('\\n')\n else:\n packages = sys.argv[1:]\n for i, package in enumerate(packages):\n if package.startswith('--'):\n del packages[i]\n project = pipenv.core.project\n\n def resolve(packages, pre, sources, verbose, clear, system):\n import pipenv.utils\n return pipenv.utils.resolve_deps(\n packages,\n which,\n project=project,\n pre=pre,\n sources=sources,\n clear=clear,\n verbose=verbose,\n allow_global=system,\n )\n\n results = resolve(\n packages,\n pre=do_pre,\n sources=project.pipfile_sources,\n verbose=is_verbose,\n clear=do_clear,\n system=system,\n )\n print('RESULTS:')\n if results:\n print(json.dumps(results))\n else:\n print(json.dumps([]))\n\n\nif __name__ == '__main__':\n _patch_path()\n main()\n",
"path": "pipenv/resolver.py"
}
] | diff --git a/pipenv/resolver.py b/pipenv/resolver.py
index c04a6b3cd3..7e4b95d36a 100644
--- a/pipenv/resolver.py
+++ b/pipenv/resolver.py
@@ -68,7 +68,7 @@ def resolve(packages, pre, sources, verbose, clear, system):
results = resolve(
packages,
pre=do_pre,
- sources=project.sources,
+ sources=project.pipfile_sources,
verbose=is_verbose,
clear=do_clear,
system=system,
diff --git a/tests/integration/test_lock.py b/tests/integration/test_lock.py
index f71e02b368..a14b8b10c9 100644
--- a/tests/integration/test_lock.py
+++ b/tests/integration/test_lock.py
@@ -1,4 +1,5 @@
import pytest
+import os
from flaky import flaky
@@ -247,3 +248,41 @@ def test_private_index_lock_requirements(PipenvInstance):
assert c.return_code == 0
assert '-i https://pypi.python.org/simple' in c.out.strip()
assert '--extra-index-url https://test.pypi.org/simple' in c.out.strip()
+
+
[email protected]
[email protected]
+def test_lock_updated_source(PipenvInstance, pypi):
+
+ with PipenvInstance(pypi=pypi) as p:
+ with open(p.pipfile_path, 'w') as f:
+ contents = """
+[[source]]
+url = "{url}/${{MY_ENV_VAR}}"
+
+[packages]
+requests = "==2.14.0"
+ """.strip().format(url=pypi.url)
+ f.write(contents)
+
+ os.environ['MY_ENV_VAR'] = 'simple'
+ c = p.pipenv('lock')
+ assert c.return_code == 0
+ assert 'requests' in p.lockfile['default']
+
+ del os.environ['MY_ENV_VAR']
+
+ with open(p.pipfile_path, 'w') as f:
+ contents = """
+[[source]]
+url = "{url}/simple"
+
+[packages]
+requests = "==2.14.0"
+ """.strip().format(url=pypi.url)
+ f.write(contents)
+
+ c = p.pipenv('lock')
+ assert c.return_code == 0
+ assert 'requests' in p.lockfile['default']
+
|
obspy__obspy-3012 | Station.identifiers[0] should not be URI type
Hello!
Just want to say that obspy continues to be an incredibly useful package!
I'm trying to set the identifiers on an obspy Station instance.
According to the FDSN StationXML 1.1 schema, IdentifierType should be a simple string with a "type" attribute:
```
<xs:complexType name="IdentifierType">
  <xs:annotation>
    <xs:documentation>A type to document persistent identifiers.
      Identifier values should be specified without a URI scheme (prefix),
      instead the identifer type is documented as an attribute.
    </xs:documentation>
  </xs:annotation>
  <xs:simpleContent>
    <xs:extension base="xs:string">
      <xs:attribute name="type" type="xs:string"> </xs:attribute>
    </xs:extension>
  </xs:simpleContent>
</xs:complexType>
```
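For context, an identifier written per that definition would appear in a StationXML 1.1 document roughly like this (a sketch, using the DOI-style value from the session below):
```
<Identifier type="DOI">10.157778/RESIF.FR</Identifier>
```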
However, obspy (v.1.2.2) seems to have encoded this as xsd:anyURI type instead:
```
>>> wes.identifiers = ['10.157778/RESIF.FR']
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "/Users/mth/mth/miniconda3/envs/test_yasmine/lib/python3.8/site-packages/obspy/core/inventory/util.py", line 123, in identifiers
    _warn_on_invalid_uri(identifier)
  File "/Users/mth/mth/miniconda3/envs/test_yasmine/lib/python3.8/site-packages/obspy/core/inventory/util.py", line 1076, in _warn_on_invalid_uri
    msg = "Given string seems to not be a valid URI: ''" % uri
TypeError: not all arguments converted during string formatting
```
```
>>> wes.identifiers=['http://10.16778/RESIF.FR', 'http://32.2323/RESIF.CR']
>>> print("obspy is happy now!")
```
Tracking it down a bit further, in core/inventory/util.py:
```
    @identifiers.setter
    def identifiers(self, value):
        if not hasattr(value, "__iter__"):
            msg = "identifiers needs to be an iterable, e.g. a list."
            raise ValueError(msg)
        # make sure to unwind actual iterators, or the just might get exhausted
        # at some point
        identifiers = [identifier for identifier in value]
        for identifier in identifiers:
            _warn_on_invalid_uri(identifier)
        self._identifiers = identifiers
```
This calls:
```
def _warn_on_invalid_uri(uri):
    if not _is_valid_uri(uri):
        msg = "Given string seems to not be a valid URI: ''" % uri
        warnings.warn(msg)
```
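For reference, the _is_valid_uri helper it calls (reproduced here from the same module as a small runnable sketch) only accepts strings of the form "scheme:path" with both parts non-empty, which is why the plain DOI-style value fails while the http:-prefixed ones pass:
```
def _is_valid_uri(uri):
    # accepts only "scheme:path" with both parts non-empty
    if ':' not in uri:
        return False
    scheme, path = uri.split(':', 1)
    if any(not x.strip() for x in (scheme, path)):
        return False
    return True

print(_is_valid_uri('10.157778/RESIF.FR'))        # False -> triggers the warning path above
print(_is_valid_uri('http://10.16778/RESIF.FR'))  # True  -> accepted without complaint
```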
And that msg string is missing the %s placeholder to interpolate uri, which is exactly the TypeError I'm getting.
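A minimal sketch of the corrected helper, only adding the missing placeholder (whether identifiers should be URI-checked at all is the separate point 1 below):
```
def _warn_on_invalid_uri(uri):
    if not _is_valid_uri(uri):
        # '%s' added so the offending value is actually interpolated
        msg = "Given string seems to not be a valid URI: '%s'" % uri
        warnings.warn(msg)
```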
So I guess there are 2 things:
1. identifiers - shouldn't be checked as valid_uri, at least not for BaseNode types
2. the _warn_on_invalid_uri() func has an error in msg.
Thanks!
-Mike
| [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nUtility objects.\n\n:copyright:\n Lion Krischer ([email protected]), 2013\n:license:\n GNU Lesser General Public License, Version 3\n (https://www.gnu.org/copyleft/lesser.html)\n\"\"\"\nimport copy\nimport re\nimport warnings\nfrom textwrap import TextWrapper\n\nfrom obspy import UTCDateTime\nfrom obspy.core.util.base import ComparingObject\nfrom obspy.core.util.decorator import deprecated_keywords\nfrom obspy.core.util.deprecation_helpers import ObsPyDeprecationWarning\nfrom obspy.core.util.obspy_types import (FloatWithUncertaintiesAndUnit,\n FloatWithUncertaintiesFixedUnit)\n\n\nclass BaseNode(ComparingObject):\n \"\"\"\n From the StationXML definition:\n A base node type for derivation of: Network, Station and Channel\n types.\n\n The parent class for the network, station and channel classes.\n \"\"\"\n def __init__(self, code, description=None, comments=None, start_date=None,\n end_date=None, restricted_status=None, alternate_code=None,\n historical_code=None, data_availability=None,\n identifiers=None, source_id=None):\n \"\"\"\n :type code: str\n :param code: The SEED network, station, or channel code\n :type description: str, optional\n :param description: A description of the resource\n :type comments: list of :class:`Comment`, optional\n :param comments: An arbitrary number of comments to the resource\n :type start_date: :class:`~obspy.core.utcdatetime.UTCDateTime`,\n optional\n :param start_date: The start date of the resource\n :type end_date: :class:`~obspy.core.utcdatetime.UTCDateTime`, optional\n :param end_date: The end date of the resource\n :type restricted_status: str, optional\n :param restricted_status: The restriction status\n :type alternate_code: str, optional\n :param alternate_code: A code used for display or association,\n alternate to the SEED-compliant code.\n :type historical_code: str, optional\n :param historical_code: A previously used code if different from the\n current code.\n :type data_availability:\n :class:`~obspy.core.inventory.util.DataAvailability`\n :param data_availability: Information about time series availability\n for the network/station/channel.\n :type identifiers: list[str], optional\n :param identifiers: Persistent identifiers for network/station/channel\n (schema version >=1.1). URIs are in general composed of a 'scheme'\n and a 'path' (optionally with additional components), the two of\n which separated by a colon.\n :type source_id: str, optional\n :param source_id: A data source identifier in URI form\n (schema version >=1.1). 
URIs are in general composed of a 'scheme'\n and a 'path' (optionally with additional components), the two of\n which separated by a colon.\n \"\"\"\n self.code = code\n self.comments = comments or []\n self.description = description\n self.start_date = start_date\n self.end_date = end_date\n self.restricted_status = restricted_status\n self.alternate_code = alternate_code\n self.historical_code = historical_code\n self.data_availability = data_availability\n self.identifiers = identifiers or []\n self.source_id = source_id\n\n @property\n def code(self):\n return self._code\n\n @code.setter\n def code(self, value):\n if value is None:\n msg = \"A code is required\"\n raise ValueError(msg)\n self._code = str(value).strip()\n\n @property\n def source_id(self):\n return self._source_id\n\n @source_id.setter\n def source_id(self, value):\n if value:\n _warn_on_invalid_uri(value)\n self._source_id = value.strip()\n else:\n self._source_id = None\n\n @property\n def identifiers(self):\n return self._identifiers\n\n @identifiers.setter\n def identifiers(self, value):\n if not hasattr(value, \"__iter__\"):\n msg = \"identifiers needs to be an iterable, e.g. a list.\"\n raise ValueError(msg)\n # make sure to unwind actual iterators, or the just might get exhausted\n # at some point\n identifiers = [identifier for identifier in value]\n for identifier in identifiers:\n _warn_on_invalid_uri(identifier)\n self._identifiers = identifiers\n\n @property\n def alternate_code(self):\n \"\"\"\n From the StationXML definition:\n A code used for display or association, alternate to the\n SEED-compliant code.\n \"\"\"\n return self._alternate_code\n\n @alternate_code.setter\n def alternate_code(self, value):\n if value:\n self._alternate_code = value.strip()\n else:\n self._alternate_code = None\n\n @property\n def historical_code(self):\n \"\"\"\n From the StationXML definition:\n A previously used code if different from the current code.\n \"\"\"\n return self._historical_code\n\n @historical_code.setter\n def historical_code(self, value):\n if value:\n self._historical_code = value.strip()\n else:\n self._historical_code = None\n\n def copy(self):\n \"\"\"\n Returns a deepcopy of the object.\n\n :rtype: same class as original object\n :return: Copy of current object.\n\n .. rubric:: Examples\n\n 1. Create a station object and copy it\n\n >>> from obspy import read_inventory\n >>> sta = read_inventory()[0][0]\n >>> sta2 = sta.copy()\n\n The two objects are not the same:\n\n >>> sta is sta2\n False\n\n But they have equal data (before applying further processing):\n\n >>> sta == sta2\n True\n\n 2. The following example shows how to make an alias but not copy the\n data. Any changes on ``st3`` would also change the contents of\n ``st``.\n\n >>> sta3 = sta\n >>> sta is sta3\n True\n >>> sta == sta3\n True\n \"\"\"\n return copy.deepcopy(self)\n\n def is_active(self, time=None, starttime=None, endtime=None):\n \"\"\"\n Checks if the item was active at some given point in time (`time`)\n and/or if it was active at some point during a certain time range\n (`starttime`, `endtime`).\n\n .. note::\n If none of the time constraints is specified the result will always\n be `True`.\n\n :type time: :class:`~obspy.core.utcdatetime.UTCDateTime`\n :param time: Only include networks/stations/channels active at given\n point in time.\n :type starttime: :class:`~obspy.core.utcdatetime.UTCDateTime`\n :param starttime: Only include networks/stations/channels active at or\n after given point in time (i.e. 
channels ending before given time\n will not be shown).\n :type endtime: :class:`~obspy.core.utcdatetime.UTCDateTime`\n :param endtime: Only include networks/stations/channels active before\n or at given point in time (i.e. channels starting after given time\n will not be shown).\n :rtype: bool\n :returns: `True`/`False` depending on whether the item matches the\n specified time criteria.\n \"\"\"\n if time is not None:\n if self.start_date is not None and time < self.start_date:\n return False\n if self.end_date is not None and time > self.end_date:\n return False\n if starttime is not None and self.end_date is not None:\n if starttime > self.end_date:\n return False\n if endtime is not None and self.start_date is not None:\n if endtime < self.start_date:\n return False\n\n return True\n\n\nclass DataAvailability(ComparingObject):\n \"\"\"\n A description of time series data availability. This information should\n be considered transient and is primarily useful as a guide for\n generating time series data requests. The information for a\n DataAvailability (time) span may be specific to the time range used in a\n request that resulted in the document or limited to the availability of\n data within the request range. These details may or may not be\n retained when synchronizing metadata between data centers.\n Spans of data are represented by a start time, end time, number of segments\n contained in the span and maximum time tear within a certain span.\n\n :param start: Start of time extent\n :type start: :class:`~obspy.core.utcdatetime.UTCDateTime`\n :param end: End of time extent\n :type end: :class:`~obspy.core.utcdatetime.UTCDateTime`\n :param spans: Time spans with detail information\n :type spans: list of :class:`DataAvailabilitySpan`\n \"\"\"\n def __init__(self, start=None, end=None, spans=None):\n start = start is not None and UTCDateTime(start)\n self.start = start\n end = end is not None and UTCDateTime(end)\n self.end = end\n self.spans = spans or []\n\n @property\n def spans(self):\n return self._spans\n\n @spans.setter\n def spans(self, value):\n msg = 'Data availability spans must be of DataAvailabilitySpan type.'\n try:\n for item in value:\n if not isinstance(item, DataAvailabilitySpan):\n raise TypeError\n except TypeError:\n raise TypeError(msg)\n self._spans = value\n\n def __str__(self):\n if not self.spans:\n span_info = 'no time span information'\n else:\n span_info = '%d time spans with details' % len(self.spans)\n return \"Data Availability from %s to %s, %s.\" % (self.start,\n self.end, span_info)\n\n def _repr_pretty_(self, p, cycle):\n p.text(str(self))\n\n\nclass DataAvailabilitySpan(ComparingObject):\n \"\"\"\n Data availability spans are represented by a start time, end time, number\n of segments contained in the span and maximum time tear within a certain\n span.\n\n :param start: Start of time span\n :type start: :class:`~obspy.core.utcdatetime.UTCDateTime`\n :param end: End of time span\n :type end: :class:`~obspy.core.utcdatetime.UTCDateTime`\n :param number_of_segments: The number of continuous time series segments\n contained in the specified time range. 
A value of 1 indicates that the\n time series is continuous from start to end.\n :type number_of_segments: int\n :param maximum_time_tear: The maximum time tear (gap or overlap) in seconds\n between time series segments in the specified range.\n :type maximum_time_tear: float\n \"\"\"\n def __init__(self, start, end, number_of_segments, maximum_time_tear=None):\n self.start = UTCDateTime(start)\n self.end = UTCDateTime(end)\n self.number_of_segments = number_of_segments\n self.maximum_time_tear = maximum_time_tear\n\n def __str__(self):\n if self.maximum_time_tear is None:\n tear_info = 'maximum time tear not specified'\n elif abs(self.maximum_time_tear) < 0.1:\n tear_info = '%.6fs maximum time tear'\n elif abs(self.maximum_time_tear) < 2:\n tear_info = '%.3fs maximum time tear'\n elif abs(self.maximum_time_tear) < 10:\n tear_info = '%.1fs maximum time tear'\n else:\n tear_info = '%.0fs maximum time tear'\n return \"Data Availability Span: %d segments from %s to %s, %s.\" % (\n self.number_of_segments, self.start, self.end, tear_info)\n\n def _repr_pretty_(self, p, cycle):\n p.text(str(self))\n\n\nclass Equipment(ComparingObject):\n \"\"\"\n An object containing a detailed description of an equipment.\n \"\"\"\n def __init__(self, type=None, description=None, manufacturer=None,\n vendor=None, model=None, serial_number=None,\n installation_date=None, removal_date=None,\n calibration_dates=None, resource_id=None):\n \"\"\"\n :type type: str\n :param type: The equipment type\n :type description: str\n :param description: Description of the equipment\n :type manufacturer: str\n :param manufacturer: The manufacturer of the equipment\n :type vendor: str\n :param vendor: The vendor of the equipment\n :type model: str\n :param model: The model of the equipment\n :type serial_number: str\n :param serial_number: The serial number of the equipment\n :type installation_date: :class:`~obspy.core.utcdatetime.UTCDateTime`\n :param installation_date: The installation date of the equipment\n :type removal_date: :class:`~obspy.core.utcdatetime.UTCDateTime`\n :param removal_date: The removal data of the equipment\n :type calibration_dates: list of\n :class:`~obspy.core.utcdatetime.UTCDateTime`\n :param calibration_dates: A list with all calibration dates of the\n equipment.\n :type resource_id: str\n :param resource_id: This field contains a string that should serve as a\n unique resource identifier. This identifier can be interpreted\n differently depending on the data center/software that generated\n the document. Also, we recommend to use something like\n GENERATOR:Meaningful ID. 
As a common behavior equipment with the\n same ID should contain the same information/be derived from the\n same base instruments.\n \"\"\"\n self.type = type\n self.description = description\n self.manufacturer = manufacturer\n self.vendor = vendor\n self.model = model\n self.serial_number = serial_number\n self.installation_date = installation_date\n self.removal_date = removal_date\n self.calibration_dates = calibration_dates or []\n self.resource_id = resource_id\n\n @property\n def installation_date(self):\n return self._installation_date\n\n @installation_date.setter\n def installation_date(self, value):\n if value is None or isinstance(value, UTCDateTime):\n self._installation_date = value\n return\n self._installation_date = UTCDateTime(value)\n\n @property\n def removal_date(self):\n return self._removal_date\n\n @removal_date.setter\n def removal_date(self, value):\n if value is None or isinstance(value, UTCDateTime):\n self._removal_date = value\n return\n self._removal_date = UTCDateTime(value)\n\n def __str__(self):\n ret = (\"Equipment:\\n\"\n \"\\tType: {type}\\n\"\n \"\\tDescription: {description}\\n\"\n \"\\tManufacturer: {manufacturer}\\n\"\n \"\\tVendor: {vendor}\\n\"\n \"\\tModel: {model}\\n\"\n \"\\tSerial number: {serial_number}\\n\"\n \"\\tInstallation date: {installation_date}\\n\"\n \"\\tRemoval date: {removal_date}\\n\"\n \"\\tResource id: {resource_id}\\n\"\n \"\\tCalibration Dates:\\n\")\n for calib_date in self.calibration_dates:\n ret += \"\\t\\t%s\\n\" % calib_date\n ret = ret.format(**self.__dict__)\n return ret\n\n def _repr_pretty_(self, p, cycle):\n p.text(str(self))\n\n\nclass Operator(ComparingObject):\n \"\"\"\n An operating agency and associated contact persons. If there are multiple\n operators, each one should be encapsulated within an Operator object. Since\n the Contact element is a generic type that represents any contact person,\n it also has its own optional Agency element.\n \"\"\"\n @deprecated_keywords({\"agencies\": \"agency\"})\n def __init__(self, agency, contacts=None, website=None):\n \"\"\"\n :type agency: str\n :param agency: The agency of the operator.\n :type contacts: list of :class:`Person`, optional\n :param contacts: One or more contact persons.\n :type website: str, optional\n :param website: The website.\n \"\"\"\n self.agency = agency\n self.contacts = contacts or []\n self.website = website\n\n @property\n def agency(self):\n return self._agency\n\n @agency.setter\n def agency(self, value):\n # check if a list of agencies was provided, which is not supported\n # anymore (if we get a string, types of provided value and any index\n # will match)\n if not isinstance(value[0], type(value)):\n msg = (\"Only a single agency can be assigned to Operator due to \"\n \"the changes in StationXML 1.1. Subsequent agencies are \"\n \"ignored.\")\n warnings.warn(msg, ObsPyDeprecationWarning)\n value = value[0]\n self._agency = value\n\n @property\n def agencies(self):\n msg = (\"Attribute 'agencies' (holding a list of strings as Agencies) \"\n \"is deprecated in favor of 'agency' which now holds a single \"\n \"string (following changes in StationXML 1.1) and might be \"\n \"removed in the future. 
Returning a list built up of the \"\n \"single agency or an empty list if agency is None.\")\n warnings.warn(msg, ObsPyDeprecationWarning)\n if self.agency is not None:\n return [self.agency]\n return []\n\n @agencies.setter\n def agencies(self, value):\n msg = (\"Attribute 'agencies' (holding a list of strings as Agencies) \"\n \"is deprecated in favor of 'agency' which now holds a single \"\n \"string (following changes in StationXML 1.1) and might be \"\n \"removed in the future. Setting 'agency' with first item in \"\n \"provided list.\")\n warnings.warn(msg, ObsPyDeprecationWarning)\n if not hasattr(value, \"__iter__\") or len(value) < 1:\n msg = (\"agencies needs to be iterable, e.g. a list, and contain \"\n \"at least one entry.\")\n raise ValueError(msg)\n self._agency = value[0]\n\n @property\n def contacts(self):\n return self._contacts\n\n @contacts.setter\n def contacts(self, value):\n if not hasattr(value, \"__iter__\"):\n msg = (\"contacts needs to be iterable, e.g. a list.\")\n raise ValueError(msg)\n self._contacts = value\n\n\nclass Person(ComparingObject):\n \"\"\"\n From the StationXML definition:\n Representation of a person's contact information. A person can belong\n to multiple agencies and have multiple email addresses and phone\n numbers.\n \"\"\"\n email_pattern = re.compile(r\"[\\w\\.\\-_]+@[\\w\\.\\-_]+\")\n\n def __init__(self, names=None, agencies=None, emails=None, phones=None):\n \"\"\"\n :type names: list[str], optional\n :param names: Self-explanatory. Multiple names allowed.\n :type agencies: list[str], optional\n :param agencies: Self-explanatory. Multiple agencies allowed.\n :type emails: list[str], optional\n :param emails: Self-explanatory. Multiple emails allowed.\n :type phones: list[:class:`PhoneNumber`], optional\n :param phones: Self-explanatory. Multiple phone numbers allowed.\n \"\"\"\n self.names = names or []\n self.agencies = agencies or []\n self.emails = emails or []\n self.phones = phones or []\n\n @property\n def names(self):\n return self._names\n\n @names.setter\n def names(self, value):\n if not hasattr(value, \"__iter__\"):\n msg = \"names needs to be iterable, e.g. a list.\"\n raise ValueError(msg)\n self._names = value\n\n @property\n def agencies(self):\n return self._agencies\n\n @agencies.setter\n def agencies(self, value):\n if not hasattr(value, \"__iter__\"):\n msg = \"agencies needs to be iterable, e.g. a list.\"\n raise ValueError(msg)\n self._agencies = value\n\n @property\n def emails(self):\n return self._emails\n\n @emails.setter\n def emails(self, values):\n if not hasattr(values, \"__iter__\"):\n msg = \"emails needs to be iterable, e.g. a list.\"\n raise ValueError(msg)\n for value in values:\n if re.match(self.email_pattern, value) is None:\n msg = (\"emails needs to match the pattern \"\n r\"'[\\w\\.\\-_]+@[\\w\\.\\-_]+'\")\n raise ValueError(msg)\n self._emails = values\n\n @property\n def phones(self):\n return self._phones\n\n @phones.setter\n def phones(self, values):\n if not hasattr(values, \"__iter__\"):\n msg = \"phones needs to be iterable, e.g. 
a list.\"\n raise ValueError(msg)\n self._phones = values\n\n\nclass PhoneNumber(ComparingObject):\n \"\"\"\n A simple object representing a phone number.\n \"\"\"\n phone_pattern = re.compile(\"^[0-9]+-[0-9]+$\")\n\n def __init__(self, area_code, phone_number, country_code=None,\n description=None):\n \"\"\"\n :type area_code: int\n :param area_code: The area code.\n :type phone_number: str\n :param phone_number: The phone number minus the country and area code.\n Must be in the form \"[0-9]+-[0-9]+\", e.g. 1234-5678.\n :type country_code: int, optional\n :param country_code: The country code.\n :type description: str, optional\n :param description: Any additional information.\n \"\"\"\n self.country_code = country_code\n self.area_code = area_code\n self.phone_number = phone_number\n self.description = description\n\n @property\n def phone_number(self):\n return self._phone_number\n\n @phone_number.setter\n def phone_number(self, value):\n if re.match(self.phone_pattern, value) is None:\n msg = \"phone_number needs to match the pattern '[0-9]+-[0-9]+'\"\n raise ValueError(msg)\n self._phone_number = value\n\n\nclass ExternalReference(ComparingObject):\n \"\"\"\n From the StationXML definition:\n This type contains a URI and description for external data that users\n may want to reference in StationXML.\n \"\"\"\n def __init__(self, uri, description):\n \"\"\"\n :type uri: str\n :param uri: The URI to the external data.\n :type description: str\n :param description: A description of the external data.\n \"\"\"\n self.uri = uri\n self.description = description\n\n\nclass Comment(ComparingObject):\n \"\"\"\n From the StationXML definition:\n Container for a comment or log entry. Corresponds to SEED blockettes\n 31, 51 and 59.\n \"\"\"\n def __init__(self, value, id=None, begin_effective_time=None,\n end_effective_time=None, authors=None, subject=None):\n \"\"\"\n :type value: str\n :param value: The actual comment string\n :type id: int\n :param id: ID of comment, must be 0 or greater.\n :type begin_effective_time:\n :class:`~obspy.core.utcdatetime.UTCDateTime`, optional\n :param begin_effective_time: The effective start date.\n :type end_effective_time:\n :class:`~obspy.core.utcdatetime.UTCDateTime`, optional\n :param end_effective_time: The effective end date.\n :type authors: list of :class:`Person`, optional\n :param authors: The authors of this comment.\n :type subject: str, optional\n :param subject: Subject for relating comment, optional\n \"\"\"\n self.value = value\n self.begin_effective_time = begin_effective_time\n self.end_effective_time = end_effective_time\n self.authors = authors or []\n self.id = id\n self.subject = subject\n\n @property\n def id(self):\n return self._id\n\n @id.setter\n def id(self, value):\n if value is None:\n self._id = value\n return\n if not int(value) >= 0:\n msg = \"ID must be 0 or positive integer.\"\n raise ValueError(msg)\n self._id = value\n\n @property\n def value(self):\n return self._value\n\n @value.setter\n def value(self, value):\n self._value = str(value)\n\n @property\n def begin_effective_time(self):\n return self._begin_effective_time\n\n @begin_effective_time.setter\n def begin_effective_time(self, value):\n if value is None:\n self._begin_effective_time = None\n return\n self._begin_effective_time = UTCDateTime(value)\n\n @property\n def end_effective_time(self):\n return self._end_effective_time\n\n @end_effective_time.setter\n def end_effective_time(self, value):\n if value is None:\n self._end_effective_time = None\n return\n 
self._end_effective_time = UTCDateTime(value)\n\n @property\n def authors(self):\n return self._authors\n\n @authors.setter\n def authors(self, values):\n if not hasattr(values, \"__iter__\"):\n msg = \"authors needs to be iterable, e.g. a list.\"\n raise ValueError(msg)\n self._authors = values\n\n def __str__(self):\n ret = (\"Comment:\\t{value}\\n\"\n \"\\tBegin Effective Time:\\t{begin_effective_time}\\n\"\n \"\\tEnd Effective Time:\\t{end_effective_time}\\n\"\n \"\\tAuthors:\\t\\t{authors}\\n\"\n \"\\tId:\\t\\t\\t{id}\")\n ret = ret.format(\n value=self.value, begin_effective_time=self.begin_effective_time,\n end_effective_time=self.end_effective_time, authors=self.authors,\n id=self.id)\n return ret\n\n def _repr_pretty_(self, p, cycle):\n p.text(str(self))\n\n\nclass Site(ComparingObject):\n \"\"\"\n From the StationXML definition:\n Description of a site location using name and optional geopolitical\n boundaries (country, city, etc.).\n \"\"\"\n def __init__(self, name=\"\", description=None, town=None, county=None,\n region=None, country=None):\n \"\"\"\n :type name: str\n :param name: The commonly used name of this station, equivalent to the\n SEED blockette 50, field 9.\n :type description: str, optional\n :param description: A longer description of the location of this\n station, e.g. \"NW corner of Yellowstone National Park\" or \"20\n miles west of Highway 40.\"\n :type town: str, optional\n :param town: The town or city closest to the station.\n :type county: str, optional\n :param county: The county.\n :type region: str, optional\n :param region: The state, province, or region of this site.\n :type country: str, optional\n :param country: The country.\n \"\"\"\n self.name = name\n self.description = description\n self.town = town\n self.county = county\n self.region = region\n self.country = country\n\n def __str__(self):\n ret = (\"Site: {name}\\n\"\n \"\\tDescription: {description}\\n\"\n \"\\tTown: {town}\\n\"\n \"\\tCounty: {county}\\n\"\n \"\\tRegion: {region}\\n\"\n \"\\tCountry: {country}\")\n ret = ret.format(\n name=self.name, description=self.description,\n town=self.town, county=self.county, region=self.region,\n country=self.country)\n return ret\n\n def _repr_pretty_(self, p, cycle):\n p.text(str(self))\n\n\nclass Latitude(FloatWithUncertaintiesFixedUnit):\n \"\"\"\n Latitude object\n\n :type value: float\n :param value: Latitude value\n :type lower_uncertainty: float\n :param lower_uncertainty: Lower uncertainty (aka minusError)\n :type upper_uncertainty: float\n :param upper_uncertainty: Upper uncertainty (aka plusError)\n :type datum: str\n :param datum: Datum for latitude coordinate\n :type measurement_method: str\n :param measurement_method: Method used in the measurement.\n \"\"\"\n _minimum = -90\n _maximum = 90\n _unit = \"DEGREES\"\n\n def __init__(self, value, lower_uncertainty=None, upper_uncertainty=None,\n datum=None):\n \"\"\"\n \"\"\"\n self.datum = datum\n super(Latitude, self).__init__(\n value, lower_uncertainty=lower_uncertainty,\n upper_uncertainty=upper_uncertainty)\n\n\nclass Longitude(FloatWithUncertaintiesFixedUnit):\n \"\"\"\n Longitude object\n\n :type value: float\n :param value: Longitude value\n :type lower_uncertainty: float\n :param lower_uncertainty: Lower uncertainty (aka minusError)\n :type upper_uncertainty: float\n :param upper_uncertainty: Upper uncertainty (aka plusError)\n :type datum: str\n :param datum: Datum for longitude coordinate\n :type measurement_method: str\n :param measurement_method: Method used in the 
measurement.\n \"\"\"\n _minimum = -180\n _maximum = 180\n unit = \"DEGREES\"\n\n def __init__(self, value, lower_uncertainty=None, upper_uncertainty=None,\n datum=None):\n \"\"\"\n \"\"\"\n self.datum = datum\n super(Longitude, self).__init__(\n value, lower_uncertainty=lower_uncertainty,\n upper_uncertainty=upper_uncertainty)\n\n\nclass Distance(FloatWithUncertaintiesAndUnit):\n \"\"\"\n Distance object\n\n :type value: float\n :param value: Distance value\n :type lower_uncertainty: float\n :param lower_uncertainty: Lower uncertainty (aka minusError)\n :type upper_uncertainty: float\n :param upper_uncertainty: Upper uncertainty (aka plusError)\n :type unit: str\n :param unit: Unit for distance measure.\n :type measurement_method: str\n :param measurement_method: Method used in the measurement.\n \"\"\"\n def __init__(self, value, lower_uncertainty=None, upper_uncertainty=None,\n unit=\"METERS\"):\n super(Distance, self).__init__(\n value, lower_uncertainty=lower_uncertainty,\n upper_uncertainty=upper_uncertainty)\n self._unit = unit\n\n\nclass Azimuth(FloatWithUncertaintiesFixedUnit):\n \"\"\"\n Azimuth object\n\n :type value: float\n :param value: Azimuth value\n :type lower_uncertainty: float\n :param lower_uncertainty: Lower uncertainty (aka minusError)\n :type upper_uncertainty: float\n :param upper_uncertainty: Upper uncertainty (aka plusError)\n :type measurement_method: str\n :param measurement_method: Method used in the measurement.\n \"\"\"\n _minimum = 0\n _maximum = 360\n unit = \"DEGREES\"\n\n\nclass Dip(FloatWithUncertaintiesFixedUnit):\n \"\"\"\n Dip object\n\n :type value: float\n :param value: Dip value\n :type lower_uncertainty: float\n :param lower_uncertainty: Lower uncertainty (aka minusError)\n :type upper_uncertainty: float\n :param upper_uncertainty: Upper uncertainty (aka plusError)\n :type measurement_method: str\n :param measurement_method: Method used in the measurement.\n \"\"\"\n _minimum = -90\n _maximum = 90\n unit = \"DEGREES\"\n\n\nclass ClockDrift(FloatWithUncertaintiesFixedUnit):\n \"\"\"\n ClockDrift object\n\n :type value: float\n :param value: ClockDrift value\n :type lower_uncertainty: float\n :param lower_uncertainty: Lower uncertainty (aka minusError)\n :type upper_uncertainty: float\n :param upper_uncertainty: Upper uncertainty (aka plusError)\n :type measurement_method: str\n :param measurement_method: Method used in the measurement.\n \"\"\"\n _minimum = 0\n unit = \"SECONDS/SAMPLE\"\n\n\nclass SampleRate(FloatWithUncertaintiesFixedUnit):\n \"\"\"\n SampleRate object\n\n :type value: float\n :param value: ClockDrift value\n :type lower_uncertainty: float\n :param lower_uncertainty: Lower uncertainty (aka minusError)\n :type upper_uncertainty: float\n :param upper_uncertainty: Upper uncertainty (aka plusError)\n :type measurement_method: str\n :param measurement_method: Method used in the measurement.\n \"\"\"\n unit = \"SAMPLES/S\"\n\n\nclass Frequency(FloatWithUncertaintiesFixedUnit):\n \"\"\"\n Frequency object\n\n :type value: float\n :param value: Frequency value\n :type lower_uncertainty: float\n :param lower_uncertainty: Lower uncertainty (aka minusError)\n :type upper_uncertainty: float\n :param upper_uncertainty: Upper uncertainty (aka plusError)\n :type measurement_method: str\n :param measurement_method: Method used in the measurement.\n \"\"\"\n unit = \"HERTZ\"\n\n\nclass Angle(FloatWithUncertaintiesFixedUnit):\n \"\"\"\n Angle object\n\n :type value: float\n :param value: Angle value\n :type lower_uncertainty: float\n :param 
lower_uncertainty: Lower uncertainty (aka minusError)\n :type upper_uncertainty: float\n :param upper_uncertainty: Upper uncertainty (aka plusError)\n :type measurement_method: str\n :param measurement_method: Method used in the measurement.\n \"\"\"\n _minimum = -360\n _maximum = 360\n unit = \"DEGREES\"\n\n\ndef _unified_content_strings(contents):\n contents_unique = sorted(set(contents), key=_seed_id_keyfunction)\n contents_counts = [\n (item, contents.count(item)) for item in contents_unique]\n items = [item if count == 1 else \"{} ({}x)\".format(item, count)\n for item, count in contents_counts]\n return items\n\n\n# make TextWrapper only split on colons, so that we avoid splitting in between\n# e.g. network code and network code occurence count (can be controlled with\n# class attributes).\n# Also avoid lines starting with \", \" (need to patch the class for this)\nclass InventoryTextWrapper(TextWrapper):\n wordsep_re = re.compile(r'(, )')\n wordsep_simple_re = re.compile(r'(, )')\n\n def _wrap_chunks(self, *args, **kwargs):\n \"\"\"\n \"\"\"\n # the following doesn't work somehow (likely because of future??)\n # lines = super(InventoryTextWrapper, self)._wrap_chunks(\n # *args, **kwargs)\n lines = TextWrapper._wrap_chunks(self, *args, **kwargs)\n lines = [re.sub(r'([\\b\\s]+), (.*)', r'\\1\\2', line, count=1)\n for line in lines]\n return lines\n\n\ndef _textwrap(text, *args, **kwargs):\n return InventoryTextWrapper(*args, **kwargs).wrap(text)\n\n\ndef _seed_id_keyfunction(x):\n \"\"\"\n Keyfunction to use in sorting two (partial) SEED IDs\n\n Assumes that the last (or only) \".\"-separated part is a channel code.\n Assumes the last character is a the component code and sorts it\n \"Z\"-\"N\"-\"E\"-others_lexical.\n \"\"\"\n # for comparison we build a list of 5 SEED code pieces:\n # [network, station, location, band+instrument, component]\n # with partial codes (i.e. not 4 fields after splitting at dots),\n # we go with the following assumptions (these seem a bit random, but that's\n # what can be encountered in string representations of the Inventory object\n # hierarchy):\n # - no dot means network code only (e.g. \"IU\")\n # - one dot means network.station code only (e.g. \"IU.ANMO\")\n # - two dots means station.location.channel code only (e.g. \"ANMO.10.BHZ\")\n # - three dots: full SEED ID (e.g. \"IU.ANMO.10.BHZ\")\n # - more dots: sort after any of the previous, plain lexical sort\n # if no \".\" in the string: assume it's a network code\n\n # split to get rid of the description that that is added to networks and\n # stations which might also contain dots.\n number_of_dots = x.strip().split()[0].count(\".\")\n\n x = x.upper()\n if number_of_dots == 0:\n x = [x] + [\"\"] * 4\n elif number_of_dots == 1:\n x = x.split(\".\") + [\"\"] * 3\n elif number_of_dots in (2, 3):\n x = x.split(\".\")\n if number_of_dots == 2:\n x = [\"\"] + x\n # split channel code into band+instrument code and component code\n x = x[:-1] + [x[-1][:-1], x[-1] and x[-1][-1] or '']\n # special comparison for component code, convert \"ZNE\" to integers\n # which compare less than any character\n comp = \"ZNE\".find(x[-1])\n # last item is component code, either the original 1-char string, or an\n # int from 0-2 if any of \"ZNE\". 
Python3 does not allow comparison of\n # int and string anymore (Python 2 always compares ints smaller than\n # any string), so we need to work around this by making this last item\n # a tuple with first item False for ints and True for strings.\n if comp >= 0:\n x[-1] = (False, comp)\n else:\n x[-1] = (True, x[-1])\n # all other cases, just convert the upper case string to a single item\n # list - it will compare greater than any of the split lists.\n else:\n x = [x, ]\n\n return x\n\n\ndef _response_plot_label(network, station, channel, label_epoch_dates):\n label = \".\".join((network.code, station.code,\n channel.location_code, channel.code))\n if label_epoch_dates:\n start = channel.start_date\n if start is None:\n start = 'open'\n else:\n start = str(start.date)\n end = channel.end_date\n if end is None:\n end = 'open'\n else:\n end = str(end.date)\n label += '\\n{} -- {}'.format(start, end)\n return label\n\n\ndef _is_valid_uri(uri):\n if ':' not in uri:\n return False\n scheme, path = uri.split(':', 1)\n if any(not x.strip() for x in (scheme, path)):\n return False\n return True\n\n\ndef _warn_on_invalid_uri(uri):\n if not _is_valid_uri(uri):\n msg = \"Given string seems to not be a valid URI: ''\" % uri\n warnings.warn(msg)\n\n\ndef _add_resolve_seedid_doc(func):\n \"\"\"\n The following parameters deal with the problem, that the format\n only stores station names for the picks, but the Pick object expects\n a SEED id. The SEED id is looked up for every pick by the\n following procedure:\n\n 1. look at seedid_map for a direct station name match and use the specified\n template\n 2. if 1 did not succeed, look if the station is present in inventory and\n use its first channel as template\n 3. if 1 and 2 did not succeed, use specified default template\n (default_seedid)\n\n :param str filename: File or file-like object in text mode.\n :type inventory: :class:`~obspy.core.inventory.inventory.Inventory`\n :param inventory: Inventory used to retrieve network code, location code\n and channel code of stations (SEED id).\n :param dict seedid_map: Default templates for each station\n (example: `seedid_map={'MOX': 'GR.{}..HH{}'`).\n The values must contain three dots and two `{}` which are\n substituted by station code and component.\n :param str default_seedid: Default SEED id template.\n The value must contain three dots and two `{}` which are\n substituted by station code and component.\n :param bool warn: Whether or not to warn on failed look ups\n (no matching data found or ambiguous results) in the inventory\n \"\"\"\n if func.__doc__ is not None:\n func.__doc__ = func.__doc__ + __doc__\n return func\n\n\ndef _add_resolve_seedid_ph2comp_doc(func):\n \"\"\"\n :param dict ph2comp: mapping of phases to components if format does not\n specify the component or if the component ends with '?'. Set it to\n `None` for no mapping of components. 
(default: {'P': 'Z', 'S': 'N'})\n \"\"\"\n if func.__doc__ is not None:\n func.__doc__ = func.__doc__ + __doc__\n return func\n\n\ndef _resolve_seedid(station, component, inventory=None,\n time=None, seedid_map=None, default_seedid=None,\n key='{sta.code}', id_map=None, id_default=None,\n phase=None, ph2comp={'P': 'Z', 'S': 'N'},\n unused_kwargs=False, warn=True, **kwargs):\n if not unused_kwargs and len(kwargs) > 0:\n raise ValueError(f'Unexpected arguments: {kwargs}')\n if id_map is not None: # backwards compatibility\n seedid_map = id_map\n if id_default is not None: # backwards compatibility\n default_seedid = id_default\n if phase is not None and ph2comp is not None and (\n component == '' or component.endswith('?')):\n component = component[:-1] + ph2comp.get(phase.upper(), '')\n seedid = None\n if seedid_map is not None and station in seedid_map:\n seedid = seedid_map[station].format(station, component)\n elif inventory is not None:\n seedid = _resolve_seedid_from_inventory(\n station, component, inventory, time=time, warn=warn)\n if seedid is None and default_seedid is not None:\n seedid = default_seedid.format(station, component)\n if seedid is None:\n return '', station, None, component\n else:\n return tuple(seedid.split('.'))\n\n\ndef _resolve_seedid_from_inventory(\n station, component, inventory, time=None, network=None,\n location=None, warn=True):\n \"\"\"\n Return a (Network, Station, Location, Channel) tuple.\n\n Given a station and channel code and station metadata (and optionally a\n certain point in time), try to resolve the full SEED ID, i.e. fill in\n a missing/unknown network and/or location code.\n If no matching data is found in metadata or if ambiguities in the station\n metadata are encountered, returns ``None`` for network and/or location\n code.\n\n Simply returns the given (Network, Station, Location, Channel) input if\n *both* ``location`` and ``network`` are already specified.\n\n :type station: str\n :param station: Station code to look up.\n :type channel: str\n :param channel: Channel code to look up.\n :type inventory: :class:`~obspy.core.inventory.inventory.Inventory`\n :param inventory: Station metadata to use for look up of missing parts of\n the full SEED ID.\n :type time: :class:`~obspy.core.utcdatetime.UTCDateTime`\n :param time: Optionally restrict lookup from metadata to given timestamp.\n :type network: str\n :param network: Also specify network code for lookup (not intended to be\n used together with ``location``, see above)\n :type location: str\n :param location: Also specify location code for lookup (not intended to be\n used together with ``network``, see above)\n :type warn: bool\n :param warn: Whether or not to warn on failed look ups (no matching data\n found or ambiguous results) that return some ``None``s.\n :rtype: str\n :returns: SEED id string\n \"\"\"\n inv = inventory.select(station=station, channel='*' + component, time=time,\n network=network, location=location,\n keep_empty=False)\n if len(inv.networks) != 1 or len(inv.networks[0].stations) == 0:\n if warn:\n msg = ('No matching metadata found for station '\n f'{station}, component {component}.')\n warnings.warn(msg)\n return\n net = inv.networks[0]\n seedids = [f'{net.code}.{station}.{cha.location_code}.{cha.code}'\n for cha in net.stations[0] if cha.is_active(time=time)]\n seedids = [id_[:len(id_) - len(component)] + component for id_ in seedids]\n if len(seedids) == 0:\n if warn:\n msg = ('No matching metadata found for station '\n f'{station}, component {component}.')\n 
warnings.warn(msg)\n return\n if len(set(seedids)) > 1 and warn:\n msg = ('Multiple SEED ids found for station '\n f'{station}, component {component}. Use first.')\n warnings.warn(msg)\n return seedids.pop(0)\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod(exclude_empty=True)\n",
"path": "obspy/core/inventory/util.py"
}
] | [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nUtility objects.\n\n:copyright:\n Lion Krischer ([email protected]), 2013\n:license:\n GNU Lesser General Public License, Version 3\n (https://www.gnu.org/copyleft/lesser.html)\n\"\"\"\nimport copy\nimport re\nimport warnings\nfrom textwrap import TextWrapper\n\nfrom obspy import UTCDateTime\nfrom obspy.core.util.base import ComparingObject\nfrom obspy.core.util.decorator import deprecated_keywords\nfrom obspy.core.util.deprecation_helpers import ObsPyDeprecationWarning\nfrom obspy.core.util.obspy_types import (FloatWithUncertaintiesAndUnit,\n FloatWithUncertaintiesFixedUnit)\n\n\nclass BaseNode(ComparingObject):\n \"\"\"\n From the StationXML definition:\n A base node type for derivation of: Network, Station and Channel\n types.\n\n The parent class for the network, station and channel classes.\n \"\"\"\n def __init__(self, code, description=None, comments=None, start_date=None,\n end_date=None, restricted_status=None, alternate_code=None,\n historical_code=None, data_availability=None,\n identifiers=None, source_id=None):\n \"\"\"\n :type code: str\n :param code: The SEED network, station, or channel code\n :type description: str, optional\n :param description: A description of the resource\n :type comments: list of :class:`Comment`, optional\n :param comments: An arbitrary number of comments to the resource\n :type start_date: :class:`~obspy.core.utcdatetime.UTCDateTime`,\n optional\n :param start_date: The start date of the resource\n :type end_date: :class:`~obspy.core.utcdatetime.UTCDateTime`, optional\n :param end_date: The end date of the resource\n :type restricted_status: str, optional\n :param restricted_status: The restriction status\n :type alternate_code: str, optional\n :param alternate_code: A code used for display or association,\n alternate to the SEED-compliant code.\n :type historical_code: str, optional\n :param historical_code: A previously used code if different from the\n current code.\n :type data_availability:\n :class:`~obspy.core.inventory.util.DataAvailability`\n :param data_availability: Information about time series availability\n for the network/station/channel.\n :type identifiers: list[str], optional\n :param identifiers: Persistent identifiers for network/station/channel\n (schema version >=1.1). URIs are in general composed of a 'scheme'\n and a 'path' (optionally with additional components), the two of\n which separated by a colon.\n :type source_id: str, optional\n :param source_id: A data source identifier in URI form\n (schema version >=1.1). 
URIs are in general composed of a 'scheme'\n and a 'path' (optionally with additional components), the two of\n which separated by a colon.\n \"\"\"\n self.code = code\n self.comments = comments or []\n self.description = description\n self.start_date = start_date\n self.end_date = end_date\n self.restricted_status = restricted_status\n self.alternate_code = alternate_code\n self.historical_code = historical_code\n self.data_availability = data_availability\n self.identifiers = identifiers or []\n self.source_id = source_id\n\n @property\n def code(self):\n return self._code\n\n @code.setter\n def code(self, value):\n if value is None:\n msg = \"A code is required\"\n raise ValueError(msg)\n self._code = str(value).strip()\n\n @property\n def source_id(self):\n return self._source_id\n\n @source_id.setter\n def source_id(self, value):\n if value:\n _warn_on_invalid_uri(value)\n self._source_id = value.strip()\n else:\n self._source_id = None\n\n @property\n def identifiers(self):\n return self._identifiers\n\n @identifiers.setter\n def identifiers(self, value):\n if not hasattr(value, \"__iter__\"):\n msg = \"identifiers needs to be an iterable, e.g. a list.\"\n raise ValueError(msg)\n # make sure to unwind actual iterators, or the just might get exhausted\n # at some point\n identifiers = [identifier for identifier in value]\n for identifier in identifiers:\n _warn_on_invalid_uri(identifier)\n self._identifiers = identifiers\n\n @property\n def alternate_code(self):\n \"\"\"\n From the StationXML definition:\n A code used for display or association, alternate to the\n SEED-compliant code.\n \"\"\"\n return self._alternate_code\n\n @alternate_code.setter\n def alternate_code(self, value):\n if value:\n self._alternate_code = value.strip()\n else:\n self._alternate_code = None\n\n @property\n def historical_code(self):\n \"\"\"\n From the StationXML definition:\n A previously used code if different from the current code.\n \"\"\"\n return self._historical_code\n\n @historical_code.setter\n def historical_code(self, value):\n if value:\n self._historical_code = value.strip()\n else:\n self._historical_code = None\n\n def copy(self):\n \"\"\"\n Returns a deepcopy of the object.\n\n :rtype: same class as original object\n :return: Copy of current object.\n\n .. rubric:: Examples\n\n 1. Create a station object and copy it\n\n >>> from obspy import read_inventory\n >>> sta = read_inventory()[0][0]\n >>> sta2 = sta.copy()\n\n The two objects are not the same:\n\n >>> sta is sta2\n False\n\n But they have equal data (before applying further processing):\n\n >>> sta == sta2\n True\n\n 2. The following example shows how to make an alias but not copy the\n data. Any changes on ``st3`` would also change the contents of\n ``st``.\n\n >>> sta3 = sta\n >>> sta is sta3\n True\n >>> sta == sta3\n True\n \"\"\"\n return copy.deepcopy(self)\n\n def is_active(self, time=None, starttime=None, endtime=None):\n \"\"\"\n Checks if the item was active at some given point in time (`time`)\n and/or if it was active at some point during a certain time range\n (`starttime`, `endtime`).\n\n .. note::\n If none of the time constraints is specified the result will always\n be `True`.\n\n :type time: :class:`~obspy.core.utcdatetime.UTCDateTime`\n :param time: Only include networks/stations/channels active at given\n point in time.\n :type starttime: :class:`~obspy.core.utcdatetime.UTCDateTime`\n :param starttime: Only include networks/stations/channels active at or\n after given point in time (i.e. 
channels ending before given time\n will not be shown).\n :type endtime: :class:`~obspy.core.utcdatetime.UTCDateTime`\n :param endtime: Only include networks/stations/channels active before\n or at given point in time (i.e. channels starting after given time\n will not be shown).\n :rtype: bool\n :returns: `True`/`False` depending on whether the item matches the\n specified time criteria.\n \"\"\"\n if time is not None:\n if self.start_date is not None and time < self.start_date:\n return False\n if self.end_date is not None and time > self.end_date:\n return False\n if starttime is not None and self.end_date is not None:\n if starttime > self.end_date:\n return False\n if endtime is not None and self.start_date is not None:\n if endtime < self.start_date:\n return False\n\n return True\n\n\nclass DataAvailability(ComparingObject):\n \"\"\"\n A description of time series data availability. This information should\n be considered transient and is primarily useful as a guide for\n generating time series data requests. The information for a\n DataAvailability (time) span may be specific to the time range used in a\n request that resulted in the document or limited to the availability of\n data within the request range. These details may or may not be\n retained when synchronizing metadata between data centers.\n Spans of data are represented by a start time, end time, number of segments\n contained in the span and maximum time tear within a certain span.\n\n :param start: Start of time extent\n :type start: :class:`~obspy.core.utcdatetime.UTCDateTime`\n :param end: End of time extent\n :type end: :class:`~obspy.core.utcdatetime.UTCDateTime`\n :param spans: Time spans with detail information\n :type spans: list of :class:`DataAvailabilitySpan`\n \"\"\"\n def __init__(self, start=None, end=None, spans=None):\n start = start is not None and UTCDateTime(start)\n self.start = start\n end = end is not None and UTCDateTime(end)\n self.end = end\n self.spans = spans or []\n\n @property\n def spans(self):\n return self._spans\n\n @spans.setter\n def spans(self, value):\n msg = 'Data availability spans must be of DataAvailabilitySpan type.'\n try:\n for item in value:\n if not isinstance(item, DataAvailabilitySpan):\n raise TypeError\n except TypeError:\n raise TypeError(msg)\n self._spans = value\n\n def __str__(self):\n if not self.spans:\n span_info = 'no time span information'\n else:\n span_info = '%d time spans with details' % len(self.spans)\n return \"Data Availability from %s to %s, %s.\" % (self.start,\n self.end, span_info)\n\n def _repr_pretty_(self, p, cycle):\n p.text(str(self))\n\n\nclass DataAvailabilitySpan(ComparingObject):\n \"\"\"\n Data availability spans are represented by a start time, end time, number\n of segments contained in the span and maximum time tear within a certain\n span.\n\n :param start: Start of time span\n :type start: :class:`~obspy.core.utcdatetime.UTCDateTime`\n :param end: End of time span\n :type end: :class:`~obspy.core.utcdatetime.UTCDateTime`\n :param number_of_segments: The number of continuous time series segments\n contained in the specified time range. 
A value of 1 indicates that the\n time series is continuous from start to end.\n :type number_of_segments: int\n :param maximum_time_tear: The maximum time tear (gap or overlap) in seconds\n between time series segments in the specified range.\n :type maximum_time_tear: float\n \"\"\"\n def __init__(self, start, end, number_of_segments, maximum_time_tear=None):\n self.start = UTCDateTime(start)\n self.end = UTCDateTime(end)\n self.number_of_segments = number_of_segments\n self.maximum_time_tear = maximum_time_tear\n\n def __str__(self):\n if self.maximum_time_tear is None:\n tear_info = 'maximum time tear not specified'\n elif abs(self.maximum_time_tear) < 0.1:\n tear_info = '%.6fs maximum time tear'\n elif abs(self.maximum_time_tear) < 2:\n tear_info = '%.3fs maximum time tear'\n elif abs(self.maximum_time_tear) < 10:\n tear_info = '%.1fs maximum time tear'\n else:\n tear_info = '%.0fs maximum time tear'\n return \"Data Availability Span: %d segments from %s to %s, %s.\" % (\n self.number_of_segments, self.start, self.end, tear_info)\n\n def _repr_pretty_(self, p, cycle):\n p.text(str(self))\n\n\nclass Equipment(ComparingObject):\n \"\"\"\n An object containing a detailed description of an equipment.\n \"\"\"\n def __init__(self, type=None, description=None, manufacturer=None,\n vendor=None, model=None, serial_number=None,\n installation_date=None, removal_date=None,\n calibration_dates=None, resource_id=None):\n \"\"\"\n :type type: str\n :param type: The equipment type\n :type description: str\n :param description: Description of the equipment\n :type manufacturer: str\n :param manufacturer: The manufacturer of the equipment\n :type vendor: str\n :param vendor: The vendor of the equipment\n :type model: str\n :param model: The model of the equipment\n :type serial_number: str\n :param serial_number: The serial number of the equipment\n :type installation_date: :class:`~obspy.core.utcdatetime.UTCDateTime`\n :param installation_date: The installation date of the equipment\n :type removal_date: :class:`~obspy.core.utcdatetime.UTCDateTime`\n :param removal_date: The removal data of the equipment\n :type calibration_dates: list of\n :class:`~obspy.core.utcdatetime.UTCDateTime`\n :param calibration_dates: A list with all calibration dates of the\n equipment.\n :type resource_id: str\n :param resource_id: This field contains a string that should serve as a\n unique resource identifier. This identifier can be interpreted\n differently depending on the data center/software that generated\n the document. Also, we recommend to use something like\n GENERATOR:Meaningful ID. 
As a common behavior equipment with the\n same ID should contain the same information/be derived from the\n same base instruments.\n \"\"\"\n self.type = type\n self.description = description\n self.manufacturer = manufacturer\n self.vendor = vendor\n self.model = model\n self.serial_number = serial_number\n self.installation_date = installation_date\n self.removal_date = removal_date\n self.calibration_dates = calibration_dates or []\n self.resource_id = resource_id\n\n @property\n def installation_date(self):\n return self._installation_date\n\n @installation_date.setter\n def installation_date(self, value):\n if value is None or isinstance(value, UTCDateTime):\n self._installation_date = value\n return\n self._installation_date = UTCDateTime(value)\n\n @property\n def removal_date(self):\n return self._removal_date\n\n @removal_date.setter\n def removal_date(self, value):\n if value is None or isinstance(value, UTCDateTime):\n self._removal_date = value\n return\n self._removal_date = UTCDateTime(value)\n\n def __str__(self):\n ret = (\"Equipment:\\n\"\n \"\\tType: {type}\\n\"\n \"\\tDescription: {description}\\n\"\n \"\\tManufacturer: {manufacturer}\\n\"\n \"\\tVendor: {vendor}\\n\"\n \"\\tModel: {model}\\n\"\n \"\\tSerial number: {serial_number}\\n\"\n \"\\tInstallation date: {installation_date}\\n\"\n \"\\tRemoval date: {removal_date}\\n\"\n \"\\tResource id: {resource_id}\\n\"\n \"\\tCalibration Dates:\\n\")\n for calib_date in self.calibration_dates:\n ret += \"\\t\\t%s\\n\" % calib_date\n ret = ret.format(**self.__dict__)\n return ret\n\n def _repr_pretty_(self, p, cycle):\n p.text(str(self))\n\n\nclass Operator(ComparingObject):\n \"\"\"\n An operating agency and associated contact persons. If there are multiple\n operators, each one should be encapsulated within an Operator object. Since\n the Contact element is a generic type that represents any contact person,\n it also has its own optional Agency element.\n \"\"\"\n @deprecated_keywords({\"agencies\": \"agency\"})\n def __init__(self, agency, contacts=None, website=None):\n \"\"\"\n :type agency: str\n :param agency: The agency of the operator.\n :type contacts: list of :class:`Person`, optional\n :param contacts: One or more contact persons.\n :type website: str, optional\n :param website: The website.\n \"\"\"\n self.agency = agency\n self.contacts = contacts or []\n self.website = website\n\n @property\n def agency(self):\n return self._agency\n\n @agency.setter\n def agency(self, value):\n # check if a list of agencies was provided, which is not supported\n # anymore (if we get a string, types of provided value and any index\n # will match)\n if not isinstance(value[0], type(value)):\n msg = (\"Only a single agency can be assigned to Operator due to \"\n \"the changes in StationXML 1.1. Subsequent agencies are \"\n \"ignored.\")\n warnings.warn(msg, ObsPyDeprecationWarning)\n value = value[0]\n self._agency = value\n\n @property\n def agencies(self):\n msg = (\"Attribute 'agencies' (holding a list of strings as Agencies) \"\n \"is deprecated in favor of 'agency' which now holds a single \"\n \"string (following changes in StationXML 1.1) and might be \"\n \"removed in the future. 
Returning a list built up of the \"\n \"single agency or an empty list if agency is None.\")\n warnings.warn(msg, ObsPyDeprecationWarning)\n if self.agency is not None:\n return [self.agency]\n return []\n\n @agencies.setter\n def agencies(self, value):\n msg = (\"Attribute 'agencies' (holding a list of strings as Agencies) \"\n \"is deprecated in favor of 'agency' which now holds a single \"\n \"string (following changes in StationXML 1.1) and might be \"\n \"removed in the future. Setting 'agency' with first item in \"\n \"provided list.\")\n warnings.warn(msg, ObsPyDeprecationWarning)\n if not hasattr(value, \"__iter__\") or len(value) < 1:\n msg = (\"agencies needs to be iterable, e.g. a list, and contain \"\n \"at least one entry.\")\n raise ValueError(msg)\n self._agency = value[0]\n\n @property\n def contacts(self):\n return self._contacts\n\n @contacts.setter\n def contacts(self, value):\n if not hasattr(value, \"__iter__\"):\n msg = (\"contacts needs to be iterable, e.g. a list.\")\n raise ValueError(msg)\n self._contacts = value\n\n\nclass Person(ComparingObject):\n \"\"\"\n From the StationXML definition:\n Representation of a person's contact information. A person can belong\n to multiple agencies and have multiple email addresses and phone\n numbers.\n \"\"\"\n email_pattern = re.compile(r\"[\\w\\.\\-_]+@[\\w\\.\\-_]+\")\n\n def __init__(self, names=None, agencies=None, emails=None, phones=None):\n \"\"\"\n :type names: list[str], optional\n :param names: Self-explanatory. Multiple names allowed.\n :type agencies: list[str], optional\n :param agencies: Self-explanatory. Multiple agencies allowed.\n :type emails: list[str], optional\n :param emails: Self-explanatory. Multiple emails allowed.\n :type phones: list[:class:`PhoneNumber`], optional\n :param phones: Self-explanatory. Multiple phone numbers allowed.\n \"\"\"\n self.names = names or []\n self.agencies = agencies or []\n self.emails = emails or []\n self.phones = phones or []\n\n @property\n def names(self):\n return self._names\n\n @names.setter\n def names(self, value):\n if not hasattr(value, \"__iter__\"):\n msg = \"names needs to be iterable, e.g. a list.\"\n raise ValueError(msg)\n self._names = value\n\n @property\n def agencies(self):\n return self._agencies\n\n @agencies.setter\n def agencies(self, value):\n if not hasattr(value, \"__iter__\"):\n msg = \"agencies needs to be iterable, e.g. a list.\"\n raise ValueError(msg)\n self._agencies = value\n\n @property\n def emails(self):\n return self._emails\n\n @emails.setter\n def emails(self, values):\n if not hasattr(values, \"__iter__\"):\n msg = \"emails needs to be iterable, e.g. a list.\"\n raise ValueError(msg)\n for value in values:\n if re.match(self.email_pattern, value) is None:\n msg = (\"emails needs to match the pattern \"\n r\"'[\\w\\.\\-_]+@[\\w\\.\\-_]+'\")\n raise ValueError(msg)\n self._emails = values\n\n @property\n def phones(self):\n return self._phones\n\n @phones.setter\n def phones(self, values):\n if not hasattr(values, \"__iter__\"):\n msg = \"phones needs to be iterable, e.g. 
a list.\"\n raise ValueError(msg)\n self._phones = values\n\n\nclass PhoneNumber(ComparingObject):\n \"\"\"\n A simple object representing a phone number.\n \"\"\"\n phone_pattern = re.compile(\"^[0-9]+-[0-9]+$\")\n\n def __init__(self, area_code, phone_number, country_code=None,\n description=None):\n \"\"\"\n :type area_code: int\n :param area_code: The area code.\n :type phone_number: str\n :param phone_number: The phone number minus the country and area code.\n Must be in the form \"[0-9]+-[0-9]+\", e.g. 1234-5678.\n :type country_code: int, optional\n :param country_code: The country code.\n :type description: str, optional\n :param description: Any additional information.\n \"\"\"\n self.country_code = country_code\n self.area_code = area_code\n self.phone_number = phone_number\n self.description = description\n\n @property\n def phone_number(self):\n return self._phone_number\n\n @phone_number.setter\n def phone_number(self, value):\n if re.match(self.phone_pattern, value) is None:\n msg = \"phone_number needs to match the pattern '[0-9]+-[0-9]+'\"\n raise ValueError(msg)\n self._phone_number = value\n\n\nclass ExternalReference(ComparingObject):\n \"\"\"\n From the StationXML definition:\n This type contains a URI and description for external data that users\n may want to reference in StationXML.\n \"\"\"\n def __init__(self, uri, description):\n \"\"\"\n :type uri: str\n :param uri: The URI to the external data.\n :type description: str\n :param description: A description of the external data.\n \"\"\"\n self.uri = uri\n self.description = description\n\n\nclass Comment(ComparingObject):\n \"\"\"\n From the StationXML definition:\n Container for a comment or log entry. Corresponds to SEED blockettes\n 31, 51 and 59.\n \"\"\"\n def __init__(self, value, id=None, begin_effective_time=None,\n end_effective_time=None, authors=None, subject=None):\n \"\"\"\n :type value: str\n :param value: The actual comment string\n :type id: int\n :param id: ID of comment, must be 0 or greater.\n :type begin_effective_time:\n :class:`~obspy.core.utcdatetime.UTCDateTime`, optional\n :param begin_effective_time: The effective start date.\n :type end_effective_time:\n :class:`~obspy.core.utcdatetime.UTCDateTime`, optional\n :param end_effective_time: The effective end date.\n :type authors: list of :class:`Person`, optional\n :param authors: The authors of this comment.\n :type subject: str, optional\n :param subject: Subject for relating comment, optional\n \"\"\"\n self.value = value\n self.begin_effective_time = begin_effective_time\n self.end_effective_time = end_effective_time\n self.authors = authors or []\n self.id = id\n self.subject = subject\n\n @property\n def id(self):\n return self._id\n\n @id.setter\n def id(self, value):\n if value is None:\n self._id = value\n return\n if not int(value) >= 0:\n msg = \"ID must be 0 or positive integer.\"\n raise ValueError(msg)\n self._id = value\n\n @property\n def value(self):\n return self._value\n\n @value.setter\n def value(self, value):\n self._value = str(value)\n\n @property\n def begin_effective_time(self):\n return self._begin_effective_time\n\n @begin_effective_time.setter\n def begin_effective_time(self, value):\n if value is None:\n self._begin_effective_time = None\n return\n self._begin_effective_time = UTCDateTime(value)\n\n @property\n def end_effective_time(self):\n return self._end_effective_time\n\n @end_effective_time.setter\n def end_effective_time(self, value):\n if value is None:\n self._end_effective_time = None\n return\n 
self._end_effective_time = UTCDateTime(value)\n\n @property\n def authors(self):\n return self._authors\n\n @authors.setter\n def authors(self, values):\n if not hasattr(values, \"__iter__\"):\n msg = \"authors needs to be iterable, e.g. a list.\"\n raise ValueError(msg)\n self._authors = values\n\n def __str__(self):\n ret = (\"Comment:\\t{value}\\n\"\n \"\\tBegin Effective Time:\\t{begin_effective_time}\\n\"\n \"\\tEnd Effective Time:\\t{end_effective_time}\\n\"\n \"\\tAuthors:\\t\\t{authors}\\n\"\n \"\\tId:\\t\\t\\t{id}\")\n ret = ret.format(\n value=self.value, begin_effective_time=self.begin_effective_time,\n end_effective_time=self.end_effective_time, authors=self.authors,\n id=self.id)\n return ret\n\n def _repr_pretty_(self, p, cycle):\n p.text(str(self))\n\n\nclass Site(ComparingObject):\n \"\"\"\n From the StationXML definition:\n Description of a site location using name and optional geopolitical\n boundaries (country, city, etc.).\n \"\"\"\n def __init__(self, name=\"\", description=None, town=None, county=None,\n region=None, country=None):\n \"\"\"\n :type name: str\n :param name: The commonly used name of this station, equivalent to the\n SEED blockette 50, field 9.\n :type description: str, optional\n :param description: A longer description of the location of this\n station, e.g. \"NW corner of Yellowstone National Park\" or \"20\n miles west of Highway 40.\"\n :type town: str, optional\n :param town: The town or city closest to the station.\n :type county: str, optional\n :param county: The county.\n :type region: str, optional\n :param region: The state, province, or region of this site.\n :type country: str, optional\n :param country: The country.\n \"\"\"\n self.name = name\n self.description = description\n self.town = town\n self.county = county\n self.region = region\n self.country = country\n\n def __str__(self):\n ret = (\"Site: {name}\\n\"\n \"\\tDescription: {description}\\n\"\n \"\\tTown: {town}\\n\"\n \"\\tCounty: {county}\\n\"\n \"\\tRegion: {region}\\n\"\n \"\\tCountry: {country}\")\n ret = ret.format(\n name=self.name, description=self.description,\n town=self.town, county=self.county, region=self.region,\n country=self.country)\n return ret\n\n def _repr_pretty_(self, p, cycle):\n p.text(str(self))\n\n\nclass Latitude(FloatWithUncertaintiesFixedUnit):\n \"\"\"\n Latitude object\n\n :type value: float\n :param value: Latitude value\n :type lower_uncertainty: float\n :param lower_uncertainty: Lower uncertainty (aka minusError)\n :type upper_uncertainty: float\n :param upper_uncertainty: Upper uncertainty (aka plusError)\n :type datum: str\n :param datum: Datum for latitude coordinate\n :type measurement_method: str\n :param measurement_method: Method used in the measurement.\n \"\"\"\n _minimum = -90\n _maximum = 90\n _unit = \"DEGREES\"\n\n def __init__(self, value, lower_uncertainty=None, upper_uncertainty=None,\n datum=None):\n \"\"\"\n \"\"\"\n self.datum = datum\n super(Latitude, self).__init__(\n value, lower_uncertainty=lower_uncertainty,\n upper_uncertainty=upper_uncertainty)\n\n\nclass Longitude(FloatWithUncertaintiesFixedUnit):\n \"\"\"\n Longitude object\n\n :type value: float\n :param value: Longitude value\n :type lower_uncertainty: float\n :param lower_uncertainty: Lower uncertainty (aka minusError)\n :type upper_uncertainty: float\n :param upper_uncertainty: Upper uncertainty (aka plusError)\n :type datum: str\n :param datum: Datum for longitude coordinate\n :type measurement_method: str\n :param measurement_method: Method used in the 
measurement.\n \"\"\"\n _minimum = -180\n _maximum = 180\n unit = \"DEGREES\"\n\n def __init__(self, value, lower_uncertainty=None, upper_uncertainty=None,\n datum=None):\n \"\"\"\n \"\"\"\n self.datum = datum\n super(Longitude, self).__init__(\n value, lower_uncertainty=lower_uncertainty,\n upper_uncertainty=upper_uncertainty)\n\n\nclass Distance(FloatWithUncertaintiesAndUnit):\n \"\"\"\n Distance object\n\n :type value: float\n :param value: Distance value\n :type lower_uncertainty: float\n :param lower_uncertainty: Lower uncertainty (aka minusError)\n :type upper_uncertainty: float\n :param upper_uncertainty: Upper uncertainty (aka plusError)\n :type unit: str\n :param unit: Unit for distance measure.\n :type measurement_method: str\n :param measurement_method: Method used in the measurement.\n \"\"\"\n def __init__(self, value, lower_uncertainty=None, upper_uncertainty=None,\n unit=\"METERS\"):\n super(Distance, self).__init__(\n value, lower_uncertainty=lower_uncertainty,\n upper_uncertainty=upper_uncertainty)\n self._unit = unit\n\n\nclass Azimuth(FloatWithUncertaintiesFixedUnit):\n \"\"\"\n Azimuth object\n\n :type value: float\n :param value: Azimuth value\n :type lower_uncertainty: float\n :param lower_uncertainty: Lower uncertainty (aka minusError)\n :type upper_uncertainty: float\n :param upper_uncertainty: Upper uncertainty (aka plusError)\n :type measurement_method: str\n :param measurement_method: Method used in the measurement.\n \"\"\"\n _minimum = 0\n _maximum = 360\n unit = \"DEGREES\"\n\n\nclass Dip(FloatWithUncertaintiesFixedUnit):\n \"\"\"\n Dip object\n\n :type value: float\n :param value: Dip value\n :type lower_uncertainty: float\n :param lower_uncertainty: Lower uncertainty (aka minusError)\n :type upper_uncertainty: float\n :param upper_uncertainty: Upper uncertainty (aka plusError)\n :type measurement_method: str\n :param measurement_method: Method used in the measurement.\n \"\"\"\n _minimum = -90\n _maximum = 90\n unit = \"DEGREES\"\n\n\nclass ClockDrift(FloatWithUncertaintiesFixedUnit):\n \"\"\"\n ClockDrift object\n\n :type value: float\n :param value: ClockDrift value\n :type lower_uncertainty: float\n :param lower_uncertainty: Lower uncertainty (aka minusError)\n :type upper_uncertainty: float\n :param upper_uncertainty: Upper uncertainty (aka plusError)\n :type measurement_method: str\n :param measurement_method: Method used in the measurement.\n \"\"\"\n _minimum = 0\n unit = \"SECONDS/SAMPLE\"\n\n\nclass SampleRate(FloatWithUncertaintiesFixedUnit):\n \"\"\"\n SampleRate object\n\n :type value: float\n :param value: ClockDrift value\n :type lower_uncertainty: float\n :param lower_uncertainty: Lower uncertainty (aka minusError)\n :type upper_uncertainty: float\n :param upper_uncertainty: Upper uncertainty (aka plusError)\n :type measurement_method: str\n :param measurement_method: Method used in the measurement.\n \"\"\"\n unit = \"SAMPLES/S\"\n\n\nclass Frequency(FloatWithUncertaintiesFixedUnit):\n \"\"\"\n Frequency object\n\n :type value: float\n :param value: Frequency value\n :type lower_uncertainty: float\n :param lower_uncertainty: Lower uncertainty (aka minusError)\n :type upper_uncertainty: float\n :param upper_uncertainty: Upper uncertainty (aka plusError)\n :type measurement_method: str\n :param measurement_method: Method used in the measurement.\n \"\"\"\n unit = \"HERTZ\"\n\n\nclass Angle(FloatWithUncertaintiesFixedUnit):\n \"\"\"\n Angle object\n\n :type value: float\n :param value: Angle value\n :type lower_uncertainty: float\n :param 
lower_uncertainty: Lower uncertainty (aka minusError)\n :type upper_uncertainty: float\n :param upper_uncertainty: Upper uncertainty (aka plusError)\n :type measurement_method: str\n :param measurement_method: Method used in the measurement.\n \"\"\"\n _minimum = -360\n _maximum = 360\n unit = \"DEGREES\"\n\n\ndef _unified_content_strings(contents):\n contents_unique = sorted(set(contents), key=_seed_id_keyfunction)\n contents_counts = [\n (item, contents.count(item)) for item in contents_unique]\n items = [item if count == 1 else \"{} ({}x)\".format(item, count)\n for item, count in contents_counts]\n return items\n\n\n# make TextWrapper only split on colons, so that we avoid splitting in between\n# e.g. network code and network code occurence count (can be controlled with\n# class attributes).\n# Also avoid lines starting with \", \" (need to patch the class for this)\nclass InventoryTextWrapper(TextWrapper):\n wordsep_re = re.compile(r'(, )')\n wordsep_simple_re = re.compile(r'(, )')\n\n def _wrap_chunks(self, *args, **kwargs):\n \"\"\"\n \"\"\"\n # the following doesn't work somehow (likely because of future??)\n # lines = super(InventoryTextWrapper, self)._wrap_chunks(\n # *args, **kwargs)\n lines = TextWrapper._wrap_chunks(self, *args, **kwargs)\n lines = [re.sub(r'([\\b\\s]+), (.*)', r'\\1\\2', line, count=1)\n for line in lines]\n return lines\n\n\ndef _textwrap(text, *args, **kwargs):\n return InventoryTextWrapper(*args, **kwargs).wrap(text)\n\n\ndef _seed_id_keyfunction(x):\n \"\"\"\n Keyfunction to use in sorting two (partial) SEED IDs\n\n Assumes that the last (or only) \".\"-separated part is a channel code.\n Assumes the last character is a the component code and sorts it\n \"Z\"-\"N\"-\"E\"-others_lexical.\n \"\"\"\n # for comparison we build a list of 5 SEED code pieces:\n # [network, station, location, band+instrument, component]\n # with partial codes (i.e. not 4 fields after splitting at dots),\n # we go with the following assumptions (these seem a bit random, but that's\n # what can be encountered in string representations of the Inventory object\n # hierarchy):\n # - no dot means network code only (e.g. \"IU\")\n # - one dot means network.station code only (e.g. \"IU.ANMO\")\n # - two dots means station.location.channel code only (e.g. \"ANMO.10.BHZ\")\n # - three dots: full SEED ID (e.g. \"IU.ANMO.10.BHZ\")\n # - more dots: sort after any of the previous, plain lexical sort\n # if no \".\" in the string: assume it's a network code\n\n # split to get rid of the description that that is added to networks and\n # stations which might also contain dots.\n number_of_dots = x.strip().split()[0].count(\".\")\n\n x = x.upper()\n if number_of_dots == 0:\n x = [x] + [\"\"] * 4\n elif number_of_dots == 1:\n x = x.split(\".\") + [\"\"] * 3\n elif number_of_dots in (2, 3):\n x = x.split(\".\")\n if number_of_dots == 2:\n x = [\"\"] + x\n # split channel code into band+instrument code and component code\n x = x[:-1] + [x[-1][:-1], x[-1] and x[-1][-1] or '']\n # special comparison for component code, convert \"ZNE\" to integers\n # which compare less than any character\n comp = \"ZNE\".find(x[-1])\n # last item is component code, either the original 1-char string, or an\n # int from 0-2 if any of \"ZNE\". 
Python3 does not allow comparison of\n # int and string anymore (Python 2 always compares ints smaller than\n # any string), so we need to work around this by making this last item\n # a tuple with first item False for ints and True for strings.\n if comp >= 0:\n x[-1] = (False, comp)\n else:\n x[-1] = (True, x[-1])\n # all other cases, just convert the upper case string to a single item\n # list - it will compare greater than any of the split lists.\n else:\n x = [x, ]\n\n return x\n\n\ndef _response_plot_label(network, station, channel, label_epoch_dates):\n label = \".\".join((network.code, station.code,\n channel.location_code, channel.code))\n if label_epoch_dates:\n start = channel.start_date\n if start is None:\n start = 'open'\n else:\n start = str(start.date)\n end = channel.end_date\n if end is None:\n end = 'open'\n else:\n end = str(end.date)\n label += '\\n{} -- {}'.format(start, end)\n return label\n\n\ndef _is_valid_uri(uri):\n if ':' not in uri:\n return False\n scheme, path = uri.split(':', 1)\n if any(not x.strip() for x in (scheme, path)):\n return False\n return True\n\n\ndef _warn_on_invalid_uri(uri):\n if not _is_valid_uri(uri):\n msg = f\"Given string seems to not be a valid URI: '{uri}'\"\n warnings.warn(msg)\n\n\ndef _add_resolve_seedid_doc(func):\n \"\"\"\n The following parameters deal with the problem, that the format\n only stores station names for the picks, but the Pick object expects\n a SEED id. The SEED id is looked up for every pick by the\n following procedure:\n\n 1. look at seedid_map for a direct station name match and use the specified\n template\n 2. if 1 did not succeed, look if the station is present in inventory and\n use its first channel as template\n 3. if 1 and 2 did not succeed, use specified default template\n (default_seedid)\n\n :param str filename: File or file-like object in text mode.\n :type inventory: :class:`~obspy.core.inventory.inventory.Inventory`\n :param inventory: Inventory used to retrieve network code, location code\n and channel code of stations (SEED id).\n :param dict seedid_map: Default templates for each station\n (example: `seedid_map={'MOX': 'GR.{}..HH{}'`).\n The values must contain three dots and two `{}` which are\n substituted by station code and component.\n :param str default_seedid: Default SEED id template.\n The value must contain three dots and two `{}` which are\n substituted by station code and component.\n :param bool warn: Whether or not to warn on failed look ups\n (no matching data found or ambiguous results) in the inventory\n \"\"\"\n if func.__doc__ is not None:\n func.__doc__ = func.__doc__ + __doc__\n return func\n\n\ndef _add_resolve_seedid_ph2comp_doc(func):\n \"\"\"\n :param dict ph2comp: mapping of phases to components if format does not\n specify the component or if the component ends with '?'. Set it to\n `None` for no mapping of components. 
(default: {'P': 'Z', 'S': 'N'})\n \"\"\"\n if func.__doc__ is not None:\n func.__doc__ = func.__doc__ + __doc__\n return func\n\n\ndef _resolve_seedid(station, component, inventory=None,\n time=None, seedid_map=None, default_seedid=None,\n key='{sta.code}', id_map=None, id_default=None,\n phase=None, ph2comp={'P': 'Z', 'S': 'N'},\n unused_kwargs=False, warn=True, **kwargs):\n if not unused_kwargs and len(kwargs) > 0:\n raise ValueError(f'Unexpected arguments: {kwargs}')\n if id_map is not None: # backwards compatibility\n seedid_map = id_map\n if id_default is not None: # backwards compatibility\n default_seedid = id_default\n if phase is not None and ph2comp is not None and (\n component == '' or component.endswith('?')):\n component = component[:-1] + ph2comp.get(phase.upper(), '')\n seedid = None\n if seedid_map is not None and station in seedid_map:\n seedid = seedid_map[station].format(station, component)\n elif inventory is not None:\n seedid = _resolve_seedid_from_inventory(\n station, component, inventory, time=time, warn=warn)\n if seedid is None and default_seedid is not None:\n seedid = default_seedid.format(station, component)\n if seedid is None:\n return '', station, None, component\n else:\n return tuple(seedid.split('.'))\n\n\ndef _resolve_seedid_from_inventory(\n station, component, inventory, time=None, network=None,\n location=None, warn=True):\n \"\"\"\n Return a (Network, Station, Location, Channel) tuple.\n\n Given a station and channel code and station metadata (and optionally a\n certain point in time), try to resolve the full SEED ID, i.e. fill in\n a missing/unknown network and/or location code.\n If no matching data is found in metadata or if ambiguities in the station\n metadata are encountered, returns ``None`` for network and/or location\n code.\n\n Simply returns the given (Network, Station, Location, Channel) input if\n *both* ``location`` and ``network`` are already specified.\n\n :type station: str\n :param station: Station code to look up.\n :type channel: str\n :param channel: Channel code to look up.\n :type inventory: :class:`~obspy.core.inventory.inventory.Inventory`\n :param inventory: Station metadata to use for look up of missing parts of\n the full SEED ID.\n :type time: :class:`~obspy.core.utcdatetime.UTCDateTime`\n :param time: Optionally restrict lookup from metadata to given timestamp.\n :type network: str\n :param network: Also specify network code for lookup (not intended to be\n used together with ``location``, see above)\n :type location: str\n :param location: Also specify location code for lookup (not intended to be\n used together with ``network``, see above)\n :type warn: bool\n :param warn: Whether or not to warn on failed look ups (no matching data\n found or ambiguous results) that return some ``None``s.\n :rtype: str\n :returns: SEED id string\n \"\"\"\n inv = inventory.select(station=station, channel='*' + component, time=time,\n network=network, location=location,\n keep_empty=False)\n if len(inv.networks) != 1 or len(inv.networks[0].stations) == 0:\n if warn:\n msg = ('No matching metadata found for station '\n f'{station}, component {component}.')\n warnings.warn(msg)\n return\n net = inv.networks[0]\n seedids = [f'{net.code}.{station}.{cha.location_code}.{cha.code}'\n for cha in net.stations[0] if cha.is_active(time=time)]\n seedids = [id_[:len(id_) - len(component)] + component for id_ in seedids]\n if len(seedids) == 0:\n if warn:\n msg = ('No matching metadata found for station '\n f'{station}, component {component}.')\n 
warnings.warn(msg)\n return\n if len(set(seedids)) > 1 and warn:\n msg = ('Multiple SEED ids found for station '\n f'{station}, component {component}. Use first.')\n warnings.warn(msg)\n return seedids.pop(0)\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod(exclude_empty=True)\n",
"path": "obspy/core/inventory/util.py"
}
] | diff --git a/CHANGELOG.txt b/CHANGELOG.txt
index 8cdb7bc8c2f..bb87264e9ca 100644
--- a/CHANGELOG.txt
+++ b/CHANGELOG.txt
@@ -46,6 +46,8 @@ Changes:
safe route but it also prevents valid calculations as it is up to the user
to make sure that signal spectrum is properly suppressed in those
frequency ranges outside of the valid response information (see #2988)
+ * fix a bug while checking for valid URI syntax when setting identifiers on
+ inventory type objects (see #2905)
- obspy.clients.arclink:
* submodule removed completely, since ArcLink was officially deprecated and
deactivated on all big datacenters years ago (see #2994)
diff --git a/obspy/core/inventory/util.py b/obspy/core/inventory/util.py
index 230cc12fa38..160ab8ffbdf 100644
--- a/obspy/core/inventory/util.py
+++ b/obspy/core/inventory/util.py
@@ -1072,7 +1072,7 @@ def _is_valid_uri(uri):
def _warn_on_invalid_uri(uri):
if not _is_valid_uri(uri):
- msg = "Given string seems to not be a valid URI: ''" % uri
+ msg = f"Given string seems to not be a valid URI: '{uri}'"
warnings.warn(msg)
diff --git a/obspy/core/tests/test_station.py b/obspy/core/tests/test_station.py
index 31fa7853957..6b12c66ae81 100644
--- a/obspy/core/tests/test_station.py
+++ b/obspy/core/tests/test_station.py
@@ -13,6 +13,8 @@
import pytest
from obspy import read_inventory, UTCDateTime
+from obspy.core.inventory import Station
+from obspy.core.util import CatchAndAssertWarnings
from obspy.core.util.testing import WarningsCapture
@@ -153,3 +155,14 @@ def test_station_select(self):
assert len(sta.select(
latitude=47.95, longitude=12.95,
minradius=0.08, maxradius=0.1)) == 0
+
+ def test_warn_identifier_invalid_uri_syntax(self):
+ """
+ Tests the warning on Identifiers getting set with an invalid URI (not
+ having scheme-colon-path)
+ """
+ sta = Station(code='A', latitude=1, longitude=1, elevation=1)
+ invalid_uri = "this-has-no-URI-scheme-and-no-colon"
+ msg = f"Given string seems to not be a valid URI: '{invalid_uri}'"
+ with CatchAndAssertWarnings(expected=[(UserWarning, msg)]):
+ sta.identifiers = [invalid_uri]
|
adamchainz__django-cors-headers-238 | CORS_HEADER value is set to regex match object
The `request._cors_enabled` attribute is set to a regex match object in line 82 of `middleware.py`, as returned by `CorsMiddleware.is_enabled`.
I am not aware of the best practice here, but I would assume a boolean response makes more sense.
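For illustration, a minimal standalone snippet (the pattern and path are made-up values, not the project's settings) showing why this happens: `re.match` returns a truthy match object rather than `True`/`False`, so coercing the result with `bool()` gives a plain boolean. The accompanying patch applies exactly this coercion in `CorsMiddleware.is_enabled`.

```python
import re

# Illustrative values only; in the middleware the pattern comes from
# conf.CORS_URLS_REGEX and the path from request.path.
urls_regex = r"^.*$"
path = "/api/resource/"

enabled = re.match(urls_regex, path)
print(enabled)        # a match object, not a bool
print(bool(enabled))  # True

# Coercing to bool gives the expected True/False value:
enabled = bool(re.match(urls_regex, path))
print(enabled)        # True
```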

| [
{
"content": "import re\n\nfrom django import http\nfrom django.apps import apps\nfrom django.utils.cache import patch_vary_headers\nfrom django.utils.six.moves.urllib.parse import urlparse\n\nfrom .compat import MiddlewareMixin\nfrom .conf import conf\nfrom .signals import check_request_enabled\n\nACCESS_CONTROL_ALLOW_ORIGIN = 'Access-Control-Allow-Origin'\nACCESS_CONTROL_EXPOSE_HEADERS = 'Access-Control-Expose-Headers'\nACCESS_CONTROL_ALLOW_CREDENTIALS = 'Access-Control-Allow-Credentials'\nACCESS_CONTROL_ALLOW_HEADERS = 'Access-Control-Allow-Headers'\nACCESS_CONTROL_ALLOW_METHODS = 'Access-Control-Allow-Methods'\nACCESS_CONTROL_MAX_AGE = 'Access-Control-Max-Age'\n\n\nclass CorsPostCsrfMiddleware(MiddlewareMixin):\n\n def _https_referer_replace_reverse(self, request):\n \"\"\"\n Put the HTTP_REFERER back to its original value and delete the\n temporary storage\n \"\"\"\n if conf.CORS_REPLACE_HTTPS_REFERER and 'ORIGINAL_HTTP_REFERER' in request.META:\n http_referer = request.META['ORIGINAL_HTTP_REFERER']\n request.META['HTTP_REFERER'] = http_referer\n del request.META['ORIGINAL_HTTP_REFERER']\n\n def process_request(self, request):\n self._https_referer_replace_reverse(request)\n return None\n\n def process_view(self, request, callback, callback_args, callback_kwargs):\n self._https_referer_replace_reverse(request)\n return None\n\n\nclass CorsMiddleware(MiddlewareMixin):\n\n def _https_referer_replace(self, request):\n \"\"\"\n When https is enabled, django CSRF checking includes referer checking\n which breaks when using CORS. This function updates the HTTP_REFERER\n header to make sure it matches HTTP_HOST, provided that our cors logic\n succeeds\n \"\"\"\n origin = request.META.get('HTTP_ORIGIN')\n\n if request.is_secure() and origin and 'ORIGINAL_HTTP_REFERER' not in request.META:\n\n url = urlparse(origin)\n if not conf.CORS_ORIGIN_ALLOW_ALL and not self.origin_found_in_white_lists(origin, url):\n return\n\n try:\n http_referer = request.META['HTTP_REFERER']\n http_host = \"https://%s/\" % request.META['HTTP_HOST']\n request.META = request.META.copy()\n request.META['ORIGINAL_HTTP_REFERER'] = http_referer\n request.META['HTTP_REFERER'] = http_host\n except KeyError:\n pass\n\n def process_request(self, request):\n \"\"\"\n If CORS preflight header, then create an\n empty body response (200 OK) and return it\n\n Django won't bother calling any other request\n view/exception middleware along with the requested view;\n it will call any response middlewares\n \"\"\"\n request._cors_enabled = self.is_enabled(request)\n if request._cors_enabled:\n if conf.CORS_REPLACE_HTTPS_REFERER:\n self._https_referer_replace(request)\n\n if (\n request.method == 'OPTIONS' and\n 'HTTP_ACCESS_CONTROL_REQUEST_METHOD' in request.META\n ):\n return http.HttpResponse()\n\n def process_view(self, request, callback, callback_args, callback_kwargs):\n \"\"\"\n Do the referer replacement here as well\n \"\"\"\n if request._cors_enabled and conf.CORS_REPLACE_HTTPS_REFERER:\n self._https_referer_replace(request)\n return None\n\n def process_response(self, request, response):\n \"\"\"\n Add the respective CORS headers\n \"\"\"\n origin = request.META.get('HTTP_ORIGIN')\n if not origin:\n return response\n\n enabled = getattr(request, '_cors_enabled', None)\n if enabled is None:\n enabled = self.is_enabled(request)\n\n if not enabled:\n return response\n\n # todo: check hostname from db instead\n url = urlparse(origin)\n\n if conf.CORS_ALLOW_CREDENTIALS:\n response[ACCESS_CONTROL_ALLOW_CREDENTIALS] = 'true'\n\n if 
(\n not conf.CORS_ORIGIN_ALLOW_ALL and\n not self.origin_found_in_white_lists(origin, url) and\n not self.origin_found_in_model(url) and\n not self.check_signal(request)\n ):\n return response\n\n if conf.CORS_ORIGIN_ALLOW_ALL and not conf.CORS_ALLOW_CREDENTIALS:\n response[ACCESS_CONTROL_ALLOW_ORIGIN] = \"*\"\n else:\n response[ACCESS_CONTROL_ALLOW_ORIGIN] = origin\n patch_vary_headers(response, ['Origin'])\n\n if len(conf.CORS_EXPOSE_HEADERS):\n response[ACCESS_CONTROL_EXPOSE_HEADERS] = ', '.join(conf.CORS_EXPOSE_HEADERS)\n\n if request.method == 'OPTIONS':\n response[ACCESS_CONTROL_ALLOW_HEADERS] = ', '.join(conf.CORS_ALLOW_HEADERS)\n response[ACCESS_CONTROL_ALLOW_METHODS] = ', '.join(conf.CORS_ALLOW_METHODS)\n if conf.CORS_PREFLIGHT_MAX_AGE:\n response[ACCESS_CONTROL_MAX_AGE] = conf.CORS_PREFLIGHT_MAX_AGE\n\n return response\n\n def origin_found_in_white_lists(self, origin, url):\n return (\n url.netloc in conf.CORS_ORIGIN_WHITELIST or\n (origin == 'null' and origin in conf.CORS_ORIGIN_WHITELIST) or\n self.regex_domain_match(origin)\n )\n\n def regex_domain_match(self, origin):\n for domain_pattern in conf.CORS_ORIGIN_REGEX_WHITELIST:\n if re.match(domain_pattern, origin):\n return origin\n\n def origin_found_in_model(self, url):\n if conf.CORS_MODEL is None:\n return False\n model = apps.get_model(*conf.CORS_MODEL.split('.'))\n return model.objects.filter(cors=url.netloc).exists()\n\n def is_enabled(self, request):\n return (\n re.match(conf.CORS_URLS_REGEX, request.path) or\n self.check_signal(request)\n )\n\n def check_signal(self, request):\n signal_responses = check_request_enabled.send(\n sender=None,\n request=request,\n )\n return any(\n return_value for\n function, return_value in signal_responses\n )\n",
"path": "corsheaders/middleware.py"
}
] | [
{
"content": "import re\n\nfrom django import http\nfrom django.apps import apps\nfrom django.utils.cache import patch_vary_headers\nfrom django.utils.six.moves.urllib.parse import urlparse\n\nfrom .compat import MiddlewareMixin\nfrom .conf import conf\nfrom .signals import check_request_enabled\n\nACCESS_CONTROL_ALLOW_ORIGIN = 'Access-Control-Allow-Origin'\nACCESS_CONTROL_EXPOSE_HEADERS = 'Access-Control-Expose-Headers'\nACCESS_CONTROL_ALLOW_CREDENTIALS = 'Access-Control-Allow-Credentials'\nACCESS_CONTROL_ALLOW_HEADERS = 'Access-Control-Allow-Headers'\nACCESS_CONTROL_ALLOW_METHODS = 'Access-Control-Allow-Methods'\nACCESS_CONTROL_MAX_AGE = 'Access-Control-Max-Age'\n\n\nclass CorsPostCsrfMiddleware(MiddlewareMixin):\n\n def _https_referer_replace_reverse(self, request):\n \"\"\"\n Put the HTTP_REFERER back to its original value and delete the\n temporary storage\n \"\"\"\n if conf.CORS_REPLACE_HTTPS_REFERER and 'ORIGINAL_HTTP_REFERER' in request.META:\n http_referer = request.META['ORIGINAL_HTTP_REFERER']\n request.META['HTTP_REFERER'] = http_referer\n del request.META['ORIGINAL_HTTP_REFERER']\n\n def process_request(self, request):\n self._https_referer_replace_reverse(request)\n return None\n\n def process_view(self, request, callback, callback_args, callback_kwargs):\n self._https_referer_replace_reverse(request)\n return None\n\n\nclass CorsMiddleware(MiddlewareMixin):\n\n def _https_referer_replace(self, request):\n \"\"\"\n When https is enabled, django CSRF checking includes referer checking\n which breaks when using CORS. This function updates the HTTP_REFERER\n header to make sure it matches HTTP_HOST, provided that our cors logic\n succeeds\n \"\"\"\n origin = request.META.get('HTTP_ORIGIN')\n\n if request.is_secure() and origin and 'ORIGINAL_HTTP_REFERER' not in request.META:\n\n url = urlparse(origin)\n if not conf.CORS_ORIGIN_ALLOW_ALL and not self.origin_found_in_white_lists(origin, url):\n return\n\n try:\n http_referer = request.META['HTTP_REFERER']\n http_host = \"https://%s/\" % request.META['HTTP_HOST']\n request.META = request.META.copy()\n request.META['ORIGINAL_HTTP_REFERER'] = http_referer\n request.META['HTTP_REFERER'] = http_host\n except KeyError:\n pass\n\n def process_request(self, request):\n \"\"\"\n If CORS preflight header, then create an\n empty body response (200 OK) and return it\n\n Django won't bother calling any other request\n view/exception middleware along with the requested view;\n it will call any response middlewares\n \"\"\"\n request._cors_enabled = self.is_enabled(request)\n if request._cors_enabled:\n if conf.CORS_REPLACE_HTTPS_REFERER:\n self._https_referer_replace(request)\n\n if (\n request.method == 'OPTIONS' and\n 'HTTP_ACCESS_CONTROL_REQUEST_METHOD' in request.META\n ):\n return http.HttpResponse()\n\n def process_view(self, request, callback, callback_args, callback_kwargs):\n \"\"\"\n Do the referer replacement here as well\n \"\"\"\n if request._cors_enabled and conf.CORS_REPLACE_HTTPS_REFERER:\n self._https_referer_replace(request)\n return None\n\n def process_response(self, request, response):\n \"\"\"\n Add the respective CORS headers\n \"\"\"\n origin = request.META.get('HTTP_ORIGIN')\n if not origin:\n return response\n\n enabled = getattr(request, '_cors_enabled', None)\n if enabled is None:\n enabled = self.is_enabled(request)\n\n if not enabled:\n return response\n\n # todo: check hostname from db instead\n url = urlparse(origin)\n\n if conf.CORS_ALLOW_CREDENTIALS:\n response[ACCESS_CONTROL_ALLOW_CREDENTIALS] = 'true'\n\n if 
(\n not conf.CORS_ORIGIN_ALLOW_ALL and\n not self.origin_found_in_white_lists(origin, url) and\n not self.origin_found_in_model(url) and\n not self.check_signal(request)\n ):\n return response\n\n if conf.CORS_ORIGIN_ALLOW_ALL and not conf.CORS_ALLOW_CREDENTIALS:\n response[ACCESS_CONTROL_ALLOW_ORIGIN] = \"*\"\n else:\n response[ACCESS_CONTROL_ALLOW_ORIGIN] = origin\n patch_vary_headers(response, ['Origin'])\n\n if len(conf.CORS_EXPOSE_HEADERS):\n response[ACCESS_CONTROL_EXPOSE_HEADERS] = ', '.join(conf.CORS_EXPOSE_HEADERS)\n\n if request.method == 'OPTIONS':\n response[ACCESS_CONTROL_ALLOW_HEADERS] = ', '.join(conf.CORS_ALLOW_HEADERS)\n response[ACCESS_CONTROL_ALLOW_METHODS] = ', '.join(conf.CORS_ALLOW_METHODS)\n if conf.CORS_PREFLIGHT_MAX_AGE:\n response[ACCESS_CONTROL_MAX_AGE] = conf.CORS_PREFLIGHT_MAX_AGE\n\n return response\n\n def origin_found_in_white_lists(self, origin, url):\n return (\n url.netloc in conf.CORS_ORIGIN_WHITELIST or\n (origin == 'null' and origin in conf.CORS_ORIGIN_WHITELIST) or\n self.regex_domain_match(origin)\n )\n\n def regex_domain_match(self, origin):\n for domain_pattern in conf.CORS_ORIGIN_REGEX_WHITELIST:\n if re.match(domain_pattern, origin):\n return origin\n\n def origin_found_in_model(self, url):\n if conf.CORS_MODEL is None:\n return False\n model = apps.get_model(*conf.CORS_MODEL.split('.'))\n return model.objects.filter(cors=url.netloc).exists()\n\n def is_enabled(self, request):\n return (\n bool(re.match(conf.CORS_URLS_REGEX, request.path)) or\n self.check_signal(request)\n )\n\n def check_signal(self, request):\n signal_responses = check_request_enabled.send(\n sender=None,\n request=request,\n )\n return any(\n return_value for\n function, return_value in signal_responses\n )\n",
"path": "corsheaders/middleware.py"
}
] | diff --git a/HISTORY.rst b/HISTORY.rst
index 1241332d..e99b84a9 100644
--- a/HISTORY.rst
+++ b/HISTORY.rst
@@ -5,6 +5,8 @@ Pending
-------
* New release notes go here.
+* Ensured that ``request._cors_enabled`` is always a ``bool()`` - previously it
+ could be set to a regex match object.
2.1.0 (2017-05-28)
------------------
diff --git a/corsheaders/middleware.py b/corsheaders/middleware.py
index a39e94b5..1b1e672a 100644
--- a/corsheaders/middleware.py
+++ b/corsheaders/middleware.py
@@ -158,7 +158,7 @@ def origin_found_in_model(self, url):
def is_enabled(self, request):
return (
- re.match(conf.CORS_URLS_REGEX, request.path) or
+ bool(re.match(conf.CORS_URLS_REGEX, request.path)) or
self.check_signal(request)
)
diff --git a/tests/test_middleware.py b/tests/test_middleware.py
index a6b7f166..7824cfce 100644
--- a/tests/test_middleware.py
+++ b/tests/test_middleware.py
@@ -322,10 +322,21 @@ def test_get_no_origin_not_enabled(self):
assert ACCESS_CONTROL_ALLOW_ORIGIN not in resp
@override_settings(CORS_ORIGIN_WHITELIST=['example.com'])
- def test_works_if_view_deletes_is_enabled(self):
+ def test_cors_enabled_is_attached_and_bool(self):
+ """
+ Ensure that request._cors_enabled is available - although a private API
+ someone might use it for debugging
+ """
+ resp = self.client.get('/', HTTP_ORIGIN='http://example.com')
+ request = resp.wsgi_request
+ assert isinstance(request._cors_enabled, bool)
+ assert request._cors_enabled
+
+ @override_settings(CORS_ORIGIN_WHITELIST=['example.com'])
+ def test_works_if_view_deletes_cors_enabled(self):
"""
Just in case something crazy happens in the view or other middleware,
- check that get_response doesn't fall over if `is_enabled` is removed
+ check that get_response doesn't fall over if `_cors_enabled` is removed
"""
resp = self.client.get(
'/delete-is-enabled/',
|
getsentry__sentry-15491 | Simple typo in the compact docstring for utils.functional
## Important Details
How are you running Sentry?
* [ ] On-Premise docker [Version xyz]
* [ ] Saas (sentry.io)
* [x] Other [briefly describe your environment]
Observed documentation - not running sentry.
## Description
Simple typo: the docstring should read `values` rather than `valules`.
## Steps to Reproduce
1. Observe docstring in utils.functional.compact method
### What you expected to happen
Should be values rather than valules.
### Possible Solution
Replace valules with values.
| [
{
"content": "from __future__ import absolute_import\n\nimport six\n\nfrom django.utils.functional import empty\n\n\ndef extract_lazy_object(lo):\n \"\"\"\n Unwrap a LazyObject and return the inner object. Whatever that may be.\n\n ProTip: This is relying on `django.utils.functional.empty`, which may\n or may not be removed in the future. It's 100% undocumented.\n \"\"\"\n if not hasattr(lo, \"_wrapped\"):\n return lo\n if lo._wrapped is empty:\n lo._setup()\n return lo._wrapped\n\n\ndef apply_values(function, mapping):\n \"\"\"\\\n Applies ``function`` to a sequence containing all of the values in the\n provided mapping, returing a new mapping with the values replaced with\n the results of the provided function.\n\n >>> apply_values(\n ... lambda values: map(u'{} fish'.format, values),\n ... {1: 'red', 2: 'blue'},\n ... )\n {1: u'red fish', 2: u'blue fish'}\n \"\"\"\n if not mapping:\n return {}\n\n keys, values = zip(*mapping.items())\n return dict(zip(keys, function(values)))\n\n\ndef compact(seq):\n \"\"\"\n Removes ``None`` values from various sequence-based data structures.\n\n dict:\n Removes keys with a corresponding ``None`` value.\n\n list:\n Removes ``None`` valules.\n\n >>> compact({'foo': 'bar', 'baz': None})\n {'foo': 'bar'}\n\n >>> compact([1, None, 2])\n [1, 2]\n \"\"\"\n if isinstance(seq, dict):\n return {k: v for k, v in six.iteritems(seq) if v is not None}\n\n elif isinstance(seq, list):\n return [k for k in seq if k is not None]\n",
"path": "src/sentry/utils/functional.py"
}
] | [
{
"content": "from __future__ import absolute_import\n\nimport six\n\nfrom django.utils.functional import empty\n\n\ndef extract_lazy_object(lo):\n \"\"\"\n Unwrap a LazyObject and return the inner object. Whatever that may be.\n\n ProTip: This is relying on `django.utils.functional.empty`, which may\n or may not be removed in the future. It's 100% undocumented.\n \"\"\"\n if not hasattr(lo, \"_wrapped\"):\n return lo\n if lo._wrapped is empty:\n lo._setup()\n return lo._wrapped\n\n\ndef apply_values(function, mapping):\n \"\"\"\\\n Applies ``function`` to a sequence containing all of the values in the\n provided mapping, returing a new mapping with the values replaced with\n the results of the provided function.\n\n >>> apply_values(\n ... lambda values: map(u'{} fish'.format, values),\n ... {1: 'red', 2: 'blue'},\n ... )\n {1: u'red fish', 2: u'blue fish'}\n \"\"\"\n if not mapping:\n return {}\n\n keys, values = zip(*mapping.items())\n return dict(zip(keys, function(values)))\n\n\ndef compact(seq):\n \"\"\"\n Removes ``None`` values from various sequence-based data structures.\n\n dict:\n Removes keys with a corresponding ``None`` value.\n\n list:\n Removes ``None`` values.\n\n >>> compact({'foo': 'bar', 'baz': None})\n {'foo': 'bar'}\n\n >>> compact([1, None, 2])\n [1, 2]\n \"\"\"\n if isinstance(seq, dict):\n return {k: v for k, v in six.iteritems(seq) if v is not None}\n\n elif isinstance(seq, list):\n return [k for k in seq if k is not None]\n",
"path": "src/sentry/utils/functional.py"
}
] | diff --git a/src/sentry/utils/functional.py b/src/sentry/utils/functional.py
index ee23e33a38f021..91a5e2f4200ff9 100644
--- a/src/sentry/utils/functional.py
+++ b/src/sentry/utils/functional.py
@@ -46,7 +46,7 @@ def compact(seq):
Removes keys with a corresponding ``None`` value.
list:
- Removes ``None`` valules.
+ Removes ``None`` values.
>>> compact({'foo': 'bar', 'baz': None})
{'foo': 'bar'}
|
googleapis__google-cloud-python-9973 | Bigquery: Missing Entity Type when reading dataset.access_entries
When running the following code:
```python
from google.cloud import bigquery
gbq_client = bigquery.Client(project='project-name')
dataset_ref = gbq_client.dataset(dataset_id='dataset1', project='project-name')
dataset = gbq_client.get_dataset(dataset_ref=dataset_ref)
print(len(dataset.access_entries))
```
the following error will happen about 25% of the time:
```python
Traceback (most recent call last):
File "iam.py", line 5, in <module>
print(len(dataset.access_entries))
File "/usr/local/lib/python3.7/site-packages/google/cloud/bigquery/dataset.py", line 376, in access_entries
return [AccessEntry.from_api_repr(entry) for entry in entries]
File "/usr/local/lib/python3.7/site-packages/google/cloud/bigquery/dataset.py", line 376, in <listcomp>
return [AccessEntry.from_api_repr(entry) for entry in entries]
File "/usr/local/lib/python3.7/site-packages/google/cloud/bigquery/dataset.py", line 183, in from_api_repr
return cls(role, entity_type, entity_id)
File "/usr/local/lib/python3.7/site-packages/google/cloud/bigquery/dataset.py", line 115, in __init__
raise ValueError(message)
ValueError: Entity type 'iamMember' not among: domain, groupByEmail, specialGroup, userByEmail, view
```
It seems the Google API is returning a new 'iamMember' entity type that is not in the hard-coded list of allowed entity types in [dataset.py](https://github.com/googleapis/google-cloud-python/blob/master/bigquery/google/cloud/bigquery/dataset.py).
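As a client-side stop-gap, one possible workaround (only a sketch, not an official fix) is to read the raw `access` list from the dataset resource and skip any entry whose entity type this client version does not recognise. It mirrors the parsing done in `AccessEntry.from_api_repr`, but skips unrecognised types instead of raising.

```python
from google.cloud import bigquery
from google.cloud.bigquery.dataset import AccessEntry

gbq_client = bigquery.Client(project='project-name')
dataset_ref = gbq_client.dataset(dataset_id='dataset1', project='project-name')
dataset = gbq_client.get_dataset(dataset_ref=dataset_ref)

# Work on the raw API resource instead of dataset.access_entries so that
# unknown entity types (e.g. 'iamMember') do not raise ValueError.
entries = []
for raw in dataset.to_api_repr().get('access', []):
    raw = dict(raw)
    role = raw.pop('role', None)
    entity_type, entity_id = raw.popitem()
    if entity_type in AccessEntry.ENTITY_TYPES:
        entries.append(AccessEntry(role, entity_type, entity_id))
    # entries with unrecognised types are silently skipped here

print(len(entries))
```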
| [
{
"content": "# Copyright 2015 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Define API Datasets.\"\"\"\n\nfrom __future__ import absolute_import\n\nimport six\nimport copy\n\nimport google.cloud._helpers\nfrom google.cloud.bigquery import _helpers\nfrom google.cloud.bigquery.model import ModelReference\nfrom google.cloud.bigquery.routine import RoutineReference\nfrom google.cloud.bigquery.table import TableReference\nfrom google.cloud.bigquery.encryption_configuration import EncryptionConfiguration\n\n\ndef _get_table_reference(self, table_id):\n \"\"\"Constructs a TableReference.\n\n Args:\n table_id (str): The ID of the table.\n\n Returns:\n google.cloud.bigquery.table.TableReference:\n A table reference for a table in this dataset.\n \"\"\"\n return TableReference(self, table_id)\n\n\ndef _get_model_reference(self, model_id):\n \"\"\"Constructs a ModelReference.\n\n Args:\n model_id (str): the ID of the model.\n\n Returns:\n google.cloud.bigquery.model.ModelReference:\n A ModelReference for a model in this dataset.\n \"\"\"\n return ModelReference.from_api_repr(\n {\"projectId\": self.project, \"datasetId\": self.dataset_id, \"modelId\": model_id}\n )\n\n\ndef _get_routine_reference(self, routine_id):\n \"\"\"Constructs a RoutineReference.\n\n Args:\n routine_id (str): the ID of the routine.\n\n Returns:\n google.cloud.bigquery.routine.RoutineReference:\n A RoutineReference for a routine in this dataset.\n \"\"\"\n return RoutineReference.from_api_repr(\n {\n \"projectId\": self.project,\n \"datasetId\": self.dataset_id,\n \"routineId\": routine_id,\n }\n )\n\n\nclass AccessEntry(object):\n \"\"\"Represents grant of an access role to an entity.\n\n An entry must have exactly one of the allowed :attr:`ENTITY_TYPES`. If\n anything but ``view`` is set, a ``role`` is also required. ``role`` is\n omitted for a ``view``, because ``view`` s are always read-only.\n\n See https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets.\n\n Attributes:\n role (str):\n Role granted to the entity. The following string values are\n supported: `'READER'`, `'WRITER'`, `'OWNER'`. It may also be\n :data:`None` if the ``entity_type`` is ``view``.\n\n entity_type (str):\n Type of entity being granted the role. One of :attr:`ENTITY_TYPES`.\n\n entity_id (Union[str, Dict[str, str]]):\n If the ``entity_type`` is not 'view', the ``entity_id`` is the\n ``str`` ID of the entity being granted the role. If the\n ``entity_type`` is 'view', the ``entity_id`` is a ``dict``\n representing the view from a different dataset to grant access to\n in the following format::\n\n {\n 'projectId': string,\n 'datasetId': string,\n 'tableId': string\n }\n\n Raises:\n ValueError:\n If the ``entity_type`` is not among :attr:`ENTITY_TYPES`, or if a\n ``view`` has ``role`` set, or a non ``view`` **does not** have a\n ``role`` set.\n\n Examples:\n >>> entry = AccessEntry('OWNER', 'userByEmail', '[email protected]')\n\n >>> view = {\n ... 'projectId': 'my-project',\n ... 'datasetId': 'my_dataset',\n ... 
'tableId': 'my_table'\n ... }\n >>> entry = AccessEntry(None, 'view', view)\n \"\"\"\n\n ENTITY_TYPES = frozenset(\n [\"userByEmail\", \"groupByEmail\", \"domain\", \"specialGroup\", \"view\"]\n )\n \"\"\"Allowed entity types.\"\"\"\n\n def __init__(self, role, entity_type, entity_id):\n if entity_type not in self.ENTITY_TYPES:\n message = \"Entity type %r not among: %s\" % (\n entity_type,\n \", \".join(self.ENTITY_TYPES),\n )\n raise ValueError(message)\n if entity_type == \"view\":\n if role is not None:\n raise ValueError(\n \"Role must be None for a view. Received \" \"role: %r\" % (role,)\n )\n else:\n if role is None:\n raise ValueError(\n \"Role must be set for entity \" \"type %r\" % (entity_type,)\n )\n\n self.role = role\n self.entity_type = entity_type\n self.entity_id = entity_id\n\n def __eq__(self, other):\n if not isinstance(other, AccessEntry):\n return NotImplemented\n return (\n self.role == other.role\n and self.entity_type == other.entity_type\n and self.entity_id == other.entity_id\n )\n\n def __ne__(self, other):\n return not self == other\n\n def __repr__(self):\n return \"<AccessEntry: role=%s, %s=%s>\" % (\n self.role,\n self.entity_type,\n self.entity_id,\n )\n\n def to_api_repr(self):\n \"\"\"Construct the API resource representation of this access entry\n\n Returns:\n Dict[str, object]: Access entry represented as an API resource\n \"\"\"\n resource = {self.entity_type: self.entity_id}\n if self.role is not None:\n resource[\"role\"] = self.role\n return resource\n\n @classmethod\n def from_api_repr(cls, resource):\n \"\"\"Factory: construct an access entry given its API representation\n\n Args:\n resource (Dict[str, object]):\n Access entry resource representation returned from the API\n\n Returns:\n google.cloud.bigquery.dataset.AccessEntry:\n Access entry parsed from ``resource``.\n\n Raises:\n ValueError:\n If the resource has more keys than ``role`` and one additional\n key.\n \"\"\"\n entry = resource.copy()\n role = entry.pop(\"role\", None)\n entity_type, entity_id = entry.popitem()\n if len(entry) != 0:\n raise ValueError(\"Entry has unexpected keys remaining.\", entry)\n return cls(role, entity_type, entity_id)\n\n\nclass DatasetReference(object):\n \"\"\"DatasetReferences are pointers to datasets.\n\n See\n https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#datasetreference\n\n Args:\n project (str): The ID of the project\n dataset_id (str): The ID of the dataset\n\n Raises:\n ValueError: If either argument is not of type ``str``.\n \"\"\"\n\n def __init__(self, project, dataset_id):\n if not isinstance(project, six.string_types):\n raise ValueError(\"Pass a string for project\")\n if not isinstance(dataset_id, six.string_types):\n raise ValueError(\"Pass a string for dataset_id\")\n self._project = project\n self._dataset_id = dataset_id\n\n @property\n def project(self):\n \"\"\"str: Project ID of the dataset.\"\"\"\n return self._project\n\n @property\n def dataset_id(self):\n \"\"\"str: Dataset ID.\"\"\"\n return self._dataset_id\n\n @property\n def path(self):\n \"\"\"str: URL path for the dataset based on project and dataset ID.\"\"\"\n return \"/projects/%s/datasets/%s\" % (self.project, self.dataset_id)\n\n table = _get_table_reference\n\n model = _get_model_reference\n\n routine = _get_routine_reference\n\n @classmethod\n def from_api_repr(cls, resource):\n \"\"\"Factory: construct a dataset reference given its API representation\n\n Args:\n resource (Dict[str, str]):\n Dataset reference resource representation returned from 
the API\n\n Returns:\n google.cloud.bigquery.dataset.DatasetReference:\n Dataset reference parsed from ``resource``.\n \"\"\"\n project = resource[\"projectId\"]\n dataset_id = resource[\"datasetId\"]\n return cls(project, dataset_id)\n\n @classmethod\n def from_string(cls, dataset_id, default_project=None):\n \"\"\"Construct a dataset reference from dataset ID string.\n\n Args:\n dataset_id (str):\n A dataset ID in standard SQL format. If ``default_project``\n is not specified, this must include both the project ID and\n the dataset ID, separated by ``.``.\n default_project (str):\n Optional. The project ID to use when ``dataset_id`` does not\n include a project ID.\n\n Returns:\n DatasetReference:\n Dataset reference parsed from ``dataset_id``.\n\n Examples:\n >>> DatasetReference.from_string('my-project-id.some_dataset')\n DatasetReference('my-project-id', 'some_dataset')\n\n Raises:\n ValueError:\n If ``dataset_id`` is not a fully-qualified dataset ID in\n standard SQL format.\n \"\"\"\n output_dataset_id = dataset_id\n output_project_id = default_project\n parts = _helpers._split_id(dataset_id)\n\n if len(parts) == 1 and not default_project:\n raise ValueError(\n \"When default_project is not set, dataset_id must be a \"\n \"fully-qualified dataset ID in standard SQL format, \"\n 'e.g., \"project.dataset_id\" got {}'.format(dataset_id)\n )\n elif len(parts) == 2:\n output_project_id, output_dataset_id = parts\n elif len(parts) > 2:\n raise ValueError(\n \"Too many parts in dataset_id. Expected a fully-qualified \"\n \"dataset ID in standard SQL format. e.g. \"\n '\"project.dataset_id\", got {}'.format(dataset_id)\n )\n\n return cls(output_project_id, output_dataset_id)\n\n def to_api_repr(self):\n \"\"\"Construct the API resource representation of this dataset reference\n\n Returns:\n Dict[str, str]: dataset reference represented as an API resource\n \"\"\"\n return {\"projectId\": self._project, \"datasetId\": self._dataset_id}\n\n def _key(self):\n \"\"\"A tuple key that uniquely describes this field.\n\n Used to compute this instance's hashcode and evaluate equality.\n\n Returns:\n Tuple[str]: The contents of this :class:`.DatasetReference`.\n \"\"\"\n return (self._project, self._dataset_id)\n\n def __eq__(self, other):\n if not isinstance(other, DatasetReference):\n return NotImplemented\n return self._key() == other._key()\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n return hash(self._key())\n\n def __repr__(self):\n return \"DatasetReference{}\".format(self._key())\n\n\nclass Dataset(object):\n \"\"\"Datasets are containers for tables.\n\n See\n https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource-dataset\n\n Args:\n dataset_ref (Union[google.cloud.bigquery.dataset.DatasetReference, str]):\n A pointer to a dataset. 
If ``dataset_ref`` is a string, it must\n include both the project ID and the dataset ID, separated by\n ``.``.\n \"\"\"\n\n _PROPERTY_TO_API_FIELD = {\n \"access_entries\": \"access\",\n \"created\": \"creationTime\",\n \"default_partition_expiration_ms\": \"defaultPartitionExpirationMs\",\n \"default_table_expiration_ms\": \"defaultTableExpirationMs\",\n \"friendly_name\": \"friendlyName\",\n \"default_encryption_configuration\": \"defaultEncryptionConfiguration\",\n }\n\n def __init__(self, dataset_ref):\n if isinstance(dataset_ref, six.string_types):\n dataset_ref = DatasetReference.from_string(dataset_ref)\n self._properties = {\"datasetReference\": dataset_ref.to_api_repr(), \"labels\": {}}\n\n @property\n def project(self):\n \"\"\"str: Project ID of the project bound to the dataset.\"\"\"\n return self._properties[\"datasetReference\"][\"projectId\"]\n\n @property\n def path(self):\n \"\"\"str: URL path for the dataset based on project and dataset ID.\"\"\"\n return \"/projects/%s/datasets/%s\" % (self.project, self.dataset_id)\n\n @property\n def access_entries(self):\n \"\"\"List[google.cloud.bigquery.dataset.AccessEntry]: Dataset's access\n entries.\n\n ``role`` augments the entity type and must be present **unless** the\n entity type is ``view``.\n\n Raises:\n TypeError: If 'value' is not a sequence\n ValueError:\n If any item in the sequence is not an\n :class:`~google.cloud.bigquery.dataset.AccessEntry`.\n \"\"\"\n entries = self._properties.get(\"access\", [])\n return [AccessEntry.from_api_repr(entry) for entry in entries]\n\n @access_entries.setter\n def access_entries(self, value):\n if not all(isinstance(field, AccessEntry) for field in value):\n raise ValueError(\"Values must be AccessEntry instances\")\n entries = [entry.to_api_repr() for entry in value]\n self._properties[\"access\"] = entries\n\n @property\n def created(self):\n \"\"\"Union[datetime.datetime, None]: Datetime at which the dataset was\n created (:data:`None` until set from the server).\n \"\"\"\n creation_time = self._properties.get(\"creationTime\")\n if creation_time is not None:\n # creation_time will be in milliseconds.\n return google.cloud._helpers._datetime_from_microseconds(\n 1000.0 * float(creation_time)\n )\n\n @property\n def dataset_id(self):\n \"\"\"str: Dataset ID.\"\"\"\n return self._properties[\"datasetReference\"][\"datasetId\"]\n\n @property\n def full_dataset_id(self):\n \"\"\"Union[str, None]: ID for the dataset resource (:data:`None` until\n set from the server)\n\n In the format ``project_id:dataset_id``.\n \"\"\"\n return self._properties.get(\"id\")\n\n @property\n def reference(self):\n \"\"\"google.cloud.bigquery.dataset.DatasetReference: A reference to this\n dataset.\n \"\"\"\n return DatasetReference(self.project, self.dataset_id)\n\n @property\n def etag(self):\n \"\"\"Union[str, None]: ETag for the dataset resource (:data:`None` until\n set from the server).\n \"\"\"\n return self._properties.get(\"etag\")\n\n @property\n def modified(self):\n \"\"\"Union[datetime.datetime, None]: Datetime at which the dataset was\n last modified (:data:`None` until set from the server).\n \"\"\"\n modified_time = self._properties.get(\"lastModifiedTime\")\n if modified_time is not None:\n # modified_time will be in milliseconds.\n return google.cloud._helpers._datetime_from_microseconds(\n 1000.0 * float(modified_time)\n )\n\n @property\n def self_link(self):\n \"\"\"Union[str, None]: URL for the dataset resource (:data:`None` until\n set from the server).\n \"\"\"\n return 
self._properties.get(\"selfLink\")\n\n @property\n def default_partition_expiration_ms(self):\n \"\"\"Optional[int]: The default partition expiration for all\n partitioned tables in the dataset, in milliseconds.\n\n Once this property is set, all newly-created partitioned tables in\n the dataset will have an ``time_paritioning.expiration_ms`` property\n set to this value, and changing the value will only affect new\n tables, not existing ones. The storage in a partition will have an\n expiration time of its partition time plus this value.\n\n Setting this property overrides the use of\n ``default_table_expiration_ms`` for partitioned tables: only one of\n ``default_table_expiration_ms`` and\n ``default_partition_expiration_ms`` will be used for any new\n partitioned table. If you provide an explicit\n ``time_partitioning.expiration_ms`` when creating or updating a\n partitioned table, that value takes precedence over the default\n partition expiration time indicated by this property.\n \"\"\"\n return _helpers._int_or_none(\n self._properties.get(\"defaultPartitionExpirationMs\")\n )\n\n @default_partition_expiration_ms.setter\n def default_partition_expiration_ms(self, value):\n self._properties[\"defaultPartitionExpirationMs\"] = _helpers._str_or_none(value)\n\n @property\n def default_table_expiration_ms(self):\n \"\"\"Union[int, None]: Default expiration time for tables in the dataset\n (defaults to :data:`None`).\n\n Raises:\n ValueError: For invalid value types.\n \"\"\"\n return _helpers._int_or_none(self._properties.get(\"defaultTableExpirationMs\"))\n\n @default_table_expiration_ms.setter\n def default_table_expiration_ms(self, value):\n if not isinstance(value, six.integer_types) and value is not None:\n raise ValueError(\"Pass an integer, or None\")\n self._properties[\"defaultTableExpirationMs\"] = _helpers._str_or_none(value)\n\n @property\n def description(self):\n \"\"\"Optional[str]: Description of the dataset as set by the user\n (defaults to :data:`None`).\n\n Raises:\n ValueError: for invalid value types.\n \"\"\"\n return self._properties.get(\"description\")\n\n @description.setter\n def description(self, value):\n if not isinstance(value, six.string_types) and value is not None:\n raise ValueError(\"Pass a string, or None\")\n self._properties[\"description\"] = value\n\n @property\n def friendly_name(self):\n \"\"\"Union[str, None]: Title of the dataset as set by the user\n (defaults to :data:`None`).\n\n Raises:\n ValueError: for invalid value types.\n \"\"\"\n return self._properties.get(\"friendlyName\")\n\n @friendly_name.setter\n def friendly_name(self, value):\n if not isinstance(value, six.string_types) and value is not None:\n raise ValueError(\"Pass a string, or None\")\n self._properties[\"friendlyName\"] = value\n\n @property\n def location(self):\n \"\"\"Union[str, None]: Location in which the dataset is hosted as set by\n the user (defaults to :data:`None`).\n\n Raises:\n ValueError: for invalid value types.\n \"\"\"\n return self._properties.get(\"location\")\n\n @location.setter\n def location(self, value):\n if not isinstance(value, six.string_types) and value is not None:\n raise ValueError(\"Pass a string, or None\")\n self._properties[\"location\"] = value\n\n @property\n def labels(self):\n \"\"\"Dict[str, str]: Labels for the dataset.\n\n This method always returns a dict. To change a dataset's labels,\n modify the dict, then call\n :meth:`google.cloud.bigquery.client.Client.update_dataset`. 
To delete\n a label, set its value to :data:`None` before updating.\n\n Raises:\n ValueError: for invalid value types.\n \"\"\"\n return self._properties.setdefault(\"labels\", {})\n\n @labels.setter\n def labels(self, value):\n if not isinstance(value, dict):\n raise ValueError(\"Pass a dict\")\n self._properties[\"labels\"] = value\n\n @property\n def default_encryption_configuration(self):\n \"\"\"google.cloud.bigquery.encryption_configuration.EncryptionConfiguration: Custom\n encryption configuration for all tables in the dataset.\n\n Custom encryption configuration (e.g., Cloud KMS keys) or :data:`None`\n if using default encryption.\n\n See `protecting data with Cloud KMS keys\n <https://cloud.google.com/bigquery/docs/customer-managed-encryption>`_\n in the BigQuery documentation.\n \"\"\"\n prop = self._properties.get(\"defaultEncryptionConfiguration\")\n if prop:\n prop = EncryptionConfiguration.from_api_repr(prop)\n return prop\n\n @default_encryption_configuration.setter\n def default_encryption_configuration(self, value):\n api_repr = value\n if value:\n api_repr = value.to_api_repr()\n self._properties[\"defaultEncryptionConfiguration\"] = api_repr\n\n @classmethod\n def from_string(cls, full_dataset_id):\n \"\"\"Construct a dataset from fully-qualified dataset ID.\n\n Args:\n full_dataset_id (str):\n A fully-qualified dataset ID in standard SQL format. Must\n include both the project ID and the dataset ID, separated by\n ``.``.\n\n Returns:\n Dataset: Dataset parsed from ``full_dataset_id``.\n\n Examples:\n >>> Dataset.from_string('my-project-id.some_dataset')\n Dataset(DatasetReference('my-project-id', 'some_dataset'))\n\n Raises:\n ValueError:\n If ``full_dataset_id`` is not a fully-qualified dataset ID in\n standard SQL format.\n \"\"\"\n return cls(DatasetReference.from_string(full_dataset_id))\n\n @classmethod\n def from_api_repr(cls, resource):\n \"\"\"Factory: construct a dataset given its API representation\n\n Args:\n resource (Dict[str: object]):\n Dataset resource representation returned from the API\n\n Returns:\n google.cloud.bigquery.dataset.Dataset:\n Dataset parsed from ``resource``.\n \"\"\"\n if (\n \"datasetReference\" not in resource\n or \"datasetId\" not in resource[\"datasetReference\"]\n ):\n raise KeyError(\n \"Resource lacks required identity information:\"\n '[\"datasetReference\"][\"datasetId\"]'\n )\n project_id = resource[\"datasetReference\"][\"projectId\"]\n dataset_id = resource[\"datasetReference\"][\"datasetId\"]\n dataset = cls(DatasetReference(project_id, dataset_id))\n dataset._properties = copy.deepcopy(resource)\n return dataset\n\n def to_api_repr(self):\n \"\"\"Construct the API resource representation of this dataset\n\n Returns:\n Dict[str, object]: The dataset represented as an API resource\n \"\"\"\n return copy.deepcopy(self._properties)\n\n def _build_resource(self, filter_fields):\n \"\"\"Generate a resource for ``update``.\"\"\"\n return _helpers._build_resource_from_properties(self, filter_fields)\n\n table = _get_table_reference\n\n model = _get_model_reference\n\n routine = _get_routine_reference\n\n def __repr__(self):\n return \"Dataset({})\".format(repr(self.reference))\n\n\nclass DatasetListItem(object):\n \"\"\"A read-only dataset resource from a list operation.\n\n For performance reasons, the BigQuery API only includes some of the\n dataset properties when listing datasets. 
Notably,\n :attr:`~google.cloud.bigquery.dataset.Dataset.access_entries` is missing.\n\n For a full list of the properties that the BigQuery API returns, see the\n `REST documentation for datasets.list\n <https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list>`_.\n\n\n Args:\n resource (Dict[str, str]):\n A dataset-like resource object from a dataset list response. A\n ``datasetReference`` property is required.\n\n Raises:\n ValueError:\n If ``datasetReference`` or one of its required members is missing\n from ``resource``.\n \"\"\"\n\n def __init__(self, resource):\n if \"datasetReference\" not in resource:\n raise ValueError(\"resource must contain a datasetReference value\")\n if \"projectId\" not in resource[\"datasetReference\"]:\n raise ValueError(\n \"resource['datasetReference'] must contain a projectId value\"\n )\n if \"datasetId\" not in resource[\"datasetReference\"]:\n raise ValueError(\n \"resource['datasetReference'] must contain a datasetId value\"\n )\n self._properties = resource\n\n @property\n def project(self):\n \"\"\"str: Project bound to the dataset.\"\"\"\n return self._properties[\"datasetReference\"][\"projectId\"]\n\n @property\n def dataset_id(self):\n \"\"\"str: Dataset ID.\"\"\"\n return self._properties[\"datasetReference\"][\"datasetId\"]\n\n @property\n def full_dataset_id(self):\n \"\"\"Union[str, None]: ID for the dataset resource (:data:`None` until\n set from the server)\n\n In the format ``project_id:dataset_id``.\n \"\"\"\n return self._properties.get(\"id\")\n\n @property\n def friendly_name(self):\n \"\"\"Union[str, None]: Title of the dataset as set by the user\n (defaults to :data:`None`).\n \"\"\"\n return self._properties.get(\"friendlyName\")\n\n @property\n def labels(self):\n \"\"\"Dict[str, str]: Labels for the dataset.\"\"\"\n return self._properties.setdefault(\"labels\", {})\n\n @property\n def reference(self):\n \"\"\"google.cloud.bigquery.dataset.DatasetReference: A reference to this\n dataset.\n \"\"\"\n return DatasetReference(self.project, self.dataset_id)\n\n table = _get_table_reference\n\n model = _get_model_reference\n\n routine = _get_routine_reference\n",
"path": "bigquery/google/cloud/bigquery/dataset.py"
}
] | [
{
"content": "# Copyright 2015 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Define API Datasets.\"\"\"\n\nfrom __future__ import absolute_import\n\nimport six\nimport copy\n\nimport google.cloud._helpers\nfrom google.cloud.bigquery import _helpers\nfrom google.cloud.bigquery.model import ModelReference\nfrom google.cloud.bigquery.routine import RoutineReference\nfrom google.cloud.bigquery.table import TableReference\nfrom google.cloud.bigquery.encryption_configuration import EncryptionConfiguration\n\n\ndef _get_table_reference(self, table_id):\n \"\"\"Constructs a TableReference.\n\n Args:\n table_id (str): The ID of the table.\n\n Returns:\n google.cloud.bigquery.table.TableReference:\n A table reference for a table in this dataset.\n \"\"\"\n return TableReference(self, table_id)\n\n\ndef _get_model_reference(self, model_id):\n \"\"\"Constructs a ModelReference.\n\n Args:\n model_id (str): the ID of the model.\n\n Returns:\n google.cloud.bigquery.model.ModelReference:\n A ModelReference for a model in this dataset.\n \"\"\"\n return ModelReference.from_api_repr(\n {\"projectId\": self.project, \"datasetId\": self.dataset_id, \"modelId\": model_id}\n )\n\n\ndef _get_routine_reference(self, routine_id):\n \"\"\"Constructs a RoutineReference.\n\n Args:\n routine_id (str): the ID of the routine.\n\n Returns:\n google.cloud.bigquery.routine.RoutineReference:\n A RoutineReference for a routine in this dataset.\n \"\"\"\n return RoutineReference.from_api_repr(\n {\n \"projectId\": self.project,\n \"datasetId\": self.dataset_id,\n \"routineId\": routine_id,\n }\n )\n\n\nclass AccessEntry(object):\n \"\"\"Represents grant of an access role to an entity.\n\n An entry must have exactly one of the allowed :attr:`ENTITY_TYPES`. If\n anything but ``view`` is set, a ``role`` is also required. ``role`` is\n omitted for a ``view``, because ``view`` s are always read-only.\n\n See https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets.\n\n Attributes:\n role (str):\n Role granted to the entity. The following string values are\n supported: `'READER'`, `'WRITER'`, `'OWNER'`. It may also be\n :data:`None` if the ``entity_type`` is ``view``.\n\n entity_type (str):\n Type of entity being granted the role. One of :attr:`ENTITY_TYPES`.\n\n entity_id (Union[str, Dict[str, str]]):\n If the ``entity_type`` is not 'view', the ``entity_id`` is the\n ``str`` ID of the entity being granted the role. If the\n ``entity_type`` is 'view', the ``entity_id`` is a ``dict``\n representing the view from a different dataset to grant access to\n in the following format::\n\n {\n 'projectId': string,\n 'datasetId': string,\n 'tableId': string\n }\n\n Raises:\n ValueError:\n If the ``entity_type`` is not among :attr:`ENTITY_TYPES`, or if a\n ``view`` has ``role`` set, or a non ``view`` **does not** have a\n ``role`` set.\n\n Examples:\n >>> entry = AccessEntry('OWNER', 'userByEmail', '[email protected]')\n\n >>> view = {\n ... 'projectId': 'my-project',\n ... 'datasetId': 'my_dataset',\n ... 
'tableId': 'my_table'\n ... }\n >>> entry = AccessEntry(None, 'view', view)\n \"\"\"\n\n ENTITY_TYPES = frozenset(\n [\"userByEmail\", \"groupByEmail\", \"domain\", \"specialGroup\", \"view\", \"iamMember\"]\n )\n \"\"\"Allowed entity types.\"\"\"\n\n def __init__(self, role, entity_type, entity_id):\n if entity_type not in self.ENTITY_TYPES:\n message = \"Entity type %r not among: %s\" % (\n entity_type,\n \", \".join(self.ENTITY_TYPES),\n )\n raise ValueError(message)\n if entity_type == \"view\":\n if role is not None:\n raise ValueError(\n \"Role must be None for a view. Received \" \"role: %r\" % (role,)\n )\n else:\n if role is None:\n raise ValueError(\n \"Role must be set for entity \" \"type %r\" % (entity_type,)\n )\n\n self.role = role\n self.entity_type = entity_type\n self.entity_id = entity_id\n\n def __eq__(self, other):\n if not isinstance(other, AccessEntry):\n return NotImplemented\n return (\n self.role == other.role\n and self.entity_type == other.entity_type\n and self.entity_id == other.entity_id\n )\n\n def __ne__(self, other):\n return not self == other\n\n def __repr__(self):\n return \"<AccessEntry: role=%s, %s=%s>\" % (\n self.role,\n self.entity_type,\n self.entity_id,\n )\n\n def to_api_repr(self):\n \"\"\"Construct the API resource representation of this access entry\n\n Returns:\n Dict[str, object]: Access entry represented as an API resource\n \"\"\"\n resource = {self.entity_type: self.entity_id}\n if self.role is not None:\n resource[\"role\"] = self.role\n return resource\n\n @classmethod\n def from_api_repr(cls, resource):\n \"\"\"Factory: construct an access entry given its API representation\n\n Args:\n resource (Dict[str, object]):\n Access entry resource representation returned from the API\n\n Returns:\n google.cloud.bigquery.dataset.AccessEntry:\n Access entry parsed from ``resource``.\n\n Raises:\n ValueError:\n If the resource has more keys than ``role`` and one additional\n key.\n \"\"\"\n entry = resource.copy()\n role = entry.pop(\"role\", None)\n entity_type, entity_id = entry.popitem()\n if len(entry) != 0:\n raise ValueError(\"Entry has unexpected keys remaining.\", entry)\n return cls(role, entity_type, entity_id)\n\n\nclass DatasetReference(object):\n \"\"\"DatasetReferences are pointers to datasets.\n\n See\n https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#datasetreference\n\n Args:\n project (str): The ID of the project\n dataset_id (str): The ID of the dataset\n\n Raises:\n ValueError: If either argument is not of type ``str``.\n \"\"\"\n\n def __init__(self, project, dataset_id):\n if not isinstance(project, six.string_types):\n raise ValueError(\"Pass a string for project\")\n if not isinstance(dataset_id, six.string_types):\n raise ValueError(\"Pass a string for dataset_id\")\n self._project = project\n self._dataset_id = dataset_id\n\n @property\n def project(self):\n \"\"\"str: Project ID of the dataset.\"\"\"\n return self._project\n\n @property\n def dataset_id(self):\n \"\"\"str: Dataset ID.\"\"\"\n return self._dataset_id\n\n @property\n def path(self):\n \"\"\"str: URL path for the dataset based on project and dataset ID.\"\"\"\n return \"/projects/%s/datasets/%s\" % (self.project, self.dataset_id)\n\n table = _get_table_reference\n\n model = _get_model_reference\n\n routine = _get_routine_reference\n\n @classmethod\n def from_api_repr(cls, resource):\n \"\"\"Factory: construct a dataset reference given its API representation\n\n Args:\n resource (Dict[str, str]):\n Dataset reference resource representation 
returned from the API\n\n Returns:\n google.cloud.bigquery.dataset.DatasetReference:\n Dataset reference parsed from ``resource``.\n \"\"\"\n project = resource[\"projectId\"]\n dataset_id = resource[\"datasetId\"]\n return cls(project, dataset_id)\n\n @classmethod\n def from_string(cls, dataset_id, default_project=None):\n \"\"\"Construct a dataset reference from dataset ID string.\n\n Args:\n dataset_id (str):\n A dataset ID in standard SQL format. If ``default_project``\n is not specified, this must include both the project ID and\n the dataset ID, separated by ``.``.\n default_project (str):\n Optional. The project ID to use when ``dataset_id`` does not\n include a project ID.\n\n Returns:\n DatasetReference:\n Dataset reference parsed from ``dataset_id``.\n\n Examples:\n >>> DatasetReference.from_string('my-project-id.some_dataset')\n DatasetReference('my-project-id', 'some_dataset')\n\n Raises:\n ValueError:\n If ``dataset_id`` is not a fully-qualified dataset ID in\n standard SQL format.\n \"\"\"\n output_dataset_id = dataset_id\n output_project_id = default_project\n parts = _helpers._split_id(dataset_id)\n\n if len(parts) == 1 and not default_project:\n raise ValueError(\n \"When default_project is not set, dataset_id must be a \"\n \"fully-qualified dataset ID in standard SQL format, \"\n 'e.g., \"project.dataset_id\" got {}'.format(dataset_id)\n )\n elif len(parts) == 2:\n output_project_id, output_dataset_id = parts\n elif len(parts) > 2:\n raise ValueError(\n \"Too many parts in dataset_id. Expected a fully-qualified \"\n \"dataset ID in standard SQL format. e.g. \"\n '\"project.dataset_id\", got {}'.format(dataset_id)\n )\n\n return cls(output_project_id, output_dataset_id)\n\n def to_api_repr(self):\n \"\"\"Construct the API resource representation of this dataset reference\n\n Returns:\n Dict[str, str]: dataset reference represented as an API resource\n \"\"\"\n return {\"projectId\": self._project, \"datasetId\": self._dataset_id}\n\n def _key(self):\n \"\"\"A tuple key that uniquely describes this field.\n\n Used to compute this instance's hashcode and evaluate equality.\n\n Returns:\n Tuple[str]: The contents of this :class:`.DatasetReference`.\n \"\"\"\n return (self._project, self._dataset_id)\n\n def __eq__(self, other):\n if not isinstance(other, DatasetReference):\n return NotImplemented\n return self._key() == other._key()\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n return hash(self._key())\n\n def __repr__(self):\n return \"DatasetReference{}\".format(self._key())\n\n\nclass Dataset(object):\n \"\"\"Datasets are containers for tables.\n\n See\n https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource-dataset\n\n Args:\n dataset_ref (Union[google.cloud.bigquery.dataset.DatasetReference, str]):\n A pointer to a dataset. 
If ``dataset_ref`` is a string, it must\n include both the project ID and the dataset ID, separated by\n ``.``.\n \"\"\"\n\n _PROPERTY_TO_API_FIELD = {\n \"access_entries\": \"access\",\n \"created\": \"creationTime\",\n \"default_partition_expiration_ms\": \"defaultPartitionExpirationMs\",\n \"default_table_expiration_ms\": \"defaultTableExpirationMs\",\n \"friendly_name\": \"friendlyName\",\n \"default_encryption_configuration\": \"defaultEncryptionConfiguration\",\n }\n\n def __init__(self, dataset_ref):\n if isinstance(dataset_ref, six.string_types):\n dataset_ref = DatasetReference.from_string(dataset_ref)\n self._properties = {\"datasetReference\": dataset_ref.to_api_repr(), \"labels\": {}}\n\n @property\n def project(self):\n \"\"\"str: Project ID of the project bound to the dataset.\"\"\"\n return self._properties[\"datasetReference\"][\"projectId\"]\n\n @property\n def path(self):\n \"\"\"str: URL path for the dataset based on project and dataset ID.\"\"\"\n return \"/projects/%s/datasets/%s\" % (self.project, self.dataset_id)\n\n @property\n def access_entries(self):\n \"\"\"List[google.cloud.bigquery.dataset.AccessEntry]: Dataset's access\n entries.\n\n ``role`` augments the entity type and must be present **unless** the\n entity type is ``view``.\n\n Raises:\n TypeError: If 'value' is not a sequence\n ValueError:\n If any item in the sequence is not an\n :class:`~google.cloud.bigquery.dataset.AccessEntry`.\n \"\"\"\n entries = self._properties.get(\"access\", [])\n return [AccessEntry.from_api_repr(entry) for entry in entries]\n\n @access_entries.setter\n def access_entries(self, value):\n if not all(isinstance(field, AccessEntry) for field in value):\n raise ValueError(\"Values must be AccessEntry instances\")\n entries = [entry.to_api_repr() for entry in value]\n self._properties[\"access\"] = entries\n\n @property\n def created(self):\n \"\"\"Union[datetime.datetime, None]: Datetime at which the dataset was\n created (:data:`None` until set from the server).\n \"\"\"\n creation_time = self._properties.get(\"creationTime\")\n if creation_time is not None:\n # creation_time will be in milliseconds.\n return google.cloud._helpers._datetime_from_microseconds(\n 1000.0 * float(creation_time)\n )\n\n @property\n def dataset_id(self):\n \"\"\"str: Dataset ID.\"\"\"\n return self._properties[\"datasetReference\"][\"datasetId\"]\n\n @property\n def full_dataset_id(self):\n \"\"\"Union[str, None]: ID for the dataset resource (:data:`None` until\n set from the server)\n\n In the format ``project_id:dataset_id``.\n \"\"\"\n return self._properties.get(\"id\")\n\n @property\n def reference(self):\n \"\"\"google.cloud.bigquery.dataset.DatasetReference: A reference to this\n dataset.\n \"\"\"\n return DatasetReference(self.project, self.dataset_id)\n\n @property\n def etag(self):\n \"\"\"Union[str, None]: ETag for the dataset resource (:data:`None` until\n set from the server).\n \"\"\"\n return self._properties.get(\"etag\")\n\n @property\n def modified(self):\n \"\"\"Union[datetime.datetime, None]: Datetime at which the dataset was\n last modified (:data:`None` until set from the server).\n \"\"\"\n modified_time = self._properties.get(\"lastModifiedTime\")\n if modified_time is not None:\n # modified_time will be in milliseconds.\n return google.cloud._helpers._datetime_from_microseconds(\n 1000.0 * float(modified_time)\n )\n\n @property\n def self_link(self):\n \"\"\"Union[str, None]: URL for the dataset resource (:data:`None` until\n set from the server).\n \"\"\"\n return 
self._properties.get(\"selfLink\")\n\n @property\n def default_partition_expiration_ms(self):\n \"\"\"Optional[int]: The default partition expiration for all\n partitioned tables in the dataset, in milliseconds.\n\n Once this property is set, all newly-created partitioned tables in\n the dataset will have an ``time_paritioning.expiration_ms`` property\n set to this value, and changing the value will only affect new\n tables, not existing ones. The storage in a partition will have an\n expiration time of its partition time plus this value.\n\n Setting this property overrides the use of\n ``default_table_expiration_ms`` for partitioned tables: only one of\n ``default_table_expiration_ms`` and\n ``default_partition_expiration_ms`` will be used for any new\n partitioned table. If you provide an explicit\n ``time_partitioning.expiration_ms`` when creating or updating a\n partitioned table, that value takes precedence over the default\n partition expiration time indicated by this property.\n \"\"\"\n return _helpers._int_or_none(\n self._properties.get(\"defaultPartitionExpirationMs\")\n )\n\n @default_partition_expiration_ms.setter\n def default_partition_expiration_ms(self, value):\n self._properties[\"defaultPartitionExpirationMs\"] = _helpers._str_or_none(value)\n\n @property\n def default_table_expiration_ms(self):\n \"\"\"Union[int, None]: Default expiration time for tables in the dataset\n (defaults to :data:`None`).\n\n Raises:\n ValueError: For invalid value types.\n \"\"\"\n return _helpers._int_or_none(self._properties.get(\"defaultTableExpirationMs\"))\n\n @default_table_expiration_ms.setter\n def default_table_expiration_ms(self, value):\n if not isinstance(value, six.integer_types) and value is not None:\n raise ValueError(\"Pass an integer, or None\")\n self._properties[\"defaultTableExpirationMs\"] = _helpers._str_or_none(value)\n\n @property\n def description(self):\n \"\"\"Optional[str]: Description of the dataset as set by the user\n (defaults to :data:`None`).\n\n Raises:\n ValueError: for invalid value types.\n \"\"\"\n return self._properties.get(\"description\")\n\n @description.setter\n def description(self, value):\n if not isinstance(value, six.string_types) and value is not None:\n raise ValueError(\"Pass a string, or None\")\n self._properties[\"description\"] = value\n\n @property\n def friendly_name(self):\n \"\"\"Union[str, None]: Title of the dataset as set by the user\n (defaults to :data:`None`).\n\n Raises:\n ValueError: for invalid value types.\n \"\"\"\n return self._properties.get(\"friendlyName\")\n\n @friendly_name.setter\n def friendly_name(self, value):\n if not isinstance(value, six.string_types) and value is not None:\n raise ValueError(\"Pass a string, or None\")\n self._properties[\"friendlyName\"] = value\n\n @property\n def location(self):\n \"\"\"Union[str, None]: Location in which the dataset is hosted as set by\n the user (defaults to :data:`None`).\n\n Raises:\n ValueError: for invalid value types.\n \"\"\"\n return self._properties.get(\"location\")\n\n @location.setter\n def location(self, value):\n if not isinstance(value, six.string_types) and value is not None:\n raise ValueError(\"Pass a string, or None\")\n self._properties[\"location\"] = value\n\n @property\n def labels(self):\n \"\"\"Dict[str, str]: Labels for the dataset.\n\n This method always returns a dict. To change a dataset's labels,\n modify the dict, then call\n :meth:`google.cloud.bigquery.client.Client.update_dataset`. 
To delete\n a label, set its value to :data:`None` before updating.\n\n Raises:\n ValueError: for invalid value types.\n \"\"\"\n return self._properties.setdefault(\"labels\", {})\n\n @labels.setter\n def labels(self, value):\n if not isinstance(value, dict):\n raise ValueError(\"Pass a dict\")\n self._properties[\"labels\"] = value\n\n @property\n def default_encryption_configuration(self):\n \"\"\"google.cloud.bigquery.encryption_configuration.EncryptionConfiguration: Custom\n encryption configuration for all tables in the dataset.\n\n Custom encryption configuration (e.g., Cloud KMS keys) or :data:`None`\n if using default encryption.\n\n See `protecting data with Cloud KMS keys\n <https://cloud.google.com/bigquery/docs/customer-managed-encryption>`_\n in the BigQuery documentation.\n \"\"\"\n prop = self._properties.get(\"defaultEncryptionConfiguration\")\n if prop:\n prop = EncryptionConfiguration.from_api_repr(prop)\n return prop\n\n @default_encryption_configuration.setter\n def default_encryption_configuration(self, value):\n api_repr = value\n if value:\n api_repr = value.to_api_repr()\n self._properties[\"defaultEncryptionConfiguration\"] = api_repr\n\n @classmethod\n def from_string(cls, full_dataset_id):\n \"\"\"Construct a dataset from fully-qualified dataset ID.\n\n Args:\n full_dataset_id (str):\n A fully-qualified dataset ID in standard SQL format. Must\n include both the project ID and the dataset ID, separated by\n ``.``.\n\n Returns:\n Dataset: Dataset parsed from ``full_dataset_id``.\n\n Examples:\n >>> Dataset.from_string('my-project-id.some_dataset')\n Dataset(DatasetReference('my-project-id', 'some_dataset'))\n\n Raises:\n ValueError:\n If ``full_dataset_id`` is not a fully-qualified dataset ID in\n standard SQL format.\n \"\"\"\n return cls(DatasetReference.from_string(full_dataset_id))\n\n @classmethod\n def from_api_repr(cls, resource):\n \"\"\"Factory: construct a dataset given its API representation\n\n Args:\n resource (Dict[str: object]):\n Dataset resource representation returned from the API\n\n Returns:\n google.cloud.bigquery.dataset.Dataset:\n Dataset parsed from ``resource``.\n \"\"\"\n if (\n \"datasetReference\" not in resource\n or \"datasetId\" not in resource[\"datasetReference\"]\n ):\n raise KeyError(\n \"Resource lacks required identity information:\"\n '[\"datasetReference\"][\"datasetId\"]'\n )\n project_id = resource[\"datasetReference\"][\"projectId\"]\n dataset_id = resource[\"datasetReference\"][\"datasetId\"]\n dataset = cls(DatasetReference(project_id, dataset_id))\n dataset._properties = copy.deepcopy(resource)\n return dataset\n\n def to_api_repr(self):\n \"\"\"Construct the API resource representation of this dataset\n\n Returns:\n Dict[str, object]: The dataset represented as an API resource\n \"\"\"\n return copy.deepcopy(self._properties)\n\n def _build_resource(self, filter_fields):\n \"\"\"Generate a resource for ``update``.\"\"\"\n return _helpers._build_resource_from_properties(self, filter_fields)\n\n table = _get_table_reference\n\n model = _get_model_reference\n\n routine = _get_routine_reference\n\n def __repr__(self):\n return \"Dataset({})\".format(repr(self.reference))\n\n\nclass DatasetListItem(object):\n \"\"\"A read-only dataset resource from a list operation.\n\n For performance reasons, the BigQuery API only includes some of the\n dataset properties when listing datasets. 
Notably,\n :attr:`~google.cloud.bigquery.dataset.Dataset.access_entries` is missing.\n\n For a full list of the properties that the BigQuery API returns, see the\n `REST documentation for datasets.list\n <https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list>`_.\n\n\n Args:\n resource (Dict[str, str]):\n A dataset-like resource object from a dataset list response. A\n ``datasetReference`` property is required.\n\n Raises:\n ValueError:\n If ``datasetReference`` or one of its required members is missing\n from ``resource``.\n \"\"\"\n\n def __init__(self, resource):\n if \"datasetReference\" not in resource:\n raise ValueError(\"resource must contain a datasetReference value\")\n if \"projectId\" not in resource[\"datasetReference\"]:\n raise ValueError(\n \"resource['datasetReference'] must contain a projectId value\"\n )\n if \"datasetId\" not in resource[\"datasetReference\"]:\n raise ValueError(\n \"resource['datasetReference'] must contain a datasetId value\"\n )\n self._properties = resource\n\n @property\n def project(self):\n \"\"\"str: Project bound to the dataset.\"\"\"\n return self._properties[\"datasetReference\"][\"projectId\"]\n\n @property\n def dataset_id(self):\n \"\"\"str: Dataset ID.\"\"\"\n return self._properties[\"datasetReference\"][\"datasetId\"]\n\n @property\n def full_dataset_id(self):\n \"\"\"Union[str, None]: ID for the dataset resource (:data:`None` until\n set from the server)\n\n In the format ``project_id:dataset_id``.\n \"\"\"\n return self._properties.get(\"id\")\n\n @property\n def friendly_name(self):\n \"\"\"Union[str, None]: Title of the dataset as set by the user\n (defaults to :data:`None`).\n \"\"\"\n return self._properties.get(\"friendlyName\")\n\n @property\n def labels(self):\n \"\"\"Dict[str, str]: Labels for the dataset.\"\"\"\n return self._properties.setdefault(\"labels\", {})\n\n @property\n def reference(self):\n \"\"\"google.cloud.bigquery.dataset.DatasetReference: A reference to this\n dataset.\n \"\"\"\n return DatasetReference(self.project, self.dataset_id)\n\n table = _get_table_reference\n\n model = _get_model_reference\n\n routine = _get_routine_reference\n",
"path": "bigquery/google/cloud/bigquery/dataset.py"
}
] | diff --git a/bigquery/google/cloud/bigquery/dataset.py b/bigquery/google/cloud/bigquery/dataset.py
index 02664d87b153..99c47026fe3a 100644
--- a/bigquery/google/cloud/bigquery/dataset.py
+++ b/bigquery/google/cloud/bigquery/dataset.py
@@ -123,7 +123,7 @@ class AccessEntry(object):
"""
ENTITY_TYPES = frozenset(
- ["userByEmail", "groupByEmail", "domain", "specialGroup", "view"]
+ ["userByEmail", "groupByEmail", "domain", "specialGroup", "view", "iamMember"]
)
"""Allowed entity types."""
|
huggingface__transformers-6719 | Some weights of AlbertModel were not initialized ['albert.embeddings.position_ids']
Hello!
There seems to be a problem with the current code for loading a pre-trained Albert model. The following warning appears for any configuration of the Albert model:
`Some weights of AlbertModel were not initialized from the model checkpoint at albert-base-v2 and are newly initialized: ['albert.embeddings.position_ids']`
`You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.`
I found that this happens only when installing from source. Models load correctly (without the warning) when the library is installed with pip.
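A minimal sketch of how the warning can be reproduced (assuming a source install of transformers and network access to download the checkpoint):
```python
from transformers import AlbertModel

# On a source install, loading any pretrained ALBERT checkpoint prints the warning,
# since the checkpoint contains no `albert.embeddings.position_ids` buffer to load.
model = AlbertModel.from_pretrained("albert-base-v2")
```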
| [
{
"content": "# coding=utf-8\n# Copyright 2018 Google AI, Google Brain and the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch ALBERT model. \"\"\"\n\nimport logging\nimport math\nimport os\nimport warnings\nfrom dataclasses import dataclass\nfrom typing import Optional, Tuple\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import CrossEntropyLoss, MSELoss\n\nfrom .configuration_albert import AlbertConfig\nfrom .file_utils import (\n ModelOutput,\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_callable,\n replace_return_docstrings,\n)\nfrom .modeling_bert import ACT2FN, BertEmbeddings, BertSelfAttention, prune_linear_layer\nfrom .modeling_outputs import (\n BaseModelOutput,\n BaseModelOutputWithPooling,\n MaskedLMOutput,\n MultipleChoiceModelOutput,\n QuestionAnsweringModelOutput,\n SequenceClassifierOutput,\n TokenClassifierOutput,\n)\nfrom .modeling_utils import PreTrainedModel, apply_chunking_to_forward, find_pruneable_heads_and_indices\n\n\nlogger = logging.getLogger(__name__)\n\n_CONFIG_FOR_DOC = \"AlbertConfig\"\n_TOKENIZER_FOR_DOC = \"AlbertTokenizer\"\n\n\nALBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"albert-base-v1\",\n \"albert-large-v1\",\n \"albert-xlarge-v1\",\n \"albert-xxlarge-v1\",\n \"albert-base-v2\",\n \"albert-large-v2\",\n \"albert-xlarge-v2\",\n \"albert-xxlarge-v2\",\n # See all ALBERT models at https://huggingface.co/models?filter=albert\n]\n\n\ndef load_tf_weights_in_albert(model, config, tf_checkpoint_path):\n \"\"\" Load tf checkpoints in a pytorch model.\"\"\"\n try:\n import re\n\n import numpy as np\n import tensorflow as tf\n except ImportError:\n logger.error(\n \"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\"\n )\n raise\n tf_path = os.path.abspath(tf_checkpoint_path)\n logger.info(\"Converting TensorFlow checkpoint from {}\".format(tf_path))\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n arrays = []\n for name, shape in init_vars:\n logger.info(\"Loading TF weight {} with shape {}\".format(name, shape))\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n arrays.append(array)\n\n for name, array in zip(names, arrays):\n print(name)\n\n for name, array in zip(names, arrays):\n original_name = name\n\n # If saved from the TF HUB module\n name = name.replace(\"module/\", \"\")\n\n # Renaming and simplifying\n name = name.replace(\"ffn_1\", \"ffn\")\n name = name.replace(\"bert/\", \"albert/\")\n name = name.replace(\"attention_1\", \"attention\")\n name = name.replace(\"transform/\", \"\")\n name = name.replace(\"LayerNorm_1\", \"full_layer_layer_norm\")\n name = name.replace(\"LayerNorm\", \"attention/LayerNorm\")\n name = name.replace(\"transformer/\", \"\")\n\n # The feed forward layer had an 'intermediate' step which has been abstracted away\n name = name.replace(\"intermediate/dense/\", \"\")\n name = name.replace(\"ffn/intermediate/output/dense/\", \"ffn_output/\")\n\n # ALBERT attention was split between self and output which have been abstracted away\n name = name.replace(\"/output/\", \"/\")\n name = name.replace(\"/self/\", \"/\")\n\n # The pooler is a linear layer\n name = name.replace(\"pooler/dense\", \"pooler\")\n\n # The classifier was simplified to predictions from cls/predictions\n name = name.replace(\"cls/predictions\", \"predictions\")\n name = name.replace(\"predictions/attention\", \"predictions\")\n\n # Naming was changed to be more explicit\n name = name.replace(\"embeddings/attention\", \"embeddings\")\n name = name.replace(\"inner_group_\", \"albert_layers/\")\n name = name.replace(\"group_\", \"albert_layer_groups/\")\n\n # Classifier\n if len(name.split(\"/\")) == 1 and (\"output_bias\" in name or \"output_weights\" in name):\n name = \"classifier/\" + name\n\n # No ALBERT model currently handles the next sentence prediction task\n if \"seq_relationship\" in name:\n name = name.replace(\"seq_relationship/output_\", \"sop_classifier/classifier/\")\n name = name.replace(\"weights\", \"weight\")\n\n name = name.split(\"/\")\n\n # Ignore the gradients applied by the LAMB/ADAM optimizers.\n if (\n \"adam_m\" in name\n or \"adam_v\" in name\n or \"AdamWeightDecayOptimizer\" in name\n or \"AdamWeightDecayOptimizer_1\" in name\n or \"global_step\" in name\n ):\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n continue\n\n pointer = model\n for m_name in name:\n if re.fullmatch(r\"[A-Za-z]+_\\d+\", m_name):\n scope_names = re.split(r\"_(\\d+)\", m_name)\n else:\n scope_names = [m_name]\n\n if scope_names[0] == \"kernel\" or scope_names[0] == \"gamma\":\n pointer = getattr(pointer, \"weight\")\n elif scope_names[0] == \"output_bias\" or scope_names[0] == \"beta\":\n pointer = getattr(pointer, \"bias\")\n elif scope_names[0] == \"output_weights\":\n pointer = getattr(pointer, \"weight\")\n elif scope_names[0] == \"squad\":\n pointer = getattr(pointer, \"classifier\")\n else:\n try:\n pointer = getattr(pointer, scope_names[0])\n except AttributeError:\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n continue\n if len(scope_names) >= 2:\n num = int(scope_names[1])\n pointer = pointer[num]\n\n if m_name[-11:] == 
\"_embeddings\":\n pointer = getattr(pointer, \"weight\")\n elif m_name == \"kernel\":\n array = np.transpose(array)\n try:\n assert (\n pointer.shape == array.shape\n ), f\"Pointer shape {pointer.shape} and array shape {array.shape} mismatched\"\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n print(\"Initialize PyTorch weight {} from {}\".format(name, original_name))\n pointer.data = torch.from_numpy(array)\n\n return model\n\n\nclass AlbertEmbeddings(BertEmbeddings):\n \"\"\"\n Construct the embeddings from word, position and token_type embeddings.\n \"\"\"\n\n def __init__(self, config):\n super().__init__(config)\n\n self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)\n self.LayerNorm = torch.nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)\n\n\nclass AlbertAttention(BertSelfAttention):\n def __init__(self, config):\n super().__init__(config)\n\n self.num_attention_heads = config.num_attention_heads\n self.hidden_size = config.hidden_size\n self.attention_head_size = config.hidden_size // config.num_attention_heads\n self.attention_dropout = nn.Dropout(config.attention_probs_dropout_prob)\n self.output_dropout = nn.Dropout(config.hidden_dropout_prob)\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n heads, index = find_pruneable_heads_and_indices(\n heads, self.num_attention_heads, self.attention_head_size, self.pruned_heads\n )\n\n # Prune linear layers\n self.query = prune_linear_layer(self.query, index)\n self.key = prune_linear_layer(self.key, index)\n self.value = prune_linear_layer(self.value, index)\n self.dense = prune_linear_layer(self.dense, index, dim=1)\n\n # Update hyper params and store pruned heads\n self.num_attention_heads = self.num_attention_heads - len(heads)\n self.all_head_size = self.attention_head_size * self.num_attention_heads\n self.pruned_heads = self.pruned_heads.union(heads)\n\n def forward(self, input_ids, attention_mask=None, head_mask=None, output_attentions=False):\n mixed_query_layer = self.query(input_ids)\n mixed_key_layer = self.key(input_ids)\n mixed_value_layer = self.value(input_ids)\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n key_layer = self.transpose_for_scores(mixed_key_layer)\n value_layer = self.transpose_for_scores(mixed_value_layer)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n if attention_mask is not None:\n # Apply the attention mask is (precomputed for all layers in BertModel forward() function)\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = nn.Softmax(dim=-1)(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.attention_dropout(attention_probs)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = 
attention_probs * head_mask\n\n context_layer = torch.matmul(attention_probs, value_layer)\n\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n\n # Should find a better way to do this\n w = (\n self.dense.weight.t()\n .view(self.num_attention_heads, self.attention_head_size, self.hidden_size)\n .to(context_layer.dtype)\n )\n b = self.dense.bias.to(context_layer.dtype)\n\n projected_context_layer = torch.einsum(\"bfnd,ndh->bfh\", context_layer, w) + b\n projected_context_layer_dropout = self.output_dropout(projected_context_layer)\n layernormed_context_layer = self.LayerNorm(input_ids + projected_context_layer_dropout)\n return (layernormed_context_layer, attention_probs) if output_attentions else (layernormed_context_layer,)\n\n\nclass AlbertLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n\n self.config = config\n self.chunk_size_feed_forward = config.chunk_size_feed_forward\n self.seq_len_dim = 1\n self.full_layer_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.attention = AlbertAttention(config)\n self.ffn = nn.Linear(config.hidden_size, config.intermediate_size)\n self.ffn_output = nn.Linear(config.intermediate_size, config.hidden_size)\n self.activation = ACT2FN[config.hidden_act]\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(\n self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False\n ):\n attention_output = self.attention(hidden_states, attention_mask, head_mask, output_attentions)\n\n ffn_output = apply_chunking_to_forward(\n self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output[0],\n )\n hidden_states = self.full_layer_layer_norm(ffn_output + attention_output[0])\n\n return (hidden_states,) + attention_output[1:] # add attentions if we output them\n\n def ff_chunk(self, attention_output):\n ffn_output = self.ffn(attention_output)\n ffn_output = self.activation(ffn_output)\n ffn_output = self.ffn_output(ffn_output)\n return ffn_output\n\n\nclass AlbertLayerGroup(nn.Module):\n def __init__(self, config):\n super().__init__()\n\n self.albert_layers = nn.ModuleList([AlbertLayer(config) for _ in range(config.inner_group_num)])\n\n def forward(\n self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False\n ):\n layer_hidden_states = ()\n layer_attentions = ()\n\n for layer_index, albert_layer in enumerate(self.albert_layers):\n layer_output = albert_layer(hidden_states, attention_mask, head_mask[layer_index], output_attentions)\n hidden_states = layer_output[0]\n\n if output_attentions:\n layer_attentions = layer_attentions + (layer_output[1],)\n\n if output_hidden_states:\n layer_hidden_states = layer_hidden_states + (hidden_states,)\n\n outputs = (hidden_states,)\n if output_hidden_states:\n outputs = outputs + (layer_hidden_states,)\n if output_attentions:\n outputs = outputs + (layer_attentions,)\n return outputs # last-layer hidden state, (layer hidden states), (layer attentions)\n\n\nclass AlbertTransformer(nn.Module):\n def __init__(self, config):\n super().__init__()\n\n self.config = config\n self.embedding_hidden_mapping_in = nn.Linear(config.embedding_size, config.hidden_size)\n self.albert_layer_groups = nn.ModuleList([AlbertLayerGroup(config) for _ in range(config.num_hidden_groups)])\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=False,\n ):\n 
hidden_states = self.embedding_hidden_mapping_in(hidden_states)\n\n all_hidden_states = (hidden_states,) if output_hidden_states else None\n all_attentions = () if output_attentions else None\n\n for i in range(self.config.num_hidden_layers):\n # Number of layers in a hidden group\n layers_per_group = int(self.config.num_hidden_layers / self.config.num_hidden_groups)\n\n # Index of the hidden group\n group_idx = int(i / (self.config.num_hidden_layers / self.config.num_hidden_groups))\n\n layer_group_output = self.albert_layer_groups[group_idx](\n hidden_states,\n attention_mask,\n head_mask[group_idx * layers_per_group : (group_idx + 1) * layers_per_group],\n output_attentions,\n output_hidden_states,\n )\n hidden_states = layer_group_output[0]\n\n if output_attentions:\n all_attentions = all_attentions + layer_group_output[-1]\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)\n return BaseModelOutput(\n last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions\n )\n\n\nclass AlbertPreTrainedModel(PreTrainedModel):\n \"\"\" An abstract class to handle weights initialization and\n a simple interface for downloading and loading pretrained models.\n \"\"\"\n\n config_class = AlbertConfig\n base_model_prefix = \"albert\"\n\n def _init_weights(self, module):\n \"\"\" Initialize the weights.\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if isinstance(module, (nn.Linear)) and module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n\n@dataclass\nclass AlbertForPreTrainingOutput(ModelOutput):\n \"\"\"\n Output type of :class:`~transformers.AlbertForPreTrainingModel`.\n\n Args:\n loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`):\n Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.\n prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n sop_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):\n Prediction scores of the next sequence prediction (classification) head (scores of True/False\n continuation before SoftMax).\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted 
average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n prediction_logits: torch.FloatTensor = None\n sop_logits: torch.FloatTensor = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n\nALBERT_START_DOCSTRING = r\"\"\"\n\n This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general\n usage and behavior.\n\n Args:\n config (:class:`~transformers.AlbertConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the configuration.\n Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.\n\"\"\"\n\nALBERT_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`transformers.AlbertTokenizer`.\n See :func:`transformers.PreTrainedTokenizer.encode` and\n :func:`transformers.PreTrainedTokenizer` for details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Mask to avoid performing attention on padding token indices.\n Mask values selected in ``[0, 1]``:\n ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Segment token indices to indicate first and second portions of the inputs.\n Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``\n corresponds to a `sentence B` token\n\n `What are token type IDs? <../glossary.html#token-type-ids>`_\n position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Indices of positions of each input sequence tokens in the position embeddings.\n Selected in the range ``[0, config.max_position_embeddings - 1]``.\n\n `What are position IDs? <../glossary.html#position-ids>`_\n head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):\n Mask to nullify selected heads of the self-attention modules.\n Mask values selected in ``[0, 1]``:\n :obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`, defaults to :obj:`None`):\n If set to ``True``, the attentions tensors of all attention layers are returned. See ``attentions`` under returned tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`, defaults to :obj:`None`):\n If set to ``True``, the hidden states of all layers are returned. 
See ``hidden_states`` under returned tensors for more detail.\n return_dict (:obj:`bool`, `optional`, defaults to :obj:`None`):\n If set to ``True``, the model will return a :class:`~transformers.file_utils.ModelOutput` instead of a\n plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare ALBERT Model transformer outputting raw hidden-states without any specific head on top.\",\n ALBERT_START_DOCSTRING,\n)\nclass AlbertModel(AlbertPreTrainedModel):\n\n config_class = AlbertConfig\n load_tf_weights = load_tf_weights_in_albert\n base_model_prefix = \"albert\"\n\n def __init__(self, config):\n super().__init__(config)\n\n self.config = config\n self.embeddings = AlbertEmbeddings(config)\n self.encoder = AlbertTransformer(config)\n self.pooler = nn.Linear(config.hidden_size, config.hidden_size)\n self.pooler_activation = nn.Tanh()\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.word_embeddings = value\n\n def _resize_token_embeddings(self, new_num_tokens):\n old_embeddings = self.embeddings.word_embeddings\n new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)\n self.embeddings.word_embeddings = new_embeddings\n return self.embeddings.word_embeddings\n\n def _prune_heads(self, heads_to_prune):\n \"\"\" Prunes heads of the model.\n heads_to_prune: dict of {layer_num: list of heads to prune in this layer}\n ALBERT has a different architecture in that its layers are shared across groups, which then has inner groups.\n If an ALBERT model has 12 hidden layers and 2 hidden groups, with two inner groups, there\n is a total of 4 different layers.\n\n These layers are flattened: the indices [0,1] correspond to the two inner groups of the first hidden layer,\n while [2,3] correspond to the two inner groups of the second hidden layer.\n\n Any layer with in index other than [0,1,2,3] will result in an error.\n See base class PreTrainedModel for more information about head pruning\n \"\"\"\n for layer, heads in heads_to_prune.items():\n group_idx = int(layer / self.config.inner_group_num)\n inner_group_idx = int(layer - group_idx * self.config.inner_group_num)\n self.encoder.albert_layer_groups[group_idx].albert_layers[inner_group_idx].attention.prune_heads(heads)\n\n @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"albert-base-v2\",\n output_type=BaseModelOutputWithPooling,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not 
None else inputs_embeds.device\n\n if attention_mask is None:\n attention_mask = torch.ones(input_shape, device=device)\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n embedding_output = self.embeddings(\n input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds\n )\n encoder_outputs = self.encoder(\n embedding_output,\n extended_attention_mask,\n head_mask=head_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = encoder_outputs[0]\n\n pooled_output = self.pooler_activation(self.pooler(sequence_output[:, 0]))\n\n if not return_dict:\n return (sequence_output, pooled_output) + encoder_outputs[1:]\n\n return BaseModelOutputWithPooling(\n last_hidden_state=sequence_output,\n pooler_output=pooled_output,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"Albert Model with two heads on top as done during the pre-training: a `masked language modeling` head and\n a `sentence order prediction (classification)` head. \"\"\",\n ALBERT_START_DOCSTRING,\n)\nclass AlbertForPreTraining(AlbertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.albert = AlbertModel(config)\n self.predictions = AlbertMLMHead(config)\n self.sop_classifier = AlbertSOPHead(config)\n\n self.init_weights()\n self.tie_weights()\n\n def tie_weights(self):\n self._tie_or_clone_weights(self.predictions.decoder, self.albert.embeddings.word_embeddings)\n\n def get_output_embeddings(self):\n return self.predictions.decoder\n\n @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=AlbertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n sentence_order_label=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n r\"\"\"\n labels (``torch.LongTensor`` of shape ``(batch_size, sequence_length)``, `optional`, defaults to :obj:`None`):\n Labels for computing the masked language modeling loss.\n Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)\n Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels\n in ``[0, ..., config.vocab_size]``\n sentence_order_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`, defaults to :obj:`None`):\n Labels for computing the next sequence prediction (classification) loss. 
Input should be a sequence pair (see :obj:`input_ids` docstring)\n Indices should be in ``[0, 1]``.\n ``0`` indicates original order (sequence A, then sequence B),\n ``1`` indicates switched order (sequence B, then sequence A).\n kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):\n Used to hide legacy arguments that have been deprecated.\n\n Returns:\n\n Examples::\n\n >>> from transformers import AlbertTokenizer, AlbertForPreTraining\n >>> import torch\n\n >>> tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')\n >>> model = AlbertForPreTraining.from_pretrained('albert-base-v2', return_dict=True)\n\n >>> input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True)).unsqueeze(0) # Batch size 1\n >>> outputs = model(input_ids)\n\n >>> prediction_logits = outputs.prediction_logits\n >>> sop_logits = outputs.sop_logits\n\n \"\"\"\n\n if \"masked_lm_labels\" in kwargs:\n warnings.warn(\n \"The `masked_lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.\",\n FutureWarning,\n )\n labels = kwargs.pop(\"masked_lm_labels\")\n assert kwargs == {}, f\"Unexpected keyword arguments: {list(kwargs.keys())}.\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.albert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output, pooled_output = outputs[:2]\n\n prediction_scores = self.predictions(sequence_output)\n sop_scores = self.sop_classifier(pooled_output)\n\n total_loss = None\n if labels is not None and sentence_order_label is not None:\n loss_fct = CrossEntropyLoss()\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n sentence_order_loss = loss_fct(sop_scores.view(-1, 2), sentence_order_label.view(-1))\n total_loss = masked_lm_loss + sentence_order_loss\n\n if not return_dict:\n output = (prediction_scores, sop_scores) + outputs[2:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return AlbertForPreTrainingOutput(\n loss=total_loss,\n prediction_logits=prediction_scores,\n sop_logits=sop_scores,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\nclass AlbertMLMHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n\n self.LayerNorm = nn.LayerNorm(config.embedding_size)\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n self.dense = nn.Linear(config.hidden_size, config.embedding_size)\n self.decoder = nn.Linear(config.embedding_size, config.vocab_size)\n self.activation = ACT2FN[config.hidden_act]\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.activation(hidden_states)\n hidden_states = self.LayerNorm(hidden_states)\n hidden_states = self.decoder(hidden_states)\n\n prediction_scores = hidden_states\n\n return prediction_scores\n\n\nclass AlbertSOPHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n\n self.dropout = nn.Dropout(config.classifier_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n def forward(self, pooled_output):\n 
dropout_pooled_output = self.dropout(pooled_output)\n logits = self.classifier(dropout_pooled_output)\n return logits\n\n\n@add_start_docstrings(\n \"Albert Model with a `language modeling` head on top.\", ALBERT_START_DOCSTRING,\n)\nclass AlbertForMaskedLM(AlbertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.albert = AlbertModel(config)\n self.predictions = AlbertMLMHead(config)\n\n self.init_weights()\n self.tie_weights()\n\n def tie_weights(self):\n self._tie_or_clone_weights(self.predictions.decoder, self.albert.embeddings.word_embeddings)\n\n def get_output_embeddings(self):\n return self.predictions.decoder\n\n @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"albert-base-v2\",\n output_type=MaskedLMOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Labels for computing the masked language modeling loss.\n Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)\n Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with\n labels in ``[0, ..., config.vocab_size]``\n kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):\n Used to hide legacy arguments that have been deprecated.\n \"\"\"\n if \"masked_lm_labels\" in kwargs:\n warnings.warn(\n \"The `masked_lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.\",\n FutureWarning,\n )\n labels = kwargs.pop(\"masked_lm_labels\")\n assert kwargs == {}, f\"Unexpected keyword arguments: {list(kwargs.keys())}.\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.albert(\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_outputs = outputs[0]\n\n prediction_scores = self.predictions(sequence_outputs)\n\n masked_lm_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (prediction_scores,) + outputs[2:]\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n\n return MaskedLMOutput(\n loss=masked_lm_loss,\n logits=prediction_scores,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. 
\"\"\",\n ALBERT_START_DOCSTRING,\n)\nclass AlbertForSequenceClassification(AlbertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.albert = AlbertModel(config)\n self.dropout = nn.Dropout(config.classifier_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"albert-base-v2\",\n output_type=SequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for computing the sequence classification/regression loss.\n Indices should be in ``[0, ..., config.num_labels - 1]``.\n If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),\n If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.albert(\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n\n loss = None\n if labels is not None:\n if self.num_labels == 1:\n # We are doing regression\n loss_fct = MSELoss()\n loss = loss_fct(logits.view(-1), labels.view(-1))\n else:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"Albert Model with a token classification head on top (a linear layer on top of\n the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
\"\"\",\n ALBERT_START_DOCSTRING,\n)\nclass AlbertForTokenClassification(AlbertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.albert = AlbertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"albert-base-v2\",\n output_type=TokenClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Labels for computing the token classification loss.\n Indices should be in ``[0, ..., config.num_labels - 1]``.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.albert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n # Only keep active parts of the loss\n if attention_mask is not None:\n active_loss = attention_mask.view(-1) == 1\n active_logits = logits.view(-1, self.num_labels)[active_loss]\n active_labels = labels.view(-1)[active_loss]\n loss = loss_fct(active_logits, active_labels)\n else:\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return TokenClassifierOutput(\n loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of\n the hidden-states output to compute `span start logits` and `span end logits`). 
\"\"\",\n ALBERT_START_DOCSTRING,\n)\nclass AlbertForQuestionAnswering(AlbertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.albert = AlbertModel(config)\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"albert-base-v2\",\n output_type=QuestionAnsweringModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n start_positions=None,\n end_positions=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`).\n Position outside of the sequence are not taken into account for computing the loss.\n end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`).\n Position outside of the sequence are not taken into account for computing the loss.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.albert(\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1)\n end_logits = end_logits.squeeze(-1)\n\n total_loss = None\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions.clamp_(0, ignored_index)\n end_positions.clamp_(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n\n if not return_dict:\n output = (start_logits, end_logits) + outputs[2:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return QuestionAnsweringModelOutput(\n loss=total_loss,\n start_logits=start_logits,\n end_logits=end_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"Albert Model with a multiple choice classification head on top (a linear layer on top of\n the pooled output and a softmax) e.g. for RocStories/SWAG tasks. 
\"\"\",\n ALBERT_START_DOCSTRING,\n)\nclass AlbertForMultipleChoice(AlbertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.albert = AlbertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, 1)\n\n self.init_weights()\n\n @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING.format(\"(batch_size, num_choices, sequence_length)\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"albert-base-v2\",\n output_type=MultipleChoiceModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for computing the multiple choice classification loss.\n Indices should be in ``[0, ..., num_choices-1]`` where `num_choices` is the size of the second dimension\n of the input tensors. (see `input_ids` above)\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]\n\n input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None\n attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None\n token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None\n position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None\n inputs_embeds = (\n inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))\n if inputs_embeds is not None\n else None\n )\n outputs = self.albert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n reshaped_logits = logits.view(-1, num_choices)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(reshaped_logits, labels)\n\n if not return_dict:\n output = (reshaped_logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return MultipleChoiceModelOutput(\n loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions,\n )\n",
"path": "src/transformers/modeling_albert.py"
}
] | [
{
"content": "# coding=utf-8\n# Copyright 2018 Google AI, Google Brain and the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch ALBERT model. \"\"\"\n\nimport logging\nimport math\nimport os\nimport warnings\nfrom dataclasses import dataclass\nfrom typing import Optional, Tuple\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import CrossEntropyLoss, MSELoss\n\nfrom .configuration_albert import AlbertConfig\nfrom .file_utils import (\n ModelOutput,\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_callable,\n replace_return_docstrings,\n)\nfrom .modeling_bert import ACT2FN, BertEmbeddings, BertSelfAttention, prune_linear_layer\nfrom .modeling_outputs import (\n BaseModelOutput,\n BaseModelOutputWithPooling,\n MaskedLMOutput,\n MultipleChoiceModelOutput,\n QuestionAnsweringModelOutput,\n SequenceClassifierOutput,\n TokenClassifierOutput,\n)\nfrom .modeling_utils import PreTrainedModel, apply_chunking_to_forward, find_pruneable_heads_and_indices\n\n\nlogger = logging.getLogger(__name__)\n\n_CONFIG_FOR_DOC = \"AlbertConfig\"\n_TOKENIZER_FOR_DOC = \"AlbertTokenizer\"\n\n\nALBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"albert-base-v1\",\n \"albert-large-v1\",\n \"albert-xlarge-v1\",\n \"albert-xxlarge-v1\",\n \"albert-base-v2\",\n \"albert-large-v2\",\n \"albert-xlarge-v2\",\n \"albert-xxlarge-v2\",\n # See all ALBERT models at https://huggingface.co/models?filter=albert\n]\n\n\ndef load_tf_weights_in_albert(model, config, tf_checkpoint_path):\n \"\"\" Load tf checkpoints in a pytorch model.\"\"\"\n try:\n import re\n\n import numpy as np\n import tensorflow as tf\n except ImportError:\n logger.error(\n \"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\"\n )\n raise\n tf_path = os.path.abspath(tf_checkpoint_path)\n logger.info(\"Converting TensorFlow checkpoint from {}\".format(tf_path))\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n arrays = []\n for name, shape in init_vars:\n logger.info(\"Loading TF weight {} with shape {}\".format(name, shape))\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n arrays.append(array)\n\n for name, array in zip(names, arrays):\n print(name)\n\n for name, array in zip(names, arrays):\n original_name = name\n\n # If saved from the TF HUB module\n name = name.replace(\"module/\", \"\")\n\n # Renaming and simplifying\n name = name.replace(\"ffn_1\", \"ffn\")\n name = name.replace(\"bert/\", \"albert/\")\n name = name.replace(\"attention_1\", \"attention\")\n name = name.replace(\"transform/\", \"\")\n name = name.replace(\"LayerNorm_1\", \"full_layer_layer_norm\")\n name = name.replace(\"LayerNorm\", \"attention/LayerNorm\")\n name = name.replace(\"transformer/\", \"\")\n\n # The feed forward layer had an 'intermediate' step which has been abstracted away\n name = name.replace(\"intermediate/dense/\", \"\")\n name = name.replace(\"ffn/intermediate/output/dense/\", \"ffn_output/\")\n\n # ALBERT attention was split between self and output which have been abstracted away\n name = name.replace(\"/output/\", \"/\")\n name = name.replace(\"/self/\", \"/\")\n\n # The pooler is a linear layer\n name = name.replace(\"pooler/dense\", \"pooler\")\n\n # The classifier was simplified to predictions from cls/predictions\n name = name.replace(\"cls/predictions\", \"predictions\")\n name = name.replace(\"predictions/attention\", \"predictions\")\n\n # Naming was changed to be more explicit\n name = name.replace(\"embeddings/attention\", \"embeddings\")\n name = name.replace(\"inner_group_\", \"albert_layers/\")\n name = name.replace(\"group_\", \"albert_layer_groups/\")\n\n # Classifier\n if len(name.split(\"/\")) == 1 and (\"output_bias\" in name or \"output_weights\" in name):\n name = \"classifier/\" + name\n\n # No ALBERT model currently handles the next sentence prediction task\n if \"seq_relationship\" in name:\n name = name.replace(\"seq_relationship/output_\", \"sop_classifier/classifier/\")\n name = name.replace(\"weights\", \"weight\")\n\n name = name.split(\"/\")\n\n # Ignore the gradients applied by the LAMB/ADAM optimizers.\n if (\n \"adam_m\" in name\n or \"adam_v\" in name\n or \"AdamWeightDecayOptimizer\" in name\n or \"AdamWeightDecayOptimizer_1\" in name\n or \"global_step\" in name\n ):\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n continue\n\n pointer = model\n for m_name in name:\n if re.fullmatch(r\"[A-Za-z]+_\\d+\", m_name):\n scope_names = re.split(r\"_(\\d+)\", m_name)\n else:\n scope_names = [m_name]\n\n if scope_names[0] == \"kernel\" or scope_names[0] == \"gamma\":\n pointer = getattr(pointer, \"weight\")\n elif scope_names[0] == \"output_bias\" or scope_names[0] == \"beta\":\n pointer = getattr(pointer, \"bias\")\n elif scope_names[0] == \"output_weights\":\n pointer = getattr(pointer, \"weight\")\n elif scope_names[0] == \"squad\":\n pointer = getattr(pointer, \"classifier\")\n else:\n try:\n pointer = getattr(pointer, scope_names[0])\n except AttributeError:\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n continue\n if len(scope_names) >= 2:\n num = int(scope_names[1])\n pointer = pointer[num]\n\n if m_name[-11:] == 
\"_embeddings\":\n pointer = getattr(pointer, \"weight\")\n elif m_name == \"kernel\":\n array = np.transpose(array)\n try:\n assert (\n pointer.shape == array.shape\n ), f\"Pointer shape {pointer.shape} and array shape {array.shape} mismatched\"\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n print(\"Initialize PyTorch weight {} from {}\".format(name, original_name))\n pointer.data = torch.from_numpy(array)\n\n return model\n\n\nclass AlbertEmbeddings(BertEmbeddings):\n \"\"\"\n Construct the embeddings from word, position and token_type embeddings.\n \"\"\"\n\n def __init__(self, config):\n super().__init__(config)\n\n self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)\n self.LayerNorm = torch.nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)\n\n\nclass AlbertAttention(BertSelfAttention):\n def __init__(self, config):\n super().__init__(config)\n\n self.num_attention_heads = config.num_attention_heads\n self.hidden_size = config.hidden_size\n self.attention_head_size = config.hidden_size // config.num_attention_heads\n self.attention_dropout = nn.Dropout(config.attention_probs_dropout_prob)\n self.output_dropout = nn.Dropout(config.hidden_dropout_prob)\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n heads, index = find_pruneable_heads_and_indices(\n heads, self.num_attention_heads, self.attention_head_size, self.pruned_heads\n )\n\n # Prune linear layers\n self.query = prune_linear_layer(self.query, index)\n self.key = prune_linear_layer(self.key, index)\n self.value = prune_linear_layer(self.value, index)\n self.dense = prune_linear_layer(self.dense, index, dim=1)\n\n # Update hyper params and store pruned heads\n self.num_attention_heads = self.num_attention_heads - len(heads)\n self.all_head_size = self.attention_head_size * self.num_attention_heads\n self.pruned_heads = self.pruned_heads.union(heads)\n\n def forward(self, input_ids, attention_mask=None, head_mask=None, output_attentions=False):\n mixed_query_layer = self.query(input_ids)\n mixed_key_layer = self.key(input_ids)\n mixed_value_layer = self.value(input_ids)\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n key_layer = self.transpose_for_scores(mixed_key_layer)\n value_layer = self.transpose_for_scores(mixed_value_layer)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n if attention_mask is not None:\n # Apply the attention mask is (precomputed for all layers in BertModel forward() function)\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = nn.Softmax(dim=-1)(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.attention_dropout(attention_probs)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = 
attention_probs * head_mask\n\n context_layer = torch.matmul(attention_probs, value_layer)\n\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n\n # Should find a better way to do this\n w = (\n self.dense.weight.t()\n .view(self.num_attention_heads, self.attention_head_size, self.hidden_size)\n .to(context_layer.dtype)\n )\n b = self.dense.bias.to(context_layer.dtype)\n\n projected_context_layer = torch.einsum(\"bfnd,ndh->bfh\", context_layer, w) + b\n projected_context_layer_dropout = self.output_dropout(projected_context_layer)\n layernormed_context_layer = self.LayerNorm(input_ids + projected_context_layer_dropout)\n return (layernormed_context_layer, attention_probs) if output_attentions else (layernormed_context_layer,)\n\n\nclass AlbertLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n\n self.config = config\n self.chunk_size_feed_forward = config.chunk_size_feed_forward\n self.seq_len_dim = 1\n self.full_layer_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.attention = AlbertAttention(config)\n self.ffn = nn.Linear(config.hidden_size, config.intermediate_size)\n self.ffn_output = nn.Linear(config.intermediate_size, config.hidden_size)\n self.activation = ACT2FN[config.hidden_act]\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(\n self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False\n ):\n attention_output = self.attention(hidden_states, attention_mask, head_mask, output_attentions)\n\n ffn_output = apply_chunking_to_forward(\n self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output[0],\n )\n hidden_states = self.full_layer_layer_norm(ffn_output + attention_output[0])\n\n return (hidden_states,) + attention_output[1:] # add attentions if we output them\n\n def ff_chunk(self, attention_output):\n ffn_output = self.ffn(attention_output)\n ffn_output = self.activation(ffn_output)\n ffn_output = self.ffn_output(ffn_output)\n return ffn_output\n\n\nclass AlbertLayerGroup(nn.Module):\n def __init__(self, config):\n super().__init__()\n\n self.albert_layers = nn.ModuleList([AlbertLayer(config) for _ in range(config.inner_group_num)])\n\n def forward(\n self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False\n ):\n layer_hidden_states = ()\n layer_attentions = ()\n\n for layer_index, albert_layer in enumerate(self.albert_layers):\n layer_output = albert_layer(hidden_states, attention_mask, head_mask[layer_index], output_attentions)\n hidden_states = layer_output[0]\n\n if output_attentions:\n layer_attentions = layer_attentions + (layer_output[1],)\n\n if output_hidden_states:\n layer_hidden_states = layer_hidden_states + (hidden_states,)\n\n outputs = (hidden_states,)\n if output_hidden_states:\n outputs = outputs + (layer_hidden_states,)\n if output_attentions:\n outputs = outputs + (layer_attentions,)\n return outputs # last-layer hidden state, (layer hidden states), (layer attentions)\n\n\nclass AlbertTransformer(nn.Module):\n def __init__(self, config):\n super().__init__()\n\n self.config = config\n self.embedding_hidden_mapping_in = nn.Linear(config.embedding_size, config.hidden_size)\n self.albert_layer_groups = nn.ModuleList([AlbertLayerGroup(config) for _ in range(config.num_hidden_groups)])\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=False,\n ):\n 
hidden_states = self.embedding_hidden_mapping_in(hidden_states)\n\n all_hidden_states = (hidden_states,) if output_hidden_states else None\n all_attentions = () if output_attentions else None\n\n for i in range(self.config.num_hidden_layers):\n # Number of layers in a hidden group\n layers_per_group = int(self.config.num_hidden_layers / self.config.num_hidden_groups)\n\n # Index of the hidden group\n group_idx = int(i / (self.config.num_hidden_layers / self.config.num_hidden_groups))\n\n layer_group_output = self.albert_layer_groups[group_idx](\n hidden_states,\n attention_mask,\n head_mask[group_idx * layers_per_group : (group_idx + 1) * layers_per_group],\n output_attentions,\n output_hidden_states,\n )\n hidden_states = layer_group_output[0]\n\n if output_attentions:\n all_attentions = all_attentions + layer_group_output[-1]\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)\n return BaseModelOutput(\n last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions\n )\n\n\nclass AlbertPreTrainedModel(PreTrainedModel):\n \"\"\" An abstract class to handle weights initialization and\n a simple interface for downloading and loading pretrained models.\n \"\"\"\n\n config_class = AlbertConfig\n base_model_prefix = \"albert\"\n authorized_missing_keys = [r\"position_ids\"]\n\n def _init_weights(self, module):\n \"\"\" Initialize the weights.\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if isinstance(module, (nn.Linear)) and module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n\n@dataclass\nclass AlbertForPreTrainingOutput(ModelOutput):\n \"\"\"\n Output type of :class:`~transformers.AlbertForPreTrainingModel`.\n\n Args:\n loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`):\n Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.\n prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n sop_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):\n Prediction scores of the next sequence prediction (classification) head (scores of True/False\n continuation before SoftMax).\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the 
attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n prediction_logits: torch.FloatTensor = None\n sop_logits: torch.FloatTensor = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n\nALBERT_START_DOCSTRING = r\"\"\"\n\n This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general\n usage and behavior.\n\n Args:\n config (:class:`~transformers.AlbertConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the configuration.\n Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.\n\"\"\"\n\nALBERT_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`transformers.AlbertTokenizer`.\n See :func:`transformers.PreTrainedTokenizer.encode` and\n :func:`transformers.PreTrainedTokenizer` for details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Mask to avoid performing attention on padding token indices.\n Mask values selected in ``[0, 1]``:\n ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Segment token indices to indicate first and second portions of the inputs.\n Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``\n corresponds to a `sentence B` token\n\n `What are token type IDs? <../glossary.html#token-type-ids>`_\n position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Indices of positions of each input sequence tokens in the position embeddings.\n Selected in the range ``[0, config.max_position_embeddings - 1]``.\n\n `What are position IDs? <../glossary.html#position-ids>`_\n head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):\n Mask to nullify selected heads of the self-attention modules.\n Mask values selected in ``[0, 1]``:\n :obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`, defaults to :obj:`None`):\n If set to ``True``, the attentions tensors of all attention layers are returned. 
See ``attentions`` under returned tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`, defaults to :obj:`None`):\n If set to ``True``, the hidden states of all layers are returned. See ``hidden_states`` under returned tensors for more detail.\n return_dict (:obj:`bool`, `optional`, defaults to :obj:`None`):\n If set to ``True``, the model will return a :class:`~transformers.file_utils.ModelOutput` instead of a\n plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare ALBERT Model transformer outputting raw hidden-states without any specific head on top.\",\n ALBERT_START_DOCSTRING,\n)\nclass AlbertModel(AlbertPreTrainedModel):\n\n config_class = AlbertConfig\n load_tf_weights = load_tf_weights_in_albert\n base_model_prefix = \"albert\"\n\n def __init__(self, config):\n super().__init__(config)\n\n self.config = config\n self.embeddings = AlbertEmbeddings(config)\n self.encoder = AlbertTransformer(config)\n self.pooler = nn.Linear(config.hidden_size, config.hidden_size)\n self.pooler_activation = nn.Tanh()\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.word_embeddings = value\n\n def _resize_token_embeddings(self, new_num_tokens):\n old_embeddings = self.embeddings.word_embeddings\n new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)\n self.embeddings.word_embeddings = new_embeddings\n return self.embeddings.word_embeddings\n\n def _prune_heads(self, heads_to_prune):\n \"\"\" Prunes heads of the model.\n heads_to_prune: dict of {layer_num: list of heads to prune in this layer}\n ALBERT has a different architecture in that its layers are shared across groups, which then has inner groups.\n If an ALBERT model has 12 hidden layers and 2 hidden groups, with two inner groups, there\n is a total of 4 different layers.\n\n These layers are flattened: the indices [0,1] correspond to the two inner groups of the first hidden layer,\n while [2,3] correspond to the two inner groups of the second hidden layer.\n\n Any layer with in index other than [0,1,2,3] will result in an error.\n See base class PreTrainedModel for more information about head pruning\n \"\"\"\n for layer, heads in heads_to_prune.items():\n group_idx = int(layer / self.config.inner_group_num)\n inner_group_idx = int(layer - group_idx * self.config.inner_group_num)\n self.encoder.albert_layer_groups[group_idx].albert_layers[inner_group_idx].attention.prune_heads(heads)\n\n @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"albert-base-v2\",\n output_type=BaseModelOutputWithPooling,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n elif 
inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n if attention_mask is None:\n attention_mask = torch.ones(input_shape, device=device)\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n embedding_output = self.embeddings(\n input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds\n )\n encoder_outputs = self.encoder(\n embedding_output,\n extended_attention_mask,\n head_mask=head_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = encoder_outputs[0]\n\n pooled_output = self.pooler_activation(self.pooler(sequence_output[:, 0]))\n\n if not return_dict:\n return (sequence_output, pooled_output) + encoder_outputs[1:]\n\n return BaseModelOutputWithPooling(\n last_hidden_state=sequence_output,\n pooler_output=pooled_output,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"Albert Model with two heads on top as done during the pre-training: a `masked language modeling` head and\n a `sentence order prediction (classification)` head. \"\"\",\n ALBERT_START_DOCSTRING,\n)\nclass AlbertForPreTraining(AlbertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.albert = AlbertModel(config)\n self.predictions = AlbertMLMHead(config)\n self.sop_classifier = AlbertSOPHead(config)\n\n self.init_weights()\n self.tie_weights()\n\n def tie_weights(self):\n self._tie_or_clone_weights(self.predictions.decoder, self.albert.embeddings.word_embeddings)\n\n def get_output_embeddings(self):\n return self.predictions.decoder\n\n @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=AlbertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n sentence_order_label=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n r\"\"\"\n labels (``torch.LongTensor`` of shape ``(batch_size, sequence_length)``, `optional`, defaults to :obj:`None`):\n Labels for computing the masked language modeling loss.\n Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)\n Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels\n in ``[0, ..., config.vocab_size]``\n sentence_order_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`, defaults to :obj:`None`):\n Labels for computing the next sequence prediction (classification) loss. 
Input should be a sequence pair (see :obj:`input_ids` docstring)\n Indices should be in ``[0, 1]``.\n ``0`` indicates original order (sequence A, then sequence B),\n ``1`` indicates switched order (sequence B, then sequence A).\n kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):\n Used to hide legacy arguments that have been deprecated.\n\n Returns:\n\n Examples::\n\n >>> from transformers import AlbertTokenizer, AlbertForPreTraining\n >>> import torch\n\n >>> tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')\n >>> model = AlbertForPreTraining.from_pretrained('albert-base-v2', return_dict=True)\n\n >>> input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True)).unsqueeze(0) # Batch size 1\n >>> outputs = model(input_ids)\n\n >>> prediction_logits = outputs.prediction_logits\n >>> sop_logits = outputs.sop_logits\n\n \"\"\"\n\n if \"masked_lm_labels\" in kwargs:\n warnings.warn(\n \"The `masked_lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.\",\n FutureWarning,\n )\n labels = kwargs.pop(\"masked_lm_labels\")\n assert kwargs == {}, f\"Unexpected keyword arguments: {list(kwargs.keys())}.\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.albert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output, pooled_output = outputs[:2]\n\n prediction_scores = self.predictions(sequence_output)\n sop_scores = self.sop_classifier(pooled_output)\n\n total_loss = None\n if labels is not None and sentence_order_label is not None:\n loss_fct = CrossEntropyLoss()\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n sentence_order_loss = loss_fct(sop_scores.view(-1, 2), sentence_order_label.view(-1))\n total_loss = masked_lm_loss + sentence_order_loss\n\n if not return_dict:\n output = (prediction_scores, sop_scores) + outputs[2:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return AlbertForPreTrainingOutput(\n loss=total_loss,\n prediction_logits=prediction_scores,\n sop_logits=sop_scores,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\nclass AlbertMLMHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n\n self.LayerNorm = nn.LayerNorm(config.embedding_size)\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n self.dense = nn.Linear(config.hidden_size, config.embedding_size)\n self.decoder = nn.Linear(config.embedding_size, config.vocab_size)\n self.activation = ACT2FN[config.hidden_act]\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.activation(hidden_states)\n hidden_states = self.LayerNorm(hidden_states)\n hidden_states = self.decoder(hidden_states)\n\n prediction_scores = hidden_states\n\n return prediction_scores\n\n\nclass AlbertSOPHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n\n self.dropout = nn.Dropout(config.classifier_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n def forward(self, pooled_output):\n 
dropout_pooled_output = self.dropout(pooled_output)\n logits = self.classifier(dropout_pooled_output)\n return logits\n\n\n@add_start_docstrings(\n \"Albert Model with a `language modeling` head on top.\", ALBERT_START_DOCSTRING,\n)\nclass AlbertForMaskedLM(AlbertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.albert = AlbertModel(config)\n self.predictions = AlbertMLMHead(config)\n\n self.init_weights()\n self.tie_weights()\n\n def tie_weights(self):\n self._tie_or_clone_weights(self.predictions.decoder, self.albert.embeddings.word_embeddings)\n\n def get_output_embeddings(self):\n return self.predictions.decoder\n\n @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"albert-base-v2\",\n output_type=MaskedLMOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Labels for computing the masked language modeling loss.\n Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)\n Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with\n labels in ``[0, ..., config.vocab_size]``\n kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):\n Used to hide legacy arguments that have been deprecated.\n \"\"\"\n if \"masked_lm_labels\" in kwargs:\n warnings.warn(\n \"The `masked_lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.\",\n FutureWarning,\n )\n labels = kwargs.pop(\"masked_lm_labels\")\n assert kwargs == {}, f\"Unexpected keyword arguments: {list(kwargs.keys())}.\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.albert(\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_outputs = outputs[0]\n\n prediction_scores = self.predictions(sequence_outputs)\n\n masked_lm_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (prediction_scores,) + outputs[2:]\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n\n return MaskedLMOutput(\n loss=masked_lm_loss,\n logits=prediction_scores,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. 
\"\"\",\n ALBERT_START_DOCSTRING,\n)\nclass AlbertForSequenceClassification(AlbertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.albert = AlbertModel(config)\n self.dropout = nn.Dropout(config.classifier_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"albert-base-v2\",\n output_type=SequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for computing the sequence classification/regression loss.\n Indices should be in ``[0, ..., config.num_labels - 1]``.\n If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),\n If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.albert(\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n\n loss = None\n if labels is not None:\n if self.num_labels == 1:\n # We are doing regression\n loss_fct = MSELoss()\n loss = loss_fct(logits.view(-1), labels.view(-1))\n else:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"Albert Model with a token classification head on top (a linear layer on top of\n the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
\"\"\",\n ALBERT_START_DOCSTRING,\n)\nclass AlbertForTokenClassification(AlbertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.albert = AlbertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"albert-base-v2\",\n output_type=TokenClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Labels for computing the token classification loss.\n Indices should be in ``[0, ..., config.num_labels - 1]``.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.albert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n # Only keep active parts of the loss\n if attention_mask is not None:\n active_loss = attention_mask.view(-1) == 1\n active_logits = logits.view(-1, self.num_labels)[active_loss]\n active_labels = labels.view(-1)[active_loss]\n loss = loss_fct(active_logits, active_labels)\n else:\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return TokenClassifierOutput(\n loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of\n the hidden-states output to compute `span start logits` and `span end logits`). 
\"\"\",\n ALBERT_START_DOCSTRING,\n)\nclass AlbertForQuestionAnswering(AlbertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.albert = AlbertModel(config)\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"albert-base-v2\",\n output_type=QuestionAnsweringModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n start_positions=None,\n end_positions=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`).\n Position outside of the sequence are not taken into account for computing the loss.\n end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`).\n Position outside of the sequence are not taken into account for computing the loss.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.albert(\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1)\n end_logits = end_logits.squeeze(-1)\n\n total_loss = None\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions.clamp_(0, ignored_index)\n end_positions.clamp_(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n\n if not return_dict:\n output = (start_logits, end_logits) + outputs[2:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return QuestionAnsweringModelOutput(\n loss=total_loss,\n start_logits=start_logits,\n end_logits=end_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"Albert Model with a multiple choice classification head on top (a linear layer on top of\n the pooled output and a softmax) e.g. for RocStories/SWAG tasks. 
\"\"\",\n ALBERT_START_DOCSTRING,\n)\nclass AlbertForMultipleChoice(AlbertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.albert = AlbertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, 1)\n\n self.init_weights()\n\n @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING.format(\"(batch_size, num_choices, sequence_length)\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"albert-base-v2\",\n output_type=MultipleChoiceModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for computing the multiple choice classification loss.\n Indices should be in ``[0, ..., num_choices-1]`` where `num_choices` is the size of the second dimension\n of the input tensors. (see `input_ids` above)\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]\n\n input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None\n attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None\n token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None\n position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None\n inputs_embeds = (\n inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))\n if inputs_embeds is not None\n else None\n )\n outputs = self.albert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n reshaped_logits = logits.view(-1, num_choices)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(reshaped_logits, labels)\n\n if not return_dict:\n output = (reshaped_logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return MultipleChoiceModelOutput(\n loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions,\n )\n",
"path": "src/transformers/modeling_albert.py"
}
] | diff --git a/src/transformers/modeling_albert.py b/src/transformers/modeling_albert.py
index eb0ed4dfbdd5..76f099895252 100755
--- a/src/transformers/modeling_albert.py
+++ b/src/transformers/modeling_albert.py
@@ -403,6 +403,7 @@ class AlbertPreTrainedModel(PreTrainedModel):
config_class = AlbertConfig
base_model_prefix = "albert"
+ authorized_missing_keys = [r"position_ids"]
def _init_weights(self, module):
""" Initialize the weights.
|
espnet__espnet-617 | Conversion of AttributeDict with vars() returns unexpected results
I found a bug.
In the training phase, `train_args` is an `argparse.Namespace`.
So `vars(train_args)` converts it into a dict as follows.
```python
(Pdb) train_args
Namespace(aconv_chans=10, aconv_filts=100, adim=320, aheads=4, asr_model=False, atype='location', awin=5, backend='pytorch', batch_size=30, beam_size=4, char_list=['<blank>', '<unk>', '<space>', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '<eos>'], criterion='acc', ctc_type='warpctc', ctc_weight=0.3, debugdir='exp/train_nodev_pytorch_blstmp_e4_subsample1_2_2_1_1_unit320_proj320_d1_unit300_location_aconvc10_aconvf100_mtlalpha0.5_adadelta_sampprob0.0_bs30_mli800_mlo150', debugmode=1, dict='data/lang_1char/train_nodev_units.txt', dlayers=1, dropout_rate=0.0, dropout_rate_decoder=0.0, dtype='lstm', dunits=300, early_stop_criterion='validation/main/acc', elayers=4, elayers_sd=4, epochs=20, eprojs=320, eps=1e-08, eps_decay=0.01, etype='blstmp', eunits=320, grad_clip=5, lm_weight=0.1, lsm_type='', lsm_weight=0.0, maxlen_in=800, maxlen_out=150, maxlenratio=0.0, minibatches=0, minlenratio=0.0, mt_model=False, mtlalpha=0.5, n_iter_processes=0, nbest=1, ngpu=1, num_save_attention=3, num_spkrs=1, opt='adadelta', outdir='exp/train_nodev_pytorch_blstmp_e4_subsample1_2_2_1_1_unit320_proj320_d1_unit300_location_aconvc10_aconvf100_mtlalpha0.5_adadelta_sampprob0.0_bs30_mli800_mlo150/results', patience=3, penalty=0.0, preprocess_conf=None, report_cer=False, report_wer=False, resume=None, rnnlm=None, rnnlm_conf=None, sampling_probability=0.0, seed=1, sortagrad=0, spa=False, subsample='1_2_2_1_1', sym_blank='<blank>', sym_space='<space>', tensorboard_dir='tensorboard/train_nodev_pytorch_blstmp_e4_subsample1_2_2_1_1_unit320_proj320_d1_unit300_location_aconvc10_aconvf100_mtlalpha0.5_adadelta_sampprob0.0_bs30_mli800_mlo150', threshold=0.0001, train_json='dump/train_nodev/deltafalse/data.json', valid_json='dump/train_dev/deltafalse/data.json', verbose=1, weight_decay=0.0)
(Pdb) vars(train_args)
{'aconv_chans': 10, 'aconv_filts': 100, 'adim': 320, 'aheads': 4, 'asr_model': False, 'atype': 'location', 'awin': 5, 'backend': 'pytorch', 'batch_size': 30, 'beam_size': 4, 'char_list': ['<blank>', '<unk>', '<space>', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '<eos>'], 'criterion': 'acc', 'ctc_type': 'warpctc', 'ctc_weight': 0.3, 'debugdir': 'exp/train_nodev_pytorch_blstmp_e4_subsample1_2_2_1_1_unit320_proj320_d1_unit300_location_aconvc10_aconvf100_mtlalpha0.5_adadelta_sampprob0.0_bs30_mli800_mlo150', 'debugmode': 1, 'dict': 'data/lang_1char/train_nodev_units.txt', 'dlayers': 1, 'dropout_rate': 0.0, 'dropout_rate_decoder': 0.0, 'dtype': 'lstm', 'dunits': 300, 'early_stop_criterion': 'validation/main/acc', 'elayers': 4, 'elayers_sd': 4, 'epochs': 20, 'eprojs': 320, 'eps': 1e-08, 'eps_decay': 0.01, 'etype': 'blstmp', 'eunits': 320, 'grad_clip': 5, 'lm_weight': 0.1, 'lsm_type': '', 'lsm_weight': 0.0, 'maxlen_in': 800, 'maxlen_out': 150, 'maxlenratio': 0.0, 'minibatches': 0, 'minlenratio': 0.0, 'mt_model': False, 'mtlalpha': 0.5, 'n_iter_processes': 0, 'nbest': 1, 'ngpu': 1, 'num_save_attention': 3, 'num_spkrs': 1, 'opt': 'adadelta', 'outdir': 'exp/train_nodev_pytorch_blstmp_e4_subsample1_2_2_1_1_unit320_proj320_d1_unit300_location_aconvc10_aconvf100_mtlalpha0.5_adadelta_sampprob0.0_bs30_mli800_mlo150/results', 'patience': 3, 'penalty': 0.0, 'preprocess_conf': None, 'report_cer': False, 'report_wer': False, 'resume': None, 'rnnlm': None, 'rnnlm_conf': None, 'sampling_probability': 0.0, 'seed': 1, 'sortagrad': 0, 'spa': False, 'subsample': '1_2_2_1_1', 'sym_blank': '<blank>', 'sym_space': '<space>', 'tensorboard_dir': 'tensorboard/train_nodev_pytorch_blstmp_e4_subsample1_2_2_1_1_unit320_proj320_d1_unit300_location_aconvc10_aconvf100_mtlalpha0.5_adadelta_sampprob0.0_bs30_mli800_mlo150', 'threshold': 0.0001, 'train_json': 'dump/train_nodev/deltafalse/data.json', 'valid_json': 'dump/train_dev/deltafalse/data.json', 'verbose': 1, 'weight_decay': 0.0}
```
However, in the testing phase, the loaded `train_args` is an `AttributeDict`.
Therefore, `vars(train_args)` returns a different result.
```python
(Pdb) train_args
<espnet.asr.asr_utils.AttributeDict object at 0x7f2323130a58>
(Pdb) vars(train_args)
{'obj': {'aconv_chans': 10, 'aconv_filts': 100, 'adim': 320, 'aheads': 4, 'asr_model': False, 'atype': 'location', 'awin': 5, 'backend': 'pytorch', 'batch_size': 30, 'beam_size': 4, 'char_list': ['<blank>', '<unk>', '<space>', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '<eos>'], 'criterion': 'acc', 'ctc_type': 'warpctc', 'ctc_weight': 0.3, 'debugdir': 'exp/train_nodev_pytorch_blstmp_e4_subsample1_2_2_1_1_unit320_proj320_d1_unit300_location_aconvc10_aconvf100_mtlalpha0.5_adadelta_sampprob0.0_bs30_mli800_mlo150', 'debugmode': 1, 'dict': 'data/lang_1char/train_nodev_units.txt', 'dlayers': 1, 'dropout_rate': 0.0, 'dropout_rate_decoder': 0.0, 'dtype': 'lstm', 'dunits': 300, 'early_stop_criterion': 'validation/main/acc', 'elayers': 4, 'elayers_sd': 4, 'epochs': 20, 'eprojs': 320, 'eps': 1e-08, 'eps_decay': 0.01, 'etype': 'blstmp', 'eunits': 320, 'grad_clip': 5, 'lm_weight': 0.1, 'lsm_type': '', 'lsm_weight': 0.0, 'maxlen_in': 800, 'maxlen_out': 150, 'maxlenratio': 0.0, 'minibatches': 0, 'minlenratio': 0.0, 'mt_model': False, 'mtlalpha': 0.5, 'n_iter_processes': 0, 'nbest': 1, 'ngpu': 1, 'num_save_attention': 3, 'num_spkrs': 1, 'opt': 'adadelta', 'outdir': 'exp/train_nodev_pytorch_blstmp_e4_subsample1_2_2_1_1_unit320_proj320_d1_unit300_location_aconvc10_aconvf100_mtlalpha0.5_adadelta_sampprob0.0_bs30_mli800_mlo150/results', 'patience': 3, 'penalty': 0.0, 'preprocess_conf': None, 'report_cer': False, 'report_wer': False, 'resume': None, 'rnnlm': None, 'rnnlm_conf': None, 'sampling_probability': 0.0, 'seed': 1, 'sortagrad': 0, 'spa': False, 'subsample': '1_2_2_1_1', 'sym_blank': '<blank>', 'sym_space': '<space>', 'tensorboard_dir': 'tensorboard/train_nodev_pytorch_blstmp_e4_subsample1_2_2_1_1_unit320_proj320_d1_unit300_location_aconvc10_aconvf100_mtlalpha0.5_adadelta_sampprob0.0_bs30_mli800_mlo150', 'threshold': 0.0001, 'train_json': 'dump/train_nodev/deltafalse/data.json', 'valid_json': 'dump/train_dev/deltafalse/data.json', 'verbose': 1, 'weight_decay': 0.0}}
```
This causes unexpected behavior in the following line.
https://github.com/espnet/espnet/blob/fb1cbd605c5fefc6e82c829cafc01840918c90c4/espnet/nets/pytorch_backend/ctc.py#L116
`vars(train_args).get('ctc_type')` always returns `None`, so `vars(train_args).get('ctc_type', 'builtin')` will always return `'builtin'`.
@gtache Is there any reason for using `vars(train_args).get('ctc_type')` instead of `train_args.ctc_type`?
@sw005320 What is your intention in using `AttributeDict` when loading a config file?
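For illustration, here is a minimal, hypothetical sketch (not espnet's actual class) of why `vars()` gives different results for an `argparse.Namespace` and for a wrapper object that stores the loaded config under a single `obj` attribute:
```python
import argparse


class AttributeDictSketch:
    """Hypothetical stand-in for asr_utils.AttributeDict: keeps the loaded dict in self.obj."""

    def __init__(self, obj):
        self.obj = obj

    def __getattr__(self, name):
        # attribute access is forwarded to the wrapped dict
        return self.obj[name]


ns = argparse.Namespace(ctc_type="warpctc")
print(vars(ns).get("ctc_type", "builtin"))       # warpctc  (training phase)

loaded = AttributeDictSketch({"ctc_type": "warpctc"})
print(vars(loaded).get("ctc_type", "builtin"))   # builtin  -- __dict__ only contains "obj"
print(loaded.ctc_type)                           # warpctc  -- attribute access still works
```
Plain attribute access (`train_args.ctc_type`) works for both objects, which is what the fix below switches to.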
| [
{
"content": "import logging\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\nfrom espnet.nets.pytorch_backend.nets_utils import to_device\n\n\nclass CTC(torch.nn.Module):\n \"\"\"CTC module\n\n :param int odim: dimension of outputs\n :param int eprojs: number of encoder projection units\n :param float dropout_rate: dropout rate (0.0 ~ 1.0)\n :param str ctc_type: builtin or warpctc\n :param bool reduce: reduce the CTC loss into a scalar\n \"\"\"\n\n def __init__(self, odim, eprojs, dropout_rate, ctc_type='warpctc', reduce=True):\n super(CTC, self).__init__()\n self.dropout_rate = dropout_rate\n self.loss = None\n self.ctc_lo = torch.nn.Linear(eprojs, odim)\n self.ctc_type = ctc_type\n\n if self.ctc_type == 'builtin':\n reduction_type = 'sum' if reduce else 'none'\n self.ctc_loss = torch.nn.CTCLoss(reduction=reduction_type)\n elif self.ctc_type == 'warpctc':\n import warpctc_pytorch as warp_ctc\n self.ctc_loss = warp_ctc.CTCLoss(size_average=True, reduce=reduce)\n else:\n raise ValueError('ctc_type must be \"builtin\" or \"warpctc\": {}'\n .format(self.ctc_type))\n\n self.ignore_id = -1\n self.reduce = reduce\n\n def loss_fn(self, th_pred, th_target, th_ilen, th_olen):\n if self.ctc_type == 'builtin':\n th_pred = th_pred.log_softmax(2)\n loss = self.ctc_loss(th_pred, th_target, th_ilen, th_olen)\n # Batch-size average\n loss = loss / th_pred.size(1)\n return loss\n elif self.ctc_type == 'warpctc':\n return self.ctc_loss(th_pred, th_target, th_ilen, th_olen)\n else:\n raise NotImplementedError\n\n def forward(self, hs_pad, hlens, ys_pad):\n \"\"\"CTC forward\n\n :param torch.Tensor hs_pad: batch of padded hidden state sequences (B, Tmax, D)\n :param torch.Tensor hlens: batch of lengths of hidden state sequences (B)\n :param torch.Tensor ys_pad: batch of padded character id sequence tensor (B, Lmax)\n :return: ctc loss value\n :rtype: torch.Tensor\n \"\"\"\n # TODO(kan-bayashi): need to make more smart way\n ys = [y[y != self.ignore_id] for y in ys_pad] # parse padded ys\n\n self.loss = None\n hlens = torch.from_numpy(np.fromiter(hlens, dtype=np.int32))\n olens = torch.from_numpy(np.fromiter(\n (x.size(0) for x in ys), dtype=np.int32))\n\n # zero padding for hs\n ys_hat = self.ctc_lo(F.dropout(hs_pad, p=self.dropout_rate))\n\n # zero padding for ys\n ys_true = torch.cat(ys).cpu().int() # batch x olen\n\n # get length info\n logging.info(self.__class__.__name__ + ' input lengths: ' + ''.join(str(hlens).split('\\n')))\n logging.info(self.__class__.__name__ + ' output lengths: ' + ''.join(str(olens).split('\\n')))\n\n # get ctc loss\n # expected shape of seqLength x batchSize x alphabet_size\n ys_hat = ys_hat.transpose(0, 1)\n self.loss = to_device(self, self.loss_fn(ys_hat, ys_true, hlens, olens))\n if self.reduce:\n logging.info('ctc loss:' + str(float(self.loss)))\n\n return self.loss\n\n def log_softmax(self, hs_pad):\n \"\"\"log_softmax of frame activations\n\n :param torch.Tensor hs_pad: 3d tensor (B, Tmax, eprojs)\n :return: log softmax applied 3d tensor (B, Tmax, odim)\n :rtype: torch.Tensor\n \"\"\"\n return F.log_softmax(self.ctc_lo(hs_pad), dim=2)\n\n def argmax(self, hs_pad):\n \"\"\"argmax of frame activations\n\n :param torch.Tensor hs_pad: 3d tensor (B, Tmax, eprojs)\n :return: argmax applied 2d tensor (B, Tmax)\n :rtype: torch.Tensor\n \"\"\"\n return torch.argmax(self.ctc_lo(hs_pad), dim=2)\n\n\ndef ctc_for(args, odim, reduce=True):\n \"\"\"Returns the CTC module for the given args and output dimension\n\n :param Namespace args: the program args\n :param int 
odim : The output dimension\n :param bool reduce : return the CTC loss in a scalar\n :return: the corresponding CTC module\n \"\"\"\n return CTC(odim, args.eprojs, args.dropout_rate,\n ctc_type=vars(args).get('ctc_type', 'builtin'), reduce=reduce)\n",
"path": "espnet/nets/pytorch_backend/ctc.py"
}
] | [
{
"content": "import logging\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\nfrom espnet.nets.pytorch_backend.nets_utils import to_device\n\n\nclass CTC(torch.nn.Module):\n \"\"\"CTC module\n\n :param int odim: dimension of outputs\n :param int eprojs: number of encoder projection units\n :param float dropout_rate: dropout rate (0.0 ~ 1.0)\n :param str ctc_type: builtin or warpctc\n :param bool reduce: reduce the CTC loss into a scalar\n \"\"\"\n\n def __init__(self, odim, eprojs, dropout_rate, ctc_type='warpctc', reduce=True):\n super(CTC, self).__init__()\n self.dropout_rate = dropout_rate\n self.loss = None\n self.ctc_lo = torch.nn.Linear(eprojs, odim)\n self.ctc_type = ctc_type\n\n if self.ctc_type == 'builtin':\n reduction_type = 'sum' if reduce else 'none'\n self.ctc_loss = torch.nn.CTCLoss(reduction=reduction_type)\n elif self.ctc_type == 'warpctc':\n import warpctc_pytorch as warp_ctc\n self.ctc_loss = warp_ctc.CTCLoss(size_average=True, reduce=reduce)\n else:\n raise ValueError('ctc_type must be \"builtin\" or \"warpctc\": {}'\n .format(self.ctc_type))\n\n self.ignore_id = -1\n self.reduce = reduce\n\n def loss_fn(self, th_pred, th_target, th_ilen, th_olen):\n if self.ctc_type == 'builtin':\n th_pred = th_pred.log_softmax(2)\n loss = self.ctc_loss(th_pred, th_target, th_ilen, th_olen)\n # Batch-size average\n loss = loss / th_pred.size(1)\n return loss\n elif self.ctc_type == 'warpctc':\n return self.ctc_loss(th_pred, th_target, th_ilen, th_olen)\n else:\n raise NotImplementedError\n\n def forward(self, hs_pad, hlens, ys_pad):\n \"\"\"CTC forward\n\n :param torch.Tensor hs_pad: batch of padded hidden state sequences (B, Tmax, D)\n :param torch.Tensor hlens: batch of lengths of hidden state sequences (B)\n :param torch.Tensor ys_pad: batch of padded character id sequence tensor (B, Lmax)\n :return: ctc loss value\n :rtype: torch.Tensor\n \"\"\"\n # TODO(kan-bayashi): need to make more smart way\n ys = [y[y != self.ignore_id] for y in ys_pad] # parse padded ys\n\n self.loss = None\n hlens = torch.from_numpy(np.fromiter(hlens, dtype=np.int32))\n olens = torch.from_numpy(np.fromiter(\n (x.size(0) for x in ys), dtype=np.int32))\n\n # zero padding for hs\n ys_hat = self.ctc_lo(F.dropout(hs_pad, p=self.dropout_rate))\n\n # zero padding for ys\n ys_true = torch.cat(ys).cpu().int() # batch x olen\n\n # get length info\n logging.info(self.__class__.__name__ + ' input lengths: ' + ''.join(str(hlens).split('\\n')))\n logging.info(self.__class__.__name__ + ' output lengths: ' + ''.join(str(olens).split('\\n')))\n\n # get ctc loss\n # expected shape of seqLength x batchSize x alphabet_size\n ys_hat = ys_hat.transpose(0, 1)\n self.loss = to_device(self, self.loss_fn(ys_hat, ys_true, hlens, olens))\n if self.reduce:\n logging.info('ctc loss:' + str(float(self.loss)))\n\n return self.loss\n\n def log_softmax(self, hs_pad):\n \"\"\"log_softmax of frame activations\n\n :param torch.Tensor hs_pad: 3d tensor (B, Tmax, eprojs)\n :return: log softmax applied 3d tensor (B, Tmax, odim)\n :rtype: torch.Tensor\n \"\"\"\n return F.log_softmax(self.ctc_lo(hs_pad), dim=2)\n\n def argmax(self, hs_pad):\n \"\"\"argmax of frame activations\n\n :param torch.Tensor hs_pad: 3d tensor (B, Tmax, eprojs)\n :return: argmax applied 2d tensor (B, Tmax)\n :rtype: torch.Tensor\n \"\"\"\n return torch.argmax(self.ctc_lo(hs_pad), dim=2)\n\n\ndef ctc_for(args, odim, reduce=True):\n \"\"\"Returns the CTC module for the given args and output dimension\n\n :param Namespace args: the program args\n :param int 
odim : The output dimension\n :param bool reduce : return the CTC loss in a scalar\n :return: the corresponding CTC module\n \"\"\"\n return CTC(odim, args.eprojs, args.dropout_rate,\n ctc_type=args.ctc_type, reduce=reduce)\n",
"path": "espnet/nets/pytorch_backend/ctc.py"
}
] | diff --git a/espnet/nets/pytorch_backend/ctc.py b/espnet/nets/pytorch_backend/ctc.py
index 4573cec9d7e..8c58ca557be 100644
--- a/espnet/nets/pytorch_backend/ctc.py
+++ b/espnet/nets/pytorch_backend/ctc.py
@@ -113,4 +113,4 @@ def ctc_for(args, odim, reduce=True):
:return: the corresponding CTC module
"""
return CTC(odim, args.eprojs, args.dropout_rate,
- ctc_type=vars(args).get('ctc_type', 'builtin'), reduce=reduce)
+ ctc_type=args.ctc_type, reduce=reduce)
|
numba__numba-941 | Update README
Two issues with our README file:
- it is not up-to-date (e.g. it mentions Cython, which we don't use anymore)
- it uses Markdown rather than reST, and therefore is badly formatted when used for the PyPI long description: https://pypi.python.org/pypi/numba
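For reference, a minimal sketch of the corresponding `setup.py` adjustment (PyPI renders the long description as reStructuredText, so it should be read from a reST file):
```python
# setup.py (sketch): read the long description from the reST file so PyPI renders it correctly
setup_args = {
    "long_description": open("README.rst").read(),
}
```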
| [
{
"content": "try:\n # Try to use setuptools so as to enable support of the special\n # \"Microsoft Visual C++ Compiler for Python 2.7\" (http://aka.ms/vcpython27)\n # for building under Windows.\n # Note setuptools >= 6.0 is required for this.\n from setuptools import setup, Extension\nexcept ImportError:\n from distutils.core import setup, Extension\n\nimport sys\nimport os\nimport numpy\nimport numpy.distutils.misc_util as np_misc\nimport versioneer\n\nversioneer.versionfile_source = 'numba/_version.py'\nversioneer.versionfile_build = 'numba/_version.py'\nversioneer.tag_prefix = ''\nversioneer.parentdir_prefix = 'numba-'\n\ncmdclass = versioneer.get_cmdclass()\n\nsetup_args = {\n 'long_description': open('README.md').read(),\n}\n\nGCCFLAGS = [\"-std=c89\", \"-Wdeclaration-after-statement\", \"-Werror\"]\n\nif os.environ.get(\"NUMBA_GCC_FLAGS\"):\n CFLAGS = GCCFLAGS\nelse:\n CFLAGS = []\n\n\nif sys.platform == 'darwin' and sys.version_info[:2] == (2, 6):\n cpp_link_args = ['-lstdc++']\nelse:\n cpp_link_args = []\n\nnpymath_info = np_misc.get_info('npymath')\n\next_dynfunc = Extension(name='numba._dynfunc', sources=['numba/_dynfunc.c'],\n extra_compile_args=CFLAGS,\n depends=[\"numba/_pymodule.h\"])\n\next_npymath_exports = Extension(name='numba._npymath_exports',\n sources=['numba/_npymath_exports.c'],\n include_dirs=npymath_info['include_dirs'],\n libraries=npymath_info['libraries'],\n library_dirs=npymath_info['library_dirs'],\n define_macros=npymath_info['define_macros'])\n\n\next_dispatcher = Extension(name=\"numba._dispatcher\",\n include_dirs=[numpy.get_include()],\n sources=['numba/_dispatcher.c',\n 'numba/_dispatcherimpl.cpp',\n 'numba/typeconv/typeconv.cpp'],\n depends=[\"numba/_pymodule.h\",\n \"numba/_dispatcher.h\"],\n extra_link_args=cpp_link_args)\n\next_helperlib = Extension(name=\"numba._helperlib\",\n include_dirs=[numpy.get_include()],\n sources=[\"numba/_helperlib.c\", \"numba/_math_c99.c\"],\n extra_compile_args=CFLAGS,\n depends=[\"numba/_pymodule.h\",\n \"numba/_math_c99.h\",\n \"numba/mathnames.inc\"])\n\next_typeconv = Extension(name=\"numba.typeconv._typeconv\",\n sources=[\"numba/typeconv/typeconv.cpp\",\n \"numba/typeconv/_typeconv.cpp\"],\n depends=[\"numba/_pymodule.h\"],\n extra_link_args=cpp_link_args)\n\next_npyufunc_ufunc = Extension(name=\"numba.npyufunc._internal\",\n sources=[\"numba/npyufunc/_internal.c\"],\n include_dirs=[numpy.get_include()],\n depends=[\"numba/npyufunc/_ufunc.c\",\n \"numba/npyufunc/_internal.h\",\n \"numba/_pymodule.h\"])\n\next_mviewbuf = Extension(name='numba.mviewbuf',\n sources=['numba/mviewbuf.c'])\n\next_modules = [ext_dynfunc, ext_npymath_exports, ext_dispatcher,\n ext_helperlib, ext_typeconv, ext_npyufunc_ufunc, ext_mviewbuf]\n\npackages = [\n \"numba\",\n \"numba.targets\",\n \"numba.tests\",\n \"numba.typing\",\n \"numba.typeconv\",\n \"numba.npyufunc\",\n \"numba.pycc\",\n \"numba.servicelib\",\n \"numba.cuda\",\n \"numba.cuda.cudadrv\",\n \"numba.cuda.tests\",\n \"numba.cuda.tests.cudadrv\",\n \"numba.cuda.tests.cudadrv.data\",\n \"numba.cuda.tests.cudapy\",\n]\n\nsetup(name='numba',\n description=\"compiling Python code using LLVM\",\n version=versioneer.get_version(),\n\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3.3\",\n 
\"Programming Language :: Python :: 3.4\",\n \"Topic :: Software Development :: Compilers\",\n ],\n package_data={\n \"numba\": [\"*.c\", \"*.h\", \"*.cpp\", \"*.inc\"],\n \"numba.npyufunc\": [\"*.c\", \"*.h\"],\n \"numba.typeconv\": [\"*.cpp\", \"*.hpp\"],\n \"numba.cuda.tests.cudadrv.data\": [\"*.ptx\"],\n },\n scripts=[\"numba/pycc/pycc\", \"bin/numba\"],\n author=\"Continuum Analytics, Inc.\",\n author_email=\"[email protected]\",\n url=\"http://numba.github.com\",\n ext_modules=ext_modules,\n packages=packages,\n license=\"BSD\",\n cmdclass=cmdclass,\n **setup_args)\n\n",
"path": "setup.py"
}
] | [
{
"content": "try:\n # Try to use setuptools so as to enable support of the special\n # \"Microsoft Visual C++ Compiler for Python 2.7\" (http://aka.ms/vcpython27)\n # for building under Windows.\n # Note setuptools >= 6.0 is required for this.\n from setuptools import setup, Extension\nexcept ImportError:\n from distutils.core import setup, Extension\n\nimport sys\nimport os\nimport numpy\nimport numpy.distutils.misc_util as np_misc\nimport versioneer\n\nversioneer.versionfile_source = 'numba/_version.py'\nversioneer.versionfile_build = 'numba/_version.py'\nversioneer.tag_prefix = ''\nversioneer.parentdir_prefix = 'numba-'\n\ncmdclass = versioneer.get_cmdclass()\n\nsetup_args = {\n 'long_description': open('README.rst').read(),\n}\n\nGCCFLAGS = [\"-std=c89\", \"-Wdeclaration-after-statement\", \"-Werror\"]\n\nif os.environ.get(\"NUMBA_GCC_FLAGS\"):\n CFLAGS = GCCFLAGS\nelse:\n CFLAGS = []\n\n\nif sys.platform == 'darwin' and sys.version_info[:2] == (2, 6):\n cpp_link_args = ['-lstdc++']\nelse:\n cpp_link_args = []\n\nnpymath_info = np_misc.get_info('npymath')\n\next_dynfunc = Extension(name='numba._dynfunc', sources=['numba/_dynfunc.c'],\n extra_compile_args=CFLAGS,\n depends=[\"numba/_pymodule.h\"])\n\next_npymath_exports = Extension(name='numba._npymath_exports',\n sources=['numba/_npymath_exports.c'],\n include_dirs=npymath_info['include_dirs'],\n libraries=npymath_info['libraries'],\n library_dirs=npymath_info['library_dirs'],\n define_macros=npymath_info['define_macros'])\n\n\next_dispatcher = Extension(name=\"numba._dispatcher\",\n include_dirs=[numpy.get_include()],\n sources=['numba/_dispatcher.c',\n 'numba/_dispatcherimpl.cpp',\n 'numba/typeconv/typeconv.cpp'],\n depends=[\"numba/_pymodule.h\",\n \"numba/_dispatcher.h\"],\n extra_link_args=cpp_link_args)\n\next_helperlib = Extension(name=\"numba._helperlib\",\n include_dirs=[numpy.get_include()],\n sources=[\"numba/_helperlib.c\", \"numba/_math_c99.c\"],\n extra_compile_args=CFLAGS,\n depends=[\"numba/_pymodule.h\",\n \"numba/_math_c99.h\",\n \"numba/mathnames.inc\"])\n\next_typeconv = Extension(name=\"numba.typeconv._typeconv\",\n sources=[\"numba/typeconv/typeconv.cpp\",\n \"numba/typeconv/_typeconv.cpp\"],\n depends=[\"numba/_pymodule.h\"],\n extra_link_args=cpp_link_args)\n\next_npyufunc_ufunc = Extension(name=\"numba.npyufunc._internal\",\n sources=[\"numba/npyufunc/_internal.c\"],\n include_dirs=[numpy.get_include()],\n depends=[\"numba/npyufunc/_ufunc.c\",\n \"numba/npyufunc/_internal.h\",\n \"numba/_pymodule.h\"])\n\next_mviewbuf = Extension(name='numba.mviewbuf',\n sources=['numba/mviewbuf.c'])\n\next_modules = [ext_dynfunc, ext_npymath_exports, ext_dispatcher,\n ext_helperlib, ext_typeconv, ext_npyufunc_ufunc, ext_mviewbuf]\n\npackages = [\n \"numba\",\n \"numba.targets\",\n \"numba.tests\",\n \"numba.typing\",\n \"numba.typeconv\",\n \"numba.npyufunc\",\n \"numba.pycc\",\n \"numba.servicelib\",\n \"numba.cuda\",\n \"numba.cuda.cudadrv\",\n \"numba.cuda.tests\",\n \"numba.cuda.tests.cudadrv\",\n \"numba.cuda.tests.cudadrv.data\",\n \"numba.cuda.tests.cudapy\",\n]\n\nsetup(name='numba',\n description=\"compiling Python code using LLVM\",\n version=versioneer.get_version(),\n\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 
3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Topic :: Software Development :: Compilers\",\n ],\n package_data={\n \"numba\": [\"*.c\", \"*.h\", \"*.cpp\", \"*.inc\"],\n \"numba.npyufunc\": [\"*.c\", \"*.h\"],\n \"numba.typeconv\": [\"*.cpp\", \"*.hpp\"],\n \"numba.cuda.tests.cudadrv.data\": [\"*.ptx\"],\n },\n scripts=[\"numba/pycc/pycc\", \"bin/numba\"],\n author=\"Continuum Analytics, Inc.\",\n author_email=\"[email protected]\",\n url=\"http://numba.github.com\",\n ext_modules=ext_modules,\n packages=packages,\n license=\"BSD\",\n cmdclass=cmdclass,\n **setup_args)\n\n",
"path": "setup.py"
}
] | diff --git a/MANIFEST.in b/MANIFEST.in
index f6157564a73..17330a666bc 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,4 @@
-include README.md setup.py runtests.py versioneer.py CHANGE_LOG AUTHORS LICENSE
+include README.rst setup.py runtests.py versioneer.py CHANGE_LOG AUTHORS LICENSE
recursive-include docs *.ipynb *.txt *.py Makefile *.rstls
prune docs/_build
prune docs/gh-pages
diff --git a/README.md b/README.rst
similarity index 65%
rename from README.md
rename to README.rst
index 1b6f6ccefc3..657f63b557d 100644
--- a/README.md
+++ b/README.rst
@@ -1,3 +1,4 @@
+=====
Numba
=====
@@ -18,94 +19,93 @@ in the decorator.
Numba is a mechanism for producing machine code from Python syntax and typed
data structures such as those that exist in NumPy.
+
Dependencies
============
- * llvmlite
- * numpy (version 1.6 or higher)
- * argparse (for pycc in python2.6)
- * funcsigs (for Python 2)
+* llvmlite
+* numpy (version 1.6 or higher)
+* argparse (for pycc in python2.6)
+* funcsigs (for Python 2)
+
Installing
-=================
+==========
The easiest way to install numba and get updates is by using the Anaconda
Distribution: https://store.continuum.io/cshop/anaconda/
-```bash
-$ conda install numba
-```
+::
+
+ $ conda install numba
If you wanted to compile Numba from source,
it is recommended to use conda environment to maintain multiple isolated
-development environments. To create a new environment for Numba development:
+development environments. To create a new environment for Numba development::
-```bash
-$ conda create -p ~/dev/mynumba python numpy llvmlite
-```
+ $ conda create -p ~/dev/mynumba python numpy llvmlite
To select the installed version, append "=VERSION" to the package name,
-where, "VERSION" is the version number. For example:
+where, "VERSION" is the version number. For example::
-```bash
-$ conda create -p ~/dev/mynumba python=2.7 numpy=1.6 llvmlite
-```
+ $ conda create -p ~/dev/mynumba python=2.7 numpy=1.6 llvmlite
to use Python 2.7 and Numpy 1.6.
-**Note**: binary packages for llvmlite are currently available from Numba's
-own binstar account, so you'll have to add it to your channels first:
+If you need CUDA support, you should also install the CUDA toolkit::
-```bash
-$ conda config --add channels numba
-```
+ $ conda install cudatoolkit
Custom Python Environments
-==========================
+--------------------------
If you're not using conda, you will need to build llvmlite yourself:
-* Building and installing llvmlite
+Building and installing llvmlite
+''''''''''''''''''''''''''''''''
See https://github.com/numba/llvmlite for the most up-to-date instructions.
You will need a build of LLVM 3.5.
-```bash
-$ git clone https://github.com/numba/llvmlite
-$ cd llvmlite
-$ python setup.py install
-```
+::
+
+ $ git clone https://github.com/numba/llvmlite
+ $ cd llvmlite
+ $ python setup.py install
+
+Installing Numba
+''''''''''''''''
-* Installing Numba
+::
-```bash
-$ git clone https://github.com/numba/numba.git
-$ cd numba
-$ pip install -r requirements.txt
-$ python setup.py build_ext --inplace
-$ python setup.py install
-```
+ $ git clone https://github.com/numba/numba.git
+ $ cd numba
+ $ pip install -r requirements.txt
+ $ python setup.py build_ext --inplace
+ $ python setup.py install
or simply
-```bash
-$ pip install numba
-```
+::
+
+ $ pip install numba
+
+If you want to enable CUDA support, you will need CUDA Toolkit 5.5+ (which
+contains ``libnvvm``). After installing the Toolkit, you might have to
+specify a few environment variables according to
+http://numba.pydata.org/numba-doc/dev/CUDASupport.html
-If you want to enable CUDA support, you will need CUDA Toolkit 5.5+ (which contains
-``libnvvm``). After installing the Toolkit, you might have to specify a few
-environment variables according to http://numba.pydata.org/numba-doc/dev/CUDASupport.html
Documentation
=============
http://numba.pydata.org/numba-doc/dev/index.html
+
Mailing Lists
=============
Join the numba mailing list [email protected]:
-
https://groups.google.com/a/continuum.io/d/forum/numba-users
or access it through the Gmane mirror:
@@ -113,6 +113,7 @@ http://news.gmane.org/gmane.comp.python.numba.user
Some old archives are at: http://librelist.com/browser/numba/
+
Website
=======
@@ -120,6 +121,7 @@ See if our sponsor can help you (which can help this project): http://www.contin
http://numba.pydata.org
+
Continuous Integration
======================
diff --git a/setup.py b/setup.py
index 03b1d5de616..4da981636a7 100644
--- a/setup.py
+++ b/setup.py
@@ -21,7 +21,7 @@
cmdclass = versioneer.get_cmdclass()
setup_args = {
- 'long_description': open('README.md').read(),
+ 'long_description': open('README.rst').read(),
}
GCCFLAGS = ["-std=c89", "-Wdeclaration-after-statement", "-Werror"]
|
chainer__chainer-987 | Fix the shape of return value of F.det
Currently, the return value of `det` is an `xp.array` whose shape is `(1,)`, not a scalar.
```
In [16]: a = chainer.Variable(numpy.random.uniform(-1, 1, (3, 3)).astype(numpy.float32))
In [17]: chainer.functions.det(a).data
Out[17]: array([-0.80874199], dtype=float32)
```
But the documentation says the return value should be a `chainer.Variable` whose data has the shape `()`.
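A plain NumPy sketch of the difference between the two target shapes (the fix changes the final reshape from `(1,)` to `()`, which yields the documented scalar-shaped output):
```python
import numpy as np

batched_det = np.array([-0.80874199], dtype=np.float32)  # what the batched det returns for one matrix
as_vector = batched_det.reshape((1,))   # shape (1,) -> array([-0.80874199], dtype=float32)
as_scalar = batched_det.reshape(())     # shape ()   -> 0-d array, i.e. the documented scalar
print(as_vector.shape, as_scalar.shape)  # (1,) ()
```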
| [
{
"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.functions.array import reshape\nfrom chainer.functions.math import inv\nfrom chainer.functions.math import matmul\nfrom chainer import utils\nfrom chainer.utils import type_check\n\n\ndef _det_gpu(b):\n # We do a batched LU decomposition on the GPU to compute\n # and compute the determinant by multiplying the diagonal.\n # Change the shape of the array to be size=1 minibatch if necessary.\n # Also copy the matrix as the elments will be modified in-place.\n a = matmul._as_batch_mat(b).copy()\n n = a.shape[1]\n n_matrices = len(a)\n # Pivot array\n p = cuda.cupy.zeros((n_matrices, n), dtype='int32')\n # Output array\n # These arrays hold information on the execution success\n # or if the matrix was singular.\n info1 = cuda.cupy.zeros(n_matrices, dtype=numpy.intp)\n ap = matmul._mat_ptrs(a)\n _, lda = matmul._get_ld(a)\n cuda.cublas.sgetrfBatched(cuda.Device().cublas_handle, n, ap.data.ptr, lda,\n p.data.ptr, info1.data.ptr, n_matrices)\n det = cuda.cupy.prod(a.diagonal(axis1=1, axis2=2), axis=1)\n # The determinant is equal to the product of the diagonal entries\n # of `a` where the sign of `a` is flipped depending on whether\n # the pivot array is equal to its index.\n rng = cuda.cupy.arange(1, n + 1, dtype='int32')\n parity = cuda.cupy.sum(p != rng, axis=1) % 2\n sign = 1. - 2. * parity.astype('float32')\n success = cuda.cupy.all(info1 == 0)\n return det * sign, success\n\n\nclass BatchDet(function.Function):\n\n @property\n def label(self):\n return 'det'\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n a_type, = in_types\n a_type = matmul._convert_type(a_type)\n type_check.expect(a_type.dtype.kind == 'f')\n # Only a minibatch of 2D array shapes allowed.\n type_check.expect(a_type.ndim == 3)\n # Matrix inversion only allowed for square matrices\n # so assert the last two dimensions are equal.\n type_check.expect(a_type.shape[-1] == a_type.shape[-2])\n\n def forward_cpu(self, x):\n self.detx = utils.force_array(numpy.linalg.det(x[0]))\n return self.detx,\n\n def forward_gpu(self, x):\n self.detx, success = _det_gpu(x[0])\n if not success:\n raise ValueError('Singular Matrix')\n return self.detx,\n\n def backward_cpu(self, x, gy):\n x, = x\n gy, = gy\n grad = (gy[:, None, None] * self.detx[:, None, None] *\n numpy.linalg.inv(x.transpose((0, 2, 1))))\n return utils.force_array(grad),\n\n def backward_gpu(self, x, gy):\n x, = x\n gy, = gy\n grad = (gy[:, None, None] * self.detx[:, None, None] *\n inv._inv_gpu(x.transpose((0, 2, 1))))\n return utils.force_array(grad),\n\n\ndef batch_det(a):\n \"\"\"Computes the determinant of a batch of square matrices.\n\n Args:\n a (Variable): Input array to compute the determinant for.\n The first dimension should iterate over each matrix and be\n of the batchsize.\n\n Returns:\n ~chainer.Variable: vector of determinants for every matrix\n in the batch.\n\n \"\"\"\n return BatchDet()(a)\n\n\ndef det(a):\n \"\"\"Computes the determinant of a single square matrix.\n\n Args:\n a (Variable): Input array to compute the determinant for.\n\n Returns:\n ~chainer.Variable: Scalar determinant of the matrix a.\n\n \"\"\"\n shape = (1, len(a.data), a.data.shape[1])\n batched_a = reshape.Reshape(shape)(a)\n batched_det = BatchDet()(batched_a)\n return reshape.Reshape((1, ))(batched_det)\n",
"path": "chainer/functions/math/det.py"
}
] | [
{
"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.functions.array import reshape\nfrom chainer.functions.math import inv\nfrom chainer.functions.math import matmul\nfrom chainer import utils\nfrom chainer.utils import type_check\n\n\ndef _det_gpu(b):\n # We do a batched LU decomposition on the GPU to compute\n # and compute the determinant by multiplying the diagonal.\n # Change the shape of the array to be size=1 minibatch if necessary.\n # Also copy the matrix as the elments will be modified in-place.\n a = matmul._as_batch_mat(b).copy()\n n = a.shape[1]\n n_matrices = len(a)\n # Pivot array\n p = cuda.cupy.zeros((n_matrices, n), dtype='int32')\n # Output array\n # These arrays hold information on the execution success\n # or if the matrix was singular.\n info1 = cuda.cupy.zeros(n_matrices, dtype=numpy.intp)\n ap = matmul._mat_ptrs(a)\n _, lda = matmul._get_ld(a)\n cuda.cublas.sgetrfBatched(cuda.Device().cublas_handle, n, ap.data.ptr, lda,\n p.data.ptr, info1.data.ptr, n_matrices)\n det = cuda.cupy.prod(a.diagonal(axis1=1, axis2=2), axis=1)\n # The determinant is equal to the product of the diagonal entries\n # of `a` where the sign of `a` is flipped depending on whether\n # the pivot array is equal to its index.\n rng = cuda.cupy.arange(1, n + 1, dtype='int32')\n parity = cuda.cupy.sum(p != rng, axis=1) % 2\n sign = 1. - 2. * parity.astype('float32')\n success = cuda.cupy.all(info1 == 0)\n return det * sign, success\n\n\nclass BatchDet(function.Function):\n\n @property\n def label(self):\n return 'det'\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n a_type, = in_types\n a_type = matmul._convert_type(a_type)\n type_check.expect(a_type.dtype.kind == 'f')\n # Only a minibatch of 2D array shapes allowed.\n type_check.expect(a_type.ndim == 3)\n # Matrix inversion only allowed for square matrices\n # so assert the last two dimensions are equal.\n type_check.expect(a_type.shape[-1] == a_type.shape[-2])\n\n def forward_cpu(self, x):\n self.detx = utils.force_array(numpy.linalg.det(x[0]))\n return self.detx,\n\n def forward_gpu(self, x):\n self.detx, success = _det_gpu(x[0])\n if not success:\n raise ValueError('Singular Matrix')\n return self.detx,\n\n def backward_cpu(self, x, gy):\n x, = x\n gy, = gy\n grad = (gy[:, None, None] * self.detx[:, None, None] *\n numpy.linalg.inv(x.transpose((0, 2, 1))))\n return utils.force_array(grad),\n\n def backward_gpu(self, x, gy):\n x, = x\n gy, = gy\n grad = (gy[:, None, None] * self.detx[:, None, None] *\n inv._inv_gpu(x.transpose((0, 2, 1))))\n return utils.force_array(grad),\n\n\ndef batch_det(a):\n \"\"\"Computes the determinant of a batch of square matrices.\n\n Args:\n a (Variable): Input array to compute the determinant for.\n The first dimension should iterate over each matrix and be\n of the batchsize.\n\n Returns:\n ~chainer.Variable: vector of determinants for every matrix\n in the batch.\n\n \"\"\"\n return BatchDet()(a)\n\n\ndef det(a):\n \"\"\"Computes the determinant of a single square matrix.\n\n Args:\n a (Variable): Input array to compute the determinant for.\n\n Returns:\n ~chainer.Variable: Scalar determinant of the matrix a.\n\n \"\"\"\n shape = (1, len(a.data), a.data.shape[1])\n batched_a = reshape.Reshape(shape)(a)\n batched_det = BatchDet()(batched_a)\n return reshape.Reshape(())(batched_det)\n",
"path": "chainer/functions/math/det.py"
}
] | diff --git a/chainer/functions/math/det.py b/chainer/functions/math/det.py
index c34d5ce5e2ce..219e16247dfa 100644
--- a/chainer/functions/math/det.py
+++ b/chainer/functions/math/det.py
@@ -109,4 +109,4 @@ def det(a):
shape = (1, len(a.data), a.data.shape[1])
batched_a = reshape.Reshape(shape)(a)
batched_det = BatchDet()(batched_a)
- return reshape.Reshape((1, ))(batched_det)
+ return reshape.Reshape(())(batched_det)
diff --git a/tests/chainer_tests/functions_tests/math_tests/test_det.py b/tests/chainer_tests/functions_tests/math_tests/test_det.py
index 021911c02cd9..23ae65cb9f25 100644
--- a/tests/chainer_tests/functions_tests/math_tests/test_det.py
+++ b/tests/chainer_tests/functions_tests/math_tests/test_det.py
@@ -14,11 +14,6 @@
class DetFunctionTestBase(object):
- def setUp(self):
- self.x, self.y, self.gy = self.make_data()
- self.ct = numpy.array(
- [ix.T for ix in self.x], dtype=numpy.float32)
-
def det_transpose(self, gpu=False):
if gpu:
cx = cuda.to_gpu(self.x)
@@ -122,18 +117,20 @@ def test_batch_backward_cpu(self):
x_data, y_grad = self.x, self.gy
gradient_check.check_backward(self.det, x_data, y_grad)
- def test_expect_scalar_cpu(self):
- x = numpy.random.uniform(.5, 1, (2, 2)).astype(numpy.float32)
+ def check_single_matrix(self, x):
x = chainer.Variable(x)
- y = F.det(x)
- self.assertEqual(y.data.ndim, 1)
+ y = self.det(x)
+ if self.batched:
+ self.assertEqual(y.data.ndim, 1)
+ else:
+ self.assertEqual(y.data.ndim, 0)
+
+ def test_single_matrix_cpu(self):
+ self.check_single_matrix(self.x)
@attr.gpu
def test_expect_scalar_gpu(self):
- x = cuda.cupy.random.uniform(.5, 1, (2, 2)).astype(numpy.float32)
- x = chainer.Variable(x)
- y = F.det(x)
- self.assertEqual(y.data.ndim, 1)
+ self.check_single_matrix(cuda.to_gpu(self.x))
def test_zero_det_cpu(self):
x_data, y_grad = self.x, self.gy
@@ -198,11 +195,11 @@ def det(self, x):
def matmul(self, x, y):
return F.batch_matmul(x, y)
- def make_data(self):
- x = numpy.random.uniform(.5, 1, (6, 3, 3)).astype(numpy.float32)
- y = numpy.random.uniform(.5, 1, (6, 3, 3)).astype(numpy.float32)
- gy = numpy.random.uniform(-1, 1, (6,)).astype(numpy.float32)
- return x, y, gy
+ def setUp(self):
+ self.x = numpy.random.uniform(.5, 1, (6, 3, 3)).astype(numpy.float32)
+ self.y = numpy.random.uniform(.5, 1, (6, 3, 3)).astype(numpy.float32)
+ self.gy = numpy.random.uniform(-1, 1, (6,)).astype(numpy.float32)
+ self.ct = self.x.transpose(0, 2, 1)
class TestSquareDet(DetFunctionTestBase, unittest.TestCase):
@@ -214,11 +211,11 @@ def det(self, x):
def matmul(self, x, y):
return F.matmul(x, y)
- def make_data(self):
- x = numpy.random.uniform(.5, 1, (5, 5)).astype(numpy.float32)
- y = numpy.random.uniform(.5, 1, (5, 5)).astype(numpy.float32)
- gy = numpy.random.uniform(-1, 1, (1,)).astype(numpy.float32)
- return x, y, gy
+ def setUp(self):
+ self.x = numpy.random.uniform(.5, 1, (5, 5)).astype(numpy.float32)
+ self.y = numpy.random.uniform(.5, 1, (5, 5)).astype(numpy.float32)
+ self.gy = numpy.random.uniform(-1, 1, ()).astype(numpy.float32)
+ self.ct = self.x.transpose()
class DetFunctionRaiseTest(unittest.TestCase):
|
mitmproxy__mitmproxy-4762 When too many requests come simultaneously, mitmdump raises an error and quits [ValueError: too many file descriptors in select()]
#### Problem Description
When too many requests come simultaneously, mitmdump raises an error and quits.
```
Traceback (most recent call last):
  File "mitmdump", line 3, in <module>
  File "mitmproxy\tools\main.py", line 147, in mitmdump
  File "mitmproxy\tools\main.py", line 114, in run
  File "mitmproxy\master.py", line 76, in run
  File "mitmproxy\master.py", line 59, in run_loop
  File "mitmproxy\master.py", line 95, in shutdown
  File "asyncio\base_events.py", line 629, in run_until_complete
  File "asyncio\base_events.py", line 596, in run_forever
  File "asyncio\base_events.py", line 1854, in _run_once
  File "selectors.py", line 324, in select
  File "selectors.py", line 315, in _select
ValueError: too many file descriptors in select()
[77436] Failed to execute script 'mitmdump' due to unhandled exception!
```
I googled the error message and found the following answer; I don't know if it's related.
https://stackoverflow.com/questions/57182009/why-am-i-getting-an-valueerror-too-many-file-descriptors-in-select
#### Steps to reproduce the behavior:
1. I use the following command
`mitmdump.exe -p 8080 --anticomp -q -s "d:\redirect-router.py"`
In the script, I rewrite the host for a specific URL.
#### System Information
Paste the output of "mitmproxy --version" here.
mitmproxy --version
Mitmproxy: 7.0.2 binary
Python: 3.9.6
OpenSSL: OpenSSL 1.1.1k 25 Mar 2021
Platform: Windows-10-10.0.18363-SP0
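For context, the limit comes from the selector-based event loop that mitmproxy forces on Windows: `select()` is capped at 512 descriptors in CPython's Windows build, while the proactor (IOCP) loop, asyncio's default on Python 3.8+, has no such cap. A minimal, illustrative sketch (not mitmproxy's code) of inspecting and overriding the loop policy:
```python
import asyncio
import sys

if sys.platform == "win32":
    # WindowsSelectorEventLoopPolicy -> limited by select()'s 512-descriptor cap
    # WindowsProactorEventLoopPolicy -> IOCP-based, no select() limit (the default since Python 3.8)
    print(type(asyncio.get_event_loop_policy()).__name__)
    asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
```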
| [
{
"content": "import asyncio\nimport sys\n\nif sys.platform == 'win32':\n # workaround for\n # https://github.com/tornadoweb/tornado/issues/2751\n # https://www.tornadoweb.org/en/stable/index.html#installation\n # (copied multiple times in the codebase, please remove all occurrences)\n asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())\n",
"path": "mitmproxy/__init__.py"
}
] | [
{
"content": "",
"path": "mitmproxy/__init__.py"
}
] | diff --git a/CHANGELOG.md b/CHANGELOG.md
index c4f9ff6dd8..acf76819b8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,6 +3,8 @@
## Unreleased: mitmproxy next
* fix some responses not being decoded properly if the encoding was uppercase #4735 (@Mattwmaster58)
+* Windows: Switch to Python's default asyncio event loop, which increases the number of sockets
+ that can be processed simultaneously.
## 4 August 2021: mitmproxy 7.0.2
diff --git a/mitmproxy/__init__.py b/mitmproxy/__init__.py
index 9deef96050..e69de29bb2 100644
--- a/mitmproxy/__init__.py
+++ b/mitmproxy/__init__.py
@@ -1,9 +0,0 @@
-import asyncio
-import sys
-
-if sys.platform == 'win32':
- # workaround for
- # https://github.com/tornadoweb/tornado/issues/2751
- # https://www.tornadoweb.org/en/stable/index.html#installation
- # (copied multiple times in the codebase, please remove all occurrences)
- asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
diff --git a/test/mitmproxy/tools/web/test_app.py b/test/mitmproxy/tools/web/test_app.py
index 79456e6003..4272b202b6 100644
--- a/test/mitmproxy/tools/web/test_app.py
+++ b/test/mitmproxy/tools/web/test_app.py
@@ -2,18 +2,10 @@
import json as _json
import logging
import os
-import sys
from unittest import mock
import pytest
-if sys.platform == 'win32':
- # workaround for
- # https://github.com/tornadoweb/tornado/issues/2751
- # https://www.tornadoweb.org/en/stable/index.html#installation
- # (copied multiple times in the codebase, please remove all occurrences)
- asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
-
import tornado.testing # noqa
from tornado import httpclient # noqa
from tornado import websocket # noqa
|
jazzband__django-debug-toolbar-1872 | New AJAX request resets whole view if HistoryPanel is enabled.
Maybe I am doing something wrong, but I find working with DDT with HistoryPanel enabled quite annoying.
I have notifications on my website that make a request every ~5 seconds to check if there is anything new.
If I have HistoryPanel enabled in DDT, this means that if I am exploring some request (from the history or just the last one) and this AJAX notification request is made, I lose everything I am looking at and the whole DDT resets to the newest (notification) request.
Would it be possible to set DDT so that it switches the request only if I explicitly select it from history?
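The change below addresses this by introducing an opt-in `UPDATE_ON_FETCH` option (default `False`), so the toolbar keeps showing the selected request unless you explicitly enable following fetch/AJAX calls. A sketch of the resulting Django settings:
```python
# settings.py (sketch, based on the UPDATE_ON_FETCH option introduced below)
DEBUG_TOOLBAR_CONFIG = {
    # False: keep the currently displayed request when background AJAX/fetch calls arrive
    # True: jump to the newest request automatically (the previous behaviour)
    "UPDATE_ON_FETCH": False,
}
```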
| [
{
"content": "import warnings\nfrom functools import lru_cache\n\nfrom django.conf import settings\nfrom django.dispatch import receiver\nfrom django.test.signals import setting_changed\n\nCONFIG_DEFAULTS = {\n # Toolbar options\n \"DISABLE_PANELS\": {\n \"debug_toolbar.panels.profiling.ProfilingPanel\",\n \"debug_toolbar.panels.redirects.RedirectsPanel\",\n },\n \"INSERT_BEFORE\": \"</body>\",\n \"RENDER_PANELS\": None,\n \"RESULTS_CACHE_SIZE\": 25,\n \"ROOT_TAG_EXTRA_ATTRS\": \"\",\n \"SHOW_COLLAPSED\": False,\n \"SHOW_TOOLBAR_CALLBACK\": \"debug_toolbar.middleware.show_toolbar\",\n # Panel options\n \"EXTRA_SIGNALS\": [],\n \"ENABLE_STACKTRACES\": True,\n \"ENABLE_STACKTRACES_LOCALS\": False,\n \"HIDE_IN_STACKTRACES\": (\n \"socketserver\",\n \"threading\",\n \"wsgiref\",\n \"debug_toolbar\",\n \"django.db\",\n \"django.core.handlers\",\n \"django.core.servers\",\n \"django.utils.decorators\",\n \"django.utils.deprecation\",\n \"django.utils.functional\",\n ),\n \"PRETTIFY_SQL\": True,\n \"PROFILER_CAPTURE_PROJECT_CODE\": True,\n \"PROFILER_MAX_DEPTH\": 10,\n \"PROFILER_THRESHOLD_RATIO\": 8,\n \"SHOW_TEMPLATE_CONTEXT\": True,\n \"SKIP_TEMPLATE_PREFIXES\": (\"django/forms/widgets/\", \"admin/widgets/\"),\n \"SQL_WARNING_THRESHOLD\": 500, # milliseconds\n \"OBSERVE_REQUEST_CALLBACK\": \"debug_toolbar.toolbar.observe_request\",\n \"TOOLBAR_LANGUAGE\": None,\n}\n\n\n@lru_cache(maxsize=None)\ndef get_config():\n USER_CONFIG = getattr(settings, \"DEBUG_TOOLBAR_CONFIG\", {})\n CONFIG = CONFIG_DEFAULTS.copy()\n CONFIG.update(USER_CONFIG)\n return CONFIG\n\n\nPANELS_DEFAULTS = [\n \"debug_toolbar.panels.history.HistoryPanel\",\n \"debug_toolbar.panels.versions.VersionsPanel\",\n \"debug_toolbar.panels.timer.TimerPanel\",\n \"debug_toolbar.panels.settings.SettingsPanel\",\n \"debug_toolbar.panels.headers.HeadersPanel\",\n \"debug_toolbar.panels.request.RequestPanel\",\n \"debug_toolbar.panels.sql.SQLPanel\",\n \"debug_toolbar.panels.staticfiles.StaticFilesPanel\",\n \"debug_toolbar.panels.templates.TemplatesPanel\",\n \"debug_toolbar.panels.cache.CachePanel\",\n \"debug_toolbar.panels.signals.SignalsPanel\",\n \"debug_toolbar.panels.redirects.RedirectsPanel\",\n \"debug_toolbar.panels.profiling.ProfilingPanel\",\n]\n\n\n@lru_cache(maxsize=None)\ndef get_panels():\n try:\n PANELS = list(settings.DEBUG_TOOLBAR_PANELS)\n except AttributeError:\n PANELS = PANELS_DEFAULTS\n\n logging_panel = \"debug_toolbar.panels.logging.LoggingPanel\"\n if logging_panel in PANELS:\n PANELS = [panel for panel in PANELS if panel != logging_panel]\n warnings.warn(\n f\"Please remove {logging_panel} from your DEBUG_TOOLBAR_PANELS setting.\",\n DeprecationWarning,\n stacklevel=1,\n )\n return PANELS\n\n\n@receiver(setting_changed)\ndef update_toolbar_config(*, setting, **kwargs):\n \"\"\"\n Refresh configuration when overriding settings.\n \"\"\"\n if setting == \"DEBUG_TOOLBAR_CONFIG\":\n get_config.cache_clear()\n elif setting == \"DEBUG_TOOLBAR_PANELS\":\n from debug_toolbar.toolbar import DebugToolbar\n\n get_panels.cache_clear()\n DebugToolbar._panel_classes = None\n # Not implemented: invalidate debug_toolbar.urls.\n",
"path": "debug_toolbar/settings.py"
}
] | [
{
"content": "import warnings\nfrom functools import lru_cache\n\nfrom django.conf import settings\nfrom django.dispatch import receiver\nfrom django.test.signals import setting_changed\n\nCONFIG_DEFAULTS = {\n # Toolbar options\n \"DISABLE_PANELS\": {\n \"debug_toolbar.panels.profiling.ProfilingPanel\",\n \"debug_toolbar.panels.redirects.RedirectsPanel\",\n },\n \"INSERT_BEFORE\": \"</body>\",\n \"RENDER_PANELS\": None,\n \"RESULTS_CACHE_SIZE\": 25,\n \"ROOT_TAG_EXTRA_ATTRS\": \"\",\n \"SHOW_COLLAPSED\": False,\n \"SHOW_TOOLBAR_CALLBACK\": \"debug_toolbar.middleware.show_toolbar\",\n # Panel options\n \"EXTRA_SIGNALS\": [],\n \"ENABLE_STACKTRACES\": True,\n \"ENABLE_STACKTRACES_LOCALS\": False,\n \"HIDE_IN_STACKTRACES\": (\n \"socketserver\",\n \"threading\",\n \"wsgiref\",\n \"debug_toolbar\",\n \"django.db\",\n \"django.core.handlers\",\n \"django.core.servers\",\n \"django.utils.decorators\",\n \"django.utils.deprecation\",\n \"django.utils.functional\",\n ),\n \"PRETTIFY_SQL\": True,\n \"PROFILER_CAPTURE_PROJECT_CODE\": True,\n \"PROFILER_MAX_DEPTH\": 10,\n \"PROFILER_THRESHOLD_RATIO\": 8,\n \"SHOW_TEMPLATE_CONTEXT\": True,\n \"SKIP_TEMPLATE_PREFIXES\": (\"django/forms/widgets/\", \"admin/widgets/\"),\n \"SQL_WARNING_THRESHOLD\": 500, # milliseconds\n \"OBSERVE_REQUEST_CALLBACK\": \"debug_toolbar.toolbar.observe_request\",\n \"TOOLBAR_LANGUAGE\": None,\n \"UPDATE_ON_FETCH\": False,\n}\n\n\n@lru_cache(maxsize=None)\ndef get_config():\n USER_CONFIG = getattr(settings, \"DEBUG_TOOLBAR_CONFIG\", {})\n CONFIG = CONFIG_DEFAULTS.copy()\n CONFIG.update(USER_CONFIG)\n return CONFIG\n\n\nPANELS_DEFAULTS = [\n \"debug_toolbar.panels.history.HistoryPanel\",\n \"debug_toolbar.panels.versions.VersionsPanel\",\n \"debug_toolbar.panels.timer.TimerPanel\",\n \"debug_toolbar.panels.settings.SettingsPanel\",\n \"debug_toolbar.panels.headers.HeadersPanel\",\n \"debug_toolbar.panels.request.RequestPanel\",\n \"debug_toolbar.panels.sql.SQLPanel\",\n \"debug_toolbar.panels.staticfiles.StaticFilesPanel\",\n \"debug_toolbar.panels.templates.TemplatesPanel\",\n \"debug_toolbar.panels.cache.CachePanel\",\n \"debug_toolbar.panels.signals.SignalsPanel\",\n \"debug_toolbar.panels.redirects.RedirectsPanel\",\n \"debug_toolbar.panels.profiling.ProfilingPanel\",\n]\n\n\n@lru_cache(maxsize=None)\ndef get_panels():\n try:\n PANELS = list(settings.DEBUG_TOOLBAR_PANELS)\n except AttributeError:\n PANELS = PANELS_DEFAULTS\n\n logging_panel = \"debug_toolbar.panels.logging.LoggingPanel\"\n if logging_panel in PANELS:\n PANELS = [panel for panel in PANELS if panel != logging_panel]\n warnings.warn(\n f\"Please remove {logging_panel} from your DEBUG_TOOLBAR_PANELS setting.\",\n DeprecationWarning,\n stacklevel=1,\n )\n return PANELS\n\n\n@receiver(setting_changed)\ndef update_toolbar_config(*, setting, **kwargs):\n \"\"\"\n Refresh configuration when overriding settings.\n \"\"\"\n if setting == \"DEBUG_TOOLBAR_CONFIG\":\n get_config.cache_clear()\n elif setting == \"DEBUG_TOOLBAR_PANELS\":\n from debug_toolbar.toolbar import DebugToolbar\n\n get_panels.cache_clear()\n DebugToolbar._panel_classes = None\n # Not implemented: invalidate debug_toolbar.urls.\n",
"path": "debug_toolbar/settings.py"
}
] | diff --git a/debug_toolbar/settings.py b/debug_toolbar/settings.py
index eb6b59209..1df24527d 100644
--- a/debug_toolbar/settings.py
+++ b/debug_toolbar/settings.py
@@ -42,6 +42,7 @@
"SQL_WARNING_THRESHOLD": 500, # milliseconds
"OBSERVE_REQUEST_CALLBACK": "debug_toolbar.toolbar.observe_request",
"TOOLBAR_LANGUAGE": None,
+ "UPDATE_ON_FETCH": False,
}
diff --git a/debug_toolbar/static/debug_toolbar/js/history.js b/debug_toolbar/static/debug_toolbar/js/history.js
index b30fcabae..314ddb3ef 100644
--- a/debug_toolbar/static/debug_toolbar/js/history.js
+++ b/debug_toolbar/static/debug_toolbar/js/history.js
@@ -104,3 +104,6 @@ $$.on(djDebug, "click", ".refreshHistory", function (event) {
event.preventDefault();
refreshHistory();
});
+// We don't refresh the whole toolbar each fetch or ajax request,
+// so we need to refresh the history when we open the panel
+$$.onPanelRender(djDebug, "HistoryPanel", refreshHistory);
diff --git a/debug_toolbar/static/debug_toolbar/js/toolbar.js b/debug_toolbar/static/debug_toolbar/js/toolbar.js
index 6648fb52b..199616336 100644
--- a/debug_toolbar/static/debug_toolbar/js/toolbar.js
+++ b/debug_toolbar/static/debug_toolbar/js/toolbar.js
@@ -17,8 +17,10 @@ function getDebugElement() {
const djdt = {
handleDragged: false,
+ needUpdateOnFetch: false,
init() {
const djDebug = getDebugElement();
+ djdt.needUpdateOnFetch = djDebug.dataset.updateOnFetch === "True";
$$.on(djDebug, "click", "#djDebugPanelList li a", function (event) {
event.preventDefault();
if (!this.className) {
@@ -274,7 +276,9 @@ const djdt = {
storeId = encodeURIComponent(storeId);
const dest = `${sidebarUrl}?store_id=${storeId}`;
slowjax(dest).then(function (data) {
- replaceToolbarState(storeId, data);
+ if (djdt.needUpdateOnFetch){
+ replaceToolbarState(storeId, data);
+ }
});
}
diff --git a/debug_toolbar/templates/debug_toolbar/base.html b/debug_toolbar/templates/debug_toolbar/base.html
index 5447970af..6f4967f21 100644
--- a/debug_toolbar/templates/debug_toolbar/base.html
+++ b/debug_toolbar/templates/debug_toolbar/base.html
@@ -16,7 +16,7 @@
data-sidebar-url="{{ history_url }}"
{% endif %}
data-default-show="{% if toolbar.config.SHOW_COLLAPSED %}false{% else %}true{% endif %}"
- {{ toolbar.config.ROOT_TAG_EXTRA_ATTRS|safe }}>
+ {{ toolbar.config.ROOT_TAG_EXTRA_ATTRS|safe }} data-update-on-fetch="{{ toolbar.config.UPDATE_ON_FETCH }}">
<div class="djdt-hidden" id="djDebugToolbar">
<ul id="djDebugPanelList">
<li><a id="djHideToolBarButton" href="#" title="{% trans 'Hide toolbar' %}">{% trans "Hide" %} »</a></li>
diff --git a/docs/changes.rst b/docs/changes.rst
index 82185d756..9ff88b2b8 100644
--- a/docs/changes.rst
+++ b/docs/changes.rst
@@ -19,6 +19,9 @@ Pending
<https://astral.sh/blog/the-ruff-formatter>`__.
* Changed the default position of the toolbar from top to the upper top
position.
+* Added the setting, ``UPDATE_ON_FETCH`` to control whether the
+ toolbar automatically updates to the latest AJAX request or not.
+ It defaults to ``False``.
4.2.0 (2023-08-10)
------------------
diff --git a/docs/configuration.rst b/docs/configuration.rst
index 887608c6e..8271092ca 100644
--- a/docs/configuration.rst
+++ b/docs/configuration.rst
@@ -163,6 +163,16 @@ Toolbar options
but want to render your application in French, you would set this to
``"en-us"`` and :setting:`LANGUAGE_CODE` to ``"fr"``.
+.. _UPDATE_ON_FETCH:
+
+* ``UPDATE_ON_FETCH``
+
+ Default: ``False``
+
+ This controls whether the toolbar should update to the latest AJAX
+ request when it occurs. This is especially useful when using htmx
+ boosting or similar JavaScript techniques.
+
Panel options
~~~~~~~~~~~~~
diff --git a/docs/spelling_wordlist.txt b/docs/spelling_wordlist.txt
index 7a15d9aeb..436977bdc 100644
--- a/docs/spelling_wordlist.txt
+++ b/docs/spelling_wordlist.txt
@@ -6,6 +6,7 @@ Pympler
Roboto
Transifex
Werkzeug
+ajax
async
backend
backends
diff --git a/tests/templates/ajax/ajax.html b/tests/templates/ajax/ajax.html
new file mode 100644
index 000000000..c9de3acb6
--- /dev/null
+++ b/tests/templates/ajax/ajax.html
@@ -0,0 +1,21 @@
+{% extends "base.html" %}
+{% block content %}
+ <div id="click_for_ajax">click for ajax</div>
+
+ <script>
+
+ let click_for_ajax = document.getElementById("click_for_ajax");
+ function send_ajax() {
+ let xhr = new XMLHttpRequest();
+ let url = '/json_view/';
+ xhr.open("GET", url, true);
+ xhr.onreadystatechange = function () {
+ if (this.readyState == 4 && this.status == 200) {
+ console.log(this.responseText);
+ }
+ }
+ xhr.send();
+ }
+ document.addEventListener("click", (event) => {send_ajax()});
+ </script>
+{% endblock %}
diff --git a/tests/test_integration.py b/tests/test_integration.py
index b77b7cede..379fafaf4 100644
--- a/tests/test_integration.py
+++ b/tests/test_integration.py
@@ -1,5 +1,6 @@
import os
import re
+import time
import unittest
import html5lib
@@ -749,3 +750,24 @@ def test_toolbar_language_will_render_to_locale_when_set_both(self):
)
self.assertIn("Query", table.text)
self.assertIn("Action", table.text)
+
+ def test_ajax_dont_refresh(self):
+ self.get("/ajax/")
+ make_ajax = self.selenium.find_element(By.ID, "click_for_ajax")
+ make_ajax.click()
+ history_panel = self.selenium.find_element(By.ID, "djdt-HistoryPanel")
+ self.assertIn("/ajax/", history_panel.text)
+ self.assertNotIn("/json_view/", history_panel.text)
+
+ @override_settings(DEBUG_TOOLBAR_CONFIG={"UPDATE_ON_FETCH": True})
+ def test_ajax_refresh(self):
+ self.get("/ajax/")
+ make_ajax = self.selenium.find_element(By.ID, "click_for_ajax")
+ make_ajax.click()
+ # Need to wait until the ajax request is over and json_view is displayed on the toolbar
+ time.sleep(2)
+ history_panel = self.wait.until(
+ lambda selenium: self.selenium.find_element(By.ID, "djdt-HistoryPanel")
+ )
+ self.assertNotIn("/ajax/", history_panel.text)
+ self.assertIn("/json_view/", history_panel.text)
diff --git a/tests/urls.py b/tests/urls.py
index 6fc8811b7..f8929f1e8 100644
--- a/tests/urls.py
+++ b/tests/urls.py
@@ -21,6 +21,7 @@
path("cached_low_level_view/", views.cached_low_level_view),
path("json_view/", views.json_view),
path("redirect/", views.redirect_view),
+ path("ajax/", views.ajax_view),
path("login_without_redirect/", LoginView.as_view(redirect_field_name=None)),
path("admin/", admin.site.urls),
path("__debug__/", include("debug_toolbar.urls")),
diff --git a/tests/views.py b/tests/views.py
index b2fd21c54..c7214029e 100644
--- a/tests/views.py
+++ b/tests/views.py
@@ -58,3 +58,7 @@ def listcomp_view(request):
def redirect_view(request):
return HttpResponseRedirect("/regular/redirect/")
+
+
+def ajax_view(request):
+ return render(request, "ajax/ajax.html")
|
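The `docs/configuration.rst` hunk above describes the new option in prose; a minimal sketch of what enabling it would look like in a project's Django settings (illustrative only — `DEBUG_TOOLBAR_CONFIG` and `UPDATE_ON_FETCH` are taken from the diff, the `True` value is just for demonstration since the option defaults to `False`):

```
# settings.py (illustrative sketch, not part of the PR)
DEBUG_TOOLBAR_CONFIG = {
    # Refresh the toolbar to the latest AJAX/fetch request automatically,
    # e.g. when using htmx boosting or similar JavaScript techniques.
    "UPDATE_ON_FETCH": True,
}
```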
translate__pootle-3671 | Confusing sentence in permissions view
There is a permission called "Can review translations" that confused me, as I thought there were also reviewers besides suggesters and translators! Hopefully you can fix this so that it lands in 2.7.0.
| [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\n\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import Permission\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.utils.translation import ugettext_noop as _\n\nfrom pootle.core.models import Revision\nfrom pootle_app.models import Directory\nfrom pootle_app.models.permissions import PermissionSet, get_pootle_permission\nfrom pootle_language.models import Language\nfrom pootle_project.models import Project\nfrom staticpages.models import StaticPage as Announcement\n\n\ndef initdb():\n \"\"\"Populate the database with default initial data.\n\n This creates the default database to get a working Pootle installation.\n \"\"\"\n create_revision()\n create_essential_users()\n create_root_directories()\n create_template_languages()\n create_terminology_project()\n create_pootle_permissions()\n create_pootle_permission_sets()\n\n create_default_projects()\n create_default_languages()\n create_default_admin()\n\n\ndef create_revision():\n Revision.initialize()\n\n\ndef create_essential_users():\n \"\"\"Create the 'default' and 'nobody' User instances.\n\n These users are required for Pootle's permission system.\n \"\"\"\n User = get_user_model()\n\n # The nobody user is used to represent an anonymous user in cases where\n # we need to associate model information with such a user. An example is\n # in the permission system: we need a way to store rights for anonymous\n # users; thus we use the nobody user.\n criteria = {\n 'username': u\"nobody\",\n 'full_name': u\"any anonymous user\",\n 'is_active': True,\n }\n nobody, created = User.objects.get_or_create(**criteria)\n if created:\n nobody.set_unusable_password()\n nobody.save()\n\n # The 'default' user represents any valid, non-anonymous user and is used\n # to associate information any such user. An example is in the permission\n # system: we need a way to store default rights for users. 
We use the\n # 'default' user for this.\n #\n # In a future version of Pootle we should think about using Django's\n # groups to do better permissions handling.\n criteria = {\n 'username': u\"default\",\n 'full_name': u\"any authenticated user\",\n 'is_active': True,\n }\n default, created = User.objects.get_or_create(**criteria)\n if created:\n default.set_unusable_password()\n default.save()\n\n # The system user represents a system, and is used to\n # associate updates done by bulk commands as update_stores.\n criteria = {\n 'username': u\"system\",\n 'full_name': u\"system user\",\n 'is_active': True,\n }\n system, created = User.objects.get_or_create(**criteria)\n if created:\n system.set_unusable_password()\n system.save()\n\n\ndef create_pootle_permissions():\n \"\"\"Create Pootle's directory level permissions.\"\"\"\n\n args = {\n 'app_label': \"pootle_app\",\n 'model': \"directory\",\n }\n pootle_content_type, created = ContentType.objects.get_or_create(**args)\n pootle_content_type.name = 'pootle'\n pootle_content_type.save()\n\n # Create the permissions.\n permissions = [\n {\n 'name': _(\"Can access a project\"),\n 'codename': \"view\",\n },\n {\n 'name': _(\"Cannot access a project\"),\n 'codename': \"hide\",\n },\n {\n 'name': _(\"Can make a suggestion for a translation\"),\n 'codename': \"suggest\",\n },\n {\n 'name': _(\"Can submit a translation\"),\n 'codename': \"translate\",\n },\n {\n 'name': _(\"Can review translations\"),\n 'codename': \"review\",\n },\n {\n 'name': _(\"Can administrate a translation project\"),\n 'codename': \"administrate\",\n },\n ]\n\n criteria = {\n 'content_type': pootle_content_type,\n }\n\n for permission in permissions:\n criteria.update(permission)\n obj, created = Permission.objects.get_or_create(**criteria)\n\n\ndef create_pootle_permission_sets():\n \"\"\"Create the default permission set for the 'nobody' and 'default' users.\n\n 'nobody' is the anonymous (non-logged in) user, and 'default' is the logged\n in user.\n \"\"\"\n User = get_user_model()\n\n nobody = User.objects.get(username='nobody')\n default = User.objects.get(username='default')\n\n view = get_pootle_permission('view')\n suggest = get_pootle_permission('suggest')\n translate = get_pootle_permission('translate')\n\n # Default permissions for tree root.\n criteria = {\n 'user': nobody,\n 'directory': Directory.objects.root,\n }\n permission_set, created = PermissionSet.objects.get_or_create(**criteria)\n if created:\n permission_set.positive_permissions = [view, suggest]\n permission_set.save()\n\n criteria['user'] = default\n permission_set, created = PermissionSet.objects.get_or_create(**criteria)\n if created:\n permission_set.positive_permissions = [view, suggest, translate]\n permission_set.save()\n\n # Default permissions for templates language.\n # Override with no permissions for templates language.\n criteria = {\n 'user': nobody,\n 'directory': Directory.objects.get(pootle_path=\"/templates/\"),\n }\n permission_set, created = PermissionSet.objects.get_or_create(**criteria)\n if created:\n permission_set.positive_permissions = []\n permission_set.save()\n\n criteria['user'] = default\n permission_set, created = PermissionSet.objects.get_or_create(**criteria)\n if created:\n permission_set.positive_permissions = []\n permission_set.save()\n\n\ndef require_english():\n \"\"\"Create the English Language item.\"\"\"\n criteria = {\n 'code': \"en\",\n 'fullname': u\"English\",\n 'nplurals': 2,\n 'pluralequation': \"(n != 1)\",\n }\n en, created = 
Language.objects.get_or_create(**criteria)\n return en\n\n\ndef create_root_directories():\n \"\"\"Create the root Directory items.\"\"\"\n root, created = Directory.objects.get_or_create(name='')\n projects, created = Directory.objects.get_or_create(name='projects',\n parent=root)\n\n\ndef create_template_languages():\n \"\"\"Create the 'templates' and English languages.\n\n The 'templates' language is used to give users access to the untranslated\n template files.\n \"\"\"\n templates, created = Language.objects.get_or_create(code=\"templates\",\n fullname=u'Templates')\n require_english()\n\n\ndef create_terminology_project():\n \"\"\"Create the terminology project.\n\n The terminology project is used to display terminology suggestions while\n translating.\n \"\"\"\n criteria = {\n 'code': \"terminology\",\n 'fullname': u\"Terminology\",\n 'source_language': require_english(),\n 'checkstyle': \"terminology\",\n }\n terminology, created = Project.objects.get_or_create(**criteria)\n\n\ndef create_default_projects():\n \"\"\"Create the default projects that we host.\n\n You might want to add your projects here, although you can also add things\n through the web interface later.\n \"\"\"\n from pootle_project.models import Project\n\n en = require_english()\n\n criteria = {\n 'code': u\"tutorial\",\n 'source_language': en,\n 'fullname': u\"Tutorial\",\n 'checkstyle': \"standard\",\n 'localfiletype': \"po\",\n 'treestyle': \"auto\",\n }\n tutorial = Project(**criteria)\n tutorial.save()\n\n criteria = {\n 'active': True,\n 'title': \"Project instructions\",\n 'body': ('<div dir=\"ltr\" lang=\"en\">Tutorial project where users can '\n 'play with Pootle and learn more about translation and '\n 'localisation.<br />For more help on localisation, visit the '\n '<a href=\"http://docs.translatehouse.org/projects/'\n 'localization-guide/en/latest/guide/start.html\">localisation '\n 'guide</a>.</div>'),\n 'virtual_path': \"announcements/projects/\"+tutorial.code,\n }\n ann = Announcement(**criteria)\n ann.save()\n\n\ndef create_default_languages():\n \"\"\"Create the default languages.\"\"\"\n from translate.lang import data, factory\n\n from pootle_language.models import Language\n\n # import languages from toolkit\n for code in data.languages.keys():\n try:\n tk_lang = factory.getlanguage(code)\n criteria = {\n 'code': code,\n 'fullname': tk_lang.fullname,\n 'nplurals': tk_lang.nplurals,\n 'pluralequation': tk_lang.pluralequation,\n }\n try:\n criteria['specialchars'] = tk_lang.specialchars\n except AttributeError:\n pass\n lang, created = Language.objects.get_or_create(**criteria)\n except:\n pass\n\n\ndef create_default_admin():\n \"\"\"Create the default admin user for Pootle.\n\n You definitely want to change the admin account so that your default\n install is not accessible with the default credentials. The users 'noboby'\n and 'default' should be left as is.\n \"\"\"\n User = get_user_model()\n\n criteria = {\n 'username': u\"admin\",\n 'full_name': u\"Administrator\",\n 'is_active': True,\n 'is_superuser': True,\n }\n admin = User(**criteria)\n admin.set_password(\"admin\")\n admin.save()\n",
"path": "pootle/core/initdb.py"
}
] | [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\n\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import Permission\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.utils.translation import ugettext_noop as _\n\nfrom pootle.core.models import Revision\nfrom pootle_app.models import Directory\nfrom pootle_app.models.permissions import PermissionSet, get_pootle_permission\nfrom pootle_language.models import Language\nfrom pootle_project.models import Project\nfrom staticpages.models import StaticPage as Announcement\n\n\ndef initdb():\n \"\"\"Populate the database with default initial data.\n\n This creates the default database to get a working Pootle installation.\n \"\"\"\n create_revision()\n create_essential_users()\n create_root_directories()\n create_template_languages()\n create_terminology_project()\n create_pootle_permissions()\n create_pootle_permission_sets()\n\n create_default_projects()\n create_default_languages()\n create_default_admin()\n\n\ndef create_revision():\n Revision.initialize()\n\n\ndef create_essential_users():\n \"\"\"Create the 'default' and 'nobody' User instances.\n\n These users are required for Pootle's permission system.\n \"\"\"\n User = get_user_model()\n\n # The nobody user is used to represent an anonymous user in cases where\n # we need to associate model information with such a user. An example is\n # in the permission system: we need a way to store rights for anonymous\n # users; thus we use the nobody user.\n criteria = {\n 'username': u\"nobody\",\n 'full_name': u\"any anonymous user\",\n 'is_active': True,\n }\n nobody, created = User.objects.get_or_create(**criteria)\n if created:\n nobody.set_unusable_password()\n nobody.save()\n\n # The 'default' user represents any valid, non-anonymous user and is used\n # to associate information any such user. An example is in the permission\n # system: we need a way to store default rights for users. 
We use the\n # 'default' user for this.\n #\n # In a future version of Pootle we should think about using Django's\n # groups to do better permissions handling.\n criteria = {\n 'username': u\"default\",\n 'full_name': u\"any authenticated user\",\n 'is_active': True,\n }\n default, created = User.objects.get_or_create(**criteria)\n if created:\n default.set_unusable_password()\n default.save()\n\n # The system user represents a system, and is used to\n # associate updates done by bulk commands as update_stores.\n criteria = {\n 'username': u\"system\",\n 'full_name': u\"system user\",\n 'is_active': True,\n }\n system, created = User.objects.get_or_create(**criteria)\n if created:\n system.set_unusable_password()\n system.save()\n\n\ndef create_pootle_permissions():\n \"\"\"Create Pootle's directory level permissions.\"\"\"\n\n args = {\n 'app_label': \"pootle_app\",\n 'model': \"directory\",\n }\n pootle_content_type, created = ContentType.objects.get_or_create(**args)\n pootle_content_type.name = 'pootle'\n pootle_content_type.save()\n\n # Create the permissions.\n permissions = [\n {\n 'name': _(\"Can access a project\"),\n 'codename': \"view\",\n },\n {\n 'name': _(\"Cannot access a project\"),\n 'codename': \"hide\",\n },\n {\n 'name': _(\"Can make a suggestion for a translation\"),\n 'codename': \"suggest\",\n },\n {\n 'name': _(\"Can submit a translation\"),\n 'codename': \"translate\",\n },\n {\n 'name': _(\"Can review suggestions\"),\n 'codename': \"review\",\n },\n {\n 'name': _(\"Can administrate a translation project\"),\n 'codename': \"administrate\",\n },\n ]\n\n criteria = {\n 'content_type': pootle_content_type,\n }\n\n for permission in permissions:\n criteria.update(permission)\n obj, created = Permission.objects.get_or_create(**criteria)\n\n\ndef create_pootle_permission_sets():\n \"\"\"Create the default permission set for the 'nobody' and 'default' users.\n\n 'nobody' is the anonymous (non-logged in) user, and 'default' is the logged\n in user.\n \"\"\"\n User = get_user_model()\n\n nobody = User.objects.get(username='nobody')\n default = User.objects.get(username='default')\n\n view = get_pootle_permission('view')\n suggest = get_pootle_permission('suggest')\n translate = get_pootle_permission('translate')\n\n # Default permissions for tree root.\n criteria = {\n 'user': nobody,\n 'directory': Directory.objects.root,\n }\n permission_set, created = PermissionSet.objects.get_or_create(**criteria)\n if created:\n permission_set.positive_permissions = [view, suggest]\n permission_set.save()\n\n criteria['user'] = default\n permission_set, created = PermissionSet.objects.get_or_create(**criteria)\n if created:\n permission_set.positive_permissions = [view, suggest, translate]\n permission_set.save()\n\n # Default permissions for templates language.\n # Override with no permissions for templates language.\n criteria = {\n 'user': nobody,\n 'directory': Directory.objects.get(pootle_path=\"/templates/\"),\n }\n permission_set, created = PermissionSet.objects.get_or_create(**criteria)\n if created:\n permission_set.positive_permissions = []\n permission_set.save()\n\n criteria['user'] = default\n permission_set, created = PermissionSet.objects.get_or_create(**criteria)\n if created:\n permission_set.positive_permissions = []\n permission_set.save()\n\n\ndef require_english():\n \"\"\"Create the English Language item.\"\"\"\n criteria = {\n 'code': \"en\",\n 'fullname': u\"English\",\n 'nplurals': 2,\n 'pluralequation': \"(n != 1)\",\n }\n en, created = 
Language.objects.get_or_create(**criteria)\n return en\n\n\ndef create_root_directories():\n \"\"\"Create the root Directory items.\"\"\"\n root, created = Directory.objects.get_or_create(name='')\n projects, created = Directory.objects.get_or_create(name='projects',\n parent=root)\n\n\ndef create_template_languages():\n \"\"\"Create the 'templates' and English languages.\n\n The 'templates' language is used to give users access to the untranslated\n template files.\n \"\"\"\n templates, created = Language.objects.get_or_create(code=\"templates\",\n fullname=u'Templates')\n require_english()\n\n\ndef create_terminology_project():\n \"\"\"Create the terminology project.\n\n The terminology project is used to display terminology suggestions while\n translating.\n \"\"\"\n criteria = {\n 'code': \"terminology\",\n 'fullname': u\"Terminology\",\n 'source_language': require_english(),\n 'checkstyle': \"terminology\",\n }\n terminology, created = Project.objects.get_or_create(**criteria)\n\n\ndef create_default_projects():\n \"\"\"Create the default projects that we host.\n\n You might want to add your projects here, although you can also add things\n through the web interface later.\n \"\"\"\n from pootle_project.models import Project\n\n en = require_english()\n\n criteria = {\n 'code': u\"tutorial\",\n 'source_language': en,\n 'fullname': u\"Tutorial\",\n 'checkstyle': \"standard\",\n 'localfiletype': \"po\",\n 'treestyle': \"auto\",\n }\n tutorial = Project(**criteria)\n tutorial.save()\n\n criteria = {\n 'active': True,\n 'title': \"Project instructions\",\n 'body': ('<div dir=\"ltr\" lang=\"en\">Tutorial project where users can '\n 'play with Pootle and learn more about translation and '\n 'localisation.<br />For more help on localisation, visit the '\n '<a href=\"http://docs.translatehouse.org/projects/'\n 'localization-guide/en/latest/guide/start.html\">localisation '\n 'guide</a>.</div>'),\n 'virtual_path': \"announcements/projects/\"+tutorial.code,\n }\n ann = Announcement(**criteria)\n ann.save()\n\n\ndef create_default_languages():\n \"\"\"Create the default languages.\"\"\"\n from translate.lang import data, factory\n\n from pootle_language.models import Language\n\n # import languages from toolkit\n for code in data.languages.keys():\n try:\n tk_lang = factory.getlanguage(code)\n criteria = {\n 'code': code,\n 'fullname': tk_lang.fullname,\n 'nplurals': tk_lang.nplurals,\n 'pluralequation': tk_lang.pluralequation,\n }\n try:\n criteria['specialchars'] = tk_lang.specialchars\n except AttributeError:\n pass\n lang, created = Language.objects.get_or_create(**criteria)\n except:\n pass\n\n\ndef create_default_admin():\n \"\"\"Create the default admin user for Pootle.\n\n You definitely want to change the admin account so that your default\n install is not accessible with the default credentials. The users 'noboby'\n and 'default' should be left as is.\n \"\"\"\n User = get_user_model()\n\n criteria = {\n 'username': u\"admin\",\n 'full_name': u\"Administrator\",\n 'is_active': True,\n 'is_superuser': True,\n }\n admin = User(**criteria)\n admin.set_password(\"admin\")\n admin.save()\n",
"path": "pootle/core/initdb.py"
}
] | diff --git a/pootle/core/initdb.py b/pootle/core/initdb.py
index fc14e6d8cc5..3b3b5ab3b07 100644
--- a/pootle/core/initdb.py
+++ b/pootle/core/initdb.py
@@ -124,7 +124,7 @@ def create_pootle_permissions():
'codename': "translate",
},
{
- 'name': _("Can review translations"),
+ 'name': _("Can review suggestions"),
'codename': "review",
},
{
|
archlinux__archinstall-1787 | archinstall crashing
Here's the traceback
```
Traceback (most recent call last):
File "/usr/bin/archinstall", line 5, in <module>
from archinstall import run_as_a_module
File "/usr/lib/python3.11/site-packages/archinstall/__init__.py", line 5, in <module>
from .lib import disk
File "/usr/lib/python3.11/site-packages/archinstall/lib/disk/__init__.py", line 1, in <module>
from .device_handler import device_handler, disk_layouts
File "/usr/lib/python3.11/site-packages/archinstall/lib/disk/device_handler.py", line 16, in <module>
from .device_model import (
File "/usr/lib/python3.11/site-packages/archinstall/lib/disk/device_model.py", line 849, in <module>
@dataclass
^^^^^^^^^
File "/usr/lib/python3.11/dataclasses.py", line 1223, in dataclass
return wrap(cls)
^^^^^^^^^
File "/usr/lib/python3.11/dataclasses.py", line 1213, in wrap
return _process_class(cls, init, repr, eq, order, unsafe_hash,
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/python3.11/dataclasses.py", line 958, in _process_class
cls_fields.append(_get_field(cls, name, type, kw_only))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/python3.11/dataclasses.py", line 815, in _get_field
raise ValueError(f'mutable default {type(f.default)} for field '
ValueError: mutable default <class 'archinstall.lib.disk.device_model.Size'> for field size is not allowed: use default_factory
```
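The traceback comes from Python 3.11's stricter dataclass check: any unhashable default value (which includes an instance of a regular, non-frozen `@dataclass` such as `Size`) is rejected as a mutable default. Below is a minimal, self-contained sketch of the usual `default_factory` remedy — the `Size`/`Unit` classes here are simplified stand-ins, not the real archinstall ones, and the actual upstream fix may have taken a different route:

```
from dataclasses import dataclass, field
from enum import Enum


class Unit(Enum):
    B = 1


@dataclass
class Size:  # simplified stand-in for archinstall's Size
    value: int
    unit: Unit


@dataclass
class LsblkInfo:
    # `size: Size = Size(0, Unit.B)` triggers the ValueError above on Python 3.11+,
    # because a non-frozen dataclass instance counts as a mutable default.
    # Deferring construction with default_factory sidesteps the check:
    size: Size = field(default_factory=lambda: Size(0, Unit.B))
```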
| [
{
"content": "from __future__ import annotations\n\nimport dataclasses\nimport json\nimport logging\nimport math\nimport time\nimport uuid\nfrom dataclasses import dataclass, field\nfrom enum import Enum\nfrom enum import auto\nfrom pathlib import Path\nfrom typing import Optional, List, Dict, TYPE_CHECKING, Any\nfrom typing import Union\n\nimport parted # type: ignore\nfrom parted import Disk, Geometry, Partition\n\nfrom ..exceptions import DiskError, SysCallError\nfrom ..general import SysCommand\nfrom ..output import log\nfrom ..storage import storage\n\nif TYPE_CHECKING:\n\t_: Any\n\n\nclass DiskLayoutType(Enum):\n\tDefault = 'default_layout'\n\tManual = 'manual_partitioning'\n\tPre_mount = 'pre_mounted_config'\n\n\tdef display_msg(self) -> str:\n\t\tmatch self:\n\t\t\tcase DiskLayoutType.Default: return str(_('Use a best-effort default partition layout'))\n\t\t\tcase DiskLayoutType.Manual: return str(_('Manual Partitioning'))\n\t\t\tcase DiskLayoutType.Pre_mount: return str(_('Pre-mounted configuration'))\n\n\n@dataclass\nclass DiskLayoutConfiguration:\n\tconfig_type: DiskLayoutType\n\tdevice_modifications: List[DeviceModification] = field(default_factory=list)\n\t# used for pre-mounted config\n\trelative_mountpoint: Optional[Path] = None\n\n\tdef __post_init__(self):\n\t\tif self.config_type == DiskLayoutType.Pre_mount and self.relative_mountpoint is None:\n\t\t\traise ValueError('Must set a relative mountpoint when layout type is pre-mount\"')\n\n\tdef __dump__(self) -> Dict[str, Any]:\n\t\treturn {\n\t\t\t'config_type': self.config_type.value,\n\t\t\t'device_modifications': [mod.__dump__() for mod in self.device_modifications]\n\t\t}\n\n\t@classmethod\n\tdef parse_arg(cls, disk_config: Dict[str, List[Dict[str, Any]]]) -> Optional[DiskLayoutConfiguration]:\n\t\tfrom .device_handler import device_handler\n\n\t\tdevice_modifications: List[DeviceModification] = []\n\t\tconfig_type = disk_config.get('config_type', None)\n\n\t\tif not config_type:\n\t\t\traise ValueError('Missing disk layout configuration: config_type')\n\n\t\tconfig = DiskLayoutConfiguration(\n\t\t\tconfig_type=DiskLayoutType(config_type),\n\t\t\tdevice_modifications=device_modifications\n\t\t)\n\n\t\tfor entry in disk_config.get('device_modifications', []):\n\t\t\tdevice_path = Path(entry.get('device', None)) if entry.get('device', None) else None\n\n\t\t\tif not device_path:\n\t\t\t\tcontinue\n\n\t\t\tdevice = device_handler.get_device(device_path)\n\n\t\t\tif not device:\n\t\t\t\tcontinue\n\n\t\t\tdevice_modification = DeviceModification(\n\t\t\t\twipe=entry.get('wipe', False),\n\t\t\t\tdevice=device\n\t\t\t)\n\n\t\t\tdevice_partitions: List[PartitionModification] = []\n\n\t\t\tfor partition in entry.get('partitions', []):\n\t\t\t\tdevice_partition = PartitionModification(\n\t\t\t\t\tstatus=ModificationStatus(partition['status']),\n\t\t\t\t\tfs_type=FilesystemType(partition['fs_type']),\n\t\t\t\t\tstart=Size.parse_args(partition['start']),\n\t\t\t\t\tlength=Size.parse_args(partition['length']),\n\t\t\t\t\tmount_options=partition['mount_options'],\n\t\t\t\t\tmountpoint=Path(partition['mountpoint']) if partition['mountpoint'] else None,\n\t\t\t\t\ttype=PartitionType(partition['type']),\n\t\t\t\t\tflags=[PartitionFlag[f] for f in partition.get('flags', [])],\n\t\t\t\t\tbtrfs_subvols=SubvolumeModification.parse_args(partition.get('btrfs', [])),\n\t\t\t\t)\n\t\t\t\t# special 'invisible attr to internally identify the part mod\n\t\t\t\tsetattr(device_partition, '_obj_id', 
partition['obj_id'])\n\t\t\t\tdevice_partitions.append(device_partition)\n\n\t\t\tdevice_modification.partitions = device_partitions\n\t\t\tdevice_modifications.append(device_modification)\n\n\t\treturn config\n\n\nclass PartitionTable(Enum):\n\tGPT = 'gpt'\n\tMBR = 'msdos'\n\n\nclass Unit(Enum):\n\tB = 1 # byte\n\tkB = 1000**1 # kilobyte\n\tMB = 1000**2 # megabyte\n\tGB = 1000**3 # gigabyte\n\tTB = 1000**4 # terabyte\n\tPB = 1000**5 # petabyte\n\tEB = 1000**6 # exabyte\n\tZB = 1000**7 # zettabyte\n\tYB = 1000**8 # yottabyte\n\n\tKiB = 1024**1 \t# kibibyte\n\tMiB = 1024**2 \t# mebibyte\n\tGiB = 1024**3 \t# gibibyte\n\tTiB = 1024**4 \t# tebibyte\n\tPiB = 1024**5 \t# pebibyte\n\tEiB = 1024**6 \t# exbibyte\n\tZiB = 1024**7 \t# zebibyte\n\tYiB = 1024**8 \t# yobibyte\n\n\tsectors = 'sectors' # size in sector\n\n\tPercent = '%' \t# size in percentile\n\n\n@dataclass\nclass Size:\n\tvalue: int\n\tunit: Unit\n\tsector_size: Optional[Size] = None # only required when unit is sector\n\ttotal_size: Optional[Size] = None # required when operating on percentages\n\n\tdef __post_init__(self):\n\t\tif self.unit == Unit.sectors and self.sector_size is None:\n\t\t\traise ValueError('Sector size is required when unit is sectors')\n\t\telif self.unit == Unit.Percent:\n\t\t\tif self.value < 0 or self.value > 100:\n\t\t\t\traise ValueError('Percentage must be between 0 and 100')\n\t\t\telif self.total_size is None:\n\t\t\t\traise ValueError('Total size is required when unit is percentage')\n\n\t@property\n\tdef _total_size(self) -> Size:\n\t\t\"\"\"\n\t\tSave method to get the total size, mainly to satisfy mypy\n\t\tThis shouldn't happen as the Size object fails instantiation on missing total size\n\t\t\"\"\"\n\t\tif self.unit == Unit.Percent and self.total_size is None:\n\t\t\traise ValueError('Percent unit size must specify a total size')\n\t\treturn self.total_size # type: ignore\n\n\tdef __dump__(self) -> Dict[str, Any]:\n\t\treturn {\n\t\t\t'value': self.value,\n\t\t\t'unit': self.unit.name,\n\t\t\t'sector_size': self.sector_size.__dump__() if self.sector_size else None,\n\t\t\t'total_size': self._total_size.__dump__() if self._total_size else None\n\t\t}\n\n\t@classmethod\n\tdef parse_args(cls, size_arg: Dict[str, Any]) -> Size:\n\t\tsector_size = size_arg['sector_size']\n\t\ttotal_size = size_arg['total_size']\n\n\t\treturn Size(\n\t\t\tsize_arg['value'],\n\t\t\tUnit[size_arg['unit']],\n\t\t\tSize.parse_args(sector_size) if sector_size else None,\n\t\t\tSize.parse_args(total_size) if total_size else None\n\t\t)\n\n\tdef convert(\n\t\tself,\n\t\ttarget_unit: Unit,\n\t\tsector_size: Optional[Size] = None,\n\t\ttotal_size: Optional[Size] = None\n\t) -> Size:\n\t\tif target_unit == Unit.sectors and sector_size is None:\n\t\t\traise ValueError('If target has unit sector, a sector size must be provided')\n\n\t\t# not sure why we would ever wanna convert to percentages\n\t\tif target_unit == Unit.Percent and total_size is None:\n\t\t\traise ValueError('Missing paramter total size to be able to convert to percentage')\n\n\t\tif self.unit == target_unit:\n\t\t\treturn self\n\t\telif self.unit == Unit.Percent:\n\t\t\tamount = int(self._total_size._normalize() * (self.value / 100))\n\t\t\treturn Size(amount, Unit.B)\n\t\telif self.unit == Unit.sectors:\n\t\t\tnorm = self._normalize()\n\t\t\treturn Size(norm, Unit.B).convert(target_unit, sector_size)\n\t\telse:\n\t\t\tif target_unit == Unit.sectors and sector_size is not None:\n\t\t\t\tnorm = self._normalize()\n\t\t\t\tsectors = math.ceil(norm / 
sector_size.value)\n\t\t\t\treturn Size(sectors, Unit.sectors, sector_size)\n\t\t\telse:\n\t\t\t\tvalue = int(self._normalize() / target_unit.value) # type: ignore\n\t\t\t\treturn Size(value, target_unit)\n\n\tdef format_size(\n\t\tself,\n\t\ttarget_unit: Unit,\n\t\tsector_size: Optional[Size] = None\n\t) -> str:\n\t\tif self.unit == Unit.Percent:\n\t\t\treturn f'{self.value}%'\n\t\telse:\n\t\t\ttarget_size = self.convert(target_unit, sector_size)\n\t\t\treturn f'{target_size.value} {target_unit.name}'\n\n\tdef _normalize(self) -> int:\n\t\t\"\"\"\n\t\twill normalize the value of the unit to Byte\n\t\t\"\"\"\n\t\tif self.unit == Unit.Percent:\n\t\t\treturn self.convert(Unit.B).value\n\t\telif self.unit == Unit.sectors and self.sector_size is not None:\n\t\t\treturn self.value * self.sector_size._normalize()\n\t\treturn int(self.value * self.unit.value) # type: ignore\n\n\tdef __sub__(self, other: Size) -> Size:\n\t\tsrc_norm = self._normalize()\n\t\tdest_norm = other._normalize()\n\t\treturn Size(abs(src_norm - dest_norm), Unit.B)\n\n\tdef __lt__(self, other):\n\t\treturn self._normalize() < other._normalize()\n\n\tdef __le__(self, other):\n\t\treturn self._normalize() <= other._normalize()\n\n\tdef __eq__(self, other):\n\t\treturn self._normalize() == other._normalize()\n\n\tdef __ne__(self, other):\n\t\treturn self._normalize() != other._normalize()\n\n\tdef __gt__(self, other):\n\t\treturn self._normalize() > other._normalize()\n\n\tdef __ge__(self, other):\n\t\treturn self._normalize() >= other._normalize()\n\n\n@dataclass\nclass _BtrfsSubvolumeInfo:\n\tname: Path\n\tmountpoint: Optional[Path]\n\n\n@dataclass\nclass _PartitionInfo:\n\tpartition: Partition\n\tname: str\n\ttype: PartitionType\n\tfs_type: FilesystemType\n\tpath: Path\n\tstart: Size\n\tlength: Size\n\tflags: List[PartitionFlag]\n\tpartuuid: str\n\tdisk: Disk\n\tmountpoints: List[Path]\n\tbtrfs_subvol_infos: List[_BtrfsSubvolumeInfo] = field(default_factory=list)\n\n\tdef as_json(self) -> Dict[str, Any]:\n\t\tinfo = {\n\t\t\t'Name': self.name,\n\t\t\t'Type': self.type.value,\n\t\t\t'Filesystem': self.fs_type.value if self.fs_type else str(_('Unknown')),\n\t\t\t'Path': str(self.path),\n\t\t\t'Start': self.start.format_size(Unit.MiB),\n\t\t\t'Length': self.length.format_size(Unit.MiB),\n\t\t\t'Flags': ', '.join([f.name for f in self.flags])\n\t\t}\n\n\t\tif self.btrfs_subvol_infos:\n\t\t\tinfo['Btrfs vol.'] = f'{len(self.btrfs_subvol_infos)} subvolumes'\n\n\t\treturn info\n\n\t@classmethod\n\tdef from_partition(\n\t\tcls,\n\t\tpartition: Partition,\n\t\tfs_type: FilesystemType,\n\t\tpartuuid: str,\n\t\tmountpoints: List[Path],\n\t\tbtrfs_subvol_infos: List[_BtrfsSubvolumeInfo] = []\n\t) -> _PartitionInfo:\n\t\tpartition_type = PartitionType.get_type_from_code(partition.type)\n\t\tflags = [f for f in PartitionFlag if partition.getFlag(f.value)]\n\n\t\tstart = Size(\n\t\t\tpartition.geometry.start,\n\t\t\tUnit.sectors,\n\t\t\tSize(partition.disk.device.sectorSize, Unit.B)\n\t\t)\n\n\t\tlength = Size(int(partition.getLength(unit='B')), Unit.B)\n\n\t\treturn _PartitionInfo(\n\t\t\tpartition=partition,\n\t\t\tname=partition.get_name(),\n\t\t\ttype=partition_type,\n\t\t\tfs_type=fs_type,\n\t\t\tpath=partition.path,\n\t\t\tstart=start,\n\t\t\tlength=length,\n\t\t\tflags=flags,\n\t\t\tpartuuid=partuuid,\n\t\t\tdisk=partition.disk,\n\t\t\tmountpoints=mountpoints,\n\t\t\tbtrfs_subvol_infos=btrfs_subvol_infos\n\t\t)\n\n\n@dataclass\nclass _DeviceInfo:\n\tmodel: str\n\tpath: Path\n\ttype: str\n\ttotal_size: Size\n\tfree_space_regions: 
List[DeviceGeometry]\n\tsector_size: Size\n\tread_only: bool\n\tdirty: bool\n\n\tdef as_json(self) -> Dict[str, Any]:\n\t\ttotal_free_space = sum([region.get_length(unit=Unit.MiB) for region in self.free_space_regions])\n\t\treturn {\n\t\t\t'Model': self.model,\n\t\t\t'Path': str(self.path),\n\t\t\t'Type': self.type,\n\t\t\t'Size': self.total_size.format_size(Unit.MiB),\n\t\t\t'Free space': int(total_free_space),\n\t\t\t'Sector size': self.sector_size.value,\n\t\t\t'Read only': self.read_only\n\t\t}\n\n\t@classmethod\n\tdef from_disk(cls, disk: Disk) -> _DeviceInfo:\n\t\tdevice = disk.device\n\t\tdevice_type = parted.devices[device.type]\n\n\t\tsector_size = Size(device.sectorSize, Unit.B)\n\t\tfree_space = [DeviceGeometry(g, sector_size) for g in disk.getFreeSpaceRegions()]\n\n\t\treturn _DeviceInfo(\n\t\t\tmodel=device.model.strip(),\n\t\t\tpath=Path(device.path),\n\t\t\ttype=device_type,\n\t\t\tsector_size=sector_size,\n\t\t\ttotal_size=Size(int(device.getLength(unit='B')), Unit.B),\n\t\t\tfree_space_regions=free_space,\n\t\t\tread_only=device.readOnly,\n\t\t\tdirty=device.dirty\n\t\t)\n\n\n@dataclass\nclass SubvolumeModification:\n\tname: Path\n\tmountpoint: Optional[Path] = None\n\tcompress: bool = False\n\tnodatacow: bool = False\n\n\t@classmethod\n\tdef from_existing_subvol_info(cls, info: _BtrfsSubvolumeInfo) -> SubvolumeModification:\n\t\treturn SubvolumeModification(info.name, mountpoint=info.mountpoint)\n\n\t@classmethod\n\tdef parse_args(cls, subvol_args: List[Dict[str, Any]]) -> List[SubvolumeModification]:\n\t\tmods = []\n\t\tfor entry in subvol_args:\n\t\t\tif not entry.get('name', None) or not entry.get('mountpoint', None):\n\t\t\t\tlog(f'Subvolume arg is missing name: {entry}', level=logging.DEBUG)\n\t\t\t\tcontinue\n\n\t\t\tmountpoint = Path(entry['mountpoint']) if entry['mountpoint'] else None\n\n\t\t\tmods.append(\n\t\t\t\tSubvolumeModification(\n\t\t\t\t\tentry['name'],\n\t\t\t\t\tmountpoint,\n\t\t\t\t\tentry.get('compress', False),\n\t\t\t\t\tentry.get('nodatacow', False)\n\t\t\t\t)\n\t\t\t)\n\n\t\treturn mods\n\n\t@property\n\tdef mount_options(self) -> List[str]:\n\t\toptions = []\n\t\toptions += ['compress'] if self.compress else []\n\t\toptions += ['nodatacow'] if self.nodatacow else []\n\t\treturn options\n\n\t@property\n\tdef relative_mountpoint(self) -> Path:\n\t\t\"\"\"\n\t\tWill return the relative path based on the anchor\n\t\te.g. 
Path('/mnt/test') -> Path('mnt/test')\n\t\t\"\"\"\n\t\tif self.mountpoint is not None:\n\t\t\treturn self.mountpoint.relative_to(self.mountpoint.anchor)\n\n\t\traise ValueError('Mountpoint is not specified')\n\n\tdef is_root(self, relative_mountpoint: Optional[Path] = None) -> bool:\n\t\tif self.mountpoint:\n\t\t\tif relative_mountpoint is not None:\n\t\t\t\treturn self.mountpoint.relative_to(relative_mountpoint) == Path('.')\n\t\t\treturn self.mountpoint == Path('/')\n\t\treturn False\n\n\tdef __dump__(self) -> Dict[str, Any]:\n\t\treturn {\n\t\t\t'name': str(self.name),\n\t\t\t'mountpoint': str(self.mountpoint),\n\t\t\t'compress': self.compress,\n\t\t\t'nodatacow': self.nodatacow\n\t\t}\n\n\tdef as_json(self) -> Dict[str, Any]:\n\t\treturn {\n\t\t\t'name': str(self.name),\n\t\t\t'mountpoint': str(self.mountpoint),\n\t\t\t'compress': self.compress,\n\t\t\t'nodatacow': self.nodatacow\n\t\t}\n\n\nclass DeviceGeometry:\n\tdef __init__(self, geometry: Geometry, sector_size: Size):\n\t\tself._geometry = geometry\n\t\tself._sector_size = sector_size\n\n\t@property\n\tdef start(self) -> int:\n\t\treturn self._geometry.start\n\n\t@property\n\tdef end(self) -> int:\n\t\treturn self._geometry.end\n\n\tdef get_length(self, unit: Unit = Unit.sectors) -> int:\n\t\treturn self._geometry.getLength(unit.name)\n\n\tdef as_json(self) -> Dict[str, Any]:\n\t\treturn {\n\t\t\t'Sector size': self._sector_size.value,\n\t\t\t'Start sector': self._geometry.start,\n\t\t\t'End sector': self._geometry.end,\n\t\t\t'Length': self._geometry.getLength()\n\t\t}\n\n\n@dataclass\nclass BDevice:\n\tdisk: Disk\n\tdevice_info: _DeviceInfo\n\tpartition_infos: List[_PartitionInfo]\n\n\tdef __hash__(self):\n\t\treturn hash(self.disk.device.path)\n\n\nclass PartitionType(Enum):\n\tBoot = 'boot'\n\tPrimary = 'primary'\n\n\t@classmethod\n\tdef get_type_from_code(cls, code: int) -> PartitionType:\n\t\tif code == parted.PARTITION_NORMAL:\n\t\t\treturn PartitionType.Primary\n\n\t\traise DiskError(f'Partition code not supported: {code}')\n\n\tdef get_partition_code(self) -> Optional[int]:\n\t\tif self == PartitionType.Primary:\n\t\t\treturn parted.PARTITION_NORMAL\n\t\telif self == PartitionType.Boot:\n\t\t\treturn parted.PARTITION_BOOT\n\t\treturn None\n\n\nclass PartitionFlag(Enum):\n\tBoot = 1\n\n\nclass FilesystemType(Enum):\n\tBtrfs = 'btrfs'\n\tExt2 = 'ext2'\n\tExt3 = 'ext3'\n\tExt4 = 'ext4'\n\tF2fs = 'f2fs'\n\tFat16 = 'fat16'\n\tFat32 = 'fat32'\n\tNtfs = 'ntfs'\n\tReiserfs = 'reiserfs'\n\tXfs = 'xfs'\n\n\t# this is not a FS known to parted, so be careful\n\t# with the usage from this enum\n\tCrypto_luks = 'crypto_LUKS'\n\n\tdef is_crypto(self) -> bool:\n\t\treturn self == FilesystemType.Crypto_luks\n\n\t@property\n\tdef fs_type_mount(self) -> str:\n\t\tmatch self:\n\t\t\tcase FilesystemType.Ntfs: return 'ntfs3'\n\t\t\tcase FilesystemType.Fat32: return 'vfat'\n\t\t\tcase _: return self.value # type: ignore\n\n\t@property\n\tdef installation_pkg(self) -> Optional[str]:\n\t\tmatch self:\n\t\t\tcase FilesystemType.Btrfs: return 'btrfs-progs'\n\t\t\tcase FilesystemType.Xfs: return 'xfsprogs'\n\t\t\tcase FilesystemType.F2fs: return 'f2fs-tools'\n\t\t\tcase _: return None\n\n\t@property\n\tdef installation_module(self) -> Optional[str]:\n\t\tmatch self:\n\t\t\tcase FilesystemType.Btrfs: return 'btrfs'\n\t\t\tcase _: return None\n\n\t@property\n\tdef installation_binary(self) -> Optional[str]:\n\t\tmatch self:\n\t\t\tcase FilesystemType.Btrfs: return '/usr/bin/btrfs'\n\t\t\tcase _: return None\n\n\t@property\n\tdef 
installation_hooks(self) -> Optional[str]:\n\t\tmatch self:\n\t\t\tcase FilesystemType.Btrfs: return 'btrfs'\n\t\t\tcase _: return None\n\n\nclass ModificationStatus(Enum):\n\tExist = 'existing'\n\tModify = 'modify'\n\tDelete = 'delete'\n\tCreate = 'create'\n\n\n@dataclass\nclass PartitionModification:\n\tstatus: ModificationStatus\n\ttype: PartitionType\n\tstart: Size\n\tlength: Size\n\tfs_type: FilesystemType\n\tmountpoint: Optional[Path] = None\n\tmount_options: List[str] = field(default_factory=list)\n\tflags: List[PartitionFlag] = field(default_factory=list)\n\tbtrfs_subvols: List[SubvolumeModification] = field(default_factory=list)\n\n\t# only set if the device was created or exists\n\tdev_path: Optional[Path] = None\n\tpartuuid: Optional[str] = None\n\tuuid: Optional[str] = None\n\n\tdef __post_init__(self):\n\t\t# needed to use the object as a dictionary key due to hash func\n\t\tif not hasattr(self, '_obj_id'):\n\t\t\tself._obj_id = uuid.uuid4()\n\n\t\tif self.is_exists_or_modify() and not self.dev_path:\n\t\t\traise ValueError('If partition marked as existing a path must be set')\n\n\tdef __hash__(self):\n\t\treturn hash(self._obj_id)\n\n\t@property\n\tdef obj_id(self) -> str:\n\t\tif hasattr(self, '_obj_id'):\n\t\t\treturn str(self._obj_id)\n\t\treturn ''\n\n\t@property\n\tdef safe_dev_path(self) -> Path:\n\t\tif self.dev_path is None:\n\t\t\traise ValueError('Device path was not set')\n\t\treturn self.dev_path\n\n\t@classmethod\n\tdef from_existing_partition(cls, partition_info: _PartitionInfo) -> PartitionModification:\n\t\tif partition_info.btrfs_subvol_infos:\n\t\t\tmountpoint = None\n\t\t\tsubvol_mods = []\n\t\t\tfor info in partition_info.btrfs_subvol_infos:\n\t\t\t\tsubvol_mods.append(\n\t\t\t\t\tSubvolumeModification.from_existing_subvol_info(info)\n\t\t\t\t)\n\t\telse:\n\t\t\tmountpoint = partition_info.mountpoints[0] if partition_info.mountpoints else None\n\t\t\tsubvol_mods = []\n\n\t\treturn PartitionModification(\n\t\t\tstatus=ModificationStatus.Exist,\n\t\t\ttype=partition_info.type,\n\t\t\tstart=partition_info.start,\n\t\t\tlength=partition_info.length,\n\t\t\tfs_type=partition_info.fs_type,\n\t\t\tdev_path=partition_info.path,\n\t\t\tflags=partition_info.flags,\n\t\t\tmountpoint=mountpoint,\n\t\t\tbtrfs_subvols=subvol_mods\n\t\t)\n\n\t@property\n\tdef relative_mountpoint(self) -> Path:\n\t\t\"\"\"\n\t\tWill return the relative path based on the anchor\n\t\te.g. 
Path('/mnt/test') -> Path('mnt/test')\n\t\t\"\"\"\n\t\tif self.mountpoint:\n\t\t\treturn self.mountpoint.relative_to(self.mountpoint.anchor)\n\n\t\traise ValueError('Mountpoint is not specified')\n\n\tdef is_boot(self) -> bool:\n\t\treturn PartitionFlag.Boot in self.flags\n\n\tdef is_root(self, relative_mountpoint: Optional[Path] = None) -> bool:\n\t\tif relative_mountpoint is not None and self.mountpoint is not None:\n\t\t\treturn self.mountpoint.relative_to(relative_mountpoint) == Path('.')\n\t\telif self.mountpoint is not None:\n\t\t\treturn Path('/') == self.mountpoint\n\t\telse:\n\t\t\tfor subvol in self.btrfs_subvols:\n\t\t\t\tif subvol.is_root(relative_mountpoint):\n\t\t\t\t\treturn True\n\n\t\treturn False\n\n\tdef is_modify(self) -> bool:\n\t\treturn self.status == ModificationStatus.Modify\n\n\tdef exists(self) -> bool:\n\t\treturn self.status == ModificationStatus.Exist\n\n\tdef is_exists_or_modify(self) -> bool:\n\t\treturn self.status in [ModificationStatus.Exist, ModificationStatus.Modify]\n\n\t@property\n\tdef mapper_name(self) -> Optional[str]:\n\t\tif self.dev_path:\n\t\t\treturn f'{storage.get(\"ENC_IDENTIFIER\", \"ai\")}{self.dev_path.name}'\n\t\treturn None\n\n\tdef set_flag(self, flag: PartitionFlag):\n\t\tif flag not in self.flags:\n\t\t\tself.flags.append(flag)\n\n\tdef invert_flag(self, flag: PartitionFlag):\n\t\tif flag in self.flags:\n\t\t\tself.flags = [f for f in self.flags if f != flag]\n\t\telse:\n\t\t\tself.set_flag(flag)\n\n\tdef json(self) -> Dict[str, Any]:\n\t\t\"\"\"\n\t\tCalled for configuration settings\n\t\t\"\"\"\n\t\treturn {\n\t\t\t'obj_id': self.obj_id,\n\t\t\t'status': self.status.value,\n\t\t\t'type': self.type.value,\n\t\t\t'start': self.start.__dump__(),\n\t\t\t'length': self.length.__dump__(),\n\t\t\t'fs_type': self.fs_type.value,\n\t\t\t'mountpoint': str(self.mountpoint) if self.mountpoint else None,\n\t\t\t'mount_options': self.mount_options,\n\t\t\t'flags': [f.name for f in self.flags],\n\t\t\t'btrfs': [vol.__dump__() for vol in self.btrfs_subvols]\n\t\t}\n\n\tdef as_json(self) -> Dict[str, Any]:\n\t\t\"\"\"\n\t\tCalled for displaying data in table format\n\t\t\"\"\"\n\t\tinfo = {\n\t\t\t'Status': self.status.value,\n\t\t\t'Device': str(self.dev_path) if self.dev_path else '',\n\t\t\t'Type': self.type.value,\n\t\t\t'Start': self.start.format_size(Unit.MiB),\n\t\t\t'Length': self.length.format_size(Unit.MiB),\n\t\t\t'FS type': self.fs_type.value,\n\t\t\t'Mountpoint': self.mountpoint if self.mountpoint else '',\n\t\t\t'Mount options': ', '.join(self.mount_options),\n\t\t\t'Flags': ', '.join([f.name for f in self.flags]),\n\t\t}\n\n\t\tif self.btrfs_subvols:\n\t\t\tinfo['Btrfs vol.'] = f'{len(self.btrfs_subvols)} subvolumes'\n\n\t\treturn info\n\n\n@dataclass\nclass DeviceModification:\n\tdevice: BDevice\n\twipe: bool\n\tpartitions: List[PartitionModification] = field(default_factory=list)\n\n\t@property\n\tdef device_path(self) -> Path:\n\t\treturn self.device.device_info.path\n\n\tdef add_partition(self, partition: PartitionModification):\n\t\tself.partitions.append(partition)\n\n\tdef get_boot_partition(self) -> Optional[PartitionModification]:\n\t\tliltered = filter(lambda x: x.is_boot(), self.partitions)\n\t\treturn next(liltered, None)\n\n\tdef get_root_partition(self, relative_path: Optional[Path]) -> Optional[PartitionModification]:\n\t\tfiltered = filter(lambda x: x.is_root(relative_path), self.partitions)\n\t\treturn next(filtered, None)\n\n\tdef __dump__(self) -> Dict[str, Any]:\n\t\t\"\"\"\n\t\tCalled when generating configuration 
files\n\t\t\"\"\"\n\t\treturn {\n\t\t\t'device': str(self.device.device_info.path),\n\t\t\t'wipe': self.wipe,\n\t\t\t'partitions': [p.json() for p in self.partitions]\n\t\t}\n\n\nclass EncryptionType(Enum):\n\tNoEncryption = \"no_encryption\"\n\tPartition = \"partition\"\n\n\t@classmethod\n\tdef _encryption_type_mapper(cls) -> Dict[str, 'EncryptionType']:\n\t\treturn {\n\t\t\t# str(_('Full disk encryption')): EncryptionType.FullDiskEncryption,\n\t\t\tstr(_('Partition encryption')): EncryptionType.Partition\n\t\t}\n\n\t@classmethod\n\tdef text_to_type(cls, text: str) -> 'EncryptionType':\n\t\tmapping = cls._encryption_type_mapper()\n\t\treturn mapping[text]\n\n\t@classmethod\n\tdef type_to_text(cls, type_: 'EncryptionType') -> str:\n\t\tmapping = cls._encryption_type_mapper()\n\t\ttype_to_text = {type_: text for text, type_ in mapping.items()}\n\t\treturn type_to_text[type_]\n\n\n@dataclass\nclass DiskEncryption:\n\tencryption_type: EncryptionType = EncryptionType.Partition\n\tencryption_password: str = ''\n\tpartitions: List[PartitionModification] = field(default_factory=list)\n\thsm_device: Optional[Fido2Device] = None\n\n\tdef should_generate_encryption_file(self, part_mod: PartitionModification) -> bool:\n\t\treturn part_mod in self.partitions and part_mod.mountpoint != Path('/')\n\n\tdef json(self) -> Dict[str, Any]:\n\t\tobj: Dict[str, Any] = {\n\t\t\t'encryption_type': self.encryption_type.value,\n\t\t\t'partitions': [p.obj_id for p in self.partitions]\n\t\t}\n\n\t\tif self.hsm_device:\n\t\t\tobj['hsm_device'] = self.hsm_device.json()\n\n\t\treturn obj\n\n\t@classmethod\n\tdef parse_arg(\n\t\tcls,\n\t\tdisk_config: DiskLayoutConfiguration,\n\t\targ: Dict[str, Any],\n\t\tpassword: str = ''\n\t) -> 'DiskEncryption':\n\t\tenc_partitions = []\n\t\tfor mod in disk_config.device_modifications:\n\t\t\tfor part in mod.partitions:\n\t\t\t\tif part.obj_id in arg.get('partitions', []):\n\t\t\t\t\tenc_partitions.append(part)\n\n\t\tenc = DiskEncryption(\n\t\t\tEncryptionType(arg['encryption_type']),\n\t\t\tpassword,\n\t\t\tenc_partitions\n\t\t)\n\n\t\tif hsm := arg.get('hsm_device', None):\n\t\t\tenc.hsm_device = Fido2Device.parse_arg(hsm)\n\n\t\treturn enc\n\n\n@dataclass\nclass Fido2Device:\n\tpath: Path\n\tmanufacturer: str\n\tproduct: str\n\n\tdef json(self) -> Dict[str, str]:\n\t\treturn {\n\t\t\t'path': str(self.path),\n\t\t\t'manufacturer': self.manufacturer,\n\t\t\t'product': self.product\n\t\t}\n\n\t@classmethod\n\tdef parse_arg(cls, arg: Dict[str, str]) -> 'Fido2Device':\n\t\treturn Fido2Device(\n\t\t\tPath(arg['path']),\n\t\t\targ['manufacturer'],\n\t\t\targ['product']\n\t\t)\n\n\n@dataclass\nclass LsblkInfo:\n\tname: str = ''\n\tpath: Path = Path()\n\tpkname: str = ''\n\tsize: Size = Size(0, Unit.B)\n\tlog_sec: int = 0\n\tpttype: str = ''\n\tptuuid: str = ''\n\trota: bool = False\n\ttran: Optional[str] = None\n\tpartuuid: Optional[str] = None\n\tuuid: Optional[str] = None\n\tfstype: Optional[str] = None\n\tfsver: Optional[str] = None\n\tfsavail: Optional[str] = None\n\tfsuse_percentage: Optional[str] = None\n\ttype: Optional[str] = None\n\tmountpoint: Optional[Path] = None\n\tmountpoints: List[Path] = field(default_factory=list)\n\tfsroots: List[Path] = field(default_factory=list)\n\tchildren: List[LsblkInfo] = field(default_factory=list)\n\n\tdef json(self) -> Dict[str, Any]:\n\t\treturn {\n\t\t\t'name': self.name,\n\t\t\t'path': str(self.path),\n\t\t\t'pkname': self.pkname,\n\t\t\t'size': self.size.format_size(Unit.MiB),\n\t\t\t'log_sec': self.log_sec,\n\t\t\t'pttype': 
self.pttype,\n\t\t\t'ptuuid': self.ptuuid,\n\t\t\t'rota': self.rota,\n\t\t\t'tran': self.tran,\n\t\t\t'partuuid': self.partuuid,\n\t\t\t'uuid': self.uuid,\n\t\t\t'fstype': self.fstype,\n\t\t\t'fsver': self.fsver,\n\t\t\t'fsavail': self.fsavail,\n\t\t\t'fsuse_percentage': self.fsuse_percentage,\n\t\t\t'type': self.type,\n\t\t\t'mountpoint': self.mountpoint,\n\t\t\t'mountpoints': [str(m) for m in self.mountpoints],\n\t\t\t'fsroots': [str(r) for r in self.fsroots],\n\t\t\t'children': [c.json() for c in self.children]\n\t\t}\n\n\t@property\n\tdef btrfs_subvol_info(self) -> Dict[Path, Path]:\n\t\t\"\"\"\n\t\tIt is assumed that lsblk will contain the fields as\n\n\t\t\"mountpoints\": [\"/mnt/archinstall/log\", \"/mnt/archinstall/home\", \"/mnt/archinstall\", ...]\n\t\t\"fsroots\": [\"/@log\", \"/@home\", \"/@\"...]\n\n\t\twe'll thereby map the fsroot, which are the mounted filesystem roots\n\t\tto the corresponding mountpoints\n\t\t\"\"\"\n\t\treturn dict(zip(self.fsroots, self.mountpoints))\n\n\t@classmethod\n\tdef exclude(cls) -> List[str]:\n\t\treturn ['children']\n\n\t@classmethod\n\tdef fields(cls) -> List[str]:\n\t\treturn [f.name for f in dataclasses.fields(LsblkInfo) if f.name not in cls.exclude()]\n\n\t@classmethod\n\tdef from_json(cls, blockdevice: Dict[str, Any]) -> LsblkInfo:\n\t\tinfo = cls()\n\n\t\tfor f in cls.fields():\n\t\t\tlsblk_field = _clean_field(f, CleanType.Blockdevice)\n\t\t\tdata_field = _clean_field(f, CleanType.Dataclass)\n\n\t\t\tval: Any = None\n\t\t\tif isinstance(getattr(info, data_field), Path):\n\t\t\t\tval = Path(blockdevice[lsblk_field])\n\t\t\telif isinstance(getattr(info, data_field), Size):\n\t\t\t\tval = Size(blockdevice[lsblk_field], Unit.B)\n\t\t\telse:\n\t\t\t\tval = blockdevice[lsblk_field]\n\n\t\t\tsetattr(info, data_field, val)\n\n\t\tinfo.children = [LsblkInfo.from_json(child) for child in blockdevice.get('children', [])]\n\n\t\t# sometimes lsblk returns 'mountpoints': [null]\n\t\tinfo.mountpoints = [Path(mnt) for mnt in info.mountpoints if mnt]\n\n\t\tfs_roots = []\n\t\tfor r in info.fsroots:\n\t\t\tif r:\n\t\t\t\tpath = Path(r)\n\t\t\t\t# store the fsroot entries without the leading /\n\t\t\t\tfs_roots.append(path.relative_to(path.anchor))\n\t\tinfo.fsroots = fs_roots\n\n\t\treturn info\n\n\nclass CleanType(Enum):\n\tBlockdevice = auto()\n\tDataclass = auto()\n\tLsblk = auto()\n\n\ndef _clean_field(name: str, clean_type: CleanType) -> str:\n\tmatch clean_type:\n\t\tcase CleanType.Blockdevice:\n\t\t\treturn name.replace('_percentage', '%').replace('_', '-')\n\t\tcase CleanType.Dataclass:\n\t\t\treturn name.lower().replace('-', '_').replace('%', '_percentage')\n\t\tcase CleanType.Lsblk:\n\t\t\treturn name.replace('_percentage', '%').replace('_', '-')\n\n\ndef _fetch_lsblk_info(dev_path: Optional[Union[Path, str]] = None, retry: int = 3) -> List[LsblkInfo]:\n\tfields = [_clean_field(f, CleanType.Lsblk) for f in LsblkInfo.fields()]\n\tlsblk_fields = ','.join(fields)\n\n\tif not dev_path:\n\t\tdev_path = ''\n\n\tif retry == 0:\n\t\tretry = 1\n\n\tfor retry_attempt in range(retry):\n\t\ttry:\n\t\t\tresult = SysCommand(f'lsblk --json -b -o+{lsblk_fields} {dev_path}')\n\t\t\tbreak\n\t\texcept SysCallError as error:\n\t\t\t# Get the output minus the message/info from lsblk if it returns a non-zero exit code.\n\t\t\tif error.worker:\n\t\t\t\terr = error.worker.decode('UTF-8')\n\t\t\t\tlog(f'Error calling lsblk: {err}', level=logging.DEBUG)\n\t\t\telse:\n\t\t\t\traise error\n\n\t\t\tif retry_attempt == retry - 1:\n\t\t\t\traise 
error\n\n\t\t\ttime.sleep(1)\n\n\ttry:\n\t\tif decoded := result.decode('utf-8'):\n\t\t\tblock_devices = json.loads(decoded)\n\t\t\tblockdevices = block_devices['blockdevices']\n\t\t\treturn [LsblkInfo.from_json(device) for device in blockdevices]\n\texcept json.decoder.JSONDecodeError as err:\n\t\tlog(f\"Could not decode lsblk JSON: {result}\", fg=\"red\", level=logging.ERROR)\n\t\traise err\n\n\traise DiskError(f'Failed to read disk \"{dev_path}\" with lsblk')\n\ndef get_lsblk_info(dev_path: Union[Path, str]) -> LsblkInfo:\n\tif infos := _fetch_lsblk_info(dev_path):\n\t\treturn infos[0]\n\n\traise DiskError(f'lsblk failed to retrieve information for \"{dev_path}\"')\n\n\ndef get_all_lsblk_info() -> List[LsblkInfo]:\n\treturn _fetch_lsblk_info()\n\n\ndef get_lsblk_by_mountpoint(mountpoint: Path, as_prefix: bool = False) -> List[LsblkInfo]:\n\tdef _check(infos: List[LsblkInfo]) -> List[LsblkInfo]:\n\t\tdevices = []\n\t\tfor entry in infos:\n\t\t\tif as_prefix:\n\t\t\t\tmatches = [m for m in entry.mountpoints if str(m).startswith(str(mountpoint))]\n\t\t\t\tif matches:\n\t\t\t\t\tdevices += [entry]\n\t\t\telif mountpoint in entry.mountpoints:\n\t\t\t\tdevices += [entry]\n\n\t\t\tif len(entry.children) > 0:\n\t\t\t\tif len(match := _check(entry.children)) > 0:\n\t\t\t\t\tdevices += match\n\n\t\treturn devices\n\n\tall_info = get_all_lsblk_info()\n\treturn _check(all_info)\n",
"path": "archinstall/lib/disk/device_model.py"
}
] | [
{
"content": "from __future__ import annotations\n\nimport dataclasses\nimport json\nimport logging\nimport math\nimport time\nimport uuid\nfrom dataclasses import dataclass, field\nfrom enum import Enum\nfrom enum import auto\nfrom pathlib import Path\nfrom typing import Optional, List, Dict, TYPE_CHECKING, Any\nfrom typing import Union\n\nimport parted # type: ignore\nfrom parted import Disk, Geometry, Partition\n\nfrom ..exceptions import DiskError, SysCallError\nfrom ..general import SysCommand\nfrom ..output import log\nfrom ..storage import storage\n\nif TYPE_CHECKING:\n\t_: Any\n\n\nclass DiskLayoutType(Enum):\n\tDefault = 'default_layout'\n\tManual = 'manual_partitioning'\n\tPre_mount = 'pre_mounted_config'\n\n\tdef display_msg(self) -> str:\n\t\tmatch self:\n\t\t\tcase DiskLayoutType.Default: return str(_('Use a best-effort default partition layout'))\n\t\t\tcase DiskLayoutType.Manual: return str(_('Manual Partitioning'))\n\t\t\tcase DiskLayoutType.Pre_mount: return str(_('Pre-mounted configuration'))\n\n\n@dataclass\nclass DiskLayoutConfiguration:\n\tconfig_type: DiskLayoutType\n\tdevice_modifications: List[DeviceModification] = field(default_factory=list)\n\t# used for pre-mounted config\n\trelative_mountpoint: Optional[Path] = None\n\n\tdef __post_init__(self):\n\t\tif self.config_type == DiskLayoutType.Pre_mount and self.relative_mountpoint is None:\n\t\t\traise ValueError('Must set a relative mountpoint when layout type is pre-mount\"')\n\n\tdef __dump__(self) -> Dict[str, Any]:\n\t\treturn {\n\t\t\t'config_type': self.config_type.value,\n\t\t\t'device_modifications': [mod.__dump__() for mod in self.device_modifications]\n\t\t}\n\n\t@classmethod\n\tdef parse_arg(cls, disk_config: Dict[str, List[Dict[str, Any]]]) -> Optional[DiskLayoutConfiguration]:\n\t\tfrom .device_handler import device_handler\n\n\t\tdevice_modifications: List[DeviceModification] = []\n\t\tconfig_type = disk_config.get('config_type', None)\n\n\t\tif not config_type:\n\t\t\traise ValueError('Missing disk layout configuration: config_type')\n\n\t\tconfig = DiskLayoutConfiguration(\n\t\t\tconfig_type=DiskLayoutType(config_type),\n\t\t\tdevice_modifications=device_modifications\n\t\t)\n\n\t\tfor entry in disk_config.get('device_modifications', []):\n\t\t\tdevice_path = Path(entry.get('device', None)) if entry.get('device', None) else None\n\n\t\t\tif not device_path:\n\t\t\t\tcontinue\n\n\t\t\tdevice = device_handler.get_device(device_path)\n\n\t\t\tif not device:\n\t\t\t\tcontinue\n\n\t\t\tdevice_modification = DeviceModification(\n\t\t\t\twipe=entry.get('wipe', False),\n\t\t\t\tdevice=device\n\t\t\t)\n\n\t\t\tdevice_partitions: List[PartitionModification] = []\n\n\t\t\tfor partition in entry.get('partitions', []):\n\t\t\t\tdevice_partition = PartitionModification(\n\t\t\t\t\tstatus=ModificationStatus(partition['status']),\n\t\t\t\t\tfs_type=FilesystemType(partition['fs_type']),\n\t\t\t\t\tstart=Size.parse_args(partition['start']),\n\t\t\t\t\tlength=Size.parse_args(partition['length']),\n\t\t\t\t\tmount_options=partition['mount_options'],\n\t\t\t\t\tmountpoint=Path(partition['mountpoint']) if partition['mountpoint'] else None,\n\t\t\t\t\ttype=PartitionType(partition['type']),\n\t\t\t\t\tflags=[PartitionFlag[f] for f in partition.get('flags', [])],\n\t\t\t\t\tbtrfs_subvols=SubvolumeModification.parse_args(partition.get('btrfs', [])),\n\t\t\t\t)\n\t\t\t\t# special 'invisible attr to internally identify the part mod\n\t\t\t\tsetattr(device_partition, '_obj_id', 
partition['obj_id'])\n\t\t\t\tdevice_partitions.append(device_partition)\n\n\t\t\tdevice_modification.partitions = device_partitions\n\t\t\tdevice_modifications.append(device_modification)\n\n\t\treturn config\n\n\nclass PartitionTable(Enum):\n\tGPT = 'gpt'\n\tMBR = 'msdos'\n\n\nclass Unit(Enum):\n\tB = 1 # byte\n\tkB = 1000**1 # kilobyte\n\tMB = 1000**2 # megabyte\n\tGB = 1000**3 # gigabyte\n\tTB = 1000**4 # terabyte\n\tPB = 1000**5 # petabyte\n\tEB = 1000**6 # exabyte\n\tZB = 1000**7 # zettabyte\n\tYB = 1000**8 # yottabyte\n\n\tKiB = 1024**1 \t# kibibyte\n\tMiB = 1024**2 \t# mebibyte\n\tGiB = 1024**3 \t# gibibyte\n\tTiB = 1024**4 \t# tebibyte\n\tPiB = 1024**5 \t# pebibyte\n\tEiB = 1024**6 \t# exbibyte\n\tZiB = 1024**7 \t# zebibyte\n\tYiB = 1024**8 \t# yobibyte\n\n\tsectors = 'sectors' # size in sector\n\n\tPercent = '%' \t# size in percentile\n\n\n@dataclass\nclass Size:\n\tvalue: int\n\tunit: Unit\n\tsector_size: Optional[Size] = None # only required when unit is sector\n\ttotal_size: Optional[Size] = None # required when operating on percentages\n\n\tdef __post_init__(self):\n\t\tif self.unit == Unit.sectors and self.sector_size is None:\n\t\t\traise ValueError('Sector size is required when unit is sectors')\n\t\telif self.unit == Unit.Percent:\n\t\t\tif self.value < 0 or self.value > 100:\n\t\t\t\traise ValueError('Percentage must be between 0 and 100')\n\t\t\telif self.total_size is None:\n\t\t\t\traise ValueError('Total size is required when unit is percentage')\n\n\t@property\n\tdef _total_size(self) -> Size:\n\t\t\"\"\"\n\t\tSave method to get the total size, mainly to satisfy mypy\n\t\tThis shouldn't happen as the Size object fails instantiation on missing total size\n\t\t\"\"\"\n\t\tif self.unit == Unit.Percent and self.total_size is None:\n\t\t\traise ValueError('Percent unit size must specify a total size')\n\t\treturn self.total_size # type: ignore\n\n\tdef __dump__(self) -> Dict[str, Any]:\n\t\treturn {\n\t\t\t'value': self.value,\n\t\t\t'unit': self.unit.name,\n\t\t\t'sector_size': self.sector_size.__dump__() if self.sector_size else None,\n\t\t\t'total_size': self._total_size.__dump__() if self._total_size else None\n\t\t}\n\n\t@classmethod\n\tdef parse_args(cls, size_arg: Dict[str, Any]) -> Size:\n\t\tsector_size = size_arg['sector_size']\n\t\ttotal_size = size_arg['total_size']\n\n\t\treturn Size(\n\t\t\tsize_arg['value'],\n\t\t\tUnit[size_arg['unit']],\n\t\t\tSize.parse_args(sector_size) if sector_size else None,\n\t\t\tSize.parse_args(total_size) if total_size else None\n\t\t)\n\n\tdef convert(\n\t\tself,\n\t\ttarget_unit: Unit,\n\t\tsector_size: Optional[Size] = None,\n\t\ttotal_size: Optional[Size] = None\n\t) -> Size:\n\t\tif target_unit == Unit.sectors and sector_size is None:\n\t\t\traise ValueError('If target has unit sector, a sector size must be provided')\n\n\t\t# not sure why we would ever wanna convert to percentages\n\t\tif target_unit == Unit.Percent and total_size is None:\n\t\t\traise ValueError('Missing paramter total size to be able to convert to percentage')\n\n\t\tif self.unit == target_unit:\n\t\t\treturn self\n\t\telif self.unit == Unit.Percent:\n\t\t\tamount = int(self._total_size._normalize() * (self.value / 100))\n\t\t\treturn Size(amount, Unit.B)\n\t\telif self.unit == Unit.sectors:\n\t\t\tnorm = self._normalize()\n\t\t\treturn Size(norm, Unit.B).convert(target_unit, sector_size)\n\t\telse:\n\t\t\tif target_unit == Unit.sectors and sector_size is not None:\n\t\t\t\tnorm = self._normalize()\n\t\t\t\tsectors = math.ceil(norm / 
sector_size.value)\n\t\t\t\treturn Size(sectors, Unit.sectors, sector_size)\n\t\t\telse:\n\t\t\t\tvalue = int(self._normalize() / target_unit.value) # type: ignore\n\t\t\t\treturn Size(value, target_unit)\n\n\tdef format_size(\n\t\tself,\n\t\ttarget_unit: Unit,\n\t\tsector_size: Optional[Size] = None\n\t) -> str:\n\t\tif self.unit == Unit.Percent:\n\t\t\treturn f'{self.value}%'\n\t\telse:\n\t\t\ttarget_size = self.convert(target_unit, sector_size)\n\t\t\treturn f'{target_size.value} {target_unit.name}'\n\n\tdef _normalize(self) -> int:\n\t\t\"\"\"\n\t\twill normalize the value of the unit to Byte\n\t\t\"\"\"\n\t\tif self.unit == Unit.Percent:\n\t\t\treturn self.convert(Unit.B).value\n\t\telif self.unit == Unit.sectors and self.sector_size is not None:\n\t\t\treturn self.value * self.sector_size._normalize()\n\t\treturn int(self.value * self.unit.value) # type: ignore\n\n\tdef __sub__(self, other: Size) -> Size:\n\t\tsrc_norm = self._normalize()\n\t\tdest_norm = other._normalize()\n\t\treturn Size(abs(src_norm - dest_norm), Unit.B)\n\n\tdef __lt__(self, other):\n\t\treturn self._normalize() < other._normalize()\n\n\tdef __le__(self, other):\n\t\treturn self._normalize() <= other._normalize()\n\n\tdef __eq__(self, other):\n\t\treturn self._normalize() == other._normalize()\n\n\tdef __ne__(self, other):\n\t\treturn self._normalize() != other._normalize()\n\n\tdef __gt__(self, other):\n\t\treturn self._normalize() > other._normalize()\n\n\tdef __ge__(self, other):\n\t\treturn self._normalize() >= other._normalize()\n\n\n@dataclass\nclass _BtrfsSubvolumeInfo:\n\tname: Path\n\tmountpoint: Optional[Path]\n\n\n@dataclass\nclass _PartitionInfo:\n\tpartition: Partition\n\tname: str\n\ttype: PartitionType\n\tfs_type: FilesystemType\n\tpath: Path\n\tstart: Size\n\tlength: Size\n\tflags: List[PartitionFlag]\n\tpartuuid: str\n\tdisk: Disk\n\tmountpoints: List[Path]\n\tbtrfs_subvol_infos: List[_BtrfsSubvolumeInfo] = field(default_factory=list)\n\n\tdef as_json(self) -> Dict[str, Any]:\n\t\tinfo = {\n\t\t\t'Name': self.name,\n\t\t\t'Type': self.type.value,\n\t\t\t'Filesystem': self.fs_type.value if self.fs_type else str(_('Unknown')),\n\t\t\t'Path': str(self.path),\n\t\t\t'Start': self.start.format_size(Unit.MiB),\n\t\t\t'Length': self.length.format_size(Unit.MiB),\n\t\t\t'Flags': ', '.join([f.name for f in self.flags])\n\t\t}\n\n\t\tif self.btrfs_subvol_infos:\n\t\t\tinfo['Btrfs vol.'] = f'{len(self.btrfs_subvol_infos)} subvolumes'\n\n\t\treturn info\n\n\t@classmethod\n\tdef from_partition(\n\t\tcls,\n\t\tpartition: Partition,\n\t\tfs_type: FilesystemType,\n\t\tpartuuid: str,\n\t\tmountpoints: List[Path],\n\t\tbtrfs_subvol_infos: List[_BtrfsSubvolumeInfo] = []\n\t) -> _PartitionInfo:\n\t\tpartition_type = PartitionType.get_type_from_code(partition.type)\n\t\tflags = [f for f in PartitionFlag if partition.getFlag(f.value)]\n\n\t\tstart = Size(\n\t\t\tpartition.geometry.start,\n\t\t\tUnit.sectors,\n\t\t\tSize(partition.disk.device.sectorSize, Unit.B)\n\t\t)\n\n\t\tlength = Size(int(partition.getLength(unit='B')), Unit.B)\n\n\t\treturn _PartitionInfo(\n\t\t\tpartition=partition,\n\t\t\tname=partition.get_name(),\n\t\t\ttype=partition_type,\n\t\t\tfs_type=fs_type,\n\t\t\tpath=partition.path,\n\t\t\tstart=start,\n\t\t\tlength=length,\n\t\t\tflags=flags,\n\t\t\tpartuuid=partuuid,\n\t\t\tdisk=partition.disk,\n\t\t\tmountpoints=mountpoints,\n\t\t\tbtrfs_subvol_infos=btrfs_subvol_infos\n\t\t)\n\n\n@dataclass\nclass _DeviceInfo:\n\tmodel: str\n\tpath: Path\n\ttype: str\n\ttotal_size: Size\n\tfree_space_regions: 
List[DeviceGeometry]\n\tsector_size: Size\n\tread_only: bool\n\tdirty: bool\n\n\tdef as_json(self) -> Dict[str, Any]:\n\t\ttotal_free_space = sum([region.get_length(unit=Unit.MiB) for region in self.free_space_regions])\n\t\treturn {\n\t\t\t'Model': self.model,\n\t\t\t'Path': str(self.path),\n\t\t\t'Type': self.type,\n\t\t\t'Size': self.total_size.format_size(Unit.MiB),\n\t\t\t'Free space': int(total_free_space),\n\t\t\t'Sector size': self.sector_size.value,\n\t\t\t'Read only': self.read_only\n\t\t}\n\n\t@classmethod\n\tdef from_disk(cls, disk: Disk) -> _DeviceInfo:\n\t\tdevice = disk.device\n\t\tdevice_type = parted.devices[device.type]\n\n\t\tsector_size = Size(device.sectorSize, Unit.B)\n\t\tfree_space = [DeviceGeometry(g, sector_size) for g in disk.getFreeSpaceRegions()]\n\n\t\treturn _DeviceInfo(\n\t\t\tmodel=device.model.strip(),\n\t\t\tpath=Path(device.path),\n\t\t\ttype=device_type,\n\t\t\tsector_size=sector_size,\n\t\t\ttotal_size=Size(int(device.getLength(unit='B')), Unit.B),\n\t\t\tfree_space_regions=free_space,\n\t\t\tread_only=device.readOnly,\n\t\t\tdirty=device.dirty\n\t\t)\n\n\n@dataclass\nclass SubvolumeModification:\n\tname: Path\n\tmountpoint: Optional[Path] = None\n\tcompress: bool = False\n\tnodatacow: bool = False\n\n\t@classmethod\n\tdef from_existing_subvol_info(cls, info: _BtrfsSubvolumeInfo) -> SubvolumeModification:\n\t\treturn SubvolumeModification(info.name, mountpoint=info.mountpoint)\n\n\t@classmethod\n\tdef parse_args(cls, subvol_args: List[Dict[str, Any]]) -> List[SubvolumeModification]:\n\t\tmods = []\n\t\tfor entry in subvol_args:\n\t\t\tif not entry.get('name', None) or not entry.get('mountpoint', None):\n\t\t\t\tlog(f'Subvolume arg is missing name: {entry}', level=logging.DEBUG)\n\t\t\t\tcontinue\n\n\t\t\tmountpoint = Path(entry['mountpoint']) if entry['mountpoint'] else None\n\n\t\t\tmods.append(\n\t\t\t\tSubvolumeModification(\n\t\t\t\t\tentry['name'],\n\t\t\t\t\tmountpoint,\n\t\t\t\t\tentry.get('compress', False),\n\t\t\t\t\tentry.get('nodatacow', False)\n\t\t\t\t)\n\t\t\t)\n\n\t\treturn mods\n\n\t@property\n\tdef mount_options(self) -> List[str]:\n\t\toptions = []\n\t\toptions += ['compress'] if self.compress else []\n\t\toptions += ['nodatacow'] if self.nodatacow else []\n\t\treturn options\n\n\t@property\n\tdef relative_mountpoint(self) -> Path:\n\t\t\"\"\"\n\t\tWill return the relative path based on the anchor\n\t\te.g. 
Path('/mnt/test') -> Path('mnt/test')\n\t\t\"\"\"\n\t\tif self.mountpoint is not None:\n\t\t\treturn self.mountpoint.relative_to(self.mountpoint.anchor)\n\n\t\traise ValueError('Mountpoint is not specified')\n\n\tdef is_root(self, relative_mountpoint: Optional[Path] = None) -> bool:\n\t\tif self.mountpoint:\n\t\t\tif relative_mountpoint is not None:\n\t\t\t\treturn self.mountpoint.relative_to(relative_mountpoint) == Path('.')\n\t\t\treturn self.mountpoint == Path('/')\n\t\treturn False\n\n\tdef __dump__(self) -> Dict[str, Any]:\n\t\treturn {\n\t\t\t'name': str(self.name),\n\t\t\t'mountpoint': str(self.mountpoint),\n\t\t\t'compress': self.compress,\n\t\t\t'nodatacow': self.nodatacow\n\t\t}\n\n\tdef as_json(self) -> Dict[str, Any]:\n\t\treturn {\n\t\t\t'name': str(self.name),\n\t\t\t'mountpoint': str(self.mountpoint),\n\t\t\t'compress': self.compress,\n\t\t\t'nodatacow': self.nodatacow\n\t\t}\n\n\nclass DeviceGeometry:\n\tdef __init__(self, geometry: Geometry, sector_size: Size):\n\t\tself._geometry = geometry\n\t\tself._sector_size = sector_size\n\n\t@property\n\tdef start(self) -> int:\n\t\treturn self._geometry.start\n\n\t@property\n\tdef end(self) -> int:\n\t\treturn self._geometry.end\n\n\tdef get_length(self, unit: Unit = Unit.sectors) -> int:\n\t\treturn self._geometry.getLength(unit.name)\n\n\tdef as_json(self) -> Dict[str, Any]:\n\t\treturn {\n\t\t\t'Sector size': self._sector_size.value,\n\t\t\t'Start sector': self._geometry.start,\n\t\t\t'End sector': self._geometry.end,\n\t\t\t'Length': self._geometry.getLength()\n\t\t}\n\n\n@dataclass\nclass BDevice:\n\tdisk: Disk\n\tdevice_info: _DeviceInfo\n\tpartition_infos: List[_PartitionInfo]\n\n\tdef __hash__(self):\n\t\treturn hash(self.disk.device.path)\n\n\nclass PartitionType(Enum):\n\tBoot = 'boot'\n\tPrimary = 'primary'\n\n\t@classmethod\n\tdef get_type_from_code(cls, code: int) -> PartitionType:\n\t\tif code == parted.PARTITION_NORMAL:\n\t\t\treturn PartitionType.Primary\n\n\t\traise DiskError(f'Partition code not supported: {code}')\n\n\tdef get_partition_code(self) -> Optional[int]:\n\t\tif self == PartitionType.Primary:\n\t\t\treturn parted.PARTITION_NORMAL\n\t\telif self == PartitionType.Boot:\n\t\t\treturn parted.PARTITION_BOOT\n\t\treturn None\n\n\nclass PartitionFlag(Enum):\n\tBoot = 1\n\n\nclass FilesystemType(Enum):\n\tBtrfs = 'btrfs'\n\tExt2 = 'ext2'\n\tExt3 = 'ext3'\n\tExt4 = 'ext4'\n\tF2fs = 'f2fs'\n\tFat16 = 'fat16'\n\tFat32 = 'fat32'\n\tNtfs = 'ntfs'\n\tReiserfs = 'reiserfs'\n\tXfs = 'xfs'\n\n\t# this is not a FS known to parted, so be careful\n\t# with the usage from this enum\n\tCrypto_luks = 'crypto_LUKS'\n\n\tdef is_crypto(self) -> bool:\n\t\treturn self == FilesystemType.Crypto_luks\n\n\t@property\n\tdef fs_type_mount(self) -> str:\n\t\tmatch self:\n\t\t\tcase FilesystemType.Ntfs: return 'ntfs3'\n\t\t\tcase FilesystemType.Fat32: return 'vfat'\n\t\t\tcase _: return self.value # type: ignore\n\n\t@property\n\tdef installation_pkg(self) -> Optional[str]:\n\t\tmatch self:\n\t\t\tcase FilesystemType.Btrfs: return 'btrfs-progs'\n\t\t\tcase FilesystemType.Xfs: return 'xfsprogs'\n\t\t\tcase FilesystemType.F2fs: return 'f2fs-tools'\n\t\t\tcase _: return None\n\n\t@property\n\tdef installation_module(self) -> Optional[str]:\n\t\tmatch self:\n\t\t\tcase FilesystemType.Btrfs: return 'btrfs'\n\t\t\tcase _: return None\n\n\t@property\n\tdef installation_binary(self) -> Optional[str]:\n\t\tmatch self:\n\t\t\tcase FilesystemType.Btrfs: return '/usr/bin/btrfs'\n\t\t\tcase _: return None\n\n\t@property\n\tdef 
installation_hooks(self) -> Optional[str]:\n\t\tmatch self:\n\t\t\tcase FilesystemType.Btrfs: return 'btrfs'\n\t\t\tcase _: return None\n\n\nclass ModificationStatus(Enum):\n\tExist = 'existing'\n\tModify = 'modify'\n\tDelete = 'delete'\n\tCreate = 'create'\n\n\n@dataclass\nclass PartitionModification:\n\tstatus: ModificationStatus\n\ttype: PartitionType\n\tstart: Size\n\tlength: Size\n\tfs_type: FilesystemType\n\tmountpoint: Optional[Path] = None\n\tmount_options: List[str] = field(default_factory=list)\n\tflags: List[PartitionFlag] = field(default_factory=list)\n\tbtrfs_subvols: List[SubvolumeModification] = field(default_factory=list)\n\n\t# only set if the device was created or exists\n\tdev_path: Optional[Path] = None\n\tpartuuid: Optional[str] = None\n\tuuid: Optional[str] = None\n\n\tdef __post_init__(self):\n\t\t# needed to use the object as a dictionary key due to hash func\n\t\tif not hasattr(self, '_obj_id'):\n\t\t\tself._obj_id = uuid.uuid4()\n\n\t\tif self.is_exists_or_modify() and not self.dev_path:\n\t\t\traise ValueError('If partition marked as existing a path must be set')\n\n\tdef __hash__(self):\n\t\treturn hash(self._obj_id)\n\n\t@property\n\tdef obj_id(self) -> str:\n\t\tif hasattr(self, '_obj_id'):\n\t\t\treturn str(self._obj_id)\n\t\treturn ''\n\n\t@property\n\tdef safe_dev_path(self) -> Path:\n\t\tif self.dev_path is None:\n\t\t\traise ValueError('Device path was not set')\n\t\treturn self.dev_path\n\n\t@classmethod\n\tdef from_existing_partition(cls, partition_info: _PartitionInfo) -> PartitionModification:\n\t\tif partition_info.btrfs_subvol_infos:\n\t\t\tmountpoint = None\n\t\t\tsubvol_mods = []\n\t\t\tfor info in partition_info.btrfs_subvol_infos:\n\t\t\t\tsubvol_mods.append(\n\t\t\t\t\tSubvolumeModification.from_existing_subvol_info(info)\n\t\t\t\t)\n\t\telse:\n\t\t\tmountpoint = partition_info.mountpoints[0] if partition_info.mountpoints else None\n\t\t\tsubvol_mods = []\n\n\t\treturn PartitionModification(\n\t\t\tstatus=ModificationStatus.Exist,\n\t\t\ttype=partition_info.type,\n\t\t\tstart=partition_info.start,\n\t\t\tlength=partition_info.length,\n\t\t\tfs_type=partition_info.fs_type,\n\t\t\tdev_path=partition_info.path,\n\t\t\tflags=partition_info.flags,\n\t\t\tmountpoint=mountpoint,\n\t\t\tbtrfs_subvols=subvol_mods\n\t\t)\n\n\t@property\n\tdef relative_mountpoint(self) -> Path:\n\t\t\"\"\"\n\t\tWill return the relative path based on the anchor\n\t\te.g. 
Path('/mnt/test') -> Path('mnt/test')\n\t\t\"\"\"\n\t\tif self.mountpoint:\n\t\t\treturn self.mountpoint.relative_to(self.mountpoint.anchor)\n\n\t\traise ValueError('Mountpoint is not specified')\n\n\tdef is_boot(self) -> bool:\n\t\treturn PartitionFlag.Boot in self.flags\n\n\tdef is_root(self, relative_mountpoint: Optional[Path] = None) -> bool:\n\t\tif relative_mountpoint is not None and self.mountpoint is not None:\n\t\t\treturn self.mountpoint.relative_to(relative_mountpoint) == Path('.')\n\t\telif self.mountpoint is not None:\n\t\t\treturn Path('/') == self.mountpoint\n\t\telse:\n\t\t\tfor subvol in self.btrfs_subvols:\n\t\t\t\tif subvol.is_root(relative_mountpoint):\n\t\t\t\t\treturn True\n\n\t\treturn False\n\n\tdef is_modify(self) -> bool:\n\t\treturn self.status == ModificationStatus.Modify\n\n\tdef exists(self) -> bool:\n\t\treturn self.status == ModificationStatus.Exist\n\n\tdef is_exists_or_modify(self) -> bool:\n\t\treturn self.status in [ModificationStatus.Exist, ModificationStatus.Modify]\n\n\t@property\n\tdef mapper_name(self) -> Optional[str]:\n\t\tif self.dev_path:\n\t\t\treturn f'{storage.get(\"ENC_IDENTIFIER\", \"ai\")}{self.dev_path.name}'\n\t\treturn None\n\n\tdef set_flag(self, flag: PartitionFlag):\n\t\tif flag not in self.flags:\n\t\t\tself.flags.append(flag)\n\n\tdef invert_flag(self, flag: PartitionFlag):\n\t\tif flag in self.flags:\n\t\t\tself.flags = [f for f in self.flags if f != flag]\n\t\telse:\n\t\t\tself.set_flag(flag)\n\n\tdef json(self) -> Dict[str, Any]:\n\t\t\"\"\"\n\t\tCalled for configuration settings\n\t\t\"\"\"\n\t\treturn {\n\t\t\t'obj_id': self.obj_id,\n\t\t\t'status': self.status.value,\n\t\t\t'type': self.type.value,\n\t\t\t'start': self.start.__dump__(),\n\t\t\t'length': self.length.__dump__(),\n\t\t\t'fs_type': self.fs_type.value,\n\t\t\t'mountpoint': str(self.mountpoint) if self.mountpoint else None,\n\t\t\t'mount_options': self.mount_options,\n\t\t\t'flags': [f.name for f in self.flags],\n\t\t\t'btrfs': [vol.__dump__() for vol in self.btrfs_subvols]\n\t\t}\n\n\tdef as_json(self) -> Dict[str, Any]:\n\t\t\"\"\"\n\t\tCalled for displaying data in table format\n\t\t\"\"\"\n\t\tinfo = {\n\t\t\t'Status': self.status.value,\n\t\t\t'Device': str(self.dev_path) if self.dev_path else '',\n\t\t\t'Type': self.type.value,\n\t\t\t'Start': self.start.format_size(Unit.MiB),\n\t\t\t'Length': self.length.format_size(Unit.MiB),\n\t\t\t'FS type': self.fs_type.value,\n\t\t\t'Mountpoint': self.mountpoint if self.mountpoint else '',\n\t\t\t'Mount options': ', '.join(self.mount_options),\n\t\t\t'Flags': ', '.join([f.name for f in self.flags]),\n\t\t}\n\n\t\tif self.btrfs_subvols:\n\t\t\tinfo['Btrfs vol.'] = f'{len(self.btrfs_subvols)} subvolumes'\n\n\t\treturn info\n\n\n@dataclass\nclass DeviceModification:\n\tdevice: BDevice\n\twipe: bool\n\tpartitions: List[PartitionModification] = field(default_factory=list)\n\n\t@property\n\tdef device_path(self) -> Path:\n\t\treturn self.device.device_info.path\n\n\tdef add_partition(self, partition: PartitionModification):\n\t\tself.partitions.append(partition)\n\n\tdef get_boot_partition(self) -> Optional[PartitionModification]:\n\t\tliltered = filter(lambda x: x.is_boot(), self.partitions)\n\t\treturn next(liltered, None)\n\n\tdef get_root_partition(self, relative_path: Optional[Path]) -> Optional[PartitionModification]:\n\t\tfiltered = filter(lambda x: x.is_root(relative_path), self.partitions)\n\t\treturn next(filtered, None)\n\n\tdef __dump__(self) -> Dict[str, Any]:\n\t\t\"\"\"\n\t\tCalled when generating configuration 
files\n\t\t\"\"\"\n\t\treturn {\n\t\t\t'device': str(self.device.device_info.path),\n\t\t\t'wipe': self.wipe,\n\t\t\t'partitions': [p.json() for p in self.partitions]\n\t\t}\n\n\nclass EncryptionType(Enum):\n\tNoEncryption = \"no_encryption\"\n\tPartition = \"partition\"\n\n\t@classmethod\n\tdef _encryption_type_mapper(cls) -> Dict[str, 'EncryptionType']:\n\t\treturn {\n\t\t\t# str(_('Full disk encryption')): EncryptionType.FullDiskEncryption,\n\t\t\tstr(_('Partition encryption')): EncryptionType.Partition\n\t\t}\n\n\t@classmethod\n\tdef text_to_type(cls, text: str) -> 'EncryptionType':\n\t\tmapping = cls._encryption_type_mapper()\n\t\treturn mapping[text]\n\n\t@classmethod\n\tdef type_to_text(cls, type_: 'EncryptionType') -> str:\n\t\tmapping = cls._encryption_type_mapper()\n\t\ttype_to_text = {type_: text for text, type_ in mapping.items()}\n\t\treturn type_to_text[type_]\n\n\n@dataclass\nclass DiskEncryption:\n\tencryption_type: EncryptionType = EncryptionType.Partition\n\tencryption_password: str = ''\n\tpartitions: List[PartitionModification] = field(default_factory=list)\n\thsm_device: Optional[Fido2Device] = None\n\n\tdef should_generate_encryption_file(self, part_mod: PartitionModification) -> bool:\n\t\treturn part_mod in self.partitions and part_mod.mountpoint != Path('/')\n\n\tdef json(self) -> Dict[str, Any]:\n\t\tobj: Dict[str, Any] = {\n\t\t\t'encryption_type': self.encryption_type.value,\n\t\t\t'partitions': [p.obj_id for p in self.partitions]\n\t\t}\n\n\t\tif self.hsm_device:\n\t\t\tobj['hsm_device'] = self.hsm_device.json()\n\n\t\treturn obj\n\n\t@classmethod\n\tdef parse_arg(\n\t\tcls,\n\t\tdisk_config: DiskLayoutConfiguration,\n\t\targ: Dict[str, Any],\n\t\tpassword: str = ''\n\t) -> 'DiskEncryption':\n\t\tenc_partitions = []\n\t\tfor mod in disk_config.device_modifications:\n\t\t\tfor part in mod.partitions:\n\t\t\t\tif part.obj_id in arg.get('partitions', []):\n\t\t\t\t\tenc_partitions.append(part)\n\n\t\tenc = DiskEncryption(\n\t\t\tEncryptionType(arg['encryption_type']),\n\t\t\tpassword,\n\t\t\tenc_partitions\n\t\t)\n\n\t\tif hsm := arg.get('hsm_device', None):\n\t\t\tenc.hsm_device = Fido2Device.parse_arg(hsm)\n\n\t\treturn enc\n\n\n@dataclass\nclass Fido2Device:\n\tpath: Path\n\tmanufacturer: str\n\tproduct: str\n\n\tdef json(self) -> Dict[str, str]:\n\t\treturn {\n\t\t\t'path': str(self.path),\n\t\t\t'manufacturer': self.manufacturer,\n\t\t\t'product': self.product\n\t\t}\n\n\t@classmethod\n\tdef parse_arg(cls, arg: Dict[str, str]) -> 'Fido2Device':\n\t\treturn Fido2Device(\n\t\t\tPath(arg['path']),\n\t\t\targ['manufacturer'],\n\t\t\targ['product']\n\t\t)\n\n\n@dataclass\nclass LsblkInfo:\n\tname: str = ''\n\tpath: Path = Path()\n\tpkname: str = ''\n\tsize: Size = field(default_factory=lambda: Size(0, Unit.B))\n\tlog_sec: int = 0\n\tpttype: str = ''\n\tptuuid: str = ''\n\trota: bool = False\n\ttran: Optional[str] = None\n\tpartuuid: Optional[str] = None\n\tuuid: Optional[str] = None\n\tfstype: Optional[str] = None\n\tfsver: Optional[str] = None\n\tfsavail: Optional[str] = None\n\tfsuse_percentage: Optional[str] = None\n\ttype: Optional[str] = None\n\tmountpoint: Optional[Path] = None\n\tmountpoints: List[Path] = field(default_factory=list)\n\tfsroots: List[Path] = field(default_factory=list)\n\tchildren: List[LsblkInfo] = field(default_factory=list)\n\n\tdef json(self) -> Dict[str, Any]:\n\t\treturn {\n\t\t\t'name': self.name,\n\t\t\t'path': str(self.path),\n\t\t\t'pkname': self.pkname,\n\t\t\t'size': self.size.format_size(Unit.MiB),\n\t\t\t'log_sec': 
self.log_sec,\n\t\t\t'pttype': self.pttype,\n\t\t\t'ptuuid': self.ptuuid,\n\t\t\t'rota': self.rota,\n\t\t\t'tran': self.tran,\n\t\t\t'partuuid': self.partuuid,\n\t\t\t'uuid': self.uuid,\n\t\t\t'fstype': self.fstype,\n\t\t\t'fsver': self.fsver,\n\t\t\t'fsavail': self.fsavail,\n\t\t\t'fsuse_percentage': self.fsuse_percentage,\n\t\t\t'type': self.type,\n\t\t\t'mountpoint': self.mountpoint,\n\t\t\t'mountpoints': [str(m) for m in self.mountpoints],\n\t\t\t'fsroots': [str(r) for r in self.fsroots],\n\t\t\t'children': [c.json() for c in self.children]\n\t\t}\n\n\t@property\n\tdef btrfs_subvol_info(self) -> Dict[Path, Path]:\n\t\t\"\"\"\n\t\tIt is assumed that lsblk will contain the fields as\n\n\t\t\"mountpoints\": [\"/mnt/archinstall/log\", \"/mnt/archinstall/home\", \"/mnt/archinstall\", ...]\n\t\t\"fsroots\": [\"/@log\", \"/@home\", \"/@\"...]\n\n\t\twe'll thereby map the fsroot, which are the mounted filesystem roots\n\t\tto the corresponding mountpoints\n\t\t\"\"\"\n\t\treturn dict(zip(self.fsroots, self.mountpoints))\n\n\t@classmethod\n\tdef exclude(cls) -> List[str]:\n\t\treturn ['children']\n\n\t@classmethod\n\tdef fields(cls) -> List[str]:\n\t\treturn [f.name for f in dataclasses.fields(LsblkInfo) if f.name not in cls.exclude()]\n\n\t@classmethod\n\tdef from_json(cls, blockdevice: Dict[str, Any]) -> LsblkInfo:\n\t\tinfo = cls()\n\n\t\tfor f in cls.fields():\n\t\t\tlsblk_field = _clean_field(f, CleanType.Blockdevice)\n\t\t\tdata_field = _clean_field(f, CleanType.Dataclass)\n\n\t\t\tval: Any = None\n\t\t\tif isinstance(getattr(info, data_field), Path):\n\t\t\t\tval = Path(blockdevice[lsblk_field])\n\t\t\telif isinstance(getattr(info, data_field), Size):\n\t\t\t\tval = Size(blockdevice[lsblk_field], Unit.B)\n\t\t\telse:\n\t\t\t\tval = blockdevice[lsblk_field]\n\n\t\t\tsetattr(info, data_field, val)\n\n\t\tinfo.children = [LsblkInfo.from_json(child) for child in blockdevice.get('children', [])]\n\n\t\t# sometimes lsblk returns 'mountpoints': [null]\n\t\tinfo.mountpoints = [Path(mnt) for mnt in info.mountpoints if mnt]\n\n\t\tfs_roots = []\n\t\tfor r in info.fsroots:\n\t\t\tif r:\n\t\t\t\tpath = Path(r)\n\t\t\t\t# store the fsroot entries without the leading /\n\t\t\t\tfs_roots.append(path.relative_to(path.anchor))\n\t\tinfo.fsroots = fs_roots\n\n\t\treturn info\n\n\nclass CleanType(Enum):\n\tBlockdevice = auto()\n\tDataclass = auto()\n\tLsblk = auto()\n\n\ndef _clean_field(name: str, clean_type: CleanType) -> str:\n\tmatch clean_type:\n\t\tcase CleanType.Blockdevice:\n\t\t\treturn name.replace('_percentage', '%').replace('_', '-')\n\t\tcase CleanType.Dataclass:\n\t\t\treturn name.lower().replace('-', '_').replace('%', '_percentage')\n\t\tcase CleanType.Lsblk:\n\t\t\treturn name.replace('_percentage', '%').replace('_', '-')\n\n\ndef _fetch_lsblk_info(dev_path: Optional[Union[Path, str]] = None, retry: int = 3) -> List[LsblkInfo]:\n\tfields = [_clean_field(f, CleanType.Lsblk) for f in LsblkInfo.fields()]\n\tlsblk_fields = ','.join(fields)\n\n\tif not dev_path:\n\t\tdev_path = ''\n\n\tif retry == 0:\n\t\tretry = 1\n\n\tfor retry_attempt in range(retry):\n\t\ttry:\n\t\t\tresult = SysCommand(f'lsblk --json -b -o+{lsblk_fields} {dev_path}')\n\t\t\tbreak\n\t\texcept SysCallError as error:\n\t\t\t# Get the output minus the message/info from lsblk if it returns a non-zero exit code.\n\t\t\tif error.worker:\n\t\t\t\terr = error.worker.decode('UTF-8')\n\t\t\t\tlog(f'Error calling lsblk: {err}', level=logging.DEBUG)\n\t\t\telse:\n\t\t\t\traise error\n\n\t\t\tif retry_attempt == retry - 1:\n\t\t\t\traise 
error\n\n\t\t\ttime.sleep(1)\n\n\ttry:\n\t\tif decoded := result.decode('utf-8'):\n\t\t\tblock_devices = json.loads(decoded)\n\t\t\tblockdevices = block_devices['blockdevices']\n\t\t\treturn [LsblkInfo.from_json(device) for device in blockdevices]\n\texcept json.decoder.JSONDecodeError as err:\n\t\tlog(f\"Could not decode lsblk JSON: {result}\", fg=\"red\", level=logging.ERROR)\n\t\traise err\n\n\traise DiskError(f'Failed to read disk \"{dev_path}\" with lsblk')\n\ndef get_lsblk_info(dev_path: Union[Path, str]) -> LsblkInfo:\n\tif infos := _fetch_lsblk_info(dev_path):\n\t\treturn infos[0]\n\n\traise DiskError(f'lsblk failed to retrieve information for \"{dev_path}\"')\n\n\ndef get_all_lsblk_info() -> List[LsblkInfo]:\n\treturn _fetch_lsblk_info()\n\n\ndef get_lsblk_by_mountpoint(mountpoint: Path, as_prefix: bool = False) -> List[LsblkInfo]:\n\tdef _check(infos: List[LsblkInfo]) -> List[LsblkInfo]:\n\t\tdevices = []\n\t\tfor entry in infos:\n\t\t\tif as_prefix:\n\t\t\t\tmatches = [m for m in entry.mountpoints if str(m).startswith(str(mountpoint))]\n\t\t\t\tif matches:\n\t\t\t\t\tdevices += [entry]\n\t\t\telif mountpoint in entry.mountpoints:\n\t\t\t\tdevices += [entry]\n\n\t\t\tif len(entry.children) > 0:\n\t\t\t\tif len(match := _check(entry.children)) > 0:\n\t\t\t\t\tdevices += match\n\n\t\treturn devices\n\n\tall_info = get_all_lsblk_info()\n\treturn _check(all_info)\n",
"path": "archinstall/lib/disk/device_model.py"
}
] | diff --git a/archinstall/lib/disk/device_model.py b/archinstall/lib/disk/device_model.py
index 8e26b1d703..d57347b737 100644
--- a/archinstall/lib/disk/device_model.py
+++ b/archinstall/lib/disk/device_model.py
@@ -851,7 +851,7 @@ class LsblkInfo:
name: str = ''
path: Path = Path()
pkname: str = ''
- size: Size = Size(0, Unit.B)
+ size: Size = field(default_factory=lambda: Size(0, Unit.B))
log_sec: int = 0
pttype: str = ''
ptuuid: str = ''
|
ivy-llc__ivy-17989 | fmax
| [
{
"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sin(x, name=None):\n return ivy.sin(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cos(x, name=None):\n return ivy.cos(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acos(x, name=None):\n return ivy.acos(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cosh(x, name=None):\n return ivy.cosh(x)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef tanh(x, name=None):\n return ivy.tanh(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acosh(x, name=None):\n return ivy.acosh(x)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef asin(x, name=None):\n return ivy.asin(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log(x, name=None):\n return ivy.log(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef divide(x, y, name=None):\n return ivy.divide(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef abs(x, name=None):\n return ivy.abs(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef multiply(x, y, name=None):\n return ivy.multiply(x, y)\n\n\n@with_unsupported_dtypes(\n {\"2.5.0 and below\": (\"bool\", \"unsigned\", \"int8\", \"float16\", \"bfloat16\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef add(x, y, name=None):\n return ivy.add(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef subtract(x, y, name=None):\n return ivy.subtract(x, y)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sqrt(x, name=None):\n return ivy.sqrt(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef atanh(x, name=None):\n return ivy.atanh(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef atan(x, name=None):\n return ivy.atan(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef round(x, name=None):\n return ivy.round(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef ceil(x, name=None):\n return ivy.ceil(x)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sinh(x, name=None):\n return ivy.sinh(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef pow(x, y, name=None):\n return 
ivy.pow(x, y)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"int16\", \"float16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef conj(x, name=None):\n return ivy.conj(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef floor(x, name=None):\n return ivy.floor(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef remainder(x, y, name=None):\n return ivy.remainder(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log2(x, name=None):\n return ivy.log2(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log1p(x, name=None):\n return ivy.log1p(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef rad2deg(x, name=None):\n return ivy.rad2deg(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef deg2rad(x, name=None):\n return ivy.deg2rad(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef gcd(x, y, name=None):\n return ivy.gcd(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef tan(x, name=None):\n return ivy.tan(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef atan2(x, y, name=None):\n return ivy.atan2(x, y)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef square(x, name=None):\n return ivy.square(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sign(x, name=None):\n return ivy.sign(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef neg(x, name=None):\n return ivy.negative(x)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef exp(x, name=None):\n return ivy.exp(x)\n\n\n@with_supported_dtypes(\n {\n \"2.4.2 and below\": (\n \"float32\",\n \"float64\",\n \"int32\",\n \"int64\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef cumprod(x, dim=None, dtype=None, name=None):\n return ivy.cumprod(x, axis=dim, dtype=dtype)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef reciprocal(x, name=None):\n return ivy.reciprocal(x)\n\n\n@with_supported_dtypes(\n {\"2.5.0 and below\": (\"complex64\", \"complex128\", \"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef angle(x, name=None):\n return ivy.angle(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": \"bfloat16\"}, \"paddle\")\n@to_ivy_arrays_and_back\ndef fmin(x, y, name=None):\n return ivy.fmin(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef logit(x, eps=None, name=None):\n return ivy.logit(x, eps=eps)\n",
"path": "ivy/functional/frontends/paddle/tensor/math.py"
}
] | [
{
"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sin(x, name=None):\n return ivy.sin(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cos(x, name=None):\n return ivy.cos(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acos(x, name=None):\n return ivy.acos(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cosh(x, name=None):\n return ivy.cosh(x)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef tanh(x, name=None):\n return ivy.tanh(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acosh(x, name=None):\n return ivy.acosh(x)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef asin(x, name=None):\n return ivy.asin(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log(x, name=None):\n return ivy.log(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef divide(x, y, name=None):\n return ivy.divide(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef abs(x, name=None):\n return ivy.abs(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef multiply(x, y, name=None):\n return ivy.multiply(x, y)\n\n\n@with_unsupported_dtypes(\n {\"2.5.0 and below\": (\"bool\", \"unsigned\", \"int8\", \"float16\", \"bfloat16\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef add(x, y, name=None):\n return ivy.add(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef subtract(x, y, name=None):\n return ivy.subtract(x, y)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sqrt(x, name=None):\n return ivy.sqrt(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef atanh(x, name=None):\n return ivy.atanh(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef atan(x, name=None):\n return ivy.atan(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef round(x, name=None):\n return ivy.round(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef ceil(x, name=None):\n return ivy.ceil(x)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sinh(x, name=None):\n return ivy.sinh(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef pow(x, y, name=None):\n return 
ivy.pow(x, y)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"int16\", \"float16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef conj(x, name=None):\n return ivy.conj(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef floor(x, name=None):\n return ivy.floor(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef remainder(x, y, name=None):\n return ivy.remainder(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log2(x, name=None):\n return ivy.log2(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log1p(x, name=None):\n return ivy.log1p(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef rad2deg(x, name=None):\n return ivy.rad2deg(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef deg2rad(x, name=None):\n return ivy.deg2rad(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef gcd(x, y, name=None):\n return ivy.gcd(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef tan(x, name=None):\n return ivy.tan(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef atan2(x, y, name=None):\n return ivy.atan2(x, y)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef square(x, name=None):\n return ivy.square(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sign(x, name=None):\n return ivy.sign(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef neg(x, name=None):\n return ivy.negative(x)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef exp(x, name=None):\n return ivy.exp(x)\n\n\n@with_supported_dtypes(\n {\n \"2.4.2 and below\": (\n \"float32\",\n \"float64\",\n \"int32\",\n \"int64\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef cumprod(x, dim=None, dtype=None, name=None):\n return ivy.cumprod(x, axis=dim, dtype=dtype)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef reciprocal(x, name=None):\n return ivy.reciprocal(x)\n\n\n@with_supported_dtypes(\n {\"2.5.0 and below\": (\"complex64\", \"complex128\", \"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef angle(x, name=None):\n return ivy.angle(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": \"bfloat16\"}, \"paddle\")\n@to_ivy_arrays_and_back\ndef fmin(x, y, name=None):\n return ivy.fmin(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef logit(x, eps=None, name=None):\n return ivy.logit(x, eps=eps)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": \"bfloat16\"}, \"paddle\")\n@to_ivy_arrays_and_back\ndef fmax(x, y, name=None):\n return ivy.fmax(x, y)\n",
"path": "ivy/functional/frontends/paddle/tensor/math.py"
}
] | diff --git a/ivy/functional/frontends/paddle/tensor/math.py b/ivy/functional/frontends/paddle/tensor/math.py
index a55742b58857c..c0301050626a6 100644
--- a/ivy/functional/frontends/paddle/tensor/math.py
+++ b/ivy/functional/frontends/paddle/tensor/math.py
@@ -253,3 +253,9 @@ def fmin(x, y, name=None):
@to_ivy_arrays_and_back
def logit(x, eps=None, name=None):
return ivy.logit(x, eps=eps)
+
+
+@with_unsupported_dtypes({"2.5.0 and below": "bfloat16"}, "paddle")
+@to_ivy_arrays_and_back
+def fmax(x, y, name=None):
+ return ivy.fmax(x, y)
diff --git a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_math.py b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_math.py
index 27fc996cbfa93..4bb3bcc18a915 100644
--- a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_math.py
+++ b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_math.py
@@ -1080,3 +1080,29 @@ def test_paddle_angle(
on_device=on_device,
x=x[0],
)
+
+
+@handle_frontend_test(
+ fn_tree="paddle.fmax",
+ dtypes_and_x=helpers.dtype_and_values(
+ available_dtypes=helpers.get_dtypes("float"), num_arrays=2, shared_dtype=True
+ ),
+)
+def test_paddle_fmax(
+ *,
+ dtypes_and_x,
+ on_device,
+ fn_tree,
+ frontend,
+ test_flags,
+):
+ input_dtype, x = dtypes_and_x
+ helpers.test_frontend_function(
+ input_dtypes=input_dtype,
+ frontend=frontend,
+ test_flags=test_flags,
+ fn_tree=fn_tree,
+ on_device=on_device,
+ x=x[0],
+ y=x[1],
+ )
|
conan-io__conan-127 | mark headers as "SYSTEM" headers to silence warnings
Many libraries generate tons of warnings in their public headers. WebSocket++ uses `auto_ptr`, for example, and many Boost libraries truncate integers implicitly (`-Wconversion`). To consume these libraries you have to treat them as system headers, because GCC won't emit warnings in system headers.
This is how Conan currently sets the include directories:
``` CMake
include_directories(${CONAN_INCLUDE_DIRS})
```
This is how you would add them as "system" headers to silence warnings:
``` CMake
include_directories(SYSTEM ${CONAN_INCLUDE_DIRS})
```
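For reference, a sketch of why the `SYSTEM` keyword helps (this is general CMake/GCC behaviour, not Conan's actual generated output): CMake passes `SYSTEM` include directories to GCC/Clang as `-isystem` instead of `-I`, and GCC suppresses warnings that originate in system headers, while the project's own headers keep their warnings. The project-local path below is only an illustrative example.
``` CMake
# Dependency headers: emitted as -isystem, so -Wall/-Wconversion etc. stay quiet there.
include_directories(SYSTEM ${CONAN_INCLUDE_DIRS})
# The project's own headers (hypothetical path): still emitted as -I, warnings still reported.
include_directories(${CMAKE_SOURCE_DIR}/include)
```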
Is there a reason it is not already done this way?
This issue may apply to configurations other than CMake/GCC, too, but this is the most important one for me.
| [
{
"content": "from conans.model import Generator\nfrom conans.paths import BUILD_INFO_CMAKE\n\n\nclass DepsCppCmake(object):\n def __init__(self, deps_cpp_info):\n self.include_paths = \"\\n\\t\\t\\t\".join('\"%s\"' % p.replace(\"\\\\\", \"/\")\n for p in deps_cpp_info.include_paths)\n self.lib_paths = \"\\n\\t\\t\\t\".join('\"%s\"' % p.replace(\"\\\\\", \"/\")\n for p in deps_cpp_info.lib_paths)\n self.libs = \" \".join(deps_cpp_info.libs)\n self.defines = \"\\n\\t\\t\\t\".join(\"-D%s\" % d for d in deps_cpp_info.defines)\n self.cppflags = \" \".join(deps_cpp_info.cppflags)\n self.cflags = \" \".join(deps_cpp_info.cflags)\n self.sharedlinkflags = \" \".join(deps_cpp_info.sharedlinkflags)\n self.exelinkflags = \" \".join(deps_cpp_info.exelinkflags)\n self.bin_paths = \"\\n\\t\\t\\t\".join('\"%s\"' % p.replace(\"\\\\\", \"/\")\n for p in deps_cpp_info.bin_paths)\n\n self.rootpath = '\"%s\"' % deps_cpp_info.rootpath.replace(\"\\\\\", \"/\")\n\n\nclass CMakeGenerator(Generator):\n @property\n def filename(self):\n return BUILD_INFO_CMAKE\n\n @property\n def content(self):\n sections = []\n\n # DEPS VARIABLES\n template_dep = ('set(CONAN_{dep}_ROOT {deps.rootpath})\\n'\n 'set(CONAN_INCLUDE_DIRS_{dep} {deps.include_paths})\\n'\n 'set(CONAN_LIB_DIRS_{dep} {deps.lib_paths})\\n'\n 'set(CONAN_BIN_DIRS_{dep} {deps.bin_paths})\\n'\n 'set(CONAN_LIBS_{dep} {deps.libs})\\n'\n 'set(CONAN_DEFINES_{dep} {deps.defines})\\n'\n 'set(CONAN_CXX_FLAGS_{dep} \"{deps.cppflags}\")\\n'\n 'set(CONAN_SHARED_LINKER_FLAGS_{dep} \"{deps.sharedlinkflags}\")\\n'\n 'set(CONAN_EXE_LINKER_FLAGS_{dep} \"{deps.exelinkflags}\")\\n'\n 'set(CONAN_C_FLAGS_{dep} \"{deps.cflags}\")\\n')\n\n for dep_name, dep_cpp_info in self.deps_build_info.dependencies:\n deps = DepsCppCmake(dep_cpp_info)\n dep_flags = template_dep.format(dep=dep_name.upper(),\n deps=deps)\n sections.append(dep_flags)\n\n # GENERAL VARIABLES\n deps = DepsCppCmake(self.deps_build_info)\n\n template = ('set(CONAN_INCLUDE_DIRS {deps.include_paths} ${{CONAN_INCLUDE_DIRS}})\\n'\n 'set(CONAN_LIB_DIRS {deps.lib_paths} ${{CONAN_LIB_DIRS}})\\n'\n 'set(CONAN_BIN_DIRS {deps.bin_paths} ${{CONAN_BIN_DIRS}})\\n'\n 'set(CONAN_LIBS {deps.libs} ${{CONAN_LIBS}})\\n'\n 'set(CONAN_DEFINES {deps.defines} ${{CONAN_DEFINES}})\\n'\n 'set(CONAN_CXX_FLAGS \"{deps.cppflags} ${{CONAN_CXX_FLAGS}}\")\\n'\n 'set(CONAN_SHARED_LINKER_FLAGS \"{deps.sharedlinkflags} ${{CONAN_SHARED_LINKER_FLAGS}}\")\\n'\n 'set(CONAN_EXE_LINKER_FLAGS \"{deps.exelinkflags} ${{CONAN_EXE_LINKER_FLAGS}}\")\\n'\n 'set(CONAN_C_FLAGS \"{deps.cflags} ${{CONAN_C_FLAGS}}\")\\n'\n 'set(CONAN_CMAKE_MODULE_PATH {module_paths} ${{CONAN_CMAKE_MODULE_PATH}})')\n\n rootpaths = [DepsCppCmake(dep_cpp_info).rootpath for _, dep_cpp_info\n in self.deps_build_info.dependencies]\n module_paths = \" \".join(rootpaths)\n all_flags = template.format(deps=deps, module_paths=module_paths)\n sections.append(all_flags)\n\n # MACROS\n sections.append(self._aux_cmake_test_setup())\n\n return \"\\n\".join(sections)\n\n def _aux_cmake_test_setup(self):\n return \"\"\"macro(CONAN_BASIC_SETUP)\n conan_check_compiler()\n conan_output_dirs_setup()\n conan_flags_setup()\n # CMake can find findXXX.cmake files in the root of packages\n set(CMAKE_MODULE_PATH ${CONAN_CMAKE_MODULE_PATH} ${CMAKE_MODULE_PATH})\nendmacro()\n\nmacro(CONAN_FLAGS_SETUP)\n include_directories(${CONAN_INCLUDE_DIRS})\n link_directories(${CONAN_LIB_DIRS})\n add_definitions(${CONAN_DEFINES})\n\n # For find_library\n set(CMAKE_INCLUDE_PATH ${CONAN_INCLUDE_DIRS} ${CMAKE_INCLUDE_PATH})\n 
set(CMAKE_LIBRARY_PATH ${CONAN_LIB_DIRS} ${CMAKE_LIBRARY_PATH})\n\n set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} ${CONAN_CXX_FLAGS}\")\n set(CMAKE_C_FLAGS \"${CMAKE_C_FLAGS} ${CONAN_C_FLAGS}\")\n set(CMAKE_SHARED_LINKER_FLAGS \"${CMAKE_SHARED_LINKER_FLAGS} ${CONAN_SHARED_LINKER_FLAGS}\")\n set(CMAKE_EXE_LINKER_FLAGS \"${CMAKE_EXE_LINKER_FLAGS} ${CONAN_EXE_LINKER_FLAGS}\")\n\n if(APPLE)\n # https://cmake.org/Wiki/CMake_RPATH_handling\n # CONAN GUIDE: All generated libraries should have the id and dependencies to other\n # dylibs without path, just the name, EX:\n # libMyLib1.dylib:\n # libMyLib1.dylib (compatibility version 0.0.0, current version 0.0.0)\n # libMyLib0.dylib (compatibility version 0.0.0, current version 0.0.0)\n # /usr/lib/libc++.1.dylib (compatibility version 1.0.0, current version 120.0.0)\n # /usr/lib/libSystem.B.dylib (compatibility version 1.0.0, current version 1197.1.1)\n set(CMAKE_SKIP_RPATH 1) # AVOID RPATH FOR *.dylib, ALL LIBS BETWEEN THEM AND THE EXE\n # SHOULD BE ON THE LINKER RESOLVER PATH (./ IS ONE OF THEM)\n endif()\n if(CONAN_LINK_RUNTIME)\n string(REPLACE \"/MD\" ${CONAN_LINK_RUNTIME} CMAKE_CXX_FLAGS_RELEASE ${CMAKE_CXX_FLAGS_RELEASE})\n string(REPLACE \"/MDd\" ${CONAN_LINK_RUNTIME} CMAKE_CXX_FLAGS_DEBUG ${CMAKE_CXX_FLAGS_DEBUG})\n string(REPLACE \"/MD\" ${CONAN_LINK_RUNTIME} CMAKE_C_FLAGS_RELEASE ${CMAKE_C_FLAGS_RELEASE})\n string(REPLACE \"/MDd\" ${CONAN_LINK_RUNTIME} CMAKE_C_FLAGS_DEBUG ${CMAKE_C_FLAGS_DEBUG})\n endif()\nendmacro()\n\nmacro(CONAN_OUTPUT_DIRS_SETUP)\n set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/bin)\n set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_RELEASE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY})\n set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_DEBUG ${CMAKE_RUNTIME_OUTPUT_DIRECTORY})\n\n set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/lib)\n set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY_RELEASE ${CMAKE_ARCHIVE_OUTPUT_DIRECTORY})\n set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY_DEBUG ${CMAKE_ARCHIVE_OUTPUT_DIRECTORY})\nendmacro()\n\nmacro(CONAN_SPLIT_VERSION VERSION_STRING MAJOR MINOR)\n #make a list from the version string\n string(REPLACE \".\" \";\" VERSION_LIST ${${VERSION_STRING}})\n\n #write output values\n list(GET VERSION_LIST 0 ${MAJOR})\n list(GET VERSION_LIST 1 ${MINOR})\nendmacro()\n\nmacro(ERROR_COMPILER_VERSION)\n message(FATAL_ERROR \"Incorrect '${CONAN_COMPILER}' version 'compiler.version=${CONAN_COMPILER_VERSION}'\"\n \" is not the one detected by CMake: '${CMAKE_CXX_COMPILER_ID}=\"${VERSION_MAJOR}.${VERSION_MINOR}')\nendmacro()\n\nmacro(CHECK_COMPILER_VERSION)\n\n CONAN_SPLIT_VERSION(CMAKE_CXX_COMPILER_VERSION VERSION_MAJOR VERSION_MINOR)\n\n if(\"${CMAKE_CXX_COMPILER_ID}\" STREQUAL \"MSVC\")\n # https://cmake.org/cmake/help/v3.2/variable/MSVC_VERSION.html\n if( (${CONAN_COMPILER_VERSION} STREQUAL \"14\" AND NOT ${VERSION_MAJOR} STREQUAL \"19\") OR\n (${CONAN_COMPILER_VERSION} STREQUAL \"12\" AND NOT ${VERSION_MAJOR} STREQUAL \"18\") OR\n (${CONAN_COMPILER_VERSION} STREQUAL \"11\" AND NOT ${VERSION_MAJOR} STREQUAL \"17\") OR\n (${CONAN_COMPILER_VERSION} STREQUAL \"10\" AND NOT ${VERSION_MAJOR} STREQUAL \"16\") OR\n (${CONAN_COMPILER_VERSION} STREQUAL \"9\" AND NOT ${VERSION_MAJOR} STREQUAL \"15\") OR\n (${CONAN_COMPILER_VERSION} STREQUAL \"8\" AND NOT ${VERSION_MAJOR} STREQUAL \"14\") OR\n (${CONAN_COMPILER_VERSION} STREQUAL \"7\" AND NOT ${VERSION_MAJOR} STREQUAL \"13\") OR\n (${CONAN_COMPILER_VERSION} STREQUAL \"6\" AND NOT ${VERSION_MAJOR} STREQUAL \"12\") )\n ERROR_COMPILER_VERSION()\n endif()\n elseif(\"${CONAN_COMPILER}\" STREQUAL \"gcc\" 
OR \"${CONAN_COMPILER}\" MATCHES \"Clang\")\n if(NOT ${VERSION_MAJOR}.${VERSION_MINOR} VERSION_EQUAL \"${CONAN_COMPILER_VERSION}\")\n ERROR_COMPILER_VERSION()\n endif()\n else()\n message(\"Skipping version checking of not detected compiler...\")\n endif()\nendmacro()\n\nmacro(CONAN_CHECK_COMPILER)\n if( (\"${CONAN_COMPILER}\" STREQUAL \"Visual Studio\" AND NOT \"${CMAKE_CXX_COMPILER_ID}\" STREQUAL \"MSVC\") OR\n (\"${CONAN_COMPILER}\" STREQUAL \"gcc\" AND NOT \"${CMAKE_CXX_COMPILER_ID}\" STREQUAL \"GNU\") OR\n (\"${CONAN_COMPILER}\" STREQUAL \"apple-clang\" AND (NOT APPLE OR NOT ${CMAKE_CXX_COMPILER_ID} MATCHES \"Clang\")) OR\n (\"${CONAN_COMPILER}\" STREQUAL \"clang\" AND NOT ${CMAKE_CXX_COMPILER_ID} MATCHES \"Clang\") )\n message(FATAL_ERROR \"Incorrect '${CONAN_COMPILER}', is not the one detected by CMake: '${CMAKE_CXX_COMPILER_ID}'\")\n endif()\n CHECK_COMPILER_VERSION()\nendmacro()\n\"\"\"\n",
"path": "conans/client/generators/cmake.py"
}
] | [
{
"content": "from conans.model import Generator\nfrom conans.paths import BUILD_INFO_CMAKE\n\n\nclass DepsCppCmake(object):\n def __init__(self, deps_cpp_info):\n self.include_paths = \"\\n\\t\\t\\t\".join('\"%s\"' % p.replace(\"\\\\\", \"/\")\n for p in deps_cpp_info.include_paths)\n self.lib_paths = \"\\n\\t\\t\\t\".join('\"%s\"' % p.replace(\"\\\\\", \"/\")\n for p in deps_cpp_info.lib_paths)\n self.libs = \" \".join(deps_cpp_info.libs)\n self.defines = \"\\n\\t\\t\\t\".join(\"-D%s\" % d for d in deps_cpp_info.defines)\n self.cppflags = \" \".join(deps_cpp_info.cppflags)\n self.cflags = \" \".join(deps_cpp_info.cflags)\n self.sharedlinkflags = \" \".join(deps_cpp_info.sharedlinkflags)\n self.exelinkflags = \" \".join(deps_cpp_info.exelinkflags)\n self.bin_paths = \"\\n\\t\\t\\t\".join('\"%s\"' % p.replace(\"\\\\\", \"/\")\n for p in deps_cpp_info.bin_paths)\n\n self.rootpath = '\"%s\"' % deps_cpp_info.rootpath.replace(\"\\\\\", \"/\")\n\n\nclass CMakeGenerator(Generator):\n @property\n def filename(self):\n return BUILD_INFO_CMAKE\n\n @property\n def content(self):\n sections = []\n\n # DEPS VARIABLES\n template_dep = ('set(CONAN_{dep}_ROOT {deps.rootpath})\\n'\n 'set(CONAN_INCLUDE_DIRS_{dep} {deps.include_paths})\\n'\n 'set(CONAN_LIB_DIRS_{dep} {deps.lib_paths})\\n'\n 'set(CONAN_BIN_DIRS_{dep} {deps.bin_paths})\\n'\n 'set(CONAN_LIBS_{dep} {deps.libs})\\n'\n 'set(CONAN_DEFINES_{dep} {deps.defines})\\n'\n 'set(CONAN_CXX_FLAGS_{dep} \"{deps.cppflags}\")\\n'\n 'set(CONAN_SHARED_LINKER_FLAGS_{dep} \"{deps.sharedlinkflags}\")\\n'\n 'set(CONAN_EXE_LINKER_FLAGS_{dep} \"{deps.exelinkflags}\")\\n'\n 'set(CONAN_C_FLAGS_{dep} \"{deps.cflags}\")\\n')\n\n for dep_name, dep_cpp_info in self.deps_build_info.dependencies:\n deps = DepsCppCmake(dep_cpp_info)\n dep_flags = template_dep.format(dep=dep_name.upper(),\n deps=deps)\n sections.append(dep_flags)\n\n # GENERAL VARIABLES\n deps = DepsCppCmake(self.deps_build_info)\n\n template = ('set(CONAN_INCLUDE_DIRS {deps.include_paths} ${{CONAN_INCLUDE_DIRS}})\\n'\n 'set(CONAN_LIB_DIRS {deps.lib_paths} ${{CONAN_LIB_DIRS}})\\n'\n 'set(CONAN_BIN_DIRS {deps.bin_paths} ${{CONAN_BIN_DIRS}})\\n'\n 'set(CONAN_LIBS {deps.libs} ${{CONAN_LIBS}})\\n'\n 'set(CONAN_DEFINES {deps.defines} ${{CONAN_DEFINES}})\\n'\n 'set(CONAN_CXX_FLAGS \"{deps.cppflags} ${{CONAN_CXX_FLAGS}}\")\\n'\n 'set(CONAN_SHARED_LINKER_FLAGS \"{deps.sharedlinkflags} ${{CONAN_SHARED_LINKER_FLAGS}}\")\\n'\n 'set(CONAN_EXE_LINKER_FLAGS \"{deps.exelinkflags} ${{CONAN_EXE_LINKER_FLAGS}}\")\\n'\n 'set(CONAN_C_FLAGS \"{deps.cflags} ${{CONAN_C_FLAGS}}\")\\n'\n 'set(CONAN_CMAKE_MODULE_PATH {module_paths} ${{CONAN_CMAKE_MODULE_PATH}})')\n\n rootpaths = [DepsCppCmake(dep_cpp_info).rootpath for _, dep_cpp_info\n in self.deps_build_info.dependencies]\n module_paths = \" \".join(rootpaths)\n all_flags = template.format(deps=deps, module_paths=module_paths)\n sections.append(all_flags)\n\n # MACROS\n sections.append(self._aux_cmake_test_setup())\n\n return \"\\n\".join(sections)\n\n def _aux_cmake_test_setup(self):\n return \"\"\"macro(CONAN_BASIC_SETUP)\n conan_check_compiler()\n conan_output_dirs_setup()\n conan_flags_setup()\n # CMake can find findXXX.cmake files in the root of packages\n set(CMAKE_MODULE_PATH ${CONAN_CMAKE_MODULE_PATH} ${CMAKE_MODULE_PATH})\nendmacro()\n\nmacro(CONAN_FLAGS_SETUP)\n include_directories(SYSTEM ${CONAN_INCLUDE_DIRS})\n link_directories(${CONAN_LIB_DIRS})\n add_definitions(${CONAN_DEFINES})\n\n # For find_library\n set(CMAKE_INCLUDE_PATH ${CONAN_INCLUDE_DIRS} ${CMAKE_INCLUDE_PATH})\n 
set(CMAKE_LIBRARY_PATH ${CONAN_LIB_DIRS} ${CMAKE_LIBRARY_PATH})\n\n set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} ${CONAN_CXX_FLAGS}\")\n set(CMAKE_C_FLAGS \"${CMAKE_C_FLAGS} ${CONAN_C_FLAGS}\")\n set(CMAKE_SHARED_LINKER_FLAGS \"${CMAKE_SHARED_LINKER_FLAGS} ${CONAN_SHARED_LINKER_FLAGS}\")\n set(CMAKE_EXE_LINKER_FLAGS \"${CMAKE_EXE_LINKER_FLAGS} ${CONAN_EXE_LINKER_FLAGS}\")\n\n if(APPLE)\n # https://cmake.org/Wiki/CMake_RPATH_handling\n # CONAN GUIDE: All generated libraries should have the id and dependencies to other\n # dylibs without path, just the name, EX:\n # libMyLib1.dylib:\n # libMyLib1.dylib (compatibility version 0.0.0, current version 0.0.0)\n # libMyLib0.dylib (compatibility version 0.0.0, current version 0.0.0)\n # /usr/lib/libc++.1.dylib (compatibility version 1.0.0, current version 120.0.0)\n # /usr/lib/libSystem.B.dylib (compatibility version 1.0.0, current version 1197.1.1)\n set(CMAKE_SKIP_RPATH 1) # AVOID RPATH FOR *.dylib, ALL LIBS BETWEEN THEM AND THE EXE\n # SHOULD BE ON THE LINKER RESOLVER PATH (./ IS ONE OF THEM)\n endif()\n if(CONAN_LINK_RUNTIME)\n string(REPLACE \"/MD\" ${CONAN_LINK_RUNTIME} CMAKE_CXX_FLAGS_RELEASE ${CMAKE_CXX_FLAGS_RELEASE})\n string(REPLACE \"/MDd\" ${CONAN_LINK_RUNTIME} CMAKE_CXX_FLAGS_DEBUG ${CMAKE_CXX_FLAGS_DEBUG})\n string(REPLACE \"/MD\" ${CONAN_LINK_RUNTIME} CMAKE_C_FLAGS_RELEASE ${CMAKE_C_FLAGS_RELEASE})\n string(REPLACE \"/MDd\" ${CONAN_LINK_RUNTIME} CMAKE_C_FLAGS_DEBUG ${CMAKE_C_FLAGS_DEBUG})\n endif()\nendmacro()\n\nmacro(CONAN_OUTPUT_DIRS_SETUP)\n set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/bin)\n set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_RELEASE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY})\n set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_DEBUG ${CMAKE_RUNTIME_OUTPUT_DIRECTORY})\n\n set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/lib)\n set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY_RELEASE ${CMAKE_ARCHIVE_OUTPUT_DIRECTORY})\n set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY_DEBUG ${CMAKE_ARCHIVE_OUTPUT_DIRECTORY})\nendmacro()\n\nmacro(CONAN_SPLIT_VERSION VERSION_STRING MAJOR MINOR)\n #make a list from the version string\n string(REPLACE \".\" \";\" VERSION_LIST ${${VERSION_STRING}})\n\n #write output values\n list(GET VERSION_LIST 0 ${MAJOR})\n list(GET VERSION_LIST 1 ${MINOR})\nendmacro()\n\nmacro(ERROR_COMPILER_VERSION)\n message(FATAL_ERROR \"Incorrect '${CONAN_COMPILER}' version 'compiler.version=${CONAN_COMPILER_VERSION}'\"\n \" is not the one detected by CMake: '${CMAKE_CXX_COMPILER_ID}=\"${VERSION_MAJOR}.${VERSION_MINOR}')\nendmacro()\n\nmacro(CHECK_COMPILER_VERSION)\n\n CONAN_SPLIT_VERSION(CMAKE_CXX_COMPILER_VERSION VERSION_MAJOR VERSION_MINOR)\n\n if(\"${CMAKE_CXX_COMPILER_ID}\" STREQUAL \"MSVC\")\n # https://cmake.org/cmake/help/v3.2/variable/MSVC_VERSION.html\n if( (${CONAN_COMPILER_VERSION} STREQUAL \"14\" AND NOT ${VERSION_MAJOR} STREQUAL \"19\") OR\n (${CONAN_COMPILER_VERSION} STREQUAL \"12\" AND NOT ${VERSION_MAJOR} STREQUAL \"18\") OR\n (${CONAN_COMPILER_VERSION} STREQUAL \"11\" AND NOT ${VERSION_MAJOR} STREQUAL \"17\") OR\n (${CONAN_COMPILER_VERSION} STREQUAL \"10\" AND NOT ${VERSION_MAJOR} STREQUAL \"16\") OR\n (${CONAN_COMPILER_VERSION} STREQUAL \"9\" AND NOT ${VERSION_MAJOR} STREQUAL \"15\") OR\n (${CONAN_COMPILER_VERSION} STREQUAL \"8\" AND NOT ${VERSION_MAJOR} STREQUAL \"14\") OR\n (${CONAN_COMPILER_VERSION} STREQUAL \"7\" AND NOT ${VERSION_MAJOR} STREQUAL \"13\") OR\n (${CONAN_COMPILER_VERSION} STREQUAL \"6\" AND NOT ${VERSION_MAJOR} STREQUAL \"12\") )\n ERROR_COMPILER_VERSION()\n endif()\n elseif(\"${CONAN_COMPILER}\" STREQUAL \"gcc\" 
OR \"${CONAN_COMPILER}\" MATCHES \"Clang\")\n if(NOT ${VERSION_MAJOR}.${VERSION_MINOR} VERSION_EQUAL \"${CONAN_COMPILER_VERSION}\")\n ERROR_COMPILER_VERSION()\n endif()\n else()\n message(\"Skipping version checking of not detected compiler...\")\n endif()\nendmacro()\n\nmacro(CONAN_CHECK_COMPILER)\n if( (\"${CONAN_COMPILER}\" STREQUAL \"Visual Studio\" AND NOT \"${CMAKE_CXX_COMPILER_ID}\" STREQUAL \"MSVC\") OR\n (\"${CONAN_COMPILER}\" STREQUAL \"gcc\" AND NOT \"${CMAKE_CXX_COMPILER_ID}\" STREQUAL \"GNU\") OR\n (\"${CONAN_COMPILER}\" STREQUAL \"apple-clang\" AND (NOT APPLE OR NOT ${CMAKE_CXX_COMPILER_ID} MATCHES \"Clang\")) OR\n (\"${CONAN_COMPILER}\" STREQUAL \"clang\" AND NOT ${CMAKE_CXX_COMPILER_ID} MATCHES \"Clang\") )\n message(FATAL_ERROR \"Incorrect '${CONAN_COMPILER}', is not the one detected by CMake: '${CMAKE_CXX_COMPILER_ID}'\")\n endif()\n CHECK_COMPILER_VERSION()\nendmacro()\n\"\"\"\n",
"path": "conans/client/generators/cmake.py"
}
] | diff --git a/conans/client/generators/cmake.py b/conans/client/generators/cmake.py
index 827f8b2cb1a..56291fdedcf 100644
--- a/conans/client/generators/cmake.py
+++ b/conans/client/generators/cmake.py
@@ -82,7 +82,7 @@ def _aux_cmake_test_setup(self):
endmacro()
macro(CONAN_FLAGS_SETUP)
- include_directories(${CONAN_INCLUDE_DIRS})
+ include_directories(SYSTEM ${CONAN_INCLUDE_DIRS})
link_directories(${CONAN_LIB_DIRS})
add_definitions(${CONAN_DEFINES})
|
pypa__pip-6427 | ensure_dir() should also check for ENOTEMPTY
**Environment**
* pip version: pip 19.0.3
* Python version: python 3.7
* OS: 'python:3.7-alpine3.9' docker image (docker-ce 17.9) running on Ubuntu 18.04.2 LTS in WSL (Windows)
**Description**
`pip install pipenv` fails with the following error:
> Could not install packages due to an EnvironmentError: [Errno 39] Directory not empty: '/tmp/pip-install-wx86kab7/pipenv/pipenv'
**How to Reproduce**
1. Set up the environment as described above (alpine3.9, on docker-ce 17.9, running on Ubuntu in WSL), then run the commands below.
2. `apk --update add --virtual build-dependencies libffi-dev openssl-dev build-base`
3. `pip install --upgrade pip`
4. `pip install pipenv`
**Output**
> Could not install packages due to an EnvironmentError: [Errno 39] Directory not empty: '/tmp/pip-install-wx86kab7/pipenv/pipenv'
**Investigation**
Compared `strace` results of a successful run (on a different environment) against the failed run.
On a successful run, `mkdir` is repeatedly called with `/tmp/pip-install-<hash>/pipenv/pipenv` as an argument and fails with an `EEXIST` error. On the failed run, the same call fails with an `ENOTEMPTY` error instead. This appears to be environment-specific (possibly Docker/Windows related), as the same difference is observed when simply running `mkdir` from a shell.
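The fix that eventually landed (see the diff further below) simply treats `ENOTEMPTY` the same way as `EEXIST` inside `ensure_dir()`. A minimal sketch of that approach, shown here purely for illustration:

```python
import errno
import os


def ensure_dir(path):
    """os.makedirs() that tolerates an already-existing directory."""
    try:
        os.makedirs(path)
    except OSError as e:
        # EEXIST is the usual "already exists" case; some environments
        # (e.g. Docker on WSL, as described above) report ENOTEMPTY for an
        # existing, non-empty directory, so it is ignored as well.
        if e.errno not in (errno.EEXIST, errno.ENOTEMPTY):
            raise
```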
| [
{
"content": "from __future__ import absolute_import\n\nimport contextlib\nimport errno\nimport io\n# we have a submodule named 'logging' which would shadow this if we used the\n# regular name:\nimport logging as std_logging\nimport os\nimport posixpath\nimport re\nimport shutil\nimport stat\nimport subprocess\nimport sys\nimport tarfile\nimport zipfile\nfrom collections import deque\n\nfrom pip._vendor import pkg_resources\n# NOTE: retrying is not annotated in typeshed as on 2017-07-17, which is\n# why we ignore the type on this import.\nfrom pip._vendor.retrying import retry # type: ignore\nfrom pip._vendor.six import PY2\nfrom pip._vendor.six.moves import input, shlex_quote\nfrom pip._vendor.six.moves.urllib import parse as urllib_parse\nfrom pip._vendor.six.moves.urllib.parse import unquote as urllib_unquote\n\nfrom pip._internal.exceptions import CommandError, InstallationError\nfrom pip._internal.locations import (\n running_under_virtualenv, site_packages, user_site, virtualenv_no_global,\n write_delete_marker_file,\n)\nfrom pip._internal.utils.compat import (\n WINDOWS, console_to_str, expanduser, stdlib_pkgs,\n)\nfrom pip._internal.utils.typing import MYPY_CHECK_RUNNING\n\nif PY2:\n from io import BytesIO as StringIO\nelse:\n from io import StringIO\n\nif MYPY_CHECK_RUNNING:\n from typing import (\n Optional, Tuple, Iterable, List, Match, Union, Any, Mapping, Text,\n AnyStr, Container\n )\n from pip._vendor.pkg_resources import Distribution\n from pip._internal.models.link import Link\n from pip._internal.utils.ui import SpinnerInterface\n\n\n__all__ = ['rmtree', 'display_path', 'backup_dir',\n 'ask', 'splitext',\n 'format_size', 'is_installable_dir',\n 'is_svn_page', 'file_contents',\n 'split_leading_dir', 'has_leading_dir',\n 'normalize_path',\n 'renames', 'get_prog',\n 'unzip_file', 'untar_file', 'unpack_file', 'call_subprocess',\n 'captured_stdout', 'ensure_dir',\n 'ARCHIVE_EXTENSIONS', 'SUPPORTED_EXTENSIONS', 'WHEEL_EXTENSION',\n 'get_installed_version', 'remove_auth_from_url']\n\n\nlogger = std_logging.getLogger(__name__)\nsubprocess_logger = std_logging.getLogger('pip.subprocessor')\n\nLOG_DIVIDER = '----------------------------------------'\n\nWHEEL_EXTENSION = '.whl'\nBZ2_EXTENSIONS = ('.tar.bz2', '.tbz')\nXZ_EXTENSIONS = ('.tar.xz', '.txz', '.tlz', '.tar.lz', '.tar.lzma')\nZIP_EXTENSIONS = ('.zip', WHEEL_EXTENSION)\nTAR_EXTENSIONS = ('.tar.gz', '.tgz', '.tar')\nARCHIVE_EXTENSIONS = (\n ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS + XZ_EXTENSIONS)\nSUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS\n\ntry:\n import bz2 # noqa\n SUPPORTED_EXTENSIONS += BZ2_EXTENSIONS\nexcept ImportError:\n logger.debug('bz2 module is not available')\n\ntry:\n # Only for Python 3.3+\n import lzma # noqa\n SUPPORTED_EXTENSIONS += XZ_EXTENSIONS\nexcept ImportError:\n logger.debug('lzma module is not available')\n\n\ndef ensure_dir(path):\n # type: (AnyStr) -> None\n \"\"\"os.path.makedirs without EEXIST.\"\"\"\n try:\n os.makedirs(path)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n\ndef get_prog():\n # type: () -> str\n try:\n prog = os.path.basename(sys.argv[0])\n if prog in ('__main__.py', '-c'):\n return \"%s -m pip\" % sys.executable\n else:\n return prog\n except (AttributeError, TypeError, IndexError):\n pass\n return 'pip'\n\n\n# Retry every half second for up to 3 seconds\n@retry(stop_max_delay=3000, wait_fixed=500)\ndef rmtree(dir, ignore_errors=False):\n # type: (str, bool) -> None\n shutil.rmtree(dir, ignore_errors=ignore_errors,\n 
onerror=rmtree_errorhandler)\n\n\ndef rmtree_errorhandler(func, path, exc_info):\n \"\"\"On Windows, the files in .svn are read-only, so when rmtree() tries to\n remove them, an exception is thrown. We catch that here, remove the\n read-only attribute, and hopefully continue without problems.\"\"\"\n # if file type currently read only\n if os.stat(path).st_mode & stat.S_IREAD:\n # convert to read/write\n os.chmod(path, stat.S_IWRITE)\n # use the original function to repeat the operation\n func(path)\n return\n else:\n raise\n\n\ndef display_path(path):\n # type: (Union[str, Text]) -> str\n \"\"\"Gives the display value for a given path, making it relative to cwd\n if possible.\"\"\"\n path = os.path.normcase(os.path.abspath(path))\n if sys.version_info[0] == 2:\n path = path.decode(sys.getfilesystemencoding(), 'replace')\n path = path.encode(sys.getdefaultencoding(), 'replace')\n if path.startswith(os.getcwd() + os.path.sep):\n path = '.' + path[len(os.getcwd()):]\n return path\n\n\ndef backup_dir(dir, ext='.bak'):\n # type: (str, str) -> str\n \"\"\"Figure out the name of a directory to back up the given dir to\n (adding .bak, .bak2, etc)\"\"\"\n n = 1\n extension = ext\n while os.path.exists(dir + extension):\n n += 1\n extension = ext + str(n)\n return dir + extension\n\n\ndef ask_path_exists(message, options):\n # type: (str, Iterable[str]) -> str\n for action in os.environ.get('PIP_EXISTS_ACTION', '').split():\n if action in options:\n return action\n return ask(message, options)\n\n\ndef ask(message, options):\n # type: (str, Iterable[str]) -> str\n \"\"\"Ask the message interactively, with the given possible responses\"\"\"\n while 1:\n if os.environ.get('PIP_NO_INPUT'):\n raise Exception(\n 'No input was expected ($PIP_NO_INPUT set); question: %s' %\n message\n )\n response = input(message)\n response = response.strip().lower()\n if response not in options:\n print(\n 'Your response (%r) was not one of the expected responses: '\n '%s' % (response, ', '.join(options))\n )\n else:\n return response\n\n\ndef format_size(bytes):\n # type: (float) -> str\n if bytes > 1000 * 1000:\n return '%.1fMB' % (bytes / 1000.0 / 1000)\n elif bytes > 10 * 1000:\n return '%ikB' % (bytes / 1000)\n elif bytes > 1000:\n return '%.1fkB' % (bytes / 1000.0)\n else:\n return '%ibytes' % bytes\n\n\ndef is_installable_dir(path):\n # type: (str) -> bool\n \"\"\"Is path is a directory containing setup.py or pyproject.toml?\n \"\"\"\n if not os.path.isdir(path):\n return False\n setup_py = os.path.join(path, 'setup.py')\n if os.path.isfile(setup_py):\n return True\n pyproject_toml = os.path.join(path, 'pyproject.toml')\n if os.path.isfile(pyproject_toml):\n return True\n return False\n\n\ndef is_svn_page(html):\n # type: (Union[str, Text]) -> Optional[Match[Union[str, Text]]]\n \"\"\"\n Returns true if the page appears to be the index page of an svn repository\n \"\"\"\n return (re.search(r'<title>[^<]*Revision \\d+:', html) and\n re.search(r'Powered by (?:<a[^>]*?>)?Subversion', html, re.I))\n\n\ndef file_contents(filename):\n # type: (str) -> Text\n with open(filename, 'rb') as fp:\n return fp.read().decode('utf-8')\n\n\ndef read_chunks(file, size=io.DEFAULT_BUFFER_SIZE):\n \"\"\"Yield pieces of data from a file-like object until EOF.\"\"\"\n while True:\n chunk = file.read(size)\n if not chunk:\n break\n yield chunk\n\n\ndef split_leading_dir(path):\n # type: (Union[str, Text]) -> List[Union[str, Text]]\n path = path.lstrip('/').lstrip('\\\\')\n if '/' in path and (('\\\\' in path and path.find('/') < 
path.find('\\\\')) or\n '\\\\' not in path):\n return path.split('/', 1)\n elif '\\\\' in path:\n return path.split('\\\\', 1)\n else:\n return [path, '']\n\n\ndef has_leading_dir(paths):\n # type: (Iterable[Union[str, Text]]) -> bool\n \"\"\"Returns true if all the paths have the same leading path name\n (i.e., everything is in one subdirectory in an archive)\"\"\"\n common_prefix = None\n for path in paths:\n prefix, rest = split_leading_dir(path)\n if not prefix:\n return False\n elif common_prefix is None:\n common_prefix = prefix\n elif prefix != common_prefix:\n return False\n return True\n\n\ndef normalize_path(path, resolve_symlinks=True):\n # type: (str, bool) -> str\n \"\"\"\n Convert a path to its canonical, case-normalized, absolute version.\n\n \"\"\"\n path = expanduser(path)\n if resolve_symlinks:\n path = os.path.realpath(path)\n else:\n path = os.path.abspath(path)\n return os.path.normcase(path)\n\n\ndef splitext(path):\n # type: (str) -> Tuple[str, str]\n \"\"\"Like os.path.splitext, but take off .tar too\"\"\"\n base, ext = posixpath.splitext(path)\n if base.lower().endswith('.tar'):\n ext = base[-4:] + ext\n base = base[:-4]\n return base, ext\n\n\ndef renames(old, new):\n # type: (str, str) -> None\n \"\"\"Like os.renames(), but handles renaming across devices.\"\"\"\n # Implementation borrowed from os.renames().\n head, tail = os.path.split(new)\n if head and tail and not os.path.exists(head):\n os.makedirs(head)\n\n shutil.move(old, new)\n\n head, tail = os.path.split(old)\n if head and tail:\n try:\n os.removedirs(head)\n except OSError:\n pass\n\n\ndef is_local(path):\n # type: (str) -> bool\n \"\"\"\n Return True if path is within sys.prefix, if we're running in a virtualenv.\n\n If we're not in a virtualenv, all paths are considered \"local.\"\n\n \"\"\"\n if not running_under_virtualenv():\n return True\n return normalize_path(path).startswith(normalize_path(sys.prefix))\n\n\ndef dist_is_local(dist):\n # type: (Distribution) -> bool\n \"\"\"\n Return True if given Distribution object is installed locally\n (i.e. 
within current virtualenv).\n\n Always True if we're not in a virtualenv.\n\n \"\"\"\n return is_local(dist_location(dist))\n\n\ndef dist_in_usersite(dist):\n # type: (Distribution) -> bool\n \"\"\"\n Return True if given Distribution is installed in user site.\n \"\"\"\n norm_path = normalize_path(dist_location(dist))\n return norm_path.startswith(normalize_path(user_site))\n\n\ndef dist_in_site_packages(dist):\n # type: (Distribution) -> bool\n \"\"\"\n Return True if given Distribution is installed in\n sysconfig.get_python_lib().\n \"\"\"\n return normalize_path(\n dist_location(dist)\n ).startswith(normalize_path(site_packages))\n\n\ndef dist_is_editable(dist):\n # type: (Distribution) -> bool\n \"\"\"\n Return True if given Distribution is an editable install.\n \"\"\"\n for path_item in sys.path:\n egg_link = os.path.join(path_item, dist.project_name + '.egg-link')\n if os.path.isfile(egg_link):\n return True\n return False\n\n\ndef get_installed_distributions(local_only=True,\n skip=stdlib_pkgs,\n include_editables=True,\n editables_only=False,\n user_only=False):\n # type: (bool, Container[str], bool, bool, bool) -> List[Distribution]\n \"\"\"\n Return a list of installed Distribution objects.\n\n If ``local_only`` is True (default), only return installations\n local to the current virtualenv, if in a virtualenv.\n\n ``skip`` argument is an iterable of lower-case project names to\n ignore; defaults to stdlib_pkgs\n\n If ``include_editables`` is False, don't report editables.\n\n If ``editables_only`` is True , only report editables.\n\n If ``user_only`` is True , only report installations in the user\n site directory.\n\n \"\"\"\n if local_only:\n local_test = dist_is_local\n else:\n def local_test(d):\n return True\n\n if include_editables:\n def editable_test(d):\n return True\n else:\n def editable_test(d):\n return not dist_is_editable(d)\n\n if editables_only:\n def editables_only_test(d):\n return dist_is_editable(d)\n else:\n def editables_only_test(d):\n return True\n\n if user_only:\n user_test = dist_in_usersite\n else:\n def user_test(d):\n return True\n\n # because of pkg_resources vendoring, mypy cannot find stub in typeshed\n return [d for d in pkg_resources.working_set # type: ignore\n if local_test(d) and\n d.key not in skip and\n editable_test(d) and\n editables_only_test(d) and\n user_test(d)\n ]\n\n\ndef egg_link_path(dist):\n # type: (Distribution) -> Optional[str]\n \"\"\"\n Return the path for the .egg-link file if it exists, otherwise, None.\n\n There's 3 scenarios:\n 1) not in a virtualenv\n try to find in site.USER_SITE, then site_packages\n 2) in a no-global virtualenv\n try to find in site_packages\n 3) in a yes-global virtualenv\n try to find in site_packages, then site.USER_SITE\n (don't look in global location)\n\n For #1 and #3, there could be odd cases, where there's an egg-link in 2\n locations.\n\n This method will just return the first one found.\n \"\"\"\n sites = []\n if running_under_virtualenv():\n if virtualenv_no_global():\n sites.append(site_packages)\n else:\n sites.append(site_packages)\n if user_site:\n sites.append(user_site)\n else:\n if user_site:\n sites.append(user_site)\n sites.append(site_packages)\n\n for site in sites:\n egglink = os.path.join(site, dist.project_name) + '.egg-link'\n if os.path.isfile(egglink):\n return egglink\n return None\n\n\ndef dist_location(dist):\n # type: (Distribution) -> str\n \"\"\"\n Get the site-packages location of this distribution. 
Generally\n this is dist.location, except in the case of develop-installed\n packages, where dist.location is the source code location, and we\n want to know where the egg-link file is.\n\n \"\"\"\n egg_link = egg_link_path(dist)\n if egg_link:\n return egg_link\n return dist.location\n\n\ndef current_umask():\n \"\"\"Get the current umask which involves having to set it temporarily.\"\"\"\n mask = os.umask(0)\n os.umask(mask)\n return mask\n\n\ndef unzip_file(filename, location, flatten=True):\n # type: (str, str, bool) -> None\n \"\"\"\n Unzip the file (with path `filename`) to the destination `location`. All\n files are written based on system defaults and umask (i.e. permissions are\n not preserved), except that regular file members with any execute\n permissions (user, group, or world) have \"chmod +x\" applied after being\n written. Note that for windows, any execute changes using os.chmod are\n no-ops per the python docs.\n \"\"\"\n ensure_dir(location)\n zipfp = open(filename, 'rb')\n try:\n zip = zipfile.ZipFile(zipfp, allowZip64=True)\n leading = has_leading_dir(zip.namelist()) and flatten\n for info in zip.infolist():\n name = info.filename\n fn = name\n if leading:\n fn = split_leading_dir(name)[1]\n fn = os.path.join(location, fn)\n dir = os.path.dirname(fn)\n if fn.endswith('/') or fn.endswith('\\\\'):\n # A directory\n ensure_dir(fn)\n else:\n ensure_dir(dir)\n # Don't use read() to avoid allocating an arbitrarily large\n # chunk of memory for the file's content\n fp = zip.open(name)\n try:\n with open(fn, 'wb') as destfp:\n shutil.copyfileobj(fp, destfp)\n finally:\n fp.close()\n mode = info.external_attr >> 16\n # if mode and regular file and any execute permissions for\n # user/group/world?\n if mode and stat.S_ISREG(mode) and mode & 0o111:\n # make dest file have execute for user/group/world\n # (chmod +x) no-op on windows per python docs\n os.chmod(fn, (0o777 - current_umask() | 0o111))\n finally:\n zipfp.close()\n\n\ndef untar_file(filename, location):\n # type: (str, str) -> None\n \"\"\"\n Untar the file (with path `filename`) to the destination `location`.\n All files are written based on system defaults and umask (i.e. permissions\n are not preserved), except that regular file members with any execute\n permissions (user, group, or world) have \"chmod +x\" applied after being\n written. 
Note that for windows, any execute changes using os.chmod are\n no-ops per the python docs.\n \"\"\"\n ensure_dir(location)\n if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'):\n mode = 'r:gz'\n elif filename.lower().endswith(BZ2_EXTENSIONS):\n mode = 'r:bz2'\n elif filename.lower().endswith(XZ_EXTENSIONS):\n mode = 'r:xz'\n elif filename.lower().endswith('.tar'):\n mode = 'r'\n else:\n logger.warning(\n 'Cannot determine compression type for file %s', filename,\n )\n mode = 'r:*'\n tar = tarfile.open(filename, mode)\n try:\n leading = has_leading_dir([\n member.name for member in tar.getmembers()\n ])\n for member in tar.getmembers():\n fn = member.name\n if leading:\n # https://github.com/python/mypy/issues/1174\n fn = split_leading_dir(fn)[1] # type: ignore\n path = os.path.join(location, fn)\n if member.isdir():\n ensure_dir(path)\n elif member.issym():\n try:\n # https://github.com/python/typeshed/issues/2673\n tar._extract_member(member, path) # type: ignore\n except Exception as exc:\n # Some corrupt tar files seem to produce this\n # (specifically bad symlinks)\n logger.warning(\n 'In the tar file %s the member %s is invalid: %s',\n filename, member.name, exc,\n )\n continue\n else:\n try:\n fp = tar.extractfile(member)\n except (KeyError, AttributeError) as exc:\n # Some corrupt tar files seem to produce this\n # (specifically bad symlinks)\n logger.warning(\n 'In the tar file %s the member %s is invalid: %s',\n filename, member.name, exc,\n )\n continue\n ensure_dir(os.path.dirname(path))\n with open(path, 'wb') as destfp:\n shutil.copyfileobj(fp, destfp)\n fp.close()\n # Update the timestamp (useful for cython compiled files)\n # https://github.com/python/typeshed/issues/2673\n tar.utime(member, path) # type: ignore\n # member have any execute permissions for user/group/world?\n if member.mode & 0o111:\n # make dest file have execute for user/group/world\n # no-op on windows per python docs\n os.chmod(path, (0o777 - current_umask() | 0o111))\n finally:\n tar.close()\n\n\ndef unpack_file(\n filename, # type: str\n location, # type: str\n content_type, # type: Optional[str]\n link # type: Optional[Link]\n):\n # type: (...) 
-> None\n filename = os.path.realpath(filename)\n if (content_type == 'application/zip' or\n filename.lower().endswith(ZIP_EXTENSIONS) or\n zipfile.is_zipfile(filename)):\n unzip_file(\n filename,\n location,\n flatten=not filename.endswith('.whl')\n )\n elif (content_type == 'application/x-gzip' or\n tarfile.is_tarfile(filename) or\n filename.lower().endswith(\n TAR_EXTENSIONS + BZ2_EXTENSIONS + XZ_EXTENSIONS)):\n untar_file(filename, location)\n elif (content_type and content_type.startswith('text/html') and\n is_svn_page(file_contents(filename))):\n # We don't really care about this\n from pip._internal.vcs.subversion import Subversion\n Subversion('svn+' + link.url).unpack(location)\n else:\n # FIXME: handle?\n # FIXME: magic signatures?\n logger.critical(\n 'Cannot unpack file %s (downloaded from %s, content-type: %s); '\n 'cannot detect archive format',\n filename, location, content_type,\n )\n raise InstallationError(\n 'Cannot determine archive format of %s' % location\n )\n\n\ndef format_command_args(args):\n # type: (List[str]) -> str\n \"\"\"\n Format command arguments for display.\n \"\"\"\n return ' '.join(shlex_quote(arg) for arg in args)\n\n\ndef call_subprocess(\n cmd, # type: List[str]\n show_stdout=False, # type: bool\n cwd=None, # type: Optional[str]\n on_returncode='raise', # type: str\n extra_ok_returncodes=None, # type: Optional[Iterable[int]]\n command_desc=None, # type: Optional[str]\n extra_environ=None, # type: Optional[Mapping[str, Any]]\n unset_environ=None, # type: Optional[Iterable[str]]\n spinner=None # type: Optional[SpinnerInterface]\n):\n # type: (...) -> Optional[Text]\n \"\"\"\n Args:\n show_stdout: if true, use INFO to log the subprocess's stderr and\n stdout streams. Otherwise, use DEBUG. Defaults to False.\n extra_ok_returncodes: an iterable of integer return codes that are\n acceptable, in addition to 0. Defaults to None, which means [].\n unset_environ: an iterable of environment variable names to unset\n prior to calling subprocess.Popen().\n \"\"\"\n if extra_ok_returncodes is None:\n extra_ok_returncodes = []\n if unset_environ is None:\n unset_environ = []\n # Most places in pip use show_stdout=False. What this means is--\n #\n # - We connect the child's output (combined stderr and stdout) to a\n # single pipe, which we read.\n # - We log this output to stderr at DEBUG level as it is received.\n # - If DEBUG logging isn't enabled (e.g. if --verbose logging wasn't\n # requested), then we show a spinner so the user can still see the\n # subprocess is in progress.\n # - If the subprocess exits with an error, we log the output to stderr\n # at ERROR level if it hasn't already been displayed to the console\n # (e.g. if --verbose logging wasn't enabled). This way we don't log\n # the output to the console twice.\n #\n # If show_stdout=True, then the above is still done, but with DEBUG\n # replaced by INFO.\n if show_stdout:\n # Then log the subprocess output at INFO level.\n log_subprocess = subprocess_logger.info\n used_level = std_logging.INFO\n else:\n # Then log the subprocess output using DEBUG. 
This also ensures\n # it will be logged to the log file (aka user_log), if enabled.\n log_subprocess = subprocess_logger.debug\n used_level = std_logging.DEBUG\n\n # Whether the subprocess will be visible in the console.\n showing_subprocess = subprocess_logger.getEffectiveLevel() <= used_level\n\n # Only use the spinner if we're not showing the subprocess output\n # and we have a spinner.\n use_spinner = not showing_subprocess and spinner is not None\n\n if command_desc is None:\n command_desc = format_command_args(cmd)\n\n log_subprocess(\"Running command %s\", command_desc)\n env = os.environ.copy()\n if extra_environ:\n env.update(extra_environ)\n for name in unset_environ:\n env.pop(name, None)\n try:\n proc = subprocess.Popen(\n cmd, stderr=subprocess.STDOUT, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, cwd=cwd, env=env,\n )\n proc.stdin.close()\n except Exception as exc:\n subprocess_logger.critical(\n \"Error %s while executing command %s\", exc, command_desc,\n )\n raise\n all_output = []\n while True:\n line = console_to_str(proc.stdout.readline())\n if not line:\n break\n line = line.rstrip()\n all_output.append(line + '\\n')\n\n # Show the line immediately.\n log_subprocess(line)\n # Update the spinner.\n if use_spinner:\n spinner.spin()\n try:\n proc.wait()\n finally:\n if proc.stdout:\n proc.stdout.close()\n proc_had_error = (\n proc.returncode and proc.returncode not in extra_ok_returncodes\n )\n if use_spinner:\n if proc_had_error:\n spinner.finish(\"error\")\n else:\n spinner.finish(\"done\")\n if proc_had_error:\n if on_returncode == 'raise':\n if not showing_subprocess:\n # Then the subprocess streams haven't been logged to the\n # console yet.\n subprocess_logger.error(\n 'Complete output from command %s:', command_desc,\n )\n # The all_output value already ends in a newline.\n subprocess_logger.error(''.join(all_output) + LOG_DIVIDER)\n raise InstallationError(\n 'Command \"%s\" failed with error code %s in %s'\n % (command_desc, proc.returncode, cwd))\n elif on_returncode == 'warn':\n subprocess_logger.warning(\n 'Command \"%s\" had error code %s in %s',\n command_desc, proc.returncode, cwd,\n )\n elif on_returncode == 'ignore':\n pass\n else:\n raise ValueError('Invalid value: on_returncode=%s' %\n repr(on_returncode))\n return ''.join(all_output)\n\n\ndef _make_build_dir(build_dir):\n os.makedirs(build_dir)\n write_delete_marker_file(build_dir)\n\n\nclass FakeFile(object):\n \"\"\"Wrap a list of lines in an object with readline() to make\n ConfigParser happy.\"\"\"\n def __init__(self, lines):\n self._gen = (l for l in lines)\n\n def readline(self):\n try:\n try:\n return next(self._gen)\n except NameError:\n return self._gen.next()\n except StopIteration:\n return ''\n\n def __iter__(self):\n return self._gen\n\n\nclass StreamWrapper(StringIO):\n\n @classmethod\n def from_stream(cls, orig_stream):\n cls.orig_stream = orig_stream\n return cls()\n\n # compileall.compile_dir() needs stdout.encoding to print to stdout\n @property\n def encoding(self):\n return self.orig_stream.encoding\n\n\[email protected]\ndef captured_output(stream_name):\n \"\"\"Return a context manager used by captured_stdout/stdin/stderr\n that temporarily replaces the sys stream *stream_name* with a StringIO.\n\n Taken from Lib/support/__init__.py in the CPython repo.\n \"\"\"\n orig_stdout = getattr(sys, stream_name)\n setattr(sys, stream_name, StreamWrapper.from_stream(orig_stdout))\n try:\n yield getattr(sys, stream_name)\n finally:\n setattr(sys, stream_name, orig_stdout)\n\n\ndef 
captured_stdout():\n \"\"\"Capture the output of sys.stdout:\n\n with captured_stdout() as stdout:\n print('hello')\n self.assertEqual(stdout.getvalue(), 'hello\\n')\n\n Taken from Lib/support/__init__.py in the CPython repo.\n \"\"\"\n return captured_output('stdout')\n\n\ndef captured_stderr():\n \"\"\"\n See captured_stdout().\n \"\"\"\n return captured_output('stderr')\n\n\nclass cached_property(object):\n \"\"\"A property that is only computed once per instance and then replaces\n itself with an ordinary attribute. Deleting the attribute resets the\n property.\n\n Source: https://github.com/bottlepy/bottle/blob/0.11.5/bottle.py#L175\n \"\"\"\n\n def __init__(self, func):\n self.__doc__ = getattr(func, '__doc__')\n self.func = func\n\n def __get__(self, obj, cls):\n if obj is None:\n # We're being accessed from the class itself, not from an object\n return self\n value = obj.__dict__[self.func.__name__] = self.func(obj)\n return value\n\n\ndef get_installed_version(dist_name, working_set=None):\n \"\"\"Get the installed version of dist_name avoiding pkg_resources cache\"\"\"\n # Create a requirement that we'll look for inside of setuptools.\n req = pkg_resources.Requirement.parse(dist_name)\n\n if working_set is None:\n # We want to avoid having this cached, so we need to construct a new\n # working set each time.\n working_set = pkg_resources.WorkingSet()\n\n # Get the installed distribution from our working set\n dist = working_set.find(req)\n\n # Check to see if we got an installed distribution or not, if we did\n # we want to return it's version.\n return dist.version if dist else None\n\n\ndef consume(iterator):\n \"\"\"Consume an iterable at C speed.\"\"\"\n deque(iterator, maxlen=0)\n\n\n# Simulates an enum\ndef enum(*sequential, **named):\n enums = dict(zip(sequential, range(len(sequential))), **named)\n reverse = {value: key for key, value in enums.items()}\n enums['reverse_mapping'] = reverse\n return type('Enum', (), enums)\n\n\ndef split_auth_from_netloc(netloc):\n \"\"\"\n Parse out and remove the auth information from a netloc.\n\n Returns: (netloc, (username, password)).\n \"\"\"\n if '@' not in netloc:\n return netloc, (None, None)\n\n # Split from the right because that's how urllib.parse.urlsplit()\n # behaves if more than one @ is present (which can be checked using\n # the password attribute of urlsplit()'s return value).\n auth, netloc = netloc.rsplit('@', 1)\n if ':' in auth:\n # Split from the left because that's how urllib.parse.urlsplit()\n # behaves if more than one : is present (which again can be checked\n # using the password attribute of the return value)\n user_pass = auth.split(':', 1)\n else:\n user_pass = auth, None\n\n user_pass = tuple(\n None if x is None else urllib_unquote(x) for x in user_pass\n )\n\n return netloc, user_pass\n\n\ndef redact_netloc(netloc):\n # type: (str) -> str\n \"\"\"\n Replace the password in a netloc with \"****\", if it exists.\n\n For example, \"user:[email protected]\" returns \"user:****@example.com\".\n \"\"\"\n netloc, (user, password) = split_auth_from_netloc(netloc)\n if user is None:\n return netloc\n password = '' if password is None else ':****'\n return '{user}{password}@{netloc}'.format(user=urllib_parse.quote(user),\n password=password,\n netloc=netloc)\n\n\ndef _transform_url(url, transform_netloc):\n purl = urllib_parse.urlsplit(url)\n netloc = transform_netloc(purl.netloc)\n # stripped url\n url_pieces = (\n purl.scheme, netloc, purl.path, purl.query, purl.fragment\n )\n surl = 
urllib_parse.urlunsplit(url_pieces)\n return surl\n\n\ndef _get_netloc(netloc):\n return split_auth_from_netloc(netloc)[0]\n\n\ndef remove_auth_from_url(url):\n # type: (str) -> str\n # Return a copy of url with 'username:password@' removed.\n # username/pass params are passed to subversion through flags\n # and are not recognized in the url.\n return _transform_url(url, _get_netloc)\n\n\ndef redact_password_from_url(url):\n # type: (str) -> str\n \"\"\"Replace the password in a given url with ****.\"\"\"\n return _transform_url(url, redact_netloc)\n\n\ndef protect_pip_from_modification_on_windows(modifying_pip):\n \"\"\"Protection of pip.exe from modification on Windows\n\n On Windows, any operation modifying pip should be run as:\n python -m pip ...\n \"\"\"\n pip_names = [\n \"pip.exe\",\n \"pip{}.exe\".format(sys.version_info[0]),\n \"pip{}.{}.exe\".format(*sys.version_info[:2])\n ]\n\n # See https://github.com/pypa/pip/issues/1299 for more discussion\n should_show_use_python_msg = (\n modifying_pip and\n WINDOWS and\n os.path.basename(sys.argv[0]) in pip_names\n )\n\n if should_show_use_python_msg:\n new_command = [\n sys.executable, \"-m\", \"pip\"\n ] + sys.argv[1:]\n raise CommandError(\n 'To modify pip, please run the following command:\\n{}'\n .format(\" \".join(new_command))\n )\n",
"path": "src/pip/_internal/utils/misc.py"
}
] | [
{
"content": "from __future__ import absolute_import\n\nimport contextlib\nimport errno\nimport io\n# we have a submodule named 'logging' which would shadow this if we used the\n# regular name:\nimport logging as std_logging\nimport os\nimport posixpath\nimport re\nimport shutil\nimport stat\nimport subprocess\nimport sys\nimport tarfile\nimport zipfile\nfrom collections import deque\n\nfrom pip._vendor import pkg_resources\n# NOTE: retrying is not annotated in typeshed as on 2017-07-17, which is\n# why we ignore the type on this import.\nfrom pip._vendor.retrying import retry # type: ignore\nfrom pip._vendor.six import PY2\nfrom pip._vendor.six.moves import input, shlex_quote\nfrom pip._vendor.six.moves.urllib import parse as urllib_parse\nfrom pip._vendor.six.moves.urllib.parse import unquote as urllib_unquote\n\nfrom pip._internal.exceptions import CommandError, InstallationError\nfrom pip._internal.locations import (\n running_under_virtualenv, site_packages, user_site, virtualenv_no_global,\n write_delete_marker_file,\n)\nfrom pip._internal.utils.compat import (\n WINDOWS, console_to_str, expanduser, stdlib_pkgs,\n)\nfrom pip._internal.utils.typing import MYPY_CHECK_RUNNING\n\nif PY2:\n from io import BytesIO as StringIO\nelse:\n from io import StringIO\n\nif MYPY_CHECK_RUNNING:\n from typing import (\n Optional, Tuple, Iterable, List, Match, Union, Any, Mapping, Text,\n AnyStr, Container\n )\n from pip._vendor.pkg_resources import Distribution\n from pip._internal.models.link import Link\n from pip._internal.utils.ui import SpinnerInterface\n\n\n__all__ = ['rmtree', 'display_path', 'backup_dir',\n 'ask', 'splitext',\n 'format_size', 'is_installable_dir',\n 'is_svn_page', 'file_contents',\n 'split_leading_dir', 'has_leading_dir',\n 'normalize_path',\n 'renames', 'get_prog',\n 'unzip_file', 'untar_file', 'unpack_file', 'call_subprocess',\n 'captured_stdout', 'ensure_dir',\n 'ARCHIVE_EXTENSIONS', 'SUPPORTED_EXTENSIONS', 'WHEEL_EXTENSION',\n 'get_installed_version', 'remove_auth_from_url']\n\n\nlogger = std_logging.getLogger(__name__)\nsubprocess_logger = std_logging.getLogger('pip.subprocessor')\n\nLOG_DIVIDER = '----------------------------------------'\n\nWHEEL_EXTENSION = '.whl'\nBZ2_EXTENSIONS = ('.tar.bz2', '.tbz')\nXZ_EXTENSIONS = ('.tar.xz', '.txz', '.tlz', '.tar.lz', '.tar.lzma')\nZIP_EXTENSIONS = ('.zip', WHEEL_EXTENSION)\nTAR_EXTENSIONS = ('.tar.gz', '.tgz', '.tar')\nARCHIVE_EXTENSIONS = (\n ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS + XZ_EXTENSIONS)\nSUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS\n\ntry:\n import bz2 # noqa\n SUPPORTED_EXTENSIONS += BZ2_EXTENSIONS\nexcept ImportError:\n logger.debug('bz2 module is not available')\n\ntry:\n # Only for Python 3.3+\n import lzma # noqa\n SUPPORTED_EXTENSIONS += XZ_EXTENSIONS\nexcept ImportError:\n logger.debug('lzma module is not available')\n\n\ndef ensure_dir(path):\n # type: (AnyStr) -> None\n \"\"\"os.path.makedirs without EEXIST.\"\"\"\n try:\n os.makedirs(path)\n except OSError as e:\n # Windows can raise spurious ENOTEMPTY errors. 
See #6426.\n if e.errno != errno.EEXIST and e.errno != errno.ENOTEMPTY:\n raise\n\n\ndef get_prog():\n # type: () -> str\n try:\n prog = os.path.basename(sys.argv[0])\n if prog in ('__main__.py', '-c'):\n return \"%s -m pip\" % sys.executable\n else:\n return prog\n except (AttributeError, TypeError, IndexError):\n pass\n return 'pip'\n\n\n# Retry every half second for up to 3 seconds\n@retry(stop_max_delay=3000, wait_fixed=500)\ndef rmtree(dir, ignore_errors=False):\n # type: (str, bool) -> None\n shutil.rmtree(dir, ignore_errors=ignore_errors,\n onerror=rmtree_errorhandler)\n\n\ndef rmtree_errorhandler(func, path, exc_info):\n \"\"\"On Windows, the files in .svn are read-only, so when rmtree() tries to\n remove them, an exception is thrown. We catch that here, remove the\n read-only attribute, and hopefully continue without problems.\"\"\"\n # if file type currently read only\n if os.stat(path).st_mode & stat.S_IREAD:\n # convert to read/write\n os.chmod(path, stat.S_IWRITE)\n # use the original function to repeat the operation\n func(path)\n return\n else:\n raise\n\n\ndef display_path(path):\n # type: (Union[str, Text]) -> str\n \"\"\"Gives the display value for a given path, making it relative to cwd\n if possible.\"\"\"\n path = os.path.normcase(os.path.abspath(path))\n if sys.version_info[0] == 2:\n path = path.decode(sys.getfilesystemencoding(), 'replace')\n path = path.encode(sys.getdefaultencoding(), 'replace')\n if path.startswith(os.getcwd() + os.path.sep):\n path = '.' + path[len(os.getcwd()):]\n return path\n\n\ndef backup_dir(dir, ext='.bak'):\n # type: (str, str) -> str\n \"\"\"Figure out the name of a directory to back up the given dir to\n (adding .bak, .bak2, etc)\"\"\"\n n = 1\n extension = ext\n while os.path.exists(dir + extension):\n n += 1\n extension = ext + str(n)\n return dir + extension\n\n\ndef ask_path_exists(message, options):\n # type: (str, Iterable[str]) -> str\n for action in os.environ.get('PIP_EXISTS_ACTION', '').split():\n if action in options:\n return action\n return ask(message, options)\n\n\ndef ask(message, options):\n # type: (str, Iterable[str]) -> str\n \"\"\"Ask the message interactively, with the given possible responses\"\"\"\n while 1:\n if os.environ.get('PIP_NO_INPUT'):\n raise Exception(\n 'No input was expected ($PIP_NO_INPUT set); question: %s' %\n message\n )\n response = input(message)\n response = response.strip().lower()\n if response not in options:\n print(\n 'Your response (%r) was not one of the expected responses: '\n '%s' % (response, ', '.join(options))\n )\n else:\n return response\n\n\ndef format_size(bytes):\n # type: (float) -> str\n if bytes > 1000 * 1000:\n return '%.1fMB' % (bytes / 1000.0 / 1000)\n elif bytes > 10 * 1000:\n return '%ikB' % (bytes / 1000)\n elif bytes > 1000:\n return '%.1fkB' % (bytes / 1000.0)\n else:\n return '%ibytes' % bytes\n\n\ndef is_installable_dir(path):\n # type: (str) -> bool\n \"\"\"Is path is a directory containing setup.py or pyproject.toml?\n \"\"\"\n if not os.path.isdir(path):\n return False\n setup_py = os.path.join(path, 'setup.py')\n if os.path.isfile(setup_py):\n return True\n pyproject_toml = os.path.join(path, 'pyproject.toml')\n if os.path.isfile(pyproject_toml):\n return True\n return False\n\n\ndef is_svn_page(html):\n # type: (Union[str, Text]) -> Optional[Match[Union[str, Text]]]\n \"\"\"\n Returns true if the page appears to be the index page of an svn repository\n \"\"\"\n return (re.search(r'<title>[^<]*Revision \\d+:', html) and\n re.search(r'Powered by 
(?:<a[^>]*?>)?Subversion', html, re.I))\n\n\ndef file_contents(filename):\n # type: (str) -> Text\n with open(filename, 'rb') as fp:\n return fp.read().decode('utf-8')\n\n\ndef read_chunks(file, size=io.DEFAULT_BUFFER_SIZE):\n \"\"\"Yield pieces of data from a file-like object until EOF.\"\"\"\n while True:\n chunk = file.read(size)\n if not chunk:\n break\n yield chunk\n\n\ndef split_leading_dir(path):\n # type: (Union[str, Text]) -> List[Union[str, Text]]\n path = path.lstrip('/').lstrip('\\\\')\n if '/' in path and (('\\\\' in path and path.find('/') < path.find('\\\\')) or\n '\\\\' not in path):\n return path.split('/', 1)\n elif '\\\\' in path:\n return path.split('\\\\', 1)\n else:\n return [path, '']\n\n\ndef has_leading_dir(paths):\n # type: (Iterable[Union[str, Text]]) -> bool\n \"\"\"Returns true if all the paths have the same leading path name\n (i.e., everything is in one subdirectory in an archive)\"\"\"\n common_prefix = None\n for path in paths:\n prefix, rest = split_leading_dir(path)\n if not prefix:\n return False\n elif common_prefix is None:\n common_prefix = prefix\n elif prefix != common_prefix:\n return False\n return True\n\n\ndef normalize_path(path, resolve_symlinks=True):\n # type: (str, bool) -> str\n \"\"\"\n Convert a path to its canonical, case-normalized, absolute version.\n\n \"\"\"\n path = expanduser(path)\n if resolve_symlinks:\n path = os.path.realpath(path)\n else:\n path = os.path.abspath(path)\n return os.path.normcase(path)\n\n\ndef splitext(path):\n # type: (str) -> Tuple[str, str]\n \"\"\"Like os.path.splitext, but take off .tar too\"\"\"\n base, ext = posixpath.splitext(path)\n if base.lower().endswith('.tar'):\n ext = base[-4:] + ext\n base = base[:-4]\n return base, ext\n\n\ndef renames(old, new):\n # type: (str, str) -> None\n \"\"\"Like os.renames(), but handles renaming across devices.\"\"\"\n # Implementation borrowed from os.renames().\n head, tail = os.path.split(new)\n if head and tail and not os.path.exists(head):\n os.makedirs(head)\n\n shutil.move(old, new)\n\n head, tail = os.path.split(old)\n if head and tail:\n try:\n os.removedirs(head)\n except OSError:\n pass\n\n\ndef is_local(path):\n # type: (str) -> bool\n \"\"\"\n Return True if path is within sys.prefix, if we're running in a virtualenv.\n\n If we're not in a virtualenv, all paths are considered \"local.\"\n\n \"\"\"\n if not running_under_virtualenv():\n return True\n return normalize_path(path).startswith(normalize_path(sys.prefix))\n\n\ndef dist_is_local(dist):\n # type: (Distribution) -> bool\n \"\"\"\n Return True if given Distribution object is installed locally\n (i.e. 
within current virtualenv).\n\n Always True if we're not in a virtualenv.\n\n \"\"\"\n return is_local(dist_location(dist))\n\n\ndef dist_in_usersite(dist):\n # type: (Distribution) -> bool\n \"\"\"\n Return True if given Distribution is installed in user site.\n \"\"\"\n norm_path = normalize_path(dist_location(dist))\n return norm_path.startswith(normalize_path(user_site))\n\n\ndef dist_in_site_packages(dist):\n # type: (Distribution) -> bool\n \"\"\"\n Return True if given Distribution is installed in\n sysconfig.get_python_lib().\n \"\"\"\n return normalize_path(\n dist_location(dist)\n ).startswith(normalize_path(site_packages))\n\n\ndef dist_is_editable(dist):\n # type: (Distribution) -> bool\n \"\"\"\n Return True if given Distribution is an editable install.\n \"\"\"\n for path_item in sys.path:\n egg_link = os.path.join(path_item, dist.project_name + '.egg-link')\n if os.path.isfile(egg_link):\n return True\n return False\n\n\ndef get_installed_distributions(local_only=True,\n skip=stdlib_pkgs,\n include_editables=True,\n editables_only=False,\n user_only=False):\n # type: (bool, Container[str], bool, bool, bool) -> List[Distribution]\n \"\"\"\n Return a list of installed Distribution objects.\n\n If ``local_only`` is True (default), only return installations\n local to the current virtualenv, if in a virtualenv.\n\n ``skip`` argument is an iterable of lower-case project names to\n ignore; defaults to stdlib_pkgs\n\n If ``include_editables`` is False, don't report editables.\n\n If ``editables_only`` is True , only report editables.\n\n If ``user_only`` is True , only report installations in the user\n site directory.\n\n \"\"\"\n if local_only:\n local_test = dist_is_local\n else:\n def local_test(d):\n return True\n\n if include_editables:\n def editable_test(d):\n return True\n else:\n def editable_test(d):\n return not dist_is_editable(d)\n\n if editables_only:\n def editables_only_test(d):\n return dist_is_editable(d)\n else:\n def editables_only_test(d):\n return True\n\n if user_only:\n user_test = dist_in_usersite\n else:\n def user_test(d):\n return True\n\n # because of pkg_resources vendoring, mypy cannot find stub in typeshed\n return [d for d in pkg_resources.working_set # type: ignore\n if local_test(d) and\n d.key not in skip and\n editable_test(d) and\n editables_only_test(d) and\n user_test(d)\n ]\n\n\ndef egg_link_path(dist):\n # type: (Distribution) -> Optional[str]\n \"\"\"\n Return the path for the .egg-link file if it exists, otherwise, None.\n\n There's 3 scenarios:\n 1) not in a virtualenv\n try to find in site.USER_SITE, then site_packages\n 2) in a no-global virtualenv\n try to find in site_packages\n 3) in a yes-global virtualenv\n try to find in site_packages, then site.USER_SITE\n (don't look in global location)\n\n For #1 and #3, there could be odd cases, where there's an egg-link in 2\n locations.\n\n This method will just return the first one found.\n \"\"\"\n sites = []\n if running_under_virtualenv():\n if virtualenv_no_global():\n sites.append(site_packages)\n else:\n sites.append(site_packages)\n if user_site:\n sites.append(user_site)\n else:\n if user_site:\n sites.append(user_site)\n sites.append(site_packages)\n\n for site in sites:\n egglink = os.path.join(site, dist.project_name) + '.egg-link'\n if os.path.isfile(egglink):\n return egglink\n return None\n\n\ndef dist_location(dist):\n # type: (Distribution) -> str\n \"\"\"\n Get the site-packages location of this distribution. 
Generally\n this is dist.location, except in the case of develop-installed\n packages, where dist.location is the source code location, and we\n want to know where the egg-link file is.\n\n \"\"\"\n egg_link = egg_link_path(dist)\n if egg_link:\n return egg_link\n return dist.location\n\n\ndef current_umask():\n \"\"\"Get the current umask which involves having to set it temporarily.\"\"\"\n mask = os.umask(0)\n os.umask(mask)\n return mask\n\n\ndef unzip_file(filename, location, flatten=True):\n # type: (str, str, bool) -> None\n \"\"\"\n Unzip the file (with path `filename`) to the destination `location`. All\n files are written based on system defaults and umask (i.e. permissions are\n not preserved), except that regular file members with any execute\n permissions (user, group, or world) have \"chmod +x\" applied after being\n written. Note that for windows, any execute changes using os.chmod are\n no-ops per the python docs.\n \"\"\"\n ensure_dir(location)\n zipfp = open(filename, 'rb')\n try:\n zip = zipfile.ZipFile(zipfp, allowZip64=True)\n leading = has_leading_dir(zip.namelist()) and flatten\n for info in zip.infolist():\n name = info.filename\n fn = name\n if leading:\n fn = split_leading_dir(name)[1]\n fn = os.path.join(location, fn)\n dir = os.path.dirname(fn)\n if fn.endswith('/') or fn.endswith('\\\\'):\n # A directory\n ensure_dir(fn)\n else:\n ensure_dir(dir)\n # Don't use read() to avoid allocating an arbitrarily large\n # chunk of memory for the file's content\n fp = zip.open(name)\n try:\n with open(fn, 'wb') as destfp:\n shutil.copyfileobj(fp, destfp)\n finally:\n fp.close()\n mode = info.external_attr >> 16\n # if mode and regular file and any execute permissions for\n # user/group/world?\n if mode and stat.S_ISREG(mode) and mode & 0o111:\n # make dest file have execute for user/group/world\n # (chmod +x) no-op on windows per python docs\n os.chmod(fn, (0o777 - current_umask() | 0o111))\n finally:\n zipfp.close()\n\n\ndef untar_file(filename, location):\n # type: (str, str) -> None\n \"\"\"\n Untar the file (with path `filename`) to the destination `location`.\n All files are written based on system defaults and umask (i.e. permissions\n are not preserved), except that regular file members with any execute\n permissions (user, group, or world) have \"chmod +x\" applied after being\n written. 
Note that for windows, any execute changes using os.chmod are\n no-ops per the python docs.\n \"\"\"\n ensure_dir(location)\n if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'):\n mode = 'r:gz'\n elif filename.lower().endswith(BZ2_EXTENSIONS):\n mode = 'r:bz2'\n elif filename.lower().endswith(XZ_EXTENSIONS):\n mode = 'r:xz'\n elif filename.lower().endswith('.tar'):\n mode = 'r'\n else:\n logger.warning(\n 'Cannot determine compression type for file %s', filename,\n )\n mode = 'r:*'\n tar = tarfile.open(filename, mode)\n try:\n leading = has_leading_dir([\n member.name for member in tar.getmembers()\n ])\n for member in tar.getmembers():\n fn = member.name\n if leading:\n # https://github.com/python/mypy/issues/1174\n fn = split_leading_dir(fn)[1] # type: ignore\n path = os.path.join(location, fn)\n if member.isdir():\n ensure_dir(path)\n elif member.issym():\n try:\n # https://github.com/python/typeshed/issues/2673\n tar._extract_member(member, path) # type: ignore\n except Exception as exc:\n # Some corrupt tar files seem to produce this\n # (specifically bad symlinks)\n logger.warning(\n 'In the tar file %s the member %s is invalid: %s',\n filename, member.name, exc,\n )\n continue\n else:\n try:\n fp = tar.extractfile(member)\n except (KeyError, AttributeError) as exc:\n # Some corrupt tar files seem to produce this\n # (specifically bad symlinks)\n logger.warning(\n 'In the tar file %s the member %s is invalid: %s',\n filename, member.name, exc,\n )\n continue\n ensure_dir(os.path.dirname(path))\n with open(path, 'wb') as destfp:\n shutil.copyfileobj(fp, destfp)\n fp.close()\n # Update the timestamp (useful for cython compiled files)\n # https://github.com/python/typeshed/issues/2673\n tar.utime(member, path) # type: ignore\n # member have any execute permissions for user/group/world?\n if member.mode & 0o111:\n # make dest file have execute for user/group/world\n # no-op on windows per python docs\n os.chmod(path, (0o777 - current_umask() | 0o111))\n finally:\n tar.close()\n\n\ndef unpack_file(\n filename, # type: str\n location, # type: str\n content_type, # type: Optional[str]\n link # type: Optional[Link]\n):\n # type: (...) 
-> None\n filename = os.path.realpath(filename)\n if (content_type == 'application/zip' or\n filename.lower().endswith(ZIP_EXTENSIONS) or\n zipfile.is_zipfile(filename)):\n unzip_file(\n filename,\n location,\n flatten=not filename.endswith('.whl')\n )\n elif (content_type == 'application/x-gzip' or\n tarfile.is_tarfile(filename) or\n filename.lower().endswith(\n TAR_EXTENSIONS + BZ2_EXTENSIONS + XZ_EXTENSIONS)):\n untar_file(filename, location)\n elif (content_type and content_type.startswith('text/html') and\n is_svn_page(file_contents(filename))):\n # We don't really care about this\n from pip._internal.vcs.subversion import Subversion\n Subversion('svn+' + link.url).unpack(location)\n else:\n # FIXME: handle?\n # FIXME: magic signatures?\n logger.critical(\n 'Cannot unpack file %s (downloaded from %s, content-type: %s); '\n 'cannot detect archive format',\n filename, location, content_type,\n )\n raise InstallationError(\n 'Cannot determine archive format of %s' % location\n )\n\n\ndef format_command_args(args):\n # type: (List[str]) -> str\n \"\"\"\n Format command arguments for display.\n \"\"\"\n return ' '.join(shlex_quote(arg) for arg in args)\n\n\ndef call_subprocess(\n cmd, # type: List[str]\n show_stdout=False, # type: bool\n cwd=None, # type: Optional[str]\n on_returncode='raise', # type: str\n extra_ok_returncodes=None, # type: Optional[Iterable[int]]\n command_desc=None, # type: Optional[str]\n extra_environ=None, # type: Optional[Mapping[str, Any]]\n unset_environ=None, # type: Optional[Iterable[str]]\n spinner=None # type: Optional[SpinnerInterface]\n):\n # type: (...) -> Optional[Text]\n \"\"\"\n Args:\n show_stdout: if true, use INFO to log the subprocess's stderr and\n stdout streams. Otherwise, use DEBUG. Defaults to False.\n extra_ok_returncodes: an iterable of integer return codes that are\n acceptable, in addition to 0. Defaults to None, which means [].\n unset_environ: an iterable of environment variable names to unset\n prior to calling subprocess.Popen().\n \"\"\"\n if extra_ok_returncodes is None:\n extra_ok_returncodes = []\n if unset_environ is None:\n unset_environ = []\n # Most places in pip use show_stdout=False. What this means is--\n #\n # - We connect the child's output (combined stderr and stdout) to a\n # single pipe, which we read.\n # - We log this output to stderr at DEBUG level as it is received.\n # - If DEBUG logging isn't enabled (e.g. if --verbose logging wasn't\n # requested), then we show a spinner so the user can still see the\n # subprocess is in progress.\n # - If the subprocess exits with an error, we log the output to stderr\n # at ERROR level if it hasn't already been displayed to the console\n # (e.g. if --verbose logging wasn't enabled). This way we don't log\n # the output to the console twice.\n #\n # If show_stdout=True, then the above is still done, but with DEBUG\n # replaced by INFO.\n if show_stdout:\n # Then log the subprocess output at INFO level.\n log_subprocess = subprocess_logger.info\n used_level = std_logging.INFO\n else:\n # Then log the subprocess output using DEBUG. 
This also ensures\n # it will be logged to the log file (aka user_log), if enabled.\n log_subprocess = subprocess_logger.debug\n used_level = std_logging.DEBUG\n\n # Whether the subprocess will be visible in the console.\n showing_subprocess = subprocess_logger.getEffectiveLevel() <= used_level\n\n # Only use the spinner if we're not showing the subprocess output\n # and we have a spinner.\n use_spinner = not showing_subprocess and spinner is not None\n\n if command_desc is None:\n command_desc = format_command_args(cmd)\n\n log_subprocess(\"Running command %s\", command_desc)\n env = os.environ.copy()\n if extra_environ:\n env.update(extra_environ)\n for name in unset_environ:\n env.pop(name, None)\n try:\n proc = subprocess.Popen(\n cmd, stderr=subprocess.STDOUT, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, cwd=cwd, env=env,\n )\n proc.stdin.close()\n except Exception as exc:\n subprocess_logger.critical(\n \"Error %s while executing command %s\", exc, command_desc,\n )\n raise\n all_output = []\n while True:\n line = console_to_str(proc.stdout.readline())\n if not line:\n break\n line = line.rstrip()\n all_output.append(line + '\\n')\n\n # Show the line immediately.\n log_subprocess(line)\n # Update the spinner.\n if use_spinner:\n spinner.spin()\n try:\n proc.wait()\n finally:\n if proc.stdout:\n proc.stdout.close()\n proc_had_error = (\n proc.returncode and proc.returncode not in extra_ok_returncodes\n )\n if use_spinner:\n if proc_had_error:\n spinner.finish(\"error\")\n else:\n spinner.finish(\"done\")\n if proc_had_error:\n if on_returncode == 'raise':\n if not showing_subprocess:\n # Then the subprocess streams haven't been logged to the\n # console yet.\n subprocess_logger.error(\n 'Complete output from command %s:', command_desc,\n )\n # The all_output value already ends in a newline.\n subprocess_logger.error(''.join(all_output) + LOG_DIVIDER)\n raise InstallationError(\n 'Command \"%s\" failed with error code %s in %s'\n % (command_desc, proc.returncode, cwd))\n elif on_returncode == 'warn':\n subprocess_logger.warning(\n 'Command \"%s\" had error code %s in %s',\n command_desc, proc.returncode, cwd,\n )\n elif on_returncode == 'ignore':\n pass\n else:\n raise ValueError('Invalid value: on_returncode=%s' %\n repr(on_returncode))\n return ''.join(all_output)\n\n\ndef _make_build_dir(build_dir):\n os.makedirs(build_dir)\n write_delete_marker_file(build_dir)\n\n\nclass FakeFile(object):\n \"\"\"Wrap a list of lines in an object with readline() to make\n ConfigParser happy.\"\"\"\n def __init__(self, lines):\n self._gen = (l for l in lines)\n\n def readline(self):\n try:\n try:\n return next(self._gen)\n except NameError:\n return self._gen.next()\n except StopIteration:\n return ''\n\n def __iter__(self):\n return self._gen\n\n\nclass StreamWrapper(StringIO):\n\n @classmethod\n def from_stream(cls, orig_stream):\n cls.orig_stream = orig_stream\n return cls()\n\n # compileall.compile_dir() needs stdout.encoding to print to stdout\n @property\n def encoding(self):\n return self.orig_stream.encoding\n\n\[email protected]\ndef captured_output(stream_name):\n \"\"\"Return a context manager used by captured_stdout/stdin/stderr\n that temporarily replaces the sys stream *stream_name* with a StringIO.\n\n Taken from Lib/support/__init__.py in the CPython repo.\n \"\"\"\n orig_stdout = getattr(sys, stream_name)\n setattr(sys, stream_name, StreamWrapper.from_stream(orig_stdout))\n try:\n yield getattr(sys, stream_name)\n finally:\n setattr(sys, stream_name, orig_stdout)\n\n\ndef 
captured_stdout():\n \"\"\"Capture the output of sys.stdout:\n\n with captured_stdout() as stdout:\n print('hello')\n self.assertEqual(stdout.getvalue(), 'hello\\n')\n\n Taken from Lib/support/__init__.py in the CPython repo.\n \"\"\"\n return captured_output('stdout')\n\n\ndef captured_stderr():\n \"\"\"\n See captured_stdout().\n \"\"\"\n return captured_output('stderr')\n\n\nclass cached_property(object):\n \"\"\"A property that is only computed once per instance and then replaces\n itself with an ordinary attribute. Deleting the attribute resets the\n property.\n\n Source: https://github.com/bottlepy/bottle/blob/0.11.5/bottle.py#L175\n \"\"\"\n\n def __init__(self, func):\n self.__doc__ = getattr(func, '__doc__')\n self.func = func\n\n def __get__(self, obj, cls):\n if obj is None:\n # We're being accessed from the class itself, not from an object\n return self\n value = obj.__dict__[self.func.__name__] = self.func(obj)\n return value\n\n\ndef get_installed_version(dist_name, working_set=None):\n \"\"\"Get the installed version of dist_name avoiding pkg_resources cache\"\"\"\n # Create a requirement that we'll look for inside of setuptools.\n req = pkg_resources.Requirement.parse(dist_name)\n\n if working_set is None:\n # We want to avoid having this cached, so we need to construct a new\n # working set each time.\n working_set = pkg_resources.WorkingSet()\n\n # Get the installed distribution from our working set\n dist = working_set.find(req)\n\n # Check to see if we got an installed distribution or not, if we did\n # we want to return it's version.\n return dist.version if dist else None\n\n\ndef consume(iterator):\n \"\"\"Consume an iterable at C speed.\"\"\"\n deque(iterator, maxlen=0)\n\n\n# Simulates an enum\ndef enum(*sequential, **named):\n enums = dict(zip(sequential, range(len(sequential))), **named)\n reverse = {value: key for key, value in enums.items()}\n enums['reverse_mapping'] = reverse\n return type('Enum', (), enums)\n\n\ndef split_auth_from_netloc(netloc):\n \"\"\"\n Parse out and remove the auth information from a netloc.\n\n Returns: (netloc, (username, password)).\n \"\"\"\n if '@' not in netloc:\n return netloc, (None, None)\n\n # Split from the right because that's how urllib.parse.urlsplit()\n # behaves if more than one @ is present (which can be checked using\n # the password attribute of urlsplit()'s return value).\n auth, netloc = netloc.rsplit('@', 1)\n if ':' in auth:\n # Split from the left because that's how urllib.parse.urlsplit()\n # behaves if more than one : is present (which again can be checked\n # using the password attribute of the return value)\n user_pass = auth.split(':', 1)\n else:\n user_pass = auth, None\n\n user_pass = tuple(\n None if x is None else urllib_unquote(x) for x in user_pass\n )\n\n return netloc, user_pass\n\n\ndef redact_netloc(netloc):\n # type: (str) -> str\n \"\"\"\n Replace the password in a netloc with \"****\", if it exists.\n\n For example, \"user:[email protected]\" returns \"user:****@example.com\".\n \"\"\"\n netloc, (user, password) = split_auth_from_netloc(netloc)\n if user is None:\n return netloc\n password = '' if password is None else ':****'\n return '{user}{password}@{netloc}'.format(user=urllib_parse.quote(user),\n password=password,\n netloc=netloc)\n\n\ndef _transform_url(url, transform_netloc):\n purl = urllib_parse.urlsplit(url)\n netloc = transform_netloc(purl.netloc)\n # stripped url\n url_pieces = (\n purl.scheme, netloc, purl.path, purl.query, purl.fragment\n )\n surl = 
urllib_parse.urlunsplit(url_pieces)\n return surl\n\n\ndef _get_netloc(netloc):\n return split_auth_from_netloc(netloc)[0]\n\n\ndef remove_auth_from_url(url):\n # type: (str) -> str\n # Return a copy of url with 'username:password@' removed.\n # username/pass params are passed to subversion through flags\n # and are not recognized in the url.\n return _transform_url(url, _get_netloc)\n\n\ndef redact_password_from_url(url):\n # type: (str) -> str\n \"\"\"Replace the password in a given url with ****.\"\"\"\n return _transform_url(url, redact_netloc)\n\n\ndef protect_pip_from_modification_on_windows(modifying_pip):\n \"\"\"Protection of pip.exe from modification on Windows\n\n On Windows, any operation modifying pip should be run as:\n python -m pip ...\n \"\"\"\n pip_names = [\n \"pip.exe\",\n \"pip{}.exe\".format(sys.version_info[0]),\n \"pip{}.{}.exe\".format(*sys.version_info[:2])\n ]\n\n # See https://github.com/pypa/pip/issues/1299 for more discussion\n should_show_use_python_msg = (\n modifying_pip and\n WINDOWS and\n os.path.basename(sys.argv[0]) in pip_names\n )\n\n if should_show_use_python_msg:\n new_command = [\n sys.executable, \"-m\", \"pip\"\n ] + sys.argv[1:]\n raise CommandError(\n 'To modify pip, please run the following command:\\n{}'\n .format(\" \".join(new_command))\n )\n",
"path": "src/pip/_internal/utils/misc.py"
}
] | diff --git a/news/6426.bugfix b/news/6426.bugfix
new file mode 100644
index 00000000000..25512b3c808
--- /dev/null
+++ b/news/6426.bugfix
@@ -0,0 +1 @@
+Make ``ensure_dir()`` also ignore ``ENOTEMPTY`` as seen on Windows.
diff --git a/src/pip/_internal/utils/misc.py b/src/pip/_internal/utils/misc.py
index ca7a529387c..7c7fc4a09b8 100644
--- a/src/pip/_internal/utils/misc.py
+++ b/src/pip/_internal/utils/misc.py
@@ -98,7 +98,8 @@ def ensure_dir(path):
try:
os.makedirs(path)
except OSError as e:
- if e.errno != errno.EEXIST:
+ # Windows can raise spurious ENOTEMPTY errors. See #6426.
+ if e.errno != errno.EEXIST and e.errno != errno.ENOTEMPTY:
raise
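
For readers skimming past the hunk, here is a standalone sketch of the helper with the broadened error handling. It simply mirrors the diff above; the real function lives in `src/pip/_internal/utils/misc.py`:

```python
import errno
import os


def ensure_dir(path):
    """os.makedirs() that tolerates EEXIST and ENOTEMPTY errors."""
    try:
        os.makedirs(path)
    except OSError as e:
        # Windows can raise spurious ENOTEMPTY errors. See #6426.
        if e.errno != errno.EEXIST and e.errno != errno.ENOTEMPTY:
            raise
```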
|
coala__coala-4276 | pytest-3.1 raises lots of warnings running our tests
Latest `pytest-3.1.x` versions raise several warnings when running our tests, mostly reporting that the deprecated `unittest` aliases `assertEquals` and `assertRaisesRegexp` should no longer be used. We should get rid of those warnings.
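
For reference, a minimal sketch (not taken from coala's test suite) of the renames these warnings point at; the legacy aliases map one-to-one onto the canonical `unittest` names, and the related `re` warning addressed in the diff below comes from inline flags such as `(?ms)` appearing anywhere but the start of the pattern:

```python
import re
import unittest


class ExampleTest(unittest.TestCase):
    def test_preferred_assert_names(self):
        # Deprecated spellings that trigger the pytest warnings:
        #   self.assertEquals(1 + 1, 2)
        #   with self.assertRaisesRegexp(ValueError, 'invalid literal'):
        #       int('x')
        # Preferred spellings:
        self.assertEqual(1 + 1, 2)
        with self.assertRaisesRegex(ValueError, 'invalid literal'):
            int('x')

    def test_inline_flags_position(self):
        # Deprecated: re.compile('x\\Z(?ms)') -- global flags not at the start.
        # Preferred: flags at the front of the pattern.
        self.assertTrue(re.compile('(?ms)x\\Z').match('x'))


if __name__ == '__main__':
    unittest.main()
```

A plain grep for `assertEquals(`, `assertRaisesRegexp(` and trailing `(?...)` flag groups is usually enough to locate the offending call sites.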
| [
{
"content": "import os\nimport platform\nimport re\nfrom functools import lru_cache\n\nfrom coala_utils.decorators import yield_once\nfrom coalib.misc.Constants import GLOBBING_SPECIAL_CHARS\n\n\ndef _end_of_set_index(string, start_index):\n \"\"\"\n Returns the position of the appropriate closing bracket for a glob set in\n string.\n\n :param string: Glob string with wildcards\n :param start_index: Index at which the set starts, meaning the position\n right behind the opening bracket\n :return: Position of appropriate closing bracket\n \"\"\"\n length = len(string)\n closing_index = start_index\n if closing_index < length and string[closing_index] == '!':\n closing_index += 1\n\n if closing_index < length: # The set cannot be closed by a bracket here.\n closing_index += 1\n\n while closing_index < length and string[closing_index] != ']':\n closing_index += 1\n\n return closing_index\n\n\ndef glob_escape(input_string):\n \"\"\"\n Escapes the given string with ``[c]`` pattern. Examples:\n\n >>> from coalib.parsing.Globbing import glob_escape\n >>> glob_escape('test (1)')\n 'test [(]1[)]'\n >>> glob_escape('test folder?')\n 'test folder[?]'\n >>> glob_escape('test*folder')\n 'test[*]folder'\n\n :param input_string: String that is to be escaped with ``[ ]``.\n :return: Escaped string in which all the special glob characters\n ``()[]|?*`` are escaped.\n \"\"\"\n return re.sub('(?P<char>[' + re.escape(GLOBBING_SPECIAL_CHARS) + '])',\n '[\\\\g<char>]', input_string)\n\n\ndef _position_is_bracketed(string, position):\n \"\"\"\n Tests whether the char at string[position] is inside a valid pair of\n brackets (and therefore loses its special meaning)\n\n :param string: Glob string with wildcards\n :param position: Position of a char in string\n :return: Whether or not the char is inside a valid set of brackets\n \"\"\"\n # Allow negative positions and trim too long ones.\n position = len(string[:position])\n\n index, length = 0, len(string)\n while index < position:\n char = string[index]\n index += 1\n if char == '[':\n closing_index = _end_of_set_index(string, index)\n if closing_index < length:\n if index <= position < closing_index:\n return True\n index = closing_index + 1\n else:\n return False\n return False\n\n\ndef _boundary_of_alternatives_indices(pattern):\n \"\"\"\n Determines the location of a set of alternatives in a glob pattern.\n Alternatives are defined by a matching set of non-bracketed parentheses.\n\n :param pattern: Glob pattern with wildcards.\n :return: Indices of the innermost set of matching non-bracketed\n parentheses in a tuple. The Index of a missing parenthesis\n will be passed as None.\n \"\"\"\n # Taking the leftmost closing parenthesis and the rightmost opening\n # parenthesis left of it ensures that the parentheses belong together and\n # the pattern is parsed correctly from the most nested section outwards.\n end_pos = None\n for match in re.finditer('\\\\)', pattern):\n if not _position_is_bracketed(pattern, match.start()):\n end_pos = match.start()\n break # Break to get leftmost.\n\n start_pos = None\n for match in re.finditer('\\\\(', pattern[:end_pos]):\n if not _position_is_bracketed(pattern, match.start()):\n start_pos = match.end()\n # No break to get rightmost.\n\n return start_pos, end_pos\n\n\n@yield_once\ndef _iter_choices(pattern):\n \"\"\"\n Iterate through each choice of an alternative. 
Splits pattern on '|'s if\n they are not bracketed.\n\n :param pattern: String of choices separated by '|'s\n :return: Iterator that yields parts of string separated by\n non-bracketed '|'s\n \"\"\"\n start_pos = 0\n split_pos_list = [match.start() for match in re.finditer('\\\\|', pattern)]\n split_pos_list.append(len(pattern))\n for end_pos in split_pos_list:\n if not _position_is_bracketed(pattern, end_pos):\n yield pattern[start_pos: end_pos]\n start_pos = end_pos + 1\n\n\n@yield_once\ndef _iter_alternatives(pattern):\n \"\"\"\n Iterates through all glob patterns that can be obtaines by combination of\n all choices for each alternative\n\n :param pattern: Glob pattern with wildcards\n :return: Iterator that yields all glob patterns without alternatives\n that can be created from the given pattern containing them.\n \"\"\"\n start_pos, end_pos = _boundary_of_alternatives_indices(pattern)\n\n if None in (start_pos, end_pos):\n yield pattern\n else:\n # Iterate through choices inside of parenthesis (separated by '|'):\n for choice in _iter_choices(pattern[start_pos: end_pos]):\n # Put glob expression back together with alternative:\n variant = pattern[:start_pos-1] + choice + pattern[end_pos+1:]\n\n # Iterate through alternatives outside of parenthesis.\n # (pattern can have more alternatives elsewhere)\n for glob_pattern in _iter_alternatives(variant):\n yield glob_pattern\n\n\ndef translate(pattern):\n \"\"\"\n Translates a pattern into a regular expression.\n\n :param pattern: Glob pattern with wildcards\n :return: Regular expression with the same meaning\n \"\"\"\n index, length = 0, len(pattern)\n regex = ''\n while index < length:\n char = pattern[index]\n index += 1\n if char == '*':\n # '**' matches everything\n if index < length and pattern[index] == '*':\n regex += '.*'\n # On Windows, '*' matches everything but the filesystem\n # separators '/' and '\\'.\n elif platform.system() == 'Windows': # pragma posix: no cover\n regex += '[^/\\\\\\\\]*'\n # On all other (~Unix-) platforms, '*' matches everything but the\n # filesystem separator, most likely '/'.\n else: # pragma nt: no cover\n regex += '[^' + re.escape(os.sep) + ']*'\n elif char == '?':\n regex += '.'\n elif char == '[':\n closing_index = _end_of_set_index(pattern, index)\n if closing_index >= length:\n regex += '\\\\['\n else:\n sequence = pattern[index:closing_index].replace('\\\\', '\\\\\\\\')\n index = closing_index+1\n if sequence[0] == '!':\n sequence = '^' + sequence[1:]\n elif sequence[0] == '^':\n sequence = '\\\\' + sequence\n regex += '[' + sequence + ']'\n else:\n regex = regex + re.escape(char)\n return regex + '\\\\Z(?ms)'\n\n\ndef fnmatch(name, globs):\n \"\"\"\n Tests whether name matches one of the given globs.\n\n :param name: File or directory name\n :param globs: Glob string with wildcards or list of globs\n :return: Boolean: Whether or not name is matched by glob\n\n Glob Syntax:\n\n - '[seq]': Matches any character in seq. Cannot be empty. Any\n special character looses its special meaning in a set.\n - '[!seq]': Matches any character not in seq. Cannot be empty. 
Any\n special character looses its special meaning in a set.\n - '(seq_a|seq_b)': Matches either sequence_a or sequence_b as a whole.\n More than two or just one sequence can be given.\n - '?': Matches any single character.\n - '*': Matches everything but os.sep.\n - '**': Matches everything.\n \"\"\"\n globs = (globs,) if isinstance(globs, str) else tuple(globs)\n\n if len(globs) == 0:\n return True\n\n name = os.path.normcase(name)\n\n return any(compiled_pattern.match(name)\n for glob in globs\n for compiled_pattern in _compile_pattern(glob))\n\n\n@lru_cache()\ndef _compile_pattern(pattern):\n return tuple(re.compile(translate(os.path.normcase(\n os.path.expanduser(pat))))\n for pat in _iter_alternatives(pattern))\n\n\ndef _absolute_flat_glob(pattern):\n \"\"\"\n Glob function for a pattern that do not contain wildcards.\n\n :pattern: File or directory path\n :return: Iterator that yields at most one valid file or dir name\n \"\"\"\n dirname, basename = os.path.split(pattern)\n\n if basename:\n if os.path.exists(pattern):\n yield pattern\n else:\n # Patterns ending with a slash should match only directories.\n if os.path.isdir(dirname):\n yield pattern\n return\n\n\ndef _iter_relative_dirs(dirname):\n \"\"\"\n Recursively iterates subdirectories of all levels from dirname\n\n :param dirname: Directory name\n :return: Iterator that yields files and directory from the given dir\n and all it's (recursive) subdirectories\n \"\"\"\n if not dirname:\n dirname = os.curdir\n try:\n files_or_dirs = os.listdir(dirname)\n except os.error:\n return\n for file_or_dir in files_or_dirs:\n yield file_or_dir\n path = os.path.join(dirname, file_or_dir)\n for sub_file_or_dir in _iter_relative_dirs(path):\n yield os.path.join(file_or_dir, sub_file_or_dir)\n\n\ndef relative_wildcard_glob(dirname, pattern):\n \"\"\"\n Non-recursive glob for one directory. Accepts wildcards.\n\n :param dirname: Directory name\n :param pattern: Glob pattern with wildcards\n :return: List of files in the dir of dirname that match the pattern\n \"\"\"\n if not dirname:\n dirname = os.curdir\n try:\n if '**' in pattern:\n names = list(_iter_relative_dirs(dirname))\n else:\n names = os.listdir(dirname)\n except OSError:\n return []\n result = []\n pattern = os.path.normcase(pattern)\n match = re.compile(translate(pattern)).match\n for name in names:\n if match(os.path.normcase(name)):\n result.append(name)\n return result\n\n\ndef relative_flat_glob(dirname, basename):\n \"\"\"\n Non-recursive glob for one directory. 
Does not accept wildcards.\n\n :param dirname: Directory name\n :param basename: Basename of a file in dir of dirname\n :return: List containing Basename if the file exists\n \"\"\"\n if os.path.exists(os.path.join(dirname, basename)):\n return [basename]\n return []\n\n\ndef relative_recursive_glob(dirname, pattern):\n \"\"\"\n Recursive Glob for one directory and all its (nested) subdirectories.\n Accepts only '**' as pattern.\n\n :param dirname: Directory name\n :param pattern: The recursive wildcard '**'\n :return: Iterator that yields all the (nested) subdirectories of the\n given dir\n \"\"\"\n assert pattern == '**'\n if dirname:\n yield pattern[:0]\n for relative_dir in _iter_relative_dirs(dirname):\n yield relative_dir\n\n\nwildcard_check_pattern = re.compile('([*?[])')\n\n\ndef has_wildcard(pattern):\n \"\"\"\n Checks whether pattern has any wildcards.\n\n :param pattern: Glob pattern that may contain wildcards\n :return: Boolean: Whether or not there are wildcards in pattern\n \"\"\"\n match = wildcard_check_pattern.search(pattern)\n return match is not None\n\n\ndef _iglob(pattern):\n dirname, basename = os.path.split(pattern)\n if not has_wildcard(pattern):\n for file in _absolute_flat_glob(pattern):\n yield file\n return\n\n if basename == '**':\n relative_glob_function = relative_recursive_glob\n elif has_wildcard(basename):\n relative_glob_function = relative_wildcard_glob\n else:\n relative_glob_function = relative_flat_glob\n\n if not dirname:\n for file in relative_glob_function(dirname, basename):\n yield file\n return\n\n # Prevent an infinite recursion if a drive or UNC path contains\n # wildcard characters (i.e. r'\\\\?\\C:').\n if dirname != pattern and has_wildcard(dirname):\n dirs = iglob(dirname)\n else:\n dirs = [dirname]\n\n for dirname in dirs:\n for name in relative_glob_function(dirname, basename):\n yield os.path.join(dirname, name)\n\n\n@yield_once\ndef iglob(pattern):\n \"\"\"\n Iterates all filesystem paths that get matched by the glob pattern.\n Syntax is equal to that of fnmatch.\n\n :param pattern: Glob pattern with wildcards\n :return: Iterator that yields all file names that match pattern\n \"\"\"\n for pat in _iter_alternatives(pattern):\n pat = os.path.expanduser(pat)\n pat = os.path.normcase(pat)\n\n if pat.endswith(os.sep):\n for name in _iglob(pat):\n yield name\n else:\n for name in _iglob(pat):\n yield name.rstrip(os.sep)\n\n\ndef glob(pattern):\n \"\"\"\n Iterates all filesystem paths that get matched by the glob pattern.\n Syntax is equal to that of fnmatch.\n\n :param pattern: Glob pattern with wildcards\n :return: List of all file names that match pattern\n \"\"\"\n return list(iglob(pattern))\n",
"path": "coalib/parsing/Globbing.py"
}
] | [
{
"content": "import os\nimport platform\nimport re\nfrom functools import lru_cache\n\nfrom coala_utils.decorators import yield_once\nfrom coalib.misc.Constants import GLOBBING_SPECIAL_CHARS\n\n\ndef _end_of_set_index(string, start_index):\n \"\"\"\n Returns the position of the appropriate closing bracket for a glob set in\n string.\n\n :param string: Glob string with wildcards\n :param start_index: Index at which the set starts, meaning the position\n right behind the opening bracket\n :return: Position of appropriate closing bracket\n \"\"\"\n length = len(string)\n closing_index = start_index\n if closing_index < length and string[closing_index] == '!':\n closing_index += 1\n\n if closing_index < length: # The set cannot be closed by a bracket here.\n closing_index += 1\n\n while closing_index < length and string[closing_index] != ']':\n closing_index += 1\n\n return closing_index\n\n\ndef glob_escape(input_string):\n \"\"\"\n Escapes the given string with ``[c]`` pattern. Examples:\n\n >>> from coalib.parsing.Globbing import glob_escape\n >>> glob_escape('test (1)')\n 'test [(]1[)]'\n >>> glob_escape('test folder?')\n 'test folder[?]'\n >>> glob_escape('test*folder')\n 'test[*]folder'\n\n :param input_string: String that is to be escaped with ``[ ]``.\n :return: Escaped string in which all the special glob characters\n ``()[]|?*`` are escaped.\n \"\"\"\n return re.sub('(?P<char>[' + re.escape(GLOBBING_SPECIAL_CHARS) + '])',\n '[\\\\g<char>]', input_string)\n\n\ndef _position_is_bracketed(string, position):\n \"\"\"\n Tests whether the char at string[position] is inside a valid pair of\n brackets (and therefore loses its special meaning)\n\n :param string: Glob string with wildcards\n :param position: Position of a char in string\n :return: Whether or not the char is inside a valid set of brackets\n \"\"\"\n # Allow negative positions and trim too long ones.\n position = len(string[:position])\n\n index, length = 0, len(string)\n while index < position:\n char = string[index]\n index += 1\n if char == '[':\n closing_index = _end_of_set_index(string, index)\n if closing_index < length:\n if index <= position < closing_index:\n return True\n index = closing_index + 1\n else:\n return False\n return False\n\n\ndef _boundary_of_alternatives_indices(pattern):\n \"\"\"\n Determines the location of a set of alternatives in a glob pattern.\n Alternatives are defined by a matching set of non-bracketed parentheses.\n\n :param pattern: Glob pattern with wildcards.\n :return: Indices of the innermost set of matching non-bracketed\n parentheses in a tuple. The Index of a missing parenthesis\n will be passed as None.\n \"\"\"\n # Taking the leftmost closing parenthesis and the rightmost opening\n # parenthesis left of it ensures that the parentheses belong together and\n # the pattern is parsed correctly from the most nested section outwards.\n end_pos = None\n for match in re.finditer('\\\\)', pattern):\n if not _position_is_bracketed(pattern, match.start()):\n end_pos = match.start()\n break # Break to get leftmost.\n\n start_pos = None\n for match in re.finditer('\\\\(', pattern[:end_pos]):\n if not _position_is_bracketed(pattern, match.start()):\n start_pos = match.end()\n # No break to get rightmost.\n\n return start_pos, end_pos\n\n\n@yield_once\ndef _iter_choices(pattern):\n \"\"\"\n Iterate through each choice of an alternative. 
Splits pattern on '|'s if\n they are not bracketed.\n\n :param pattern: String of choices separated by '|'s\n :return: Iterator that yields parts of string separated by\n non-bracketed '|'s\n \"\"\"\n start_pos = 0\n split_pos_list = [match.start() for match in re.finditer('\\\\|', pattern)]\n split_pos_list.append(len(pattern))\n for end_pos in split_pos_list:\n if not _position_is_bracketed(pattern, end_pos):\n yield pattern[start_pos: end_pos]\n start_pos = end_pos + 1\n\n\n@yield_once\ndef _iter_alternatives(pattern):\n \"\"\"\n Iterates through all glob patterns that can be obtaines by combination of\n all choices for each alternative\n\n :param pattern: Glob pattern with wildcards\n :return: Iterator that yields all glob patterns without alternatives\n that can be created from the given pattern containing them.\n \"\"\"\n start_pos, end_pos = _boundary_of_alternatives_indices(pattern)\n\n if None in (start_pos, end_pos):\n yield pattern\n else:\n # Iterate through choices inside of parenthesis (separated by '|'):\n for choice in _iter_choices(pattern[start_pos: end_pos]):\n # Put glob expression back together with alternative:\n variant = pattern[:start_pos-1] + choice + pattern[end_pos+1:]\n\n # Iterate through alternatives outside of parenthesis.\n # (pattern can have more alternatives elsewhere)\n for glob_pattern in _iter_alternatives(variant):\n yield glob_pattern\n\n\ndef translate(pattern):\n \"\"\"\n Translates a pattern into a regular expression.\n\n :param pattern: Glob pattern with wildcards\n :return: Regular expression with the same meaning\n \"\"\"\n index, length = 0, len(pattern)\n regex = ''\n while index < length:\n char = pattern[index]\n index += 1\n if char == '*':\n # '**' matches everything\n if index < length and pattern[index] == '*':\n regex += '.*'\n # On Windows, '*' matches everything but the filesystem\n # separators '/' and '\\'.\n elif platform.system() == 'Windows': # pragma posix: no cover\n regex += '[^/\\\\\\\\]*'\n # On all other (~Unix-) platforms, '*' matches everything but the\n # filesystem separator, most likely '/'.\n else: # pragma nt: no cover\n regex += '[^' + re.escape(os.sep) + ']*'\n elif char == '?':\n regex += '.'\n elif char == '[':\n closing_index = _end_of_set_index(pattern, index)\n if closing_index >= length:\n regex += '\\\\['\n else:\n sequence = pattern[index:closing_index].replace('\\\\', '\\\\\\\\')\n index = closing_index+1\n if sequence[0] == '!':\n sequence = '^' + sequence[1:]\n elif sequence[0] == '^':\n sequence = '\\\\' + sequence\n regex += '[' + sequence + ']'\n else:\n regex = regex + re.escape(char)\n return '(?ms)' + regex + '\\\\Z'\n\n\ndef fnmatch(name, globs):\n \"\"\"\n Tests whether name matches one of the given globs.\n\n :param name: File or directory name\n :param globs: Glob string with wildcards or list of globs\n :return: Boolean: Whether or not name is matched by glob\n\n Glob Syntax:\n\n - '[seq]': Matches any character in seq. Cannot be empty. Any\n special character looses its special meaning in a set.\n - '[!seq]': Matches any character not in seq. Cannot be empty. 
Any\n special character looses its special meaning in a set.\n - '(seq_a|seq_b)': Matches either sequence_a or sequence_b as a whole.\n More than two or just one sequence can be given.\n - '?': Matches any single character.\n - '*': Matches everything but os.sep.\n - '**': Matches everything.\n \"\"\"\n globs = (globs,) if isinstance(globs, str) else tuple(globs)\n\n if len(globs) == 0:\n return True\n\n name = os.path.normcase(name)\n\n return any(compiled_pattern.match(name)\n for glob in globs\n for compiled_pattern in _compile_pattern(glob))\n\n\n@lru_cache()\ndef _compile_pattern(pattern):\n return tuple(re.compile(translate(os.path.normcase(\n os.path.expanduser(pat))))\n for pat in _iter_alternatives(pattern))\n\n\ndef _absolute_flat_glob(pattern):\n \"\"\"\n Glob function for a pattern that do not contain wildcards.\n\n :pattern: File or directory path\n :return: Iterator that yields at most one valid file or dir name\n \"\"\"\n dirname, basename = os.path.split(pattern)\n\n if basename:\n if os.path.exists(pattern):\n yield pattern\n else:\n # Patterns ending with a slash should match only directories.\n if os.path.isdir(dirname):\n yield pattern\n return\n\n\ndef _iter_relative_dirs(dirname):\n \"\"\"\n Recursively iterates subdirectories of all levels from dirname\n\n :param dirname: Directory name\n :return: Iterator that yields files and directory from the given dir\n and all it's (recursive) subdirectories\n \"\"\"\n if not dirname:\n dirname = os.curdir\n try:\n files_or_dirs = os.listdir(dirname)\n except os.error:\n return\n for file_or_dir in files_or_dirs:\n yield file_or_dir\n path = os.path.join(dirname, file_or_dir)\n for sub_file_or_dir in _iter_relative_dirs(path):\n yield os.path.join(file_or_dir, sub_file_or_dir)\n\n\ndef relative_wildcard_glob(dirname, pattern):\n \"\"\"\n Non-recursive glob for one directory. Accepts wildcards.\n\n :param dirname: Directory name\n :param pattern: Glob pattern with wildcards\n :return: List of files in the dir of dirname that match the pattern\n \"\"\"\n if not dirname:\n dirname = os.curdir\n try:\n if '**' in pattern:\n names = list(_iter_relative_dirs(dirname))\n else:\n names = os.listdir(dirname)\n except OSError:\n return []\n result = []\n pattern = os.path.normcase(pattern)\n match = re.compile(translate(pattern)).match\n for name in names:\n if match(os.path.normcase(name)):\n result.append(name)\n return result\n\n\ndef relative_flat_glob(dirname, basename):\n \"\"\"\n Non-recursive glob for one directory. 
Does not accept wildcards.\n\n :param dirname: Directory name\n :param basename: Basename of a file in dir of dirname\n :return: List containing Basename if the file exists\n \"\"\"\n if os.path.exists(os.path.join(dirname, basename)):\n return [basename]\n return []\n\n\ndef relative_recursive_glob(dirname, pattern):\n \"\"\"\n Recursive Glob for one directory and all its (nested) subdirectories.\n Accepts only '**' as pattern.\n\n :param dirname: Directory name\n :param pattern: The recursive wildcard '**'\n :return: Iterator that yields all the (nested) subdirectories of the\n given dir\n \"\"\"\n assert pattern == '**'\n if dirname:\n yield pattern[:0]\n for relative_dir in _iter_relative_dirs(dirname):\n yield relative_dir\n\n\nwildcard_check_pattern = re.compile('([*?[])')\n\n\ndef has_wildcard(pattern):\n \"\"\"\n Checks whether pattern has any wildcards.\n\n :param pattern: Glob pattern that may contain wildcards\n :return: Boolean: Whether or not there are wildcards in pattern\n \"\"\"\n match = wildcard_check_pattern.search(pattern)\n return match is not None\n\n\ndef _iglob(pattern):\n dirname, basename = os.path.split(pattern)\n if not has_wildcard(pattern):\n for file in _absolute_flat_glob(pattern):\n yield file\n return\n\n if basename == '**':\n relative_glob_function = relative_recursive_glob\n elif has_wildcard(basename):\n relative_glob_function = relative_wildcard_glob\n else:\n relative_glob_function = relative_flat_glob\n\n if not dirname:\n for file in relative_glob_function(dirname, basename):\n yield file\n return\n\n # Prevent an infinite recursion if a drive or UNC path contains\n # wildcard characters (i.e. r'\\\\?\\C:').\n if dirname != pattern and has_wildcard(dirname):\n dirs = iglob(dirname)\n else:\n dirs = [dirname]\n\n for dirname in dirs:\n for name in relative_glob_function(dirname, basename):\n yield os.path.join(dirname, name)\n\n\n@yield_once\ndef iglob(pattern):\n \"\"\"\n Iterates all filesystem paths that get matched by the glob pattern.\n Syntax is equal to that of fnmatch.\n\n :param pattern: Glob pattern with wildcards\n :return: Iterator that yields all file names that match pattern\n \"\"\"\n for pat in _iter_alternatives(pattern):\n pat = os.path.expanduser(pat)\n pat = os.path.normcase(pat)\n\n if pat.endswith(os.sep):\n for name in _iglob(pat):\n yield name\n else:\n for name in _iglob(pat):\n yield name.rstrip(os.sep)\n\n\ndef glob(pattern):\n \"\"\"\n Iterates all filesystem paths that get matched by the glob pattern.\n Syntax is equal to that of fnmatch.\n\n :param pattern: Glob pattern with wildcards\n :return: List of all file names that match pattern\n \"\"\"\n return list(iglob(pattern))\n",
"path": "coalib/parsing/Globbing.py"
}
] | diff --git a/coalib/parsing/Globbing.py b/coalib/parsing/Globbing.py
index c31726a861..0e64920688 100644
--- a/coalib/parsing/Globbing.py
+++ b/coalib/parsing/Globbing.py
@@ -191,7 +191,7 @@ def translate(pattern):
regex += '[' + sequence + ']'
else:
regex = regex + re.escape(char)
- return regex + '\\Z(?ms)'
+ return '(?ms)' + regex + '\\Z'
def fnmatch(name, globs):
diff --git a/test-requirements.txt b/test-requirements.txt
index 05831f5b12..b622eb08e7 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -2,7 +2,7 @@ argcomplete~=1.8
coverage~=4.3.4
codecov~=2.0.5
freezegun~=0.3.9
-pytest~=3.0
+pytest~=3.1.1
pytest-cov~=2.2
pytest-env~=0.6.0
pytest-mock~=1.1
diff --git a/tests/bearlib/languages/LanguageTest.py b/tests/bearlib/languages/LanguageTest.py
index 84dba7d9e5..71a5d2e684 100644
--- a/tests/bearlib/languages/LanguageTest.py
+++ b/tests/bearlib/languages/LanguageTest.py
@@ -21,9 +21,9 @@ def tearDown(self):
pass
def test_invalid_attribute(self):
- with self.assertRaisesRegexp(AttributeError, 'not a valid attribute'):
+ with self.assertRaisesRegex(AttributeError, 'not a valid attribute'):
self.lang_cpp.not_an_attribute
def test_attribute_list_empy(self):
- with self.assertRaisesRegexp(AttributeError, 'no available attribute'):
+ with self.assertRaisesRegex(AttributeError, 'no available attribute'):
self.lang_unknown.not_an_attribute
diff --git a/tests/bears/BearTest.py b/tests/bears/BearTest.py
index c8977052e2..36da1d32f4 100644
--- a/tests/bears/BearTest.py
+++ b/tests/bears/BearTest.py
@@ -364,7 +364,7 @@ def test_connection_timeout_mocked(self):
exc = requests.exceptions.ConnectTimeout
with requests_mock.Mocker() as reqmock:
reqmock.get(self.mock_url, exc=exc)
- with self.assertRaisesRegexp(exc, '^$'):
+ with self.assertRaisesRegex(exc, '^$'):
self.uut.download_cached_file(
self.mock_url, self.filename)
@@ -380,17 +380,18 @@ def test_read_broken(self):
with requests_mock.Mocker() as reqmock:
reqmock.get(self.mock_url, body=fake_content_provider)
- with self.assertRaisesRegexp(exc, 'Fake read timeout'):
+ with self.assertRaisesRegex(exc, 'Fake read timeout'):
self.uut.download_cached_file(
self.mock_url, self.filename)
self.assertTrue(isfile(self.file_location))
- self.assertEqual(open(self.file_location, 'rb').read(),
- b''.join(fake_content))
+
+ with open(self.file_location, 'rb') as fh:
+ self.assertEqual(fh.read(), b''.join(fake_content))
def test_status_code_error(self):
exc = requests.exceptions.HTTPError
- with self.assertRaisesRegexp(exc, '418 Client Error'):
+ with self.assertRaisesRegex(exc, '418 Client Error'):
self.uut.download_cached_file(
'http://httpbin.org/status/418', self.filename)
diff --git a/tests/core/BearTest.py b/tests/core/BearTest.py
index 941db19379..843c8f9d92 100644
--- a/tests/core/BearTest.py
+++ b/tests/core/BearTest.py
@@ -211,13 +211,13 @@ def test_download_cached_file_connection_timeout_mocked(self):
exc = requests.exceptions.ConnectTimeout
with requests_mock.Mocker() as reqmock:
reqmock.get(mock_url, exc=exc)
- with self.assertRaisesRegexp(exc, '^$'):
+ with self.assertRaisesRegex(exc, '^$'):
Bear.download_cached_file(
mock_url, 'test.html')
def test_download_cached_file_status_code_error(self):
exc = requests.exceptions.HTTPError
- with self.assertRaisesRegexp(exc, '418 Client Error'):
+ with self.assertRaisesRegex(exc, '418 Client Error'):
Bear.download_cached_file(
'http://httpbin.org/status/418', 'test.html')
diff --git a/tests/output/JSONEncoderTest.py b/tests/output/JSONEncoderTest.py
index 9b05000b5f..7f624392f5 100644
--- a/tests/output/JSONEncoderTest.py
+++ b/tests/output/JSONEncoderTest.py
@@ -56,25 +56,25 @@ class JSONEncoderTest(unittest.TestCase):
kw = {'cls': JSONEncoder, 'sort_keys': True}
def test_builtins(self):
- self.assertEquals('"test"', json.dumps('test', **self.kw))
- self.assertEquals('1', json.dumps(1, **self.kw))
- self.assertEquals('true', json.dumps(True, **self.kw))
- self.assertEquals('null', json.dumps(None, **self.kw))
+ self.assertEqual('"test"', json.dumps('test', **self.kw))
+ self.assertEqual('1', json.dumps(1, **self.kw))
+ self.assertEqual('true', json.dumps(True, **self.kw))
+ self.assertEqual('null', json.dumps(None, **self.kw))
def test_iter(self):
- self.assertEquals('[0, 1]', json.dumps([0, 1], **self.kw))
- self.assertEquals('[0, 1]', json.dumps((0, 1), **self.kw))
- self.assertEquals('[0, 1]', json.dumps(range(2), **self.kw))
+ self.assertEqual('[0, 1]', json.dumps([0, 1], **self.kw))
+ self.assertEqual('[0, 1]', json.dumps((0, 1), **self.kw))
+ self.assertEqual('[0, 1]', json.dumps(range(2), **self.kw))
def test_dict(self):
- self.assertEquals('{"0": 1}', json.dumps({0: 1}, **self.kw))
- self.assertEquals('{"0": 1}', json.dumps({'0': 1}, **self.kw))
- self.assertEquals('{"0": "1"}', json.dumps({'0': '1'}, **self.kw))
+ self.assertEqual('{"0": 1}', json.dumps({0: 1}, **self.kw))
+ self.assertEqual('{"0": 1}', json.dumps({'0': 1}, **self.kw))
+ self.assertEqual('{"0": "1"}', json.dumps({'0': '1'}, **self.kw))
def test_time(self):
tf = datetime.today()
- self.assertEquals('"' + tf.isoformat() + '"',
- json.dumps(tf, **self.kw))
+ self.assertEqual('"' + tf.isoformat() + '"',
+ json.dumps(tf, **self.kw))
def test_re_object(self):
uut = re.compile('x')
@@ -83,19 +83,19 @@ def test_re_object(self):
def test_class1(self):
tc1 = TestClass1()
- self.assertEquals('{"a": 0}', json.dumps(tc1, **self.kw))
- self.assertEquals('[{"a": 0}]', json.dumps([tc1], **self.kw))
- self.assertEquals('{"0": {"a": 0}}', json.dumps({0: tc1}, **self.kw))
+ self.assertEqual('{"a": 0}', json.dumps(tc1, **self.kw))
+ self.assertEqual('[{"a": 0}]', json.dumps([tc1], **self.kw))
+ self.assertEqual('{"0": {"a": 0}}', json.dumps({0: tc1}, **self.kw))
def test_class2(self):
tc2 = TestClass2()
- self.assertEquals('{"a": 0, "b": {"a": 0}}',
- json.dumps(tc2, **self.kw))
+ self.assertEqual('{"a": 0, "b": {"a": 0}}',
+ json.dumps(tc2, **self.kw))
def test_class3(self):
tc3 = TestClass3()
- self.assertEquals('{"key": "val"}',
- json.dumps(tc3, **self.kw))
+ self.assertEqual('{"key": "val"}',
+ json.dumps(tc3, **self.kw))
def test_propertied_class(self):
uut = PropertiedClass()
diff --git a/tests/parsing/DefaultArgParserTest.py b/tests/parsing/DefaultArgParserTest.py
index 1750c474db..18f5115b41 100644
--- a/tests/parsing/DefaultArgParserTest.py
+++ b/tests/parsing/DefaultArgParserTest.py
@@ -23,11 +23,11 @@ def test_metavar_in_usage(self):
self.output,
flags=re.DOTALL)
self.assertIsNotNone(match)
- self.assertEquals(match.group(1), '-a [BOOL]')
+ self.assertEqual(match.group(1), '-a [BOOL]')
def test_metavar_not_in_optional_args_sections(self):
match = re.search('optional arguments:.+(-a, --all).*',
self.output,
flags=re.DOTALL)
self.assertIsNotNone(match)
- self.assertEquals(match.group(1), '-a, --all')
+ self.assertEqual(match.group(1), '-a, --all')
diff --git a/tests/results/result_actions/ApplyPatchActionTest.py b/tests/results/result_actions/ApplyPatchActionTest.py
index 41a11c552f..2b0c0ae02f 100644
--- a/tests/results/result_actions/ApplyPatchActionTest.py
+++ b/tests/results/result_actions/ApplyPatchActionTest.py
@@ -115,7 +115,8 @@ def test_apply_rename(self):
file_diff_dict)
self.assertFalse(isfile(f_a+'.renamed.orig'))
- file_dict = {f_a+'.renamed': open(f_a+'.renamed').readlines()}
+ with open(f_a+'.renamed') as fh:
+ file_dict = {f_a+'.renamed': fh.readlines()}
self.assertEqual(file_dict, expected_file_dict)
# Recreate file so that context manager make_temp() can delete it
|
ansible__molecule-3103 | Current directory being inadvertently added to ANSIBLE_LIBRARY
# Issue Type
- Bug report
# Molecule and Ansible details
```
ansible --version && molecule --version
ansible 2.10.7.post0
config file = None
configured module search path = ['/home/mgraves/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/mgraves/git/ansible/lib/ansible
executable location = /home/mgraves/git/ansible/bin/ansible
python version = 3.8.8 (default, Mar 5 2021, 11:47:01) [GCC 10.2.1 20210110]
molecule 3.3.0 using python 3.8
ansible:2.10.7.post0
delegated:3.3.0 from molecule
docker:0.2.4 from molecule_docker
```
Molecule installation method (one of):
- pip
Ansible installation method (one of):
- source
# Desired Behavior
Molecule should successfully complete.
# Actual Behaviour
```
~/git/ansible_collections/community/kubernetes $ molecule --debug converge -- -vvv
DEBUG Validating schema /home/mgraves/git/ansible_collections/community/kubernetes/molecule/default/molecule.yml.
INFO default scenario test matrix: dependency, create, prepare, converge
INFO Performing prerun...
INFO Added ANSIBLE_LIBRARY=:plugins/modules
INFO Added ANSIBLE_COLLECTIONS_PATH=/home/mgraves/git:/home/mgraves/.ansible/collections:/home/mgraves/git:/home/mgraves/.ansible/collections:./.cache/collections
INFO Running default > dependency
WARNING Skipping, missing the requirements file.
WARNING Skipping, missing the requirements file.
INFO Running default > create
WARNING Skipping, instances are delegated.
INFO Running default > prepare
WARNING Skipping, prepare playbook not configured.
INFO Running default > converge
DEBUG: ANSIBLE ENVIRONMENT:
ANSIBLE_COLLECTIONS_PATH: /home/mgraves/.cache/molecule/kubernetes/default/collections:/home/mgraves/git:/home/mgraves/.ansible/collections:/usr/share/ansible/collections:/etc/ansible/collections
ANSIBLE_CONFIG: /home/mgraves/.cache/molecule/kubernetes/default/ansible.cfg
ANSIBLE_FILTER_PLUGINS: /home/mgraves/git/ansible/venv/lib/python3.8/site-packages/molecule/provisioner/ansible/plugins/filter:/home/mgraves/.cache/molecule/kubernetes/default/plugins/filter:/home/mgraves/git/ansible_collections/community/kubernetes/plugins/filter:/home/mgraves/.ansible/plugins/filter:/usr/share/ansible/plugins/filter
ANSIBLE_FORCE_COLOR: 'true'
ANSIBLE_HOME: /home/mgraves/git/ansible
ANSIBLE_LIBRARY: /home/mgraves/git/ansible/venv/lib/python3.8/site-packages/molecule/provisioner/ansible/plugins/modules:/home/mgraves/.cache/molecule/kubernetes/default/library:/home/mgraves/git/ansible_collections/community/kubernetes/library:/home/mgraves/.ansible/plugins/modules:/usr/share/ansible/plugins/modules:/home/mgraves/git/ansible_collections/community/kubernetes/:plugins/modules
ANSIBLE_ROLES_PATH: '/home/mgraves/.cache/molecule/kubernetes/default/roles:/home/mgraves/git/ansible_collections/community:/home/mgraves/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles:'
DEBUG: MOLECULE ENVIRONMENT:
MOLECULE_DEBUG: 'True'
MOLECULE_DEPENDENCY_NAME: galaxy
MOLECULE_DRIVER_NAME: delegated
MOLECULE_ENV_FILE: /home/mgraves/git/ansible_collections/community/kubernetes/.env.yml
MOLECULE_EPHEMERAL_DIRECTORY: /home/mgraves/.cache/molecule/kubernetes/default
MOLECULE_FILE: /home/mgraves/.cache/molecule/kubernetes/default/molecule.yml
MOLECULE_INSTANCE_CONFIG: /home/mgraves/.cache/molecule/kubernetes/default/instance_config.yml
MOLECULE_INVENTORY_FILE: /home/mgraves/.cache/molecule/kubernetes/default/inventory/ansible_inventory.yml
MOLECULE_PROJECT_DIRECTORY: /home/mgraves/git/ansible_collections/community/kubernetes
MOLECULE_PROVISIONER_NAME: ansible
MOLECULE_SCENARIO_DIRECTORY: /home/mgraves/git/ansible_collections/community/kubernetes/molecule/default
MOLECULE_SCENARIO_NAME: default
MOLECULE_STATE_FILE: /home/mgraves/.cache/molecule/kubernetes/default/state.yml
MOLECULE_VERIFIER_NAME: ansible
MOLECULE_VERIFIER_TEST_DIRECTORY: /home/mgraves/git/ansible_collections/community/kubernetes/molecule/default/tests
DEBUG: SHELL REPLAY:
ANSIBLE_COLLECTIONS_PATH=/home/mgraves/.cache/molecule/kubernetes/default/collections:/home/mgraves/git:/home/mgraves/.ansible/collections:/usr/share/ansible/collections:/etc/ansible/collections ANSIBLE_CONFIG=/home/mgraves/.cache/molecule/kubernetes/default/ansible.cfg ANSIBLE_FILTER_PLUGINS=/home/mgraves/git/ansible/venv/lib/python3.8/site-packages/molecule/provisioner/ansible/plugins/filter:/home/mgraves/.cache/molecule/kubernetes/default/plugins/filter:/home/mgraves/git/ansible_collections/community/kubernetes/plugins/filter:/home/mgraves/.ansible/plugins/filter:/usr/share/ansible/plugins/filter ANSIBLE_FORCE_COLOR=true ANSIBLE_HOME=/home/mgraves/git/ansible ANSIBLE_LIBRARY=/home/mgraves/git/ansible/venv/lib/python3.8/site-packages/molecule/provisioner/ansible/plugins/modules:/home/mgraves/.cache/molecule/kubernetes/default/library:/home/mgraves/git/ansible_collections/community/kubernetes/library:/home/mgraves/.ansible/plugins/modules:/usr/share/ansible/plugins/modules:/home/mgraves/git/ansible_collections/community/kubernetes/:plugins/modules ANSIBLE_ROLES_PATH=/home/mgraves/.cache/molecule/kubernetes/default/roles:/home/mgraves/git/ansible_collections/community:/home/mgraves/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles: MOLECULE_DEBUG=True MOLECULE_DEPENDENCY_NAME=galaxy MOLECULE_DRIVER_NAME=delegated MOLECULE_ENV_FILE=/home/mgraves/git/ansible_collections/community/kubernetes/.env.yml MOLECULE_EPHEMERAL_DIRECTORY=/home/mgraves/.cache/molecule/kubernetes/default MOLECULE_FILE=/home/mgraves/.cache/molecule/kubernetes/default/molecule.yml MOLECULE_INSTANCE_CONFIG=/home/mgraves/.cache/molecule/kubernetes/default/instance_config.yml MOLECULE_INVENTORY_FILE=/home/mgraves/.cache/molecule/kubernetes/default/inventory/ansible_inventory.yml MOLECULE_PROJECT_DIRECTORY=/home/mgraves/git/ansible_collections/community/kubernetes MOLECULE_PROVISIONER_NAME=ansible MOLECULE_SCENARIO_DIRECTORY=/home/mgraves/git/ansible_collections/community/kubernetes/molecule/default MOLECULE_SCENARIO_NAME=default MOLECULE_STATE_FILE=/home/mgraves/.cache/molecule/kubernetes/default/state.yml MOLECULE_VERIFIER_NAME=ansible MOLECULE_VERIFIER_TEST_DIRECTORY=/home/mgraves/git/ansible_collections/community/kubernetes/molecule/default/tests
COMMAND: ansible-playbook --diff --inventory /home/mgraves/.cache/molecule/kubernetes/default/inventory --skip-tags molecule-notest,notest -vvv /home/mgraves/git/ansible_collections/community/kubernetes/molecule/default/converge.yml
ansible-playbook 2.10.7.post0
config file = /home/mgraves/.cache/molecule/kubernetes/default/ansible.cfg
configured module search path = ['/home/mgraves/git/ansible/venv/lib/python3.8/site-packages/molecule/provisioner/ansible/plugins/modules', '/home/mgraves/.cache/molecule/kubernetes/default/library', '/home/mgraves/git/ansible_collections/community/kubernetes/library', '/home/mgraves/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules', '/home/mgraves/git/ansible_collections/community/kubernetes', '/home/mgraves/git/ansible_collections/community/kubernetes/plugins/modules']
ansible python module location = /home/mgraves/git/ansible/lib/ansible
executable location = /home/mgraves/git/ansible/bin/ansible-playbook
python version = 3.8.8 (default, Mar 5 2021, 11:47:01) [GCC 10.2.1 20210110]
Using /home/mgraves/.cache/molecule/kubernetes/default/ansible.cfg as config file
[WARNING]: running playbook inside collection community.kubernetes
[WARNING]: * Failed to parse /home/mgraves/.cache/molecule/kubernetes/default/
inventory/ansible_inventory.yml with
ansible_collections.community.kubernetes.plugins.inventory.k8s plugin:
Incorrect plugin name in file: none found
File "/home/mgraves/git/ansible/lib/ansible/inventory/manager.py", line 289, in parse_source
plugin.parse(self._inventory, self._loader, source, cache=cache)
File "/home/mgraves/git/ansible_collections/community/kubernetes/plugins/inventory/k8s.py", line 153, in parse
config_data = self._read_config_data(path)
File "/home/mgraves/git/ansible/lib/ansible/plugins/inventory/__init__.py", line 227, in _read_config_data
raise AnsibleParserError("Incorrect plugin name in file: %s" % config.get('plugin', 'none found'))
[WARNING]: Unable to parse /home/mgraves/.cache/molecule/kubernetes/default/inv
entory/ansible_inventory.yml as an inventory source
[WARNING]: Invalid characters were found in group names but not replaced, use
-vvvv to see details
Parsed /home/mgraves/.cache/molecule/kubernetes/default/inventory/hosts inventory source with ansible_collections.community.kubernetes.plugins.inventory.k8s plugin
Read vars_file 'vars/main.yml'
Read vars_file 'vars/main.yml'
redirecting (type: action) community.kubernetes.k8s to community.kubernetes.k8s_info
redirecting (type: action) community.kubernetes.k8s to community.kubernetes.k8s_info
Read vars_file 'vars/main.yml'
Read vars_file 'vars/main.yml'
Read vars_file 'vars/main.yml'
Read vars_file 'vars/main.yml'
Read vars_file 'vars/main.yml'
Read vars_file 'vars/main.yml'
Read vars_file 'vars/main.yml'
Read vars_file 'vars/main.yml'
Read vars_file 'vars/main.yml'
Read vars_file 'vars/main.yml'
Read vars_file 'vars/main.yml'
redirecting (type: action) community.kubernetes.k8s to community.kubernetes.k8s_info
Skipping callback 'default', as we already have a stdout callback.
Skipping callback 'minimal', as we already have a stdout callback.
Skipping callback 'oneline', as we already have a stdout callback.
PLAYBOOK: converge.yml *********************************************************
3 plays in /home/mgraves/git/ansible_collections/community/kubernetes/molecule/default/converge.yml
Read vars_file 'vars/main.yml'
Read vars_file 'vars/main.yml'
Read vars_file 'vars/main.yml'
PLAY [Converge] ****************************************************************
Read vars_file 'vars/main.yml'
TASK [Gathering Facts] *********************************************************
task path: /home/mgraves/git/ansible_collections/community/kubernetes/molecule/default/converge.yml:2
<127.0.0.1> ESTABLISH LOCAL CONNECTION FOR USER: mgraves
<127.0.0.1> EXEC /bin/sh -c 'echo ~mgraves && sleep 0'
<127.0.0.1> EXEC /bin/sh -c '( umask 77 && mkdir -p "` echo /home/mgraves/.ansible/tmp `"&& mkdir "` echo /home/mgraves/.ansible/tmp/ansible-tmp-1616596936.7291377-1244769-237862218132468 `" && echo ansible-tmp-1616596936.7291377-1244769-237862218132468="` echo /home/mgraves/.ansible/tmp/ansible-tmp-1616596936.7291377-1244769-237862218132468 `" ) && sleep 0'
Using module file /home/mgraves/git/ansible_collections/community/kubernetes/setup.cfg
<127.0.0.1> EXEC /bin/sh -c 'rm -f -r /home/mgraves/.ansible/tmp/ansible-tmp-1616596936.7291377-1244769-237862218132468/ > /dev/null 2>&1 && sleep 0'
fatal: [localhost]: FAILED! => {
"msg": "module (ansible.legacy.setup) is missing interpreter line"
}
PLAY RECAP *********************************************************************
localhost : ok=0 changed=0 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0
CRITICAL Ansible return code was 2, command was: ansible-playbook --diff --inventory /home/mgraves/.cache/molecule/kubernetes/default/inventory --skip-tags molecule-notest,notest -vvv /home/mgraves/git/ansible_collections/community/kubernetes/molecule/default/converge.yml
```
Our test suite started failing with the 3.3.0 version of molecule. The prerun change added in #3077 started adding the current directory to `ANSIBLE_LIBRARY`. This can cause Ansible to fail when the project root contains a file whose name matches a module, as can be seen above where Ansible tries to read `setup.cfg` in our project root as the `setup` module.
The problem seems to be in https://github.com/ansible-community/molecule/blob/60b68140fb5c650c47019f5db238c0864dbd43ed/src/molecule/provisioner/ansible.py#L943. In our case, after ansible-lint has run, `prepare_environment` sees the `ANSIBLE_LIBRARY` envvar as `:plugins/modules`. I would think calling `abs_path` on this is probably not appropriate, since it is a colon-separated path string and should just be read in unprocessed.
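
To make the reported behaviour concrete, here is a small, hypothetical illustration; the `abs_path` stand-in below is a simplified substitute for `molecule.util.abs_path`, not the real implementation. Absolutising the whole colon-separated value glues the current directory onto it, which is exactly how the project root ends up on the module search path:

```python
import os


def abs_path(path):
    # Simplified stand-in for molecule.util.abs_path (assumption, not the real code).
    return os.path.abspath(path) if path else path


# Value left in the environment by the prerun step, as reported above.
ansible_library = ":plugins/modules"

# Treating the whole string as a single path prefixes it with the CWD, so the
# project root (plus a relative "plugins/modules" entry) lands on ANSIBLE_LIBRARY:
print(abs_path(ansible_library))
# e.g. ".../community/kubernetes/:plugins/modules" when run from the collection root

# Splitting on ':' and handling each entry on its own (or simply passing the value
# through untouched, as suggested above) avoids injecting the current directory:
print(":".join(abs_path(p) if p else p for p in ansible_library.split(":")))
```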
| [
{
"content": "# Copyright (c) 2015-2018 Cisco Systems, Inc.\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\"\"\"Ansible Provisioner Module.\"\"\"\n\nimport collections\nimport copy\nimport logging\nimport os\nimport shutil\n\nfrom molecule import util\nfrom molecule.api import drivers\nfrom molecule.provisioner import ansible_playbook, ansible_playbooks, base\n\nLOG = logging.getLogger(__name__)\n\n\nclass Ansible(base.Base):\n \"\"\"\n `Ansible`_ is the default provisioner. No other provisioner will be \\\n supported.\n\n Molecule's provisioner manages the instances lifecycle. However, the user\n must provide the create, destroy, and converge playbooks. Molecule's\n ``init`` subcommand will provide the necessary files for convenience.\n\n Molecule will skip tasks which are tagged with either `molecule-notest` or\n `notest`. With the tag `molecule-idempotence-notest` tasks are only\n skipped during the idempotence action step.\n\n .. important::\n\n Reserve the create and destroy playbooks for provisioning. Do not\n attempt to gather facts or perform operations on the provisioned nodes\n inside these playbooks. Due to the gymnastics necessary to sync state\n between Ansible and Molecule, it is best to perform these tasks in the\n prepare or converge playbooks.\n\n It is the developers responsibility to properly map the modules' fact\n data into the instance_conf_dict fact in the create playbook. This\n allows Molecule to properly configure Ansible inventory.\n\n Additional options can be passed to ``ansible-playbook`` through the options\n dict. Any option set in this section will override the defaults.\n\n .. important::\n\n Options do not affect the create and destroy actions.\n\n .. note::\n\n Molecule will remove any options matching '^[v]+$', and pass ``-vvv``\n to the underlying ``ansible-playbook`` command when executing\n `molecule --debug`.\n\n Molecule will silence log output, unless invoked with the ``--debug`` flag.\n However, this results in quite a bit of output. To enable Ansible log\n output, add the following to the ``provisioner`` section of ``molecule.yml``.\n\n .. code-block:: yaml\n\n provisioner:\n name: ansible\n log: True\n\n The create/destroy playbooks for Docker and Podman are bundled with\n Molecule. These playbooks have a clean API from `molecule.yml`, and\n are the most commonly used. The bundled playbooks can still be overridden.\n\n The playbook loading order is:\n\n 1. provisioner.playbooks.$driver_name.$action\n 2. 
provisioner.playbooks.$action\n 3. bundled_playbook.$driver_name.$action\n\n .. code-block:: yaml\n\n provisioner:\n name: ansible\n options:\n vvv: True\n playbooks:\n create: create.yml\n converge: converge.yml\n destroy: destroy.yml\n\n Share playbooks between roles.\n\n .. code-block:: yaml\n\n provisioner:\n name: ansible\n playbooks:\n create: ../default/create.yml\n destroy: ../default/destroy.yml\n converge: converge.yml\n\n Multiple driver playbooks. In some situations a developer may choose to\n test the same role against different backends. Molecule will choose driver\n specific create/destroy playbooks, if the determined driver has a key in\n the playbooks section of the provisioner's dict.\n\n .. important::\n\n If the determined driver has a key in the playbooks dict, Molecule will\n use this dict to resolve all provisioning playbooks (create/destroy).\n\n .. code-block:: yaml\n\n provisioner:\n name: ansible\n playbooks:\n docker:\n create: create.yml\n destroy: destroy.yml\n create: create.yml\n destroy: destroy.yml\n converge: converge.yml\n\n .. important::\n\n Paths in this section are converted to absolute paths, where the\n relative parent is the $scenario_directory.\n\n The side effect playbook executes actions which produce side effects to the\n instances(s). Intended to test HA failover scenarios or the like. It is\n not enabled by default. Add the following to the provisioner's ``playbooks``\n section to enable.\n\n .. code-block:: yaml\n\n provisioner:\n name: ansible\n playbooks:\n side_effect: side_effect.yml\n\n .. important::\n\n This feature should be considered experimental.\n\n The prepare playbook executes actions which bring the system to a given\n state prior to converge. It is executed after create, and only once for\n the duration of the instances life.\n\n This can be used to bring instances into a particular state, prior to\n testing.\n\n .. code-block:: yaml\n\n provisioner:\n name: ansible\n playbooks:\n prepare: prepare.yml\n\n The cleanup playbook is for cleaning up test infrastructure that may not\n be present on the instance that will be destroyed. The primary use-case\n is for \"cleaning up\" changes that were made outside of Molecule's test\n environment. For example, remote database connections or user accounts.\n Intended to be used in conjunction with `prepare` to modify external\n resources when required.\n\n The cleanup step is executed directly before every destroy step. Just like\n the destroy step, it will be run twice. An initial clean before converge\n and then a clean before the last destroy step. This means that the cleanup\n playbook must handle failures to cleanup resources which have not\n been created yet.\n\n Add the following to the provisioner's `playbooks` section\n to enable.\n\n .. code-block:: yaml\n\n provisioner:\n name: ansible\n playbooks:\n cleanup: cleanup.yml\n\n .. important::\n\n This feature should be considered experimental.\n\n Environment variables. Molecule does its best to handle common Ansible\n paths. 
The defaults are as follows.\n\n ::\n\n ANSIBLE_ROLES_PATH:\n $ephemeral_directory/roles/:$project_directory/../:~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles\n ANSIBLE_LIBRARY:\n $ephemeral_directory/modules/:$project_directory/library/:~/.ansible/plugins/modules:/usr/share/ansible/plugins/modules\n ANSIBLE_FILTER_PLUGINS:\n $ephemeral_directory/plugins/filter/:$project_directory/filter/plugins/:~/.ansible/plugins/filter:/usr/share/ansible/plugins/modules\n\n Environment variables can be passed to the provisioner. Variables in this\n section which match the names above will be appended to the above defaults,\n and converted to absolute paths, where the relative parent is the\n $scenario_directory.\n\n .. important::\n\n Paths in this section are converted to absolute paths, where the\n relative parent is the $scenario_directory.\n\n .. code-block:: yaml\n\n provisioner:\n name: ansible\n env:\n FOO: bar\n\n Modifying ansible.cfg.\n\n .. code-block:: yaml\n\n provisioner:\n name: ansible\n config_options:\n defaults:\n fact_caching: jsonfile\n ssh_connection:\n scp_if_ssh: True\n\n .. important::\n\n The following keys are disallowed to prevent Molecule from\n improperly functioning. They can be specified through the\n provisioner's env setting described above, with the exception\n of the `privilege_escalation`.\n\n .. code-block:: yaml\n\n provisioner:\n name: ansible\n config_options:\n defaults:\n roles_path: /path/to/roles_path\n library: /path/to/library\n filter_plugins: /path/to/filter_plugins\n privilege_escalation: {}\n\n Roles which require host/groups to have certain variables set. Molecule\n uses the same `variables defined in a playbook`_ syntax as `Ansible`_.\n\n .. code-block:: yaml\n\n provisioner:\n name: ansible\n inventory:\n group_vars:\n foo1:\n foo: bar\n foo2:\n foo: bar\n baz:\n qux: zzyzx\n host_vars:\n foo1-01:\n foo: bar\n\n Molecule automatically generates the inventory based on the hosts defined\n under `Platforms`_. Using the ``hosts`` key allows to add extra hosts to\n the inventory that are not managed by Molecule.\n\n A typical use case is if you want to access some variables from another\n host in the inventory (using hostvars) without creating it.\n\n .. note::\n\n The content of ``hosts`` should follow the YAML based inventory syntax:\n start with the ``all`` group and have hosts/vars/children entries.\n\n .. code-block:: yaml\n\n provisioner:\n name: ansible\n inventory:\n hosts:\n all:\n extra_host:\n foo: hello\n\n .. important::\n\n The extra hosts added to the inventory using this key won't be\n created/destroyed by Molecule. It is the developers responsibility\n to target the proper hosts in the playbook. Only the hosts defined\n under `Platforms`_ should be targeted instead of ``all``.\n\n\n An alternative to the above is symlinking. Molecule creates symlinks to\n the specified directory in the inventory directory. This allows ansible to\n converge utilizing its built in host/group_vars resolution. These two\n forms of inventory management are mutually exclusive.\n\n Like above, it is possible to pass an additional inventory file\n (or even dynamic inventory script), using the ``hosts`` key. `Ansible`_ will\n automatically merge this inventory with the one generated by molecule.\n This can be useful if you want to define extra hosts that are not managed\n by Molecule.\n\n .. important::\n\n Again, it is the developers responsibility to target the proper hosts\n in the playbook. 
Only the hosts defined under\n `Platforms`_ should be targeted instead of ``all``.\n\n .. note::\n\n The source directory linking is relative to the scenario's\n directory.\n\n The only valid keys are ``hosts``, ``group_vars`` and ``host_vars``. Molecule's\n schema validator will enforce this.\n\n .. code-block:: yaml\n\n provisioner:\n name: ansible\n inventory:\n links:\n hosts: ../../../inventory/hosts\n group_vars: ../../../inventory/group_vars/\n host_vars: ../../../inventory/host_vars/\n\n Override connection options:\n\n .. code-block:: yaml\n\n provisioner:\n name: ansible\n connection_options:\n ansible_ssh_user: foo\n ansible_ssh_common_args: -o IdentitiesOnly=no\n\n .. _`variables defined in a playbook`: https://docs.ansible.com/ansible/latest/user_guide/playbooks_variables.html#defining-variables-in-a-playbook\n\n Add arguments to ansible-playbook when running converge:\n\n .. code-block:: yaml\n\n provisioner:\n name: ansible\n ansible_args:\n - --inventory=mygroups.yml\n - --limit=host1,host2\n\n \"\"\" # noqa\n\n def __init__(self, config):\n \"\"\"\n Initialize a new ansible class and returns None.\n\n :param config: An instance of a Molecule config.\n :return: None\n \"\"\"\n super(Ansible, self).__init__(config)\n\n @property\n def default_config_options(self):\n \"\"\"\n Provide Default options to construct ansible.cfg and returns a dict.\n\n :return: dict\n \"\"\"\n return {\n \"defaults\": {\n \"ansible_managed\": \"Ansible managed: Do NOT edit this file manually!\",\n \"display_failed_stderr\": True,\n \"forks\": 50,\n \"retry_files_enabled\": False,\n \"host_key_checking\": False,\n \"nocows\": 1,\n \"interpreter_python\": \"auto_silent\",\n },\n \"ssh_connection\": {\n \"scp_if_ssh\": True,\n \"control_path\": \"%(directory)s/%%h-%%p-%%r\",\n },\n }\n\n @property\n def default_options(self):\n d = {\"skip-tags\": \"molecule-notest,notest\"}\n\n if self._config.action == \"idempotence\":\n d[\"skip-tags\"] += \",molecule-idempotence-notest\"\n\n if self._config.debug:\n d[\"vvv\"] = True\n d[\"diff\"] = True\n\n return d\n\n @property\n def default_env(self):\n # Finds if the current project is part of an ansible_collections hierarchy\n collection_indicator = \"ansible_collections\"\n # isolating test environment by injects ephemeral scenario directory on\n # top of the collection_path_list. 
This prevents dependency commands\n # from installing dependencies to user list of collections.\n collections_path_list = [\n util.abs_path(\n os.path.join(self._config.scenario.ephemeral_directory, \"collections\")\n )\n ]\n if collection_indicator in self._config.project_directory:\n collection_path, right = self._config.project_directory.rsplit(\n collection_indicator, 1\n )\n collections_path_list.append(util.abs_path(collection_path))\n collections_path_list.extend(\n [\n util.abs_path(\n os.path.join(os.path.expanduser(\"~\"), \".ansible/collections\")\n ),\n \"/usr/share/ansible/collections\",\n \"/etc/ansible/collections\",\n ]\n )\n env = util.merge_dicts(\n os.environ,\n {\n \"ANSIBLE_CONFIG\": self._config.provisioner.config_file,\n \"ANSIBLE_ROLES_PATH\": \":\".join(\n [\n util.abs_path(\n os.path.join(\n self._config.scenario.ephemeral_directory, \"roles\"\n )\n ),\n util.abs_path(\n os.path.join(self._config.project_directory, os.path.pardir)\n ),\n util.abs_path(\n os.path.join(os.path.expanduser(\"~\"), \".ansible\", \"roles\")\n ),\n \"/usr/share/ansible/roles\",\n \"/etc/ansible/roles\",\n *os.environ.get(\"ANSIBLE_ROLES_PATH\", \"\").split(\":\"),\n ]\n ),\n self._config.ansible_collections_path: \":\".join(collections_path_list),\n \"ANSIBLE_LIBRARY\": \":\".join(self._get_modules_directories()),\n \"ANSIBLE_FILTER_PLUGINS\": \":\".join(\n [\n self._get_filter_plugin_directory(),\n util.abs_path(\n os.path.join(\n self._config.scenario.ephemeral_directory,\n \"plugins\",\n \"filter\",\n )\n ),\n util.abs_path(\n os.path.join(\n self._config.project_directory, \"plugins\", \"filter\"\n )\n ),\n util.abs_path(\n os.path.join(\n os.path.expanduser(\"~\"), \".ansible\", \"plugins\", \"filter\"\n )\n ),\n \"/usr/share/ansible/plugins/filter\",\n ]\n ),\n },\n )\n env = util.merge_dicts(env, self._config.env)\n\n return env\n\n @property\n def name(self):\n return self._config.config[\"provisioner\"][\"name\"]\n\n @property\n def ansible_args(self):\n return self._config.config[\"provisioner\"][\"ansible_args\"]\n\n @property\n def config_options(self):\n return util.merge_dicts(\n self.default_config_options,\n self._config.config[\"provisioner\"][\"config_options\"],\n )\n\n @property\n def options(self):\n if self._config.action in [\"create\", \"destroy\"]:\n return self.default_options\n\n o = self._config.config[\"provisioner\"][\"options\"]\n # NOTE(retr0h): Remove verbose options added by the user while in\n # debug.\n if self._config.debug:\n o = util.filter_verbose_permutation(o)\n\n return util.merge_dicts(self.default_options, o)\n\n @property\n def env(self):\n default_env = self.default_env\n env = self._config.config[\"provisioner\"][\"env\"].copy()\n # ensure that all keys and values are strings\n env = {str(k): str(v) for k, v in env.items()}\n\n roles_path = default_env[\"ANSIBLE_ROLES_PATH\"]\n library_path = default_env[\"ANSIBLE_LIBRARY\"]\n filter_plugins_path = default_env[\"ANSIBLE_FILTER_PLUGINS\"]\n\n try:\n path = self._absolute_path_for(env, \"ANSIBLE_ROLES_PATH\")\n roles_path = \"{}:{}\".format(roles_path, path)\n except KeyError:\n pass\n\n try:\n path = self._absolute_path_for(env, \"ANSIBLE_LIBRARY\")\n library_path = \"{}:{}\".format(library_path, path)\n except KeyError:\n pass\n\n try:\n path = self._absolute_path_for(env, \"ANSIBLE_FILTER_PLUGINS\")\n filter_plugins_path = \"{}:{}\".format(filter_plugins_path, path)\n except KeyError:\n pass\n\n env[\"ANSIBLE_ROLES_PATH\"] = roles_path\n env[\"ANSIBLE_LIBRARY\"] = library_path\n 
env[\"ANSIBLE_FILTER_PLUGINS\"] = filter_plugins_path\n\n return util.merge_dicts(default_env, env)\n\n @property\n def hosts(self):\n return self._config.config[\"provisioner\"][\"inventory\"][\"hosts\"]\n\n @property\n def host_vars(self):\n return self._config.config[\"provisioner\"][\"inventory\"][\"host_vars\"]\n\n @property\n def group_vars(self):\n return self._config.config[\"provisioner\"][\"inventory\"][\"group_vars\"]\n\n @property\n def links(self):\n return self._config.config[\"provisioner\"][\"inventory\"][\"links\"]\n\n @property\n def inventory(self):\n \"\"\"\n Create an inventory structure and returns a dict.\n\n .. code-block:: yaml\n ungrouped:\n vars:\n foo: bar\n hosts:\n instance-1:\n instance-2:\n children:\n $child_group_name:\n hosts:\n instance-1:\n instance-2:\n $group_name:\n hosts:\n instance-1:\n ansible_connection: docker\n instance-2:\n ansible_connection: docker\n\n :return: str\n \"\"\"\n dd = self._vivify()\n for platform in self._config.platforms.instances:\n for group in platform.get(\"groups\", [\"ungrouped\"]):\n instance_name = platform[\"name\"]\n connection_options = self.connection_options(instance_name)\n molecule_vars = {\n \"molecule_file\": \"{{ lookup('env', 'MOLECULE_FILE') }}\",\n \"molecule_ephemeral_directory\": \"{{ lookup('env', 'MOLECULE_EPHEMERAL_DIRECTORY') }}\",\n \"molecule_scenario_directory\": \"{{ lookup('env', 'MOLECULE_SCENARIO_DIRECTORY') }}\",\n \"molecule_yml\": \"{{ lookup('file', molecule_file) | from_yaml }}\",\n \"molecule_instance_config\": \"{{ lookup('env', 'MOLECULE_INSTANCE_CONFIG') }}\",\n \"molecule_no_log\": \"{{ lookup('env', 'MOLECULE_NO_LOG') or not \"\n \"molecule_yml.provisioner.log|default(False) | bool }}\",\n }\n\n # All group\n dd[\"all\"][\"hosts\"][instance_name] = connection_options\n dd[\"all\"][\"vars\"] = molecule_vars\n # Named group\n dd[group][\"hosts\"][instance_name] = connection_options\n dd[group][\"vars\"] = molecule_vars\n # Ungrouped\n dd[\"ungrouped\"][\"vars\"] = {}\n # Children\n for child_group in platform.get(\"children\", []):\n dd[group][\"children\"][child_group][\"hosts\"][\n instance_name\n ] = connection_options\n\n return self._default_to_regular(dd)\n\n @property\n def inventory_directory(self):\n return self._config.scenario.inventory_directory\n\n @property\n def inventory_file(self):\n return os.path.join(self.inventory_directory, \"ansible_inventory.yml\")\n\n @property\n def config_file(self):\n return os.path.join(self._config.scenario.ephemeral_directory, \"ansible.cfg\")\n\n @property # type: ignore\n @util.lru_cache()\n def playbooks(self):\n return ansible_playbooks.AnsiblePlaybooks(self._config)\n\n @property\n def directory(self):\n return os.path.join(\n os.path.dirname(__file__),\n os.path.pardir,\n os.path.pardir,\n \"molecule\",\n \"provisioner\",\n \"ansible\",\n )\n\n def cleanup(self):\n \"\"\"\n Execute `ansible-playbook` against the cleanup playbook and returns \\\n None.\n\n :return: None\n \"\"\"\n pb = self._get_ansible_playbook(self.playbooks.cleanup)\n pb.execute()\n\n def connection_options(self, instance_name):\n d = self._config.driver.ansible_connection_options(instance_name)\n\n return util.merge_dicts(\n d, self._config.config[\"provisioner\"][\"connection_options\"]\n )\n\n def check(self):\n \"\"\"\n Execute ``ansible-playbook`` against the converge playbook with the \\\n ``--check`` flag and returns None.\n\n :return: None\n \"\"\"\n pb = self._get_ansible_playbook(self.playbooks.converge)\n pb.add_cli_arg(\"check\", True)\n 
pb.execute()\n\n def converge(self, playbook=None, **kwargs):\n \"\"\"\n Execute ``ansible-playbook`` against the converge playbook unless \\\n specified otherwise and returns a string.\n\n :param playbook: An optional string containing an absolute path to a\n playbook.\n :param kwargs: An optional keyword arguments.\n :return: str\n \"\"\"\n pb = self._get_ansible_playbook(playbook or self.playbooks.converge, **kwargs)\n\n return pb.execute()\n\n def destroy(self):\n \"\"\"\n Execute ``ansible-playbook`` against the destroy playbook and returns \\\n None.\n\n :return: None\n \"\"\"\n pb = self._get_ansible_playbook(self.playbooks.destroy)\n pb.execute()\n\n def side_effect(self):\n \"\"\"\n Execute ``ansible-playbook`` against the side_effect playbook and \\\n returns None.\n\n :return: None\n \"\"\"\n pb = self._get_ansible_playbook(self.playbooks.side_effect)\n pb.execute()\n\n def create(self):\n \"\"\"\n Execute ``ansible-playbook`` against the create playbook and returns \\\n None.\n\n :return: None\n \"\"\"\n pb = self._get_ansible_playbook(self.playbooks.create)\n pb.execute()\n\n def prepare(self):\n \"\"\"\n Execute ``ansible-playbook`` against the prepare playbook and returns \\\n None.\n\n :return: None\n \"\"\"\n pb = self._get_ansible_playbook(self.playbooks.prepare)\n pb.execute()\n\n def syntax(self):\n \"\"\"\n Execute ``ansible-playbook`` against the converge playbook with the \\\n ``-syntax-check`` flag and returns None.\n\n :return: None\n \"\"\"\n pb = self._get_ansible_playbook(self.playbooks.converge)\n pb.add_cli_arg(\"syntax-check\", True)\n pb.execute()\n\n def verify(self):\n \"\"\"\n Execute ``ansible-playbook`` against the verify playbook and returns \\\n None.\n\n :return: None\n \"\"\"\n if not self.playbooks.verify:\n LOG.warning(\"Skipping, verify playbook not configured.\")\n return\n\n pb = self._get_ansible_playbook(self.playbooks.verify)\n pb.execute()\n\n def write_config(self):\n \"\"\"\n Write the provisioner's config file to disk and returns None.\n\n :return: None\n \"\"\"\n template = util.render_template(\n self._get_config_template(), config_options=self.config_options\n )\n util.write_file(self.config_file, template)\n\n def manage_inventory(self):\n \"\"\"\n Manage inventory for Ansible and returns None.\n\n :returns: None\n \"\"\"\n self._write_inventory()\n self._remove_vars()\n if not self.links:\n self._add_or_update_vars()\n else:\n self._link_or_update_vars()\n\n def abs_path(self, path):\n return util.abs_path(os.path.join(self._config.scenario.directory, path))\n\n def _add_or_update_vars(self):\n \"\"\"\n Create host and/or group vars and returns None.\n\n :returns: None\n \"\"\"\n # Create the hosts extra inventory source (only if not empty)\n hosts_file = os.path.join(self.inventory_directory, \"hosts\")\n if self.hosts:\n util.write_file(hosts_file, util.safe_dump(self.hosts))\n # Create the host_vars and group_vars directories\n for target in [\"host_vars\", \"group_vars\"]:\n if target == \"host_vars\":\n vars_target = copy.deepcopy(self.host_vars)\n for instance_name, _ in self.host_vars.items():\n instance_key = instance_name\n vars_target[instance_key] = vars_target.pop(instance_name)\n\n elif target == \"group_vars\":\n vars_target = self.group_vars\n\n if vars_target:\n target_vars_directory = os.path.join(self.inventory_directory, target)\n\n if not os.path.isdir(util.abs_path(target_vars_directory)):\n os.mkdir(util.abs_path(target_vars_directory))\n\n for target in vars_target.keys():\n target_var_content = 
vars_target[target]\n path = os.path.join(util.abs_path(target_vars_directory), target)\n util.write_file(path, util.safe_dump(target_var_content))\n\n def _write_inventory(self):\n \"\"\"\n Write the provisioner's inventory file to disk and returns None.\n\n :return: None\n \"\"\"\n self._verify_inventory()\n\n util.write_file(self.inventory_file, util.safe_dump(self.inventory))\n\n def _remove_vars(self):\n \"\"\"\n Remove hosts/host_vars/group_vars and returns None.\n\n :returns: None\n \"\"\"\n for name in (\"hosts\", \"group_vars\", \"host_vars\"):\n d = os.path.join(self.inventory_directory, name)\n if os.path.islink(d) or os.path.isfile(d):\n os.unlink(d)\n elif os.path.isdir(d):\n shutil.rmtree(d)\n\n def _link_or_update_vars(self):\n \"\"\"\n Create or updates the symlink to group_vars and returns None.\n\n :returns: None\n \"\"\"\n for d, source in self.links.items():\n target = os.path.join(self.inventory_directory, d)\n source = os.path.join(self._config.scenario.directory, source)\n\n if not os.path.exists(source):\n msg = \"The source path '{}' does not exist.\".format(source)\n util.sysexit_with_message(msg)\n msg = \"Inventory {} linked to {}\".format(source, target)\n LOG.info(msg)\n os.symlink(source, target)\n\n def _get_ansible_playbook(self, playbook, **kwargs):\n \"\"\"\n Get an instance of AnsiblePlaybook and returns it.\n\n :param playbook: A string containing an absolute path to a\n provisioner's playbook.\n :param kwargs: An optional keyword arguments.\n :return: object\n \"\"\"\n return ansible_playbook.AnsiblePlaybook(playbook, self._config, **kwargs)\n\n def _verify_inventory(self):\n \"\"\"\n Verify the inventory is valid and returns None.\n\n :return: None\n \"\"\"\n if not self.inventory:\n msg = \"Instances missing from the 'platform' \" \"section of molecule.yml.\"\n util.sysexit_with_message(msg)\n\n def _get_config_template(self):\n \"\"\"\n Return a config template string.\n\n :return: str\n \"\"\"\n return \"\"\"\n{% for section, section_dict in config_options.items() -%}\n[{{ section }}]\n{% for k, v in section_dict.items() -%}\n{{ k }} = {{ v }}\n{% endfor -%}\n{% endfor -%}\n\"\"\".strip()\n\n def _vivify(self):\n \"\"\"\n Return an autovivification default dict.\n\n :return: dict\n \"\"\"\n return collections.defaultdict(self._vivify)\n\n def _default_to_regular(self, d):\n if isinstance(d, collections.defaultdict):\n d = {k: self._default_to_regular(v) for k, v in d.items()}\n\n return d\n\n def _get_plugin_directory(self):\n return os.path.join(self.directory, \"plugins\")\n\n def _get_modules_directories(self):\n \"\"\"Return list of ansilbe module includes directories.\n\n Adds modules directory from molecule and its plugins.\n \"\"\"\n paths = [util.abs_path(os.path.join(self._get_plugin_directory(), \"modules\"))]\n\n for d in drivers():\n p = d.modules_dir()\n if p:\n paths.append(p)\n paths.extend(\n [\n util.abs_path(\n os.path.join(self._config.scenario.ephemeral_directory, \"library\")\n ),\n util.abs_path(os.path.join(self._config.project_directory, \"library\")),\n util.abs_path(\n os.path.join(\n os.path.expanduser(\"~\"),\n \".ansible\",\n \"plugins\",\n \"modules\",\n )\n ),\n \"/usr/share/ansible/plugins/modules\",\n ]\n )\n\n if os.environ.get(\"ANSIBLE_LIBRARY\"):\n paths.extend([util.abs_path(os.environ.get(\"ANSIBLE_LIBRARY\"))])\n\n return paths\n\n def _get_filter_plugin_directory(self):\n return util.abs_path(os.path.join(self._get_plugin_directory(), \"filter\"))\n\n def _absolute_path_for(self, env, key):\n return 
\":\".join([self.abs_path(p) for p in env[key].split(\":\")])\n",
"path": "src/molecule/provisioner/ansible.py"
}
] | [
{
"content": "# Copyright (c) 2015-2018 Cisco Systems, Inc.\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\"\"\"Ansible Provisioner Module.\"\"\"\n\nimport collections\nimport copy\nimport logging\nimport os\nimport shutil\n\nfrom molecule import util\nfrom molecule.api import drivers\nfrom molecule.provisioner import ansible_playbook, ansible_playbooks, base\n\nLOG = logging.getLogger(__name__)\n\n\nclass Ansible(base.Base):\n \"\"\"\n `Ansible`_ is the default provisioner. No other provisioner will be \\\n supported.\n\n Molecule's provisioner manages the instances lifecycle. However, the user\n must provide the create, destroy, and converge playbooks. Molecule's\n ``init`` subcommand will provide the necessary files for convenience.\n\n Molecule will skip tasks which are tagged with either `molecule-notest` or\n `notest`. With the tag `molecule-idempotence-notest` tasks are only\n skipped during the idempotence action step.\n\n .. important::\n\n Reserve the create and destroy playbooks for provisioning. Do not\n attempt to gather facts or perform operations on the provisioned nodes\n inside these playbooks. Due to the gymnastics necessary to sync state\n between Ansible and Molecule, it is best to perform these tasks in the\n prepare or converge playbooks.\n\n It is the developers responsibility to properly map the modules' fact\n data into the instance_conf_dict fact in the create playbook. This\n allows Molecule to properly configure Ansible inventory.\n\n Additional options can be passed to ``ansible-playbook`` through the options\n dict. Any option set in this section will override the defaults.\n\n .. important::\n\n Options do not affect the create and destroy actions.\n\n .. note::\n\n Molecule will remove any options matching '^[v]+$', and pass ``-vvv``\n to the underlying ``ansible-playbook`` command when executing\n `molecule --debug`.\n\n Molecule will silence log output, unless invoked with the ``--debug`` flag.\n However, this results in quite a bit of output. To enable Ansible log\n output, add the following to the ``provisioner`` section of ``molecule.yml``.\n\n .. code-block:: yaml\n\n provisioner:\n name: ansible\n log: True\n\n The create/destroy playbooks for Docker and Podman are bundled with\n Molecule. These playbooks have a clean API from `molecule.yml`, and\n are the most commonly used. The bundled playbooks can still be overridden.\n\n The playbook loading order is:\n\n 1. provisioner.playbooks.$driver_name.$action\n 2. 
provisioner.playbooks.$action\n 3. bundled_playbook.$driver_name.$action\n\n .. code-block:: yaml\n\n provisioner:\n name: ansible\n options:\n vvv: True\n playbooks:\n create: create.yml\n converge: converge.yml\n destroy: destroy.yml\n\n Share playbooks between roles.\n\n .. code-block:: yaml\n\n provisioner:\n name: ansible\n playbooks:\n create: ../default/create.yml\n destroy: ../default/destroy.yml\n converge: converge.yml\n\n Multiple driver playbooks. In some situations a developer may choose to\n test the same role against different backends. Molecule will choose driver\n specific create/destroy playbooks, if the determined driver has a key in\n the playbooks section of the provisioner's dict.\n\n .. important::\n\n If the determined driver has a key in the playbooks dict, Molecule will\n use this dict to resolve all provisioning playbooks (create/destroy).\n\n .. code-block:: yaml\n\n provisioner:\n name: ansible\n playbooks:\n docker:\n create: create.yml\n destroy: destroy.yml\n create: create.yml\n destroy: destroy.yml\n converge: converge.yml\n\n .. important::\n\n Paths in this section are converted to absolute paths, where the\n relative parent is the $scenario_directory.\n\n The side effect playbook executes actions which produce side effects to the\n instances(s). Intended to test HA failover scenarios or the like. It is\n not enabled by default. Add the following to the provisioner's ``playbooks``\n section to enable.\n\n .. code-block:: yaml\n\n provisioner:\n name: ansible\n playbooks:\n side_effect: side_effect.yml\n\n .. important::\n\n This feature should be considered experimental.\n\n The prepare playbook executes actions which bring the system to a given\n state prior to converge. It is executed after create, and only once for\n the duration of the instances life.\n\n This can be used to bring instances into a particular state, prior to\n testing.\n\n .. code-block:: yaml\n\n provisioner:\n name: ansible\n playbooks:\n prepare: prepare.yml\n\n The cleanup playbook is for cleaning up test infrastructure that may not\n be present on the instance that will be destroyed. The primary use-case\n is for \"cleaning up\" changes that were made outside of Molecule's test\n environment. For example, remote database connections or user accounts.\n Intended to be used in conjunction with `prepare` to modify external\n resources when required.\n\n The cleanup step is executed directly before every destroy step. Just like\n the destroy step, it will be run twice. An initial clean before converge\n and then a clean before the last destroy step. This means that the cleanup\n playbook must handle failures to cleanup resources which have not\n been created yet.\n\n Add the following to the provisioner's `playbooks` section\n to enable.\n\n .. code-block:: yaml\n\n provisioner:\n name: ansible\n playbooks:\n cleanup: cleanup.yml\n\n .. important::\n\n This feature should be considered experimental.\n\n Environment variables. Molecule does its best to handle common Ansible\n paths. 
The defaults are as follows.\n\n ::\n\n ANSIBLE_ROLES_PATH:\n $ephemeral_directory/roles/:$project_directory/../:~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles\n ANSIBLE_LIBRARY:\n $ephemeral_directory/modules/:$project_directory/library/:~/.ansible/plugins/modules:/usr/share/ansible/plugins/modules\n ANSIBLE_FILTER_PLUGINS:\n $ephemeral_directory/plugins/filter/:$project_directory/filter/plugins/:~/.ansible/plugins/filter:/usr/share/ansible/plugins/modules\n\n Environment variables can be passed to the provisioner. Variables in this\n section which match the names above will be appended to the above defaults,\n and converted to absolute paths, where the relative parent is the\n $scenario_directory.\n\n .. important::\n\n Paths in this section are converted to absolute paths, where the\n relative parent is the $scenario_directory.\n\n .. code-block:: yaml\n\n provisioner:\n name: ansible\n env:\n FOO: bar\n\n Modifying ansible.cfg.\n\n .. code-block:: yaml\n\n provisioner:\n name: ansible\n config_options:\n defaults:\n fact_caching: jsonfile\n ssh_connection:\n scp_if_ssh: True\n\n .. important::\n\n The following keys are disallowed to prevent Molecule from\n improperly functioning. They can be specified through the\n provisioner's env setting described above, with the exception\n of the `privilege_escalation`.\n\n .. code-block:: yaml\n\n provisioner:\n name: ansible\n config_options:\n defaults:\n roles_path: /path/to/roles_path\n library: /path/to/library\n filter_plugins: /path/to/filter_plugins\n privilege_escalation: {}\n\n Roles which require host/groups to have certain variables set. Molecule\n uses the same `variables defined in a playbook`_ syntax as `Ansible`_.\n\n .. code-block:: yaml\n\n provisioner:\n name: ansible\n inventory:\n group_vars:\n foo1:\n foo: bar\n foo2:\n foo: bar\n baz:\n qux: zzyzx\n host_vars:\n foo1-01:\n foo: bar\n\n Molecule automatically generates the inventory based on the hosts defined\n under `Platforms`_. Using the ``hosts`` key allows to add extra hosts to\n the inventory that are not managed by Molecule.\n\n A typical use case is if you want to access some variables from another\n host in the inventory (using hostvars) without creating it.\n\n .. note::\n\n The content of ``hosts`` should follow the YAML based inventory syntax:\n start with the ``all`` group and have hosts/vars/children entries.\n\n .. code-block:: yaml\n\n provisioner:\n name: ansible\n inventory:\n hosts:\n all:\n extra_host:\n foo: hello\n\n .. important::\n\n The extra hosts added to the inventory using this key won't be\n created/destroyed by Molecule. It is the developers responsibility\n to target the proper hosts in the playbook. Only the hosts defined\n under `Platforms`_ should be targeted instead of ``all``.\n\n\n An alternative to the above is symlinking. Molecule creates symlinks to\n the specified directory in the inventory directory. This allows ansible to\n converge utilizing its built in host/group_vars resolution. These two\n forms of inventory management are mutually exclusive.\n\n Like above, it is possible to pass an additional inventory file\n (or even dynamic inventory script), using the ``hosts`` key. `Ansible`_ will\n automatically merge this inventory with the one generated by molecule.\n This can be useful if you want to define extra hosts that are not managed\n by Molecule.\n\n .. important::\n\n Again, it is the developers responsibility to target the proper hosts\n in the playbook. 
Only the hosts defined under\n `Platforms`_ should be targeted instead of ``all``.\n\n .. note::\n\n The source directory linking is relative to the scenario's\n directory.\n\n The only valid keys are ``hosts``, ``group_vars`` and ``host_vars``. Molecule's\n schema validator will enforce this.\n\n .. code-block:: yaml\n\n provisioner:\n name: ansible\n inventory:\n links:\n hosts: ../../../inventory/hosts\n group_vars: ../../../inventory/group_vars/\n host_vars: ../../../inventory/host_vars/\n\n Override connection options:\n\n .. code-block:: yaml\n\n provisioner:\n name: ansible\n connection_options:\n ansible_ssh_user: foo\n ansible_ssh_common_args: -o IdentitiesOnly=no\n\n .. _`variables defined in a playbook`: https://docs.ansible.com/ansible/latest/user_guide/playbooks_variables.html#defining-variables-in-a-playbook\n\n Add arguments to ansible-playbook when running converge:\n\n .. code-block:: yaml\n\n provisioner:\n name: ansible\n ansible_args:\n - --inventory=mygroups.yml\n - --limit=host1,host2\n\n \"\"\" # noqa\n\n def __init__(self, config):\n \"\"\"\n Initialize a new ansible class and returns None.\n\n :param config: An instance of a Molecule config.\n :return: None\n \"\"\"\n super(Ansible, self).__init__(config)\n\n @property\n def default_config_options(self):\n \"\"\"\n Provide Default options to construct ansible.cfg and returns a dict.\n\n :return: dict\n \"\"\"\n return {\n \"defaults\": {\n \"ansible_managed\": \"Ansible managed: Do NOT edit this file manually!\",\n \"display_failed_stderr\": True,\n \"forks\": 50,\n \"retry_files_enabled\": False,\n \"host_key_checking\": False,\n \"nocows\": 1,\n \"interpreter_python\": \"auto_silent\",\n },\n \"ssh_connection\": {\n \"scp_if_ssh\": True,\n \"control_path\": \"%(directory)s/%%h-%%p-%%r\",\n },\n }\n\n @property\n def default_options(self):\n d = {\"skip-tags\": \"molecule-notest,notest\"}\n\n if self._config.action == \"idempotence\":\n d[\"skip-tags\"] += \",molecule-idempotence-notest\"\n\n if self._config.debug:\n d[\"vvv\"] = True\n d[\"diff\"] = True\n\n return d\n\n @property\n def default_env(self):\n # Finds if the current project is part of an ansible_collections hierarchy\n collection_indicator = \"ansible_collections\"\n # isolating test environment by injects ephemeral scenario directory on\n # top of the collection_path_list. 
This prevents dependency commands\n # from installing dependencies to user list of collections.\n collections_path_list = [\n util.abs_path(\n os.path.join(self._config.scenario.ephemeral_directory, \"collections\")\n )\n ]\n if collection_indicator in self._config.project_directory:\n collection_path, right = self._config.project_directory.rsplit(\n collection_indicator, 1\n )\n collections_path_list.append(util.abs_path(collection_path))\n collections_path_list.extend(\n [\n util.abs_path(\n os.path.join(os.path.expanduser(\"~\"), \".ansible/collections\")\n ),\n \"/usr/share/ansible/collections\",\n \"/etc/ansible/collections\",\n ]\n )\n env = util.merge_dicts(\n os.environ,\n {\n \"ANSIBLE_CONFIG\": self._config.provisioner.config_file,\n \"ANSIBLE_ROLES_PATH\": \":\".join(\n [\n util.abs_path(\n os.path.join(\n self._config.scenario.ephemeral_directory, \"roles\"\n )\n ),\n util.abs_path(\n os.path.join(self._config.project_directory, os.path.pardir)\n ),\n util.abs_path(\n os.path.join(os.path.expanduser(\"~\"), \".ansible\", \"roles\")\n ),\n \"/usr/share/ansible/roles\",\n \"/etc/ansible/roles\",\n *os.environ.get(\"ANSIBLE_ROLES_PATH\", \"\").split(\":\"),\n ]\n ),\n self._config.ansible_collections_path: \":\".join(collections_path_list),\n \"ANSIBLE_LIBRARY\": \":\".join(self._get_modules_directories()),\n \"ANSIBLE_FILTER_PLUGINS\": \":\".join(\n [\n self._get_filter_plugin_directory(),\n util.abs_path(\n os.path.join(\n self._config.scenario.ephemeral_directory,\n \"plugins\",\n \"filter\",\n )\n ),\n util.abs_path(\n os.path.join(\n self._config.project_directory, \"plugins\", \"filter\"\n )\n ),\n util.abs_path(\n os.path.join(\n os.path.expanduser(\"~\"), \".ansible\", \"plugins\", \"filter\"\n )\n ),\n \"/usr/share/ansible/plugins/filter\",\n ]\n ),\n },\n )\n env = util.merge_dicts(env, self._config.env)\n\n return env\n\n @property\n def name(self):\n return self._config.config[\"provisioner\"][\"name\"]\n\n @property\n def ansible_args(self):\n return self._config.config[\"provisioner\"][\"ansible_args\"]\n\n @property\n def config_options(self):\n return util.merge_dicts(\n self.default_config_options,\n self._config.config[\"provisioner\"][\"config_options\"],\n )\n\n @property\n def options(self):\n if self._config.action in [\"create\", \"destroy\"]:\n return self.default_options\n\n o = self._config.config[\"provisioner\"][\"options\"]\n # NOTE(retr0h): Remove verbose options added by the user while in\n # debug.\n if self._config.debug:\n o = util.filter_verbose_permutation(o)\n\n return util.merge_dicts(self.default_options, o)\n\n @property\n def env(self):\n default_env = self.default_env\n env = self._config.config[\"provisioner\"][\"env\"].copy()\n # ensure that all keys and values are strings\n env = {str(k): str(v) for k, v in env.items()}\n\n roles_path = default_env[\"ANSIBLE_ROLES_PATH\"]\n library_path = default_env[\"ANSIBLE_LIBRARY\"]\n filter_plugins_path = default_env[\"ANSIBLE_FILTER_PLUGINS\"]\n\n try:\n path = self._absolute_path_for(env, \"ANSIBLE_ROLES_PATH\")\n roles_path = \"{}:{}\".format(roles_path, path)\n except KeyError:\n pass\n\n try:\n path = self._absolute_path_for(env, \"ANSIBLE_LIBRARY\")\n library_path = \"{}:{}\".format(library_path, path)\n except KeyError:\n pass\n\n try:\n path = self._absolute_path_for(env, \"ANSIBLE_FILTER_PLUGINS\")\n filter_plugins_path = \"{}:{}\".format(filter_plugins_path, path)\n except KeyError:\n pass\n\n env[\"ANSIBLE_ROLES_PATH\"] = roles_path\n env[\"ANSIBLE_LIBRARY\"] = library_path\n 
env[\"ANSIBLE_FILTER_PLUGINS\"] = filter_plugins_path\n\n return util.merge_dicts(default_env, env)\n\n @property\n def hosts(self):\n return self._config.config[\"provisioner\"][\"inventory\"][\"hosts\"]\n\n @property\n def host_vars(self):\n return self._config.config[\"provisioner\"][\"inventory\"][\"host_vars\"]\n\n @property\n def group_vars(self):\n return self._config.config[\"provisioner\"][\"inventory\"][\"group_vars\"]\n\n @property\n def links(self):\n return self._config.config[\"provisioner\"][\"inventory\"][\"links\"]\n\n @property\n def inventory(self):\n \"\"\"\n Create an inventory structure and returns a dict.\n\n .. code-block:: yaml\n ungrouped:\n vars:\n foo: bar\n hosts:\n instance-1:\n instance-2:\n children:\n $child_group_name:\n hosts:\n instance-1:\n instance-2:\n $group_name:\n hosts:\n instance-1:\n ansible_connection: docker\n instance-2:\n ansible_connection: docker\n\n :return: str\n \"\"\"\n dd = self._vivify()\n for platform in self._config.platforms.instances:\n for group in platform.get(\"groups\", [\"ungrouped\"]):\n instance_name = platform[\"name\"]\n connection_options = self.connection_options(instance_name)\n molecule_vars = {\n \"molecule_file\": \"{{ lookup('env', 'MOLECULE_FILE') }}\",\n \"molecule_ephemeral_directory\": \"{{ lookup('env', 'MOLECULE_EPHEMERAL_DIRECTORY') }}\",\n \"molecule_scenario_directory\": \"{{ lookup('env', 'MOLECULE_SCENARIO_DIRECTORY') }}\",\n \"molecule_yml\": \"{{ lookup('file', molecule_file) | from_yaml }}\",\n \"molecule_instance_config\": \"{{ lookup('env', 'MOLECULE_INSTANCE_CONFIG') }}\",\n \"molecule_no_log\": \"{{ lookup('env', 'MOLECULE_NO_LOG') or not \"\n \"molecule_yml.provisioner.log|default(False) | bool }}\",\n }\n\n # All group\n dd[\"all\"][\"hosts\"][instance_name] = connection_options\n dd[\"all\"][\"vars\"] = molecule_vars\n # Named group\n dd[group][\"hosts\"][instance_name] = connection_options\n dd[group][\"vars\"] = molecule_vars\n # Ungrouped\n dd[\"ungrouped\"][\"vars\"] = {}\n # Children\n for child_group in platform.get(\"children\", []):\n dd[group][\"children\"][child_group][\"hosts\"][\n instance_name\n ] = connection_options\n\n return self._default_to_regular(dd)\n\n @property\n def inventory_directory(self):\n return self._config.scenario.inventory_directory\n\n @property\n def inventory_file(self):\n return os.path.join(self.inventory_directory, \"ansible_inventory.yml\")\n\n @property\n def config_file(self):\n return os.path.join(self._config.scenario.ephemeral_directory, \"ansible.cfg\")\n\n @property # type: ignore\n @util.lru_cache()\n def playbooks(self):\n return ansible_playbooks.AnsiblePlaybooks(self._config)\n\n @property\n def directory(self):\n return os.path.join(\n os.path.dirname(__file__),\n os.path.pardir,\n os.path.pardir,\n \"molecule\",\n \"provisioner\",\n \"ansible\",\n )\n\n def cleanup(self):\n \"\"\"\n Execute `ansible-playbook` against the cleanup playbook and returns \\\n None.\n\n :return: None\n \"\"\"\n pb = self._get_ansible_playbook(self.playbooks.cleanup)\n pb.execute()\n\n def connection_options(self, instance_name):\n d = self._config.driver.ansible_connection_options(instance_name)\n\n return util.merge_dicts(\n d, self._config.config[\"provisioner\"][\"connection_options\"]\n )\n\n def check(self):\n \"\"\"\n Execute ``ansible-playbook`` against the converge playbook with the \\\n ``--check`` flag and returns None.\n\n :return: None\n \"\"\"\n pb = self._get_ansible_playbook(self.playbooks.converge)\n pb.add_cli_arg(\"check\", True)\n 
pb.execute()\n\n def converge(self, playbook=None, **kwargs):\n \"\"\"\n Execute ``ansible-playbook`` against the converge playbook unless \\\n specified otherwise and returns a string.\n\n :param playbook: An optional string containing an absolute path to a\n playbook.\n :param kwargs: An optional keyword arguments.\n :return: str\n \"\"\"\n pb = self._get_ansible_playbook(playbook or self.playbooks.converge, **kwargs)\n\n return pb.execute()\n\n def destroy(self):\n \"\"\"\n Execute ``ansible-playbook`` against the destroy playbook and returns \\\n None.\n\n :return: None\n \"\"\"\n pb = self._get_ansible_playbook(self.playbooks.destroy)\n pb.execute()\n\n def side_effect(self):\n \"\"\"\n Execute ``ansible-playbook`` against the side_effect playbook and \\\n returns None.\n\n :return: None\n \"\"\"\n pb = self._get_ansible_playbook(self.playbooks.side_effect)\n pb.execute()\n\n def create(self):\n \"\"\"\n Execute ``ansible-playbook`` against the create playbook and returns \\\n None.\n\n :return: None\n \"\"\"\n pb = self._get_ansible_playbook(self.playbooks.create)\n pb.execute()\n\n def prepare(self):\n \"\"\"\n Execute ``ansible-playbook`` against the prepare playbook and returns \\\n None.\n\n :return: None\n \"\"\"\n pb = self._get_ansible_playbook(self.playbooks.prepare)\n pb.execute()\n\n def syntax(self):\n \"\"\"\n Execute ``ansible-playbook`` against the converge playbook with the \\\n ``-syntax-check`` flag and returns None.\n\n :return: None\n \"\"\"\n pb = self._get_ansible_playbook(self.playbooks.converge)\n pb.add_cli_arg(\"syntax-check\", True)\n pb.execute()\n\n def verify(self):\n \"\"\"\n Execute ``ansible-playbook`` against the verify playbook and returns \\\n None.\n\n :return: None\n \"\"\"\n if not self.playbooks.verify:\n LOG.warning(\"Skipping, verify playbook not configured.\")\n return\n\n pb = self._get_ansible_playbook(self.playbooks.verify)\n pb.execute()\n\n def write_config(self):\n \"\"\"\n Write the provisioner's config file to disk and returns None.\n\n :return: None\n \"\"\"\n template = util.render_template(\n self._get_config_template(), config_options=self.config_options\n )\n util.write_file(self.config_file, template)\n\n def manage_inventory(self):\n \"\"\"\n Manage inventory for Ansible and returns None.\n\n :returns: None\n \"\"\"\n self._write_inventory()\n self._remove_vars()\n if not self.links:\n self._add_or_update_vars()\n else:\n self._link_or_update_vars()\n\n def abs_path(self, path):\n return util.abs_path(os.path.join(self._config.scenario.directory, path))\n\n def _add_or_update_vars(self):\n \"\"\"\n Create host and/or group vars and returns None.\n\n :returns: None\n \"\"\"\n # Create the hosts extra inventory source (only if not empty)\n hosts_file = os.path.join(self.inventory_directory, \"hosts\")\n if self.hosts:\n util.write_file(hosts_file, util.safe_dump(self.hosts))\n # Create the host_vars and group_vars directories\n for target in [\"host_vars\", \"group_vars\"]:\n if target == \"host_vars\":\n vars_target = copy.deepcopy(self.host_vars)\n for instance_name, _ in self.host_vars.items():\n instance_key = instance_name\n vars_target[instance_key] = vars_target.pop(instance_name)\n\n elif target == \"group_vars\":\n vars_target = self.group_vars\n\n if vars_target:\n target_vars_directory = os.path.join(self.inventory_directory, target)\n\n if not os.path.isdir(util.abs_path(target_vars_directory)):\n os.mkdir(util.abs_path(target_vars_directory))\n\n for target in vars_target.keys():\n target_var_content = 
vars_target[target]\n path = os.path.join(util.abs_path(target_vars_directory), target)\n util.write_file(path, util.safe_dump(target_var_content))\n\n def _write_inventory(self):\n \"\"\"\n Write the provisioner's inventory file to disk and returns None.\n\n :return: None\n \"\"\"\n self._verify_inventory()\n\n util.write_file(self.inventory_file, util.safe_dump(self.inventory))\n\n def _remove_vars(self):\n \"\"\"\n Remove hosts/host_vars/group_vars and returns None.\n\n :returns: None\n \"\"\"\n for name in (\"hosts\", \"group_vars\", \"host_vars\"):\n d = os.path.join(self.inventory_directory, name)\n if os.path.islink(d) or os.path.isfile(d):\n os.unlink(d)\n elif os.path.isdir(d):\n shutil.rmtree(d)\n\n def _link_or_update_vars(self):\n \"\"\"\n Create or updates the symlink to group_vars and returns None.\n\n :returns: None\n \"\"\"\n for d, source in self.links.items():\n target = os.path.join(self.inventory_directory, d)\n source = os.path.join(self._config.scenario.directory, source)\n\n if not os.path.exists(source):\n msg = \"The source path '{}' does not exist.\".format(source)\n util.sysexit_with_message(msg)\n msg = \"Inventory {} linked to {}\".format(source, target)\n LOG.info(msg)\n os.symlink(source, target)\n\n def _get_ansible_playbook(self, playbook, **kwargs):\n \"\"\"\n Get an instance of AnsiblePlaybook and returns it.\n\n :param playbook: A string containing an absolute path to a\n provisioner's playbook.\n :param kwargs: An optional keyword arguments.\n :return: object\n \"\"\"\n return ansible_playbook.AnsiblePlaybook(playbook, self._config, **kwargs)\n\n def _verify_inventory(self):\n \"\"\"\n Verify the inventory is valid and returns None.\n\n :return: None\n \"\"\"\n if not self.inventory:\n msg = \"Instances missing from the 'platform' \" \"section of molecule.yml.\"\n util.sysexit_with_message(msg)\n\n def _get_config_template(self):\n \"\"\"\n Return a config template string.\n\n :return: str\n \"\"\"\n return \"\"\"\n{% for section, section_dict in config_options.items() -%}\n[{{ section }}]\n{% for k, v in section_dict.items() -%}\n{{ k }} = {{ v }}\n{% endfor -%}\n{% endfor -%}\n\"\"\".strip()\n\n def _vivify(self):\n \"\"\"\n Return an autovivification default dict.\n\n :return: dict\n \"\"\"\n return collections.defaultdict(self._vivify)\n\n def _default_to_regular(self, d):\n if isinstance(d, collections.defaultdict):\n d = {k: self._default_to_regular(v) for k, v in d.items()}\n\n return d\n\n def _get_plugin_directory(self):\n return os.path.join(self.directory, \"plugins\")\n\n def _get_modules_directories(self):\n \"\"\"Return list of ansilbe module includes directories.\n\n Adds modules directory from molecule and its plugins.\n \"\"\"\n paths = [util.abs_path(os.path.join(self._get_plugin_directory(), \"modules\"))]\n\n for d in drivers():\n p = d.modules_dir()\n if p:\n paths.append(p)\n paths.extend(\n [\n util.abs_path(\n os.path.join(self._config.scenario.ephemeral_directory, \"library\")\n ),\n util.abs_path(os.path.join(self._config.project_directory, \"library\")),\n util.abs_path(\n os.path.join(\n os.path.expanduser(\"~\"),\n \".ansible\",\n \"plugins\",\n \"modules\",\n )\n ),\n \"/usr/share/ansible/plugins/modules\",\n ]\n )\n\n if os.environ.get(\"ANSIBLE_LIBRARY\"):\n paths.extend(map(util.abs_path, os.environ[\"ANSIBLE_LIBRARY\"].split(\":\")))\n\n return paths\n\n def _get_filter_plugin_directory(self):\n return util.abs_path(os.path.join(self._get_plugin_directory(), \"filter\"))\n\n def _absolute_path_for(self, env, key):\n 
return \":\".join([self.abs_path(p) for p in env[key].split(\":\")])\n",
"path": "src/molecule/provisioner/ansible.py"
}
] | diff --git a/.github/workflows/tox.yml b/.github/workflows/tox.yml
index d4a61c8db1..af350717a0 100644
--- a/.github/workflows/tox.yml
+++ b/.github/workflows/tox.yml
@@ -25,17 +25,17 @@ jobs:
- tox_env: lint
- tox_env: docs
- tox_env: py36
- PREFIX: PYTEST_REQPASS=433
+ PREFIX: PYTEST_REQPASS=435
- tox_env: py37
- PREFIX: PYTEST_REQPASS=433
+ PREFIX: PYTEST_REQPASS=435
- tox_env: py38
- PREFIX: PYTEST_REQPASS=433
+ PREFIX: PYTEST_REQPASS=435
- tox_env: py39
- PREFIX: PYTEST_REQPASS=433
+ PREFIX: PYTEST_REQPASS=435
- tox_env: py36-devel
- PREFIX: PYTEST_REQPASS=433
+ PREFIX: PYTEST_REQPASS=435
- tox_env: py39-devel
- PREFIX: PYTEST_REQPASS=433
+ PREFIX: PYTEST_REQPASS=435
- tox_env: packaging
- tox_env: eco
- tox_env: dockerfile
diff --git a/src/molecule/provisioner/ansible.py b/src/molecule/provisioner/ansible.py
index 9982e03691..99a7173aa9 100644
--- a/src/molecule/provisioner/ansible.py
+++ b/src/molecule/provisioner/ansible.py
@@ -940,7 +940,7 @@ def _get_modules_directories(self):
)
if os.environ.get("ANSIBLE_LIBRARY"):
- paths.extend([util.abs_path(os.environ.get("ANSIBLE_LIBRARY"))])
+ paths.extend(map(util.abs_path, os.environ["ANSIBLE_LIBRARY"].split(":")))
return paths
diff --git a/src/molecule/test/unit/provisioner/test_ansible.py b/src/molecule/test/unit/provisioner/test_ansible.py
index 61bd744a1a..73ffe768a2 100644
--- a/src/molecule/test/unit/provisioner/test_ansible.py
+++ b/src/molecule/test/unit/provisioner/test_ansible.py
@@ -20,6 +20,7 @@
import collections
import os
+import re
import pytest
@@ -684,22 +685,36 @@ def test_get_plugin_directory(_instance):
assert ("molecule", "provisioner", "ansible", "plugins") == parts[-4:]
-def test_get_modules_directories(_instance, monkeypatch):
- result = _instance._get_modules_directories()[0]
- parts = pytest.helpers.os_split(result)
- x = ("molecule", "provisioner", "ansible", "plugins", "modules")
+def test_get_modules_directories_default(_instance, monkeypatch):
+ monkeypatch.delenv("ANSIBLE_LIBRARY", raising=False)
+
+ paths = _instance._get_modules_directories()
+
+ assert len(paths) == 5
+ assert re.search(r"molecule/provisioner/ansible/plugins/modules$", paths[0])
+ assert re.search(r"\.cache/molecule/[^/]+/default/library$", paths[1])
+ assert re.search(r"/library$", paths[2])
+ assert re.search(r"\.ansible/plugins/modules$", paths[3])
+ assert re.search(r"/usr/share/ansible/plugins/modules$", paths[4])
+
+
+def test_get_modules_directories_single_ansible_library(_instance, monkeypatch):
+ monkeypatch.setenv("ANSIBLE_LIBRARY", "/abs/path/lib")
+
+ paths = _instance._get_modules_directories()
+
+ assert len(paths) == 6
+ assert paths[-1] == "/abs/path/lib"
- assert x == parts[-5:]
- lib_prev = os.environ.get("ANSIBLE_LIBRARY")
- monkeypatch.setenv("ANSIBLE_LIBRARY", "/foo/bar")
- result = _instance._get_modules_directories()[-1]
- monkeypatch.setenv("ANSIBLE_LIBRARY", lib_prev if lib_prev else "")
+def test_get_modules_directories_multi_ansible_library(_instance, monkeypatch):
+ monkeypatch.setenv("ANSIBLE_LIBRARY", "relpath/lib:/abs/path/lib")
- env_lib_result_parts = pytest.helpers.os_split(result)
- env_lib_expected_parts = ("foo", "bar")
+ paths = _instance._get_modules_directories()
- assert env_lib_result_parts == env_lib_expected_parts[-2:]
+ assert len(paths) == 7
+ assert paths[-2].endswith("relpath/lib")
+ assert paths[-1] == "/abs/path/lib"
def test_get_filter_plugin_directory(_instance):
|
ccnmtl__django-pagetree-89 | Custom pageblocks in Hierarchy menu
Jess has a feature request for pagetree:
WORTH has a giant hierarchy menu: https://worth2.ccnmtl.columbia.edu/pages/edit/ and it would be nice to see which sections have which pageblocks on them. She says it shouldn't list text blocks, html blocks, quizzes etc., since many of those will be present on every page.
| [
{
"content": "# Copyright (c) 2007-2015, Columbia Center For New Media Teaching And Learning (CCNMTL)\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the CCNMTL nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY CCNMTL ``AS IS'' AND ANY\n# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL <copyright holder> BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom setuptools import setup\n\nsetup(\n name=\"django-pagetree\",\n version=\"1.1.4\",\n author=\"Anders Pearson\",\n author_email=\"[email protected]\",\n url=\"https://github.com/ccnmtl/django-pagetree\",\n description=\"Tree of Pages helper application\",\n long_description=\"Application for managing trees of pages\",\n install_requires=[\n \"Django\",\n \"django-treebeard\",\n \"Markdown\",\n \"coverage\",\n \"django-markwhat\",\n \"django-bootstrap-form\",\n ],\n scripts=[],\n license=\"BSD\",\n platforms=[\"any\"],\n zip_safe=False,\n packages=['pagetree'],\n include_package_data=True,\n )\n",
"path": "setup.py"
}
] | [
{
"content": "# Copyright (c) 2007-2015, Columbia Center For New Media Teaching And Learning (CCNMTL)\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the CCNMTL nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY CCNMTL ``AS IS'' AND ANY\n# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL <copyright holder> BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom setuptools import setup\n\nsetup(\n name=\"django-pagetree\",\n version=\"1.1.5\",\n author=\"Anders Pearson\",\n author_email=\"[email protected]\",\n url=\"https://github.com/ccnmtl/django-pagetree\",\n description=\"Tree of Pages helper application\",\n long_description=\"Application for managing trees of pages\",\n install_requires=[\n \"Django\",\n \"django-treebeard\",\n \"Markdown\",\n \"coverage\",\n \"django-markwhat\",\n \"django-bootstrap-form\",\n ],\n scripts=[],\n license=\"BSD\",\n platforms=[\"any\"],\n zip_safe=False,\n packages=['pagetree'],\n include_package_data=True,\n )\n",
"path": "setup.py"
}
] | diff --git a/CHANGES.txt b/CHANGES.txt
index edfd1023..1d7c5f10 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,3 +1,7 @@
+1.1.5 (2015-05-15)
+==================
+* Add pageblock count to sections in the edit page
+
1.1.4 (2015-05-13)
==================
* Show sections with empty labels in the edit_page hierarchy
diff --git a/pagetree/media/css/pagetree.css b/pagetree/media/css/pagetree.css
index 1db47171..fde9040d 100644
--- a/pagetree/media/css/pagetree.css
+++ b/pagetree/media/css/pagetree.css
@@ -1,5 +1,4 @@
-.admin_block_column
-{
+.admin_block_column {
width: 870px;
margin: 5px 0 0 15px;
padding: 5px 0 0 25px;
@@ -13,27 +12,27 @@
border-bottom-color: #eeeeee;
}
-.admin_tools_column
-{
+.admin_tools_column {
text-align: left;
}
-.admin_block_view_row1
-{
+.admin_block_view_row1 {
background-color: #eee;
}
-.admin_block_view_row2
-{
+.admin_block_view_row2 {
background-color: #fff;
}
-#id_label, #id_slug, #id_template
-{
+#id_label, #id_slug, #id_template {
width: 300px;
}
.draghandle {
cursor: move;
float: left;
+}
+
+.pagetree-pageblock-section-count {
+ font-size: smaller;
}
\ No newline at end of file
diff --git a/pagetree/templates/pagetree/edit_page.html b/pagetree/templates/pagetree/edit_page.html
index deba18e0..d1fcc417 100644
--- a/pagetree/templates/pagetree/edit_page.html
+++ b/pagetree/templates/pagetree/edit_page.html
@@ -124,8 +124,14 @@ <h3 style="margin-top: 0;"><a href="{{root.get_edit_url}}">{{hierarchy.name}}</a
<span class="glyphicon glyphicon-hand-right"></span>
<strong>
{% endifequal %}
- <a href="{{s.get_edit_url}}"
- >{{s.label|default:"Empty Label"}}{% if s.label|length < 1 %} ({{s.slug}}){% endif %}</a>
+ <span title="The number of pageblocks in this section"
+ class="pagetree-pageblock-section-count">
+ {{s.pageblock_set.count}}
+ </span>
+ <a href="{{s.get_edit_url}}"
+ >{{s.label|default:"Empty Label"}}
+ {% if s.label|length < 1 %} ({{s.slug}}) {% endif %}
+ </a>
{% ifequal s section %}
</strong>
<span class="glyphicon glyphicon-hand-left"></span>
diff --git a/setup.py b/setup.py
index 2acf536f..8fcd3aa7 100644
--- a/setup.py
+++ b/setup.py
@@ -27,7 +27,7 @@
setup(
name="django-pagetree",
- version="1.1.4",
+ version="1.1.5",
author="Anders Pearson",
author_email="[email protected]",
url="https://github.com/ccnmtl/django-pagetree",
|
bridgecrewio__checkov-4012 | Dependent Package "packaging" upgrade halts invocation
**Describe the issue**
Currently we are running checkov in a CI environment in Azure DevOps over our Terraform configurations. Earlier today Checkov started failing to run, at first it was believed to link to the release that occurred earlier.
Investigation though has shown that the dependency `packaging` has also had a release, wherein it has dropped `LegacyVersion` from its codebase (see stack trace).
The quick solution is to pin `packaging==21.3` to ensure the needed codebase functionality is in place.
This seems to only apply to environments that fresh install everything, as this was innoticed in local development until the CI pipeline triggered the issue.
**Examples**
In the ADO CI this simple version should recreate the behavior:
```
- script: |
python -m pip install --upgrade pip setuptools wheel
pip install checkov
displayName: "Install Checkov"
- task: Bash@3
displayName: Run Checkov tests
inputs:
targetType: "inline"
script: |
checkov -d . -o cli
```
**Exception Trace**
```sh
Traceback (most recent call last):
File "/opt/hostedtoolcache/Python/3.8.15/x64/bin/checkov", line 2, in <module>
from checkov.main import run
File "/opt/hostedtoolcache/Python/3.8.15/x64/lib/python3.8/site-packages/checkov/main.py", line 20, in <module>
from checkov.argo_workflows.runner import Runner as argo_workflows_runner
File "/opt/hostedtoolcache/Python/3.8.15/x64/lib/python3.8/site-packages/checkov/argo_workflows/runner.py", line 7, in <module>
from checkov.common.images.image_referencer import ImageReferencer, Image
File "/opt/hostedtoolcache/Python/3.8.15/x64/lib/python3.8/site-packages/checkov/common/images/image_referencer.py", line 12, in <module>
from checkov.common.bridgecrew.vulnerability_scanning.image_scanner import image_scanner
File "/opt/hostedtoolcache/Python/3.8.15/x64/lib/python3.8/site-packages/checkov/common/bridgecrew/vulnerability_scanning/image_scanner.py", line 15, in <module>
from checkov.common.bridgecrew.vulnerability_scanning.integrations.docker_image_scanning import \
File "/opt/hostedtoolcache/Python/3.8.15/x64/lib/python3.8/site-packages/checkov/common/bridgecrew/vulnerability_scanning/integrations/docker_image_scanning.py", line 8, in <module>
from checkov.common.bridgecrew.vulnerability_scanning.integrations.twistcli import TwistcliIntegration
File "/opt/hostedtoolcache/Python/3.8.15/x64/lib/python3.8/site-packages/checkov/common/bridgecrew/vulnerability_scanning/integrations/twistcli.py", line 11, in <module>
from checkov.common.bridgecrew.platform_integration import bc_integration
File "/opt/hostedtoolcache/Python/3.8.15/x64/lib/python3.8/site-packages/checkov/common/bridgecrew/platform_integration.py", line 31, in <module>
from checkov.common.bridgecrew.wrapper import reduce_scan_reports, persist_checks_results, \
File "/opt/hostedtoolcache/Python/3.8.15/x64/lib/python3.8/site-packages/checkov/common/bridgecrew/wrapper.py", line 14, in <module>
from checkov.common.util.json_utils import CustomJSONEncoder
File "/opt/hostedtoolcache/Python/3.8.15/x64/lib/python3.8/site-packages/checkov/common/util/json_utils.py", line 6, in <module>
from packaging.version import LegacyVersion, Version
ImportError: cannot import name 'LegacyVersion' from 'packaging.version' (/opt/hostedtoolcache/Python/3.8.15/x64/lib/python3.8/site-packages/packaging/version.py)
```
**Desktop (please complete the following information):**
- OS: Ubuntu 20.04 ADO Pipeline Container
- Checkov Version: tested 2.2.124 and 2.2.116, likely applies to others if they have the dependency
**Additional context**
The `packaging` release that causes this issue is `22.0`; `21.3` appears to function as expected.
| [
{
"content": "#!/usr/bin/env python\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\n\nimport setuptools\nfrom setuptools import setup\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nlogger = logging.getLogger(__name__)\nspec = util.spec_from_file_location(\n \"checkov.version\", os.path.join(\"checkov\", \"version.py\")\n)\n# noinspection PyUnresolvedReferences\nmod = util.module_from_spec(spec)\nspec.loader.exec_module(mod) # type: ignore\nversion = mod.version # type: ignore\n\nsetup(\n extras_require={\n \"dev\": [\n \"pytest==5.3.1\",\n \"coverage==5.5\",\n \"coverage-badge\",\n \"GitPython==3.1.7\",\n \"bandit\",\n \"jsonschema\",\n ]\n },\n install_requires=[\n \"bc-python-hcl2==0.3.47\",\n \"bc-detect-secrets==1.4.5\",\n \"deep-merge\",\n \"tabulate\",\n \"colorama\",\n \"termcolor\",\n \"junit-xml>=1.9\",\n \"dpath<2,>=1.5.0\",\n \"pyyaml>=5.4.1\",\n \"boto3>=1.17\",\n \"gitpython\",\n \"jmespath\",\n \"tqdm\",\n \"update-checker\",\n \"semantic-version\",\n \"packaging\",\n \"cloudsplaining>=0.4.3\",\n \"networkx<2.7\",\n \"dockerfile-parse\",\n \"docker\",\n \"configargparse\",\n \"argcomplete\",\n \"policyuniverse\",\n \"typing-extensions>=4.1.0\",\n \"importlib-metadata>=0.12\",\n \"cachetools\",\n \"cyclonedx-python-lib>=2.4.0,<4.0.0\",\n \"packageurl-python\",\n \"click>=8.0.0\",\n \"aiohttp\",\n \"aiodns\",\n \"aiomultiprocess\",\n \"jsonpath-ng\",\n \"jsonschema>=3.0.2,<4.0.0\",\n \"prettytable>=3.0.0\",\n \"pycep-parser==0.3.9\",\n \"charset-normalizer\",\n \"pyston-autoload==2.3.5; python_version < '3.11' and (sys_platform == 'linux' or sys_platform == 'darwin') and platform_machine == 'x86_64'\",\n \"pyston==2.3.5; python_version < '3.11' and (sys_platform == 'linux' or sys_platform == 'darwin') and platform_machine == 'x86_64'\",\n \"schema\",\n \"requests>=2.26.0\",\n ],\n dependency_links=[], # keep it empty, needed for pipenv-setup\n license=\"Apache License 2.0\",\n name=\"checkov\",\n version=version,\n python_requires=\">=3.7\",\n description=\"Infrastructure as code static analysis\",\n author=\"bridgecrew\",\n author_email=\"[email protected]\",\n url=\"https://github.com/bridgecrewio/checkov\",\n packages=setuptools.find_packages(exclude=[\"tests*\", \"integration_tests*\"]),\n include_package_data=True,\n package_dir={\n \"checkov.bicep.checks.graph_checks\": \"checkov/bicep/checks/graph_checks\",\n \"checkov.cloudformation.checks.graph_checks\": \"checkov/cloudformation/checks/graph_checks\",\n \"checkov.dockerfile.checks.graph_checks\": \"checkov/dockerfile/checks/graph_checks\",\n \"checkov.github_actions.checks.graph_checks\": \"checkov/github_actions/checks/graph_checks\",\n \"checkov.terraform.checks.graph_checks\": \"checkov/terraform/checks/graph_checks\",\n },\n package_data={\n \"checkov\": [\"py.typed\"],\n \"checkov.bicep.checks.graph_checks\": [\"*.yaml\"],\n \"checkov.common.util.templates\": [\"*.jinja2\"],\n \"checkov.dockerfile.checks.graph_checks\": [\"*.yaml\"],\n \"checkov.github_actions.checks.graph_checks\": [\"*.yaml\"],\n \"checkov.terraform.checks.graph_checks\": [\n \"aws/*.yaml\",\n \"gcp/*.yaml\",\n \"azure/*.yaml\",\n ],\n },\n scripts=[\"bin/checkov\", \"bin/checkov.cmd\"],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n 
\"Intended Audience :: System Administrators\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Topic :: Security\",\n \"Topic :: Software Development :: Build Tools\",\n \"Typing :: Typed\",\n ],\n)\n",
"path": "setup.py"
}
] | [
{
"content": "#!/usr/bin/env python\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\n\nimport setuptools\nfrom setuptools import setup\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nlogger = logging.getLogger(__name__)\nspec = util.spec_from_file_location(\n \"checkov.version\", os.path.join(\"checkov\", \"version.py\")\n)\n# noinspection PyUnresolvedReferences\nmod = util.module_from_spec(spec)\nspec.loader.exec_module(mod) # type: ignore\nversion = mod.version # type: ignore\n\nsetup(\n extras_require={\n \"dev\": [\n \"pytest==5.3.1\",\n \"coverage==5.5\",\n \"coverage-badge\",\n \"GitPython==3.1.7\",\n \"bandit\",\n \"jsonschema\",\n ]\n },\n install_requires=[\n \"bc-python-hcl2==0.3.47\",\n \"bc-detect-secrets==1.4.5\",\n \"deep-merge\",\n \"tabulate\",\n \"colorama\",\n \"termcolor\",\n \"junit-xml>=1.9\",\n \"dpath<2,>=1.5.0\",\n \"pyyaml>=5.4.1\",\n \"boto3>=1.17\",\n \"gitpython\",\n \"jmespath\",\n \"tqdm\",\n \"update-checker\",\n \"semantic-version\",\n \"packaging==21.3\",\n \"cloudsplaining>=0.4.3\",\n \"networkx<2.7\",\n \"dockerfile-parse\",\n \"docker\",\n \"configargparse\",\n \"argcomplete\",\n \"policyuniverse\",\n \"typing-extensions>=4.1.0\",\n \"importlib-metadata>=0.12\",\n \"cachetools\",\n \"cyclonedx-python-lib>=2.4.0,<4.0.0\",\n \"packageurl-python\",\n \"click>=8.0.0\",\n \"aiohttp\",\n \"aiodns\",\n \"aiomultiprocess\",\n \"jsonpath-ng\",\n \"jsonschema>=3.0.2,<4.0.0\",\n \"prettytable>=3.0.0\",\n \"pycep-parser==0.3.9\",\n \"charset-normalizer\",\n \"pyston-autoload==2.3.5; python_version < '3.11' and (sys_platform == 'linux' or sys_platform == 'darwin') and platform_machine == 'x86_64'\",\n \"pyston==2.3.5; python_version < '3.11' and (sys_platform == 'linux' or sys_platform == 'darwin') and platform_machine == 'x86_64'\",\n \"schema\",\n \"requests>=2.26.0\",\n ],\n dependency_links=[], # keep it empty, needed for pipenv-setup\n license=\"Apache License 2.0\",\n name=\"checkov\",\n version=version,\n python_requires=\">=3.7\",\n description=\"Infrastructure as code static analysis\",\n author=\"bridgecrew\",\n author_email=\"[email protected]\",\n url=\"https://github.com/bridgecrewio/checkov\",\n packages=setuptools.find_packages(exclude=[\"tests*\", \"integration_tests*\"]),\n include_package_data=True,\n package_dir={\n \"checkov.bicep.checks.graph_checks\": \"checkov/bicep/checks/graph_checks\",\n \"checkov.cloudformation.checks.graph_checks\": \"checkov/cloudformation/checks/graph_checks\",\n \"checkov.dockerfile.checks.graph_checks\": \"checkov/dockerfile/checks/graph_checks\",\n \"checkov.github_actions.checks.graph_checks\": \"checkov/github_actions/checks/graph_checks\",\n \"checkov.terraform.checks.graph_checks\": \"checkov/terraform/checks/graph_checks\",\n },\n package_data={\n \"checkov\": [\"py.typed\"],\n \"checkov.bicep.checks.graph_checks\": [\"*.yaml\"],\n \"checkov.common.util.templates\": [\"*.jinja2\"],\n \"checkov.dockerfile.checks.graph_checks\": [\"*.yaml\"],\n \"checkov.github_actions.checks.graph_checks\": [\"*.yaml\"],\n \"checkov.terraform.checks.graph_checks\": [\n \"aws/*.yaml\",\n \"gcp/*.yaml\",\n \"azure/*.yaml\",\n ],\n },\n scripts=[\"bin/checkov\", \"bin/checkov.cmd\"],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Environment :: Console\",\n \"Intended Audience :: 
Developers\",\n \"Intended Audience :: System Administrators\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Topic :: Security\",\n \"Topic :: Software Development :: Build Tools\",\n \"Typing :: Typed\",\n ],\n)\n",
"path": "setup.py"
}
] | diff --git a/Pipfile b/Pipfile
index 49b82a3f54..74502f78a7 100644
--- a/Pipfile
+++ b/Pipfile
@@ -55,7 +55,7 @@ jmespath = "*"
tqdm = "*"
update-checker = "*"
semantic-version = "*"
-packaging = "*"
+packaging = "==21.3"
cloudsplaining = ">=0.4.3"
networkx = "<2.7"
dockerfile-parse ="*"
diff --git a/Pipfile.lock b/Pipfile.lock
index 1584a9be8e..6c50e65f7c 100644
--- a/Pipfile.lock
+++ b/Pipfile.lock
@@ -1,7 +1,7 @@
{
"_meta": {
"hash": {
- "sha256": "ae6d1b7e14b39f049aad52a8d0bee5677dc964db53939eb49eb1587b8b523bda"
+ "sha256": "74a303f71e9f83752824ccd5e623d33c3d1b2142a84db7cb4d687e5597ffd467"
},
"pipfile-spec": 6,
"requires": {
@@ -149,14 +149,6 @@
"markers": "python_version >= '3.6'",
"version": "==4.0.2"
},
- "asynctest": {
- "hashes": [
- "sha256:5da6118a7e6d6b54d83a8f7197769d046922a44d2a99c21382f0a6e4fadae676",
- "sha256:c27862842d15d83e6a34eb0b2866c323880eb3a75e4485b079ea11748fd77fac"
- ],
- "markers": "python_version < '3.8'",
- "version": "==0.13.0"
- },
"attrs": {
"hashes": [
"sha256:29adc2665447e5191d0e7c568fde78b21f9672d344281d0c6e1ab085429b22b6",
@@ -191,19 +183,19 @@
},
"boto3": {
"hashes": [
- "sha256:6531198b9d4cd86a1945eaffb2c5d8d75c5447b72870ad2b07c411ea75d6e59c",
- "sha256:f6117707d140363c58ffe41495400cc88c35c165e0f711c6b62edadbd6f600b5"
+ "sha256:7048335b099473816240046753d77ef0953ce5a037b93b2848477dcf036d3849",
+ "sha256:e0ba3620feb430e31926270ea3dc0bb94df55a0fd2b209bd91f7904f2f2166ef"
],
"index": "pypi",
- "version": "==1.26.24"
+ "version": "==1.26.25"
},
"botocore": {
"hashes": [
- "sha256:4f9c92979b29132185f645f61bdf0fecf031ecc26a8dd1a99dbd88d323211325",
- "sha256:fb37c63d5e2b7c778f52d096c6c54207d49143cd942b4dc19297086a1385a7cd"
+ "sha256:a204140c9d7adadf3919d8024d79278f1865a20c869e4f216eaea599ca3a1743",
+ "sha256:cb489ca8fbc043cd9bf901e3e105f0dec316ed438ee883e55c9f9c77bd0f6a2d"
],
"markers": "python_version >= '3.7'",
- "version": "==1.29.24"
+ "version": "==1.29.25"
},
"cached-property": {
"hashes": [
@@ -222,11 +214,11 @@
},
"certifi": {
"hashes": [
- "sha256:0d9c601124e5a6ba9712dbc60d9c53c21e34f5f641fe83002317394311bdce14",
- "sha256:90c1a32f1d68f940488354e36370f6cca89f0f106db09518524c88d6ed83f382"
+ "sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3",
+ "sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18"
],
"markers": "python_version >= '3.6'",
- "version": "==2022.9.24"
+ "version": "==2022.12.7"
},
"cffi": {
"hashes": [
@@ -318,7 +310,7 @@
"sha256:0f8ca79bc9b1d6fcaafdbe194b17ba1a2dde44ddf19087235c3efed2ad288143",
"sha256:78ee474f07a0ca0ef6c0317bb3ebe79387aafb0c4a1e03b1d8b2b0be1e42fc78"
],
- "markers": "python_version >= '3.6' and python_version < '4'",
+ "markers": "python_version >= '3.6' and python_version < '4.0'",
"version": "==0.5.5"
},
"cloudsplaining": {
@@ -506,19 +498,11 @@
},
"importlib-metadata": {
"hashes": [
- "sha256:8a8a81bcf996e74fee46f0d16bd3eaa382a7eb20fd82445c3ad11f4090334116",
- "sha256:dd0173e8f150d6815e098fd354f6414b0f079af4644ddfe90c71e2fc6174346d"
+ "sha256:d5059f9f1e8e41f80e9c56c2ee58811450c31984dfa625329ffd7c0dad88a73b",
+ "sha256:d84d17e21670ec07990e1044a99efe8d615d860fd176fc29ef5c306068fda313"
],
"index": "pypi",
- "version": "==4.13.0"
- },
- "importlib-resources": {
- "hashes": [
- "sha256:32bb095bda29741f6ef0e5278c42df98d135391bee5f932841efc0041f748dc3",
- "sha256:c09b067d82e72c66f4f8eb12332f5efbebc9b007c0b6c40818108c9870adc363"
- ],
- "markers": "python_version < '3.9'",
- "version": "==5.10.1"
+ "version": "==5.1.0"
},
"jinja2": {
"hashes": [
@@ -1402,14 +1386,6 @@
"markers": "python_version >= '3.6'",
"version": "==4.0.2"
},
- "asynctest": {
- "hashes": [
- "sha256:5da6118a7e6d6b54d83a8f7197769d046922a44d2a99c21382f0a6e4fadae676",
- "sha256:c27862842d15d83e6a34eb0b2866c323880eb3a75e4485b079ea11748fd77fac"
- ],
- "markers": "python_version < '3.8'",
- "version": "==0.13.0"
- },
"attrs": {
"hashes": [
"sha256:29adc2665447e5191d0e7c568fde78b21f9672d344281d0c6e1ab085429b22b6",
@@ -1428,11 +1404,11 @@
},
"certifi": {
"hashes": [
- "sha256:0d9c601124e5a6ba9712dbc60d9c53c21e34f5f641fe83002317394311bdce14",
- "sha256:90c1a32f1d68f940488354e36370f6cca89f0f106db09518524c88d6ed83f382"
+ "sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3",
+ "sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18"
],
"markers": "python_version >= '3.6'",
- "version": "==2022.9.24"
+ "version": "==2022.12.7"
},
"cfgv": {
"hashes": [
@@ -1682,20 +1658,12 @@
"markers": "python_version >= '3.5'",
"version": "==3.4"
},
- "importlib-metadata": {
- "hashes": [
- "sha256:8a8a81bcf996e74fee46f0d16bd3eaa382a7eb20fd82445c3ad11f4090334116",
- "sha256:dd0173e8f150d6815e098fd354f6414b0f079af4644ddfe90c71e2fc6174346d"
- ],
- "index": "pypi",
- "version": "==4.13.0"
- },
"importlib-resources": {
"hashes": [
"sha256:32bb095bda29741f6ef0e5278c42df98d135391bee5f932841efc0041f748dc3",
"sha256:c09b067d82e72c66f4f8eb12332f5efbebc9b007c0b6c40818108c9870adc363"
],
- "markers": "python_version < '3.9'",
+ "index": "pypi",
"version": "==5.10.1"
},
"iniconfig": {
@@ -1876,21 +1844,13 @@
"markers": "python_version >= '2.6'",
"version": "==5.11.0"
},
- "pkgutil-resolve-name": {
- "hashes": [
- "sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174",
- "sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e"
- ],
- "markers": "python_version < '3.9'",
- "version": "==1.3.10"
- },
"platformdirs": {
"hashes": [
- "sha256:1006647646d80f16130f052404c6b901e80ee4ed6bef6792e1f238a8969106f7",
- "sha256:af0276409f9a02373d540bf8480021a048711d572745aef4b7842dad245eba10"
+ "sha256:1a89a12377800c81983db6be069ec068eee989748799b946cce2a6e80dcc54ca",
+ "sha256:b46ffafa316e6b83b47489d240ce17173f123a9b9c83282141c3daf26ad9ac2e"
],
"markers": "python_version >= '3.7'",
- "version": "==2.5.4"
+ "version": "==2.6.0"
},
"pluggy": {
"hashes": [
@@ -1924,14 +1884,6 @@
"markers": "python_version >= '3.6'",
"version": "==2.5.0"
},
- "pyparsing": {
- "hashes": [
- "sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb",
- "sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc"
- ],
- "markers": "python_full_version >= '3.6.8'",
- "version": "==3.0.9"
- },
"pyrsistent": {
"hashes": [
"sha256:055ab45d5911d7cae397dc418808d8802fb95262751872c841c170b0dbf51eed",
@@ -2080,11 +2032,11 @@
},
"stevedore": {
"hashes": [
- "sha256:cf99f41fc0d5a4f185ca4d3d42b03be9011b0a1ec1a4ea1a282be1b4b306dcc2",
- "sha256:fa2630e3d0ad3e22d4914aff2501445815b9a4467a6edc49387c667a38faf5bf"
+ "sha256:7f8aeb6e3f90f96832c301bff21a7eb5eefbe894c88c506483d355565d88cc1a",
+ "sha256:aa6436565c069b2946fe4ebff07f5041e0c8bf18c7376dd29edf80cf7d524e4e"
],
- "markers": "python_version >= '3.6'",
- "version": "==3.5.2"
+ "markers": "python_version >= '3.8'",
+ "version": "==4.1.1"
},
"toml": {
"hashes": [
@@ -2102,36 +2054,6 @@
"markers": "python_version < '3.11'",
"version": "==2.0.1"
},
- "typed-ast": {
- "hashes": [
- "sha256:0261195c2062caf107831e92a76764c81227dae162c4f75192c0d489faf751a2",
- "sha256:0fdbcf2fef0ca421a3f5912555804296f0b0960f0418c440f5d6d3abb549f3e1",
- "sha256:183afdf0ec5b1b211724dfef3d2cad2d767cbefac291f24d69b00546c1837fb6",
- "sha256:211260621ab1cd7324e0798d6be953d00b74e0428382991adfddb352252f1d62",
- "sha256:267e3f78697a6c00c689c03db4876dd1efdfea2f251a5ad6555e82a26847b4ac",
- "sha256:2efae9db7a8c05ad5547d522e7dbe62c83d838d3906a3716d1478b6c1d61388d",
- "sha256:370788a63915e82fd6f212865a596a0fefcbb7d408bbbb13dea723d971ed8bdc",
- "sha256:39e21ceb7388e4bb37f4c679d72707ed46c2fbf2a5609b8b8ebc4b067d977df2",
- "sha256:3e123d878ba170397916557d31c8f589951e353cc95fb7f24f6bb69adc1a8a97",
- "sha256:4879da6c9b73443f97e731b617184a596ac1235fe91f98d279a7af36c796da35",
- "sha256:4e964b4ff86550a7a7d56345c7864b18f403f5bd7380edf44a3c1fb4ee7ac6c6",
- "sha256:639c5f0b21776605dd6c9dbe592d5228f021404dafd377e2b7ac046b0349b1a1",
- "sha256:669dd0c4167f6f2cd9f57041e03c3c2ebf9063d0757dc89f79ba1daa2bfca9d4",
- "sha256:6778e1b2f81dfc7bc58e4b259363b83d2e509a65198e85d5700dfae4c6c8ff1c",
- "sha256:683407d92dc953c8a7347119596f0b0e6c55eb98ebebd9b23437501b28dcbb8e",
- "sha256:79b1e0869db7c830ba6a981d58711c88b6677506e648496b1f64ac7d15633aec",
- "sha256:7d5d014b7daa8b0bf2eaef684295acae12b036d79f54178b92a2b6a56f92278f",
- "sha256:98f80dee3c03455e92796b58b98ff6ca0b2a6f652120c263efdba4d6c5e58f72",
- "sha256:a94d55d142c9265f4ea46fab70977a1944ecae359ae867397757d836ea5a3f47",
- "sha256:a9916d2bb8865f973824fb47436fa45e1ebf2efd920f2b9f99342cb7fab93f72",
- "sha256:c542eeda69212fa10a7ada75e668876fdec5f856cd3d06829e6aa64ad17c8dfe",
- "sha256:cf4afcfac006ece570e32d6fa90ab74a17245b83dfd6655a6f68568098345ff6",
- "sha256:ebd9d7f80ccf7a82ac5f88c521115cc55d84e35bf8b446fcd7836eb6b98929a3",
- "sha256:ed855bbe3eb3715fca349c80174cfcfd699c2f9de574d40527b8429acae23a66"
- ],
- "markers": "python_version < '3.8'",
- "version": "==1.5.4"
- },
"types-cachetools": {
"hashes": [
"sha256:069cfc825697cd51445c1feabbe4edc1fae2b2315870e7a9a179a7c4a5851bee",
@@ -2237,11 +2159,11 @@
},
"virtualenv": {
"hashes": [
- "sha256:0ef5be6d07181946891f5abc8047fda8bc2f0b4b9bf222c64e6e8963baee76db",
- "sha256:635b272a8e2f77cb051946f46c60a54ace3cb5e25568228bd6b57fc70eca9ff3"
+ "sha256:ce3b1684d6e1a20a3e5ed36795a97dfc6af29bc3970ca8dab93e11ac6094b3c4",
+ "sha256:f8b927684efc6f1cc206c9db297a570ab9ad0e51c16fa9e45487d36d1905c058"
],
"markers": "python_version >= '3.6'",
- "version": "==20.16.2"
+ "version": "==20.17.1"
},
"yarl": {
"hashes": [
@@ -2322,14 +2244,6 @@
],
"markers": "python_version >= '3.7'",
"version": "==1.8.2"
- },
- "zipp": {
- "hashes": [
- "sha256:83a28fcb75844b5c0cdaf5aa4003c2d728c77e05f5aeabe8e95e56727005fbaa",
- "sha256:a7a22e05929290a67401440b39690ae6563279bced5f314609d9d03798f56766"
- ],
- "markers": "python_version >= '3.7'",
- "version": "==3.11.0"
}
}
}
diff --git a/setup.py b/setup.py
index d58f039110..d8822d4ecd 100644
--- a/setup.py
+++ b/setup.py
@@ -48,7 +48,7 @@
"tqdm",
"update-checker",
"semantic-version",
- "packaging",
+ "packaging==21.3",
"cloudsplaining>=0.4.3",
"networkx<2.7",
"dockerfile-parse",
|
swcarpentry__python-novice-inflammation-736 | Lesson 10 - numpy.mean(data) and data.mean
In lesson 10, when the lesson refers to readings_03.py, the code shows that the mean over 'data' across all days is calculated with numpy.mean: numpy.mean(data, axis=1). However, the file readings_03.py itself (at least the version I downloaded recently) uses data.mean(axis=1). Both lead to the same result, but for consistency I would suggest either modifying readings_*.py to use numpy.mean (as that is what has been used throughout the entire lesson), or explaining explicitly that both expressions lead to the same result (it would be a good time to remind students about object attributes).
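For readers comparing the two spellings, here is a minimal sketch (using a small made-up array rather than the lesson's CSV files) showing that the module-level function and the ndarray method give identical results:

```python
import numpy

# A small 2-D array stands in for the inflammation data.
data = numpy.arange(12.0).reshape(3, 4)

module_style = numpy.mean(data, axis=1)  # spelling used in the lesson text
method_style = data.mean(axis=1)         # spelling used in readings_03.py

print(module_style)                                   # [1.5 5.5 9.5]
print(numpy.array_equal(module_style, method_style))  # True
```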
| [
{
"content": "import sys\nimport numpy\n\n\ndef main():\n script = sys.argv[0]\n for filename in sys.argv[1:]:\n data = numpy.loadtxt(filename, delimiter=',')\n for m in data.mean(axis=1):\n print(m)\n\n\nif __name__ == '__main__':\n main()\n",
"path": "code/readings_03.py"
}
] | [
{
"content": "import sys\nimport numpy\n\n\ndef main():\n script = sys.argv[0]\n for filename in sys.argv[1:]:\n data = numpy.loadtxt(filename, delimiter=',')\n for m in numpy.mean(data, axis=1):\n print(m)\n\n\nif __name__ == '__main__':\n main()\n",
"path": "code/readings_03.py"
}
] | diff --git a/code/readings_03.py b/code/readings_03.py
index 7736fdf73..423ec9bf5 100644
--- a/code/readings_03.py
+++ b/code/readings_03.py
@@ -6,7 +6,7 @@ def main():
script = sys.argv[0]
for filename in sys.argv[1:]:
data = numpy.loadtxt(filename, delimiter=',')
- for m in data.mean(axis=1):
+ for m in numpy.mean(data, axis=1):
print(m)
|
codespell-project__codespell-96 | README: outdated license notice
README says:
> The Python script `codespell.py` is available with the following terms:
But currently `codespell.py` is just a thin wrapper over `codespell_lib`, with little to no creativity.
This sentence should probably read something like this:
> The Python code is available with the following terms:
| [
{
"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; version 2 of the License.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see\n# http://www.gnu.org/licenses/old-licenses/gpl-2.0.html.\n\"\"\"\nCopyright (C) 2010-2011 Lucas De Marchi <[email protected]>\nCopyright (C) 2011 ProFUSION embedded systems\n\"\"\"\n\nfrom __future__ import print_function\n\nimport codecs\nimport sys\nimport re\nfrom optparse import OptionParser\nimport os\nimport fnmatch\n\nUSAGE = \"\"\"\n\\t%prog [OPTIONS] [file1 file2 ... fileN]\n\"\"\"\nVERSION = '1.10.0.dev0'\n\nmisspellings = {}\nexclude_lines = set()\noptions = None\nfile_opener = None\nquiet_level = 0\nencodings = ['utf-8', 'iso-8859-1']\nregex = re.compile(r\"[\\w\\-']+\")\n# Users might want to link this file into /usr/local/bin, so we resolve the\n# symbolic link path to the real path if necessary.\ndefault_dictionary = os.path.join(os.path.dirname(os.path.realpath(__file__)),\n 'data', 'dictionary.txt')\n\n# OPTIONS:\n#\n# ARGUMENTS:\n# dict_filename The file containing the dictionary of misspellings.\n# If set to '-', it will be read from stdin\n# file1 .. fileN Files to check spelling\n\n\nclass QuietLevels(object):\n NONE = 0\n ENCODING = 1\n BINARY_FILE = 2\n DISABLED_FIXES = 4\n NON_AUTOMATIC_FIXES = 8\n FIXES = 16\n\n\nclass GlobMatch(object):\n def __init__(self, pattern):\n if pattern:\n self.pattern_list = pattern.split(',')\n else:\n self.pattern_list = None\n\n def match(self, filename):\n if self.pattern_list is None:\n return False\n\n for p in self.pattern_list:\n if fnmatch.fnmatch(filename, p):\n return True\n\n return False\n\n\nclass Misspelling(object):\n def __init__(self, data, fix, reason):\n self.data = data\n self.fix = fix\n self.reason = reason\n\n\nclass TermColors(object):\n def __init__(self):\n self.FILE = '\\033[33m'\n self.WWORD = '\\033[31m'\n self.FWORD = '\\033[32m'\n self.DISABLE = '\\033[0m'\n\n def disable(self):\n self.FILE = ''\n self.WWORD = ''\n self.FWORD = ''\n self.DISABLE = ''\n\n\nclass Summary(object):\n def __init__(self):\n self.summary = {}\n\n def update(self, wrongword):\n if wrongword in self.summary:\n self.summary[wrongword] += 1\n else:\n self.summary[wrongword] = 1\n\n def __str__(self):\n keys = list(self.summary.keys())\n keys.sort()\n\n return \"\\n\".join([\"{0}{1:{width}}\".format(\n key,\n self.summary.get(key),\n width=15 - len(key)) for key in keys])\n\n\nclass FileOpener(object):\n def __init__(self, use_chardet):\n self.use_chardet = use_chardet\n if use_chardet:\n self.init_chardet()\n\n def init_chardet(self):\n try:\n from chardet.universaldetector import UniversalDetector\n except ImportError:\n raise ImportError(\"There's no chardet installed to import from. 
\"\n \"Please, install it and check your PYTHONPATH \"\n \"environment variable\")\n\n self.encdetector = UniversalDetector()\n\n def open(self, filename):\n if self.use_chardet:\n return self.open_with_chardet(filename)\n else:\n return self.open_with_internal(filename)\n\n def open_with_chardet(self, filename):\n self.encdetector.reset()\n with codecs.open(filename, 'rb') as f:\n for line in f:\n self.encdetector.feed(line)\n if self.encdetector.done:\n break\n self.encdetector.close()\n encoding = self.encdetector.result['encoding']\n\n try:\n f = codecs.open(filename, 'r', encoding=encoding)\n except UnicodeDecodeError:\n print('ERROR: Could not detect encoding: %s' % filename,\n file=sys.stderr)\n raise\n except LookupError:\n print('ERROR: %s -- Don\\'t know how to handle encoding %s'\n % (filename, encoding), file=sys.stderr)\n raise\n else:\n lines = f.readlines()\n f.close()\n\n return lines, encoding\n\n def open_with_internal(self, filename):\n curr = 0\n global encodings\n\n while True:\n try:\n f = codecs.open(filename, 'r', encoding=encodings[curr])\n except UnicodeDecodeError:\n if not quiet_level & QuietLevels.ENCODING:\n print('WARNING: Decoding file %s' % filename,\n file=sys.stderr)\n print('WARNING: using encoding=%s failed. '\n % encodings[curr], file=sys.stderr)\n try:\n print('WARNING: Trying next encoding: %s'\n % encodings[curr + 1], file=sys.stderr)\n except IndexError:\n pass\n\n curr += 1\n else:\n lines = f.readlines()\n f.close()\n break\n if not lines:\n raise Exception('Unknown encoding')\n\n encoding = encodings[curr]\n\n return lines, encoding\n\n# -.-:-.-:-.-:-.:-.-:-.-:-.-:-.-:-.:-.-:-.-:-.-:-.-:-.:-.-:-\n\n\ndef parse_options(args):\n parser = OptionParser(usage=USAGE, version=VERSION)\n\n parser.set_defaults(colors=sys.stdout.isatty())\n parser.add_option('-d', '--disable-colors',\n action='store_false', dest='colors',\n help='disable colors even when printing to terminal')\n parser.add_option('-c', '--enable-colors',\n action='store_true', dest='colors',\n help='enable colors even when not printing to terminal')\n parser.add_option('-w', '--write-changes',\n action='store_true', default=False,\n help='write changes in place if possible')\n parser.add_option('-D', '--dictionary',\n action='append', metavar='FILE',\n help='Custom dictionary file that contains spelling '\n 'corrections. If this flag is not specified or '\n 'equals \"-\" then default dictionary \"%s\" is used. '\n 'This option can be specified multiple times.' %\n default_dictionary)\n\n parser.add_option('-s', '--summary',\n action='store_true', default=False,\n help='print summary of fixes')\n\n parser.add_option('-S', '--skip',\n help='Comma-separated list of files to skip. It '\n 'accepts globs as well. E.g.: if you want '\n 'codespell to skip .eps and .txt files, '\n 'you\\'d give \"*.eps,*.txt\" to this option.')\n\n parser.add_option('-x', '--exclude-file',\n help='FILE with lines that should not be changed',\n metavar='FILE')\n\n parser.add_option('-i', '--interactive',\n action='store', type='int', default=0,\n help='Set interactive mode when writing changes. '\n '0 is the same of no interactivity; 1 makes '\n 'codespell ask confirmation; 2 ask user to '\n 'choose one fix when more than one is '\n 'available; 3 applies both 1 and 2')\n\n parser.add_option('-q', '--quiet-level',\n action='store', type='int', default=0,\n help='Bitmask that allows codespell to run quietly. '\n '0: the default, in which all messages are '\n 'printed. 1: disable warnings about wrong '\n 'encoding. 
2: disable warnings about binary '\n 'file. 4: shut down warnings about automatic '\n 'fixes that were disabled in dictionary. '\n '8: don\\'t print anything for non-automatic '\n 'fixes. 16: don\\'t print fixed files.')\n\n parser.add_option('-e', '--hard-encoding-detection',\n action='store_true', default=False,\n help='Use chardet to detect the encoding of each '\n 'file. This can slow down codespell, but is more '\n 'reliable in detecting encodings other than utf-8, '\n 'iso8859-1 and ascii.')\n\n (o, args) = parser.parse_args(list(args))\n\n if not args:\n args.append('.')\n\n return o, args, parser\n\n\ndef build_exclude_hashes(filename):\n with codecs.open(filename, 'r') as f:\n for line in f:\n exclude_lines.add(line)\n\n\ndef build_dict(filename):\n with codecs.open(filename, mode='r', buffering=1, encoding='utf-8') as f:\n for line in f:\n [key, data] = line.split('->')\n data = data.strip()\n fix = data.rfind(',')\n\n if fix < 0:\n fix = True\n reason = ''\n elif fix == (len(data) - 1):\n data = data[:fix]\n reason = ''\n fix = False\n else:\n reason = data[fix + 1:].strip()\n data = data[:fix]\n fix = False\n\n misspellings[key] = Misspelling(data, fix, reason)\n\n\ndef is_hidden(filename):\n bfilename = os.path.basename(filename)\n\n if bfilename != '' and bfilename != '.' and bfilename != '..' \\\n and bfilename[0] == '.':\n return True\n\n return False\n\n\ndef is_text_file(filename):\n with open(filename, mode='rb') as f:\n s = f.read(1024)\n if b'\\x00' in s:\n return False\n return True\n\n\ndef fix_case(word, fixword):\n if word == word.capitalize():\n return fixword.capitalize()\n elif word == word.upper():\n return fixword.upper()\n # they are both lower case\n # or we don't have any idea\n return fixword\n\n\ndef ask_for_word_fix(line, wrongword, misspelling, interactivity):\n if interactivity <= 0:\n return misspelling.fix, fix_case(wrongword, misspelling.data)\n\n if misspelling.fix and interactivity & 1:\n r = ''\n fixword = fix_case(wrongword, misspelling.data)\n while not r:\n print(\"%s\\t%s ==> %s (Y/n) \" % (line, wrongword, fixword), end='')\n r = sys.stdin.readline().strip().upper()\n if not r:\n r = 'Y'\n if r != 'Y' and r != 'N':\n print(\"Say 'y' or 'n'\")\n r = ''\n\n if r == 'N':\n misspelling.fix = False\n misspelling.fixword = ''\n\n elif (interactivity & 2) and not misspelling.reason:\n # if it is not disabled, i.e. 
it just has more than one possible fix,\n # we ask the user which word to use\n\n r = ''\n opt = list(map(lambda x: x.strip(), misspelling.data.split(',')))\n while not r:\n print(\"%s Choose an option (blank for none): \" % line, end='')\n for i in range(len(opt)):\n fixword = fix_case(wrongword, opt[i])\n print(\" %d) %s\" % (i, fixword), end='')\n print(\": \", end='')\n sys.stdout.flush()\n\n n = sys.stdin.readline().strip()\n if not n:\n break\n\n try:\n n = int(n)\n r = opt[n]\n except (ValueError, IndexError):\n print(\"Not a valid option\\n\")\n\n if r:\n misspelling.fix = True\n misspelling.data = r\n\n return misspelling.fix, fix_case(wrongword, misspelling.data)\n\n\ndef parse_file(filename, colors, summary):\n lines = None\n changed = False\n global misspellings\n global options\n global encodings\n global quiet_level\n\n encoding = encodings[0] # if not defined, use UTF-8\n\n if filename == '-':\n f = sys.stdin\n lines = f.readlines()\n else:\n # ignore binary files\n if not os.path.isfile(filename):\n return 0\n text = is_text_file(filename)\n if not text:\n if not quiet_level & QuietLevels.BINARY_FILE:\n print(\"WARNING: Binary file: %s \" % filename, file=sys.stderr)\n return 0\n try:\n lines, encoding = file_opener.open(filename)\n except Exception:\n return 0\n\n bad_count = 0\n for i, line in enumerate(lines):\n if line in exclude_lines:\n continue\n\n fixed_words = set()\n asked_for = set()\n\n for word in regex.findall(line):\n lword = word.lower()\n if lword in misspellings:\n fix = misspellings[lword].fix\n fixword = fix_case(word, misspellings[lword].data)\n\n if options.interactive and lword not in asked_for:\n fix, fixword = ask_for_word_fix(lines[i], word,\n misspellings[lword],\n options.interactive)\n asked_for.add(lword)\n\n if summary and fix:\n summary.update(lword)\n\n if word in fixed_words: # can skip because of re.sub below\n continue\n\n if options.write_changes and fix:\n changed = True\n lines[i] = re.sub(r'\\b%s\\b' % word, fixword, lines[i])\n fixed_words.add(word)\n continue\n\n # otherwise warning was explicitly set by interactive mode\n if (options.interactive & 2 and not fix and not\n misspellings[lword].reason):\n continue\n\n cfilename = \"%s%s%s\" % (colors.FILE, filename, colors.DISABLE)\n cline = \"%s%d%s\" % (colors.FILE, i + 1, colors.DISABLE)\n cwrongword = \"%s%s%s\" % (colors.WWORD, word, colors.DISABLE)\n crightword = \"%s%s%s\" % (colors.FWORD, fixword, colors.DISABLE)\n\n if misspellings[lword].reason:\n if quiet_level & QuietLevels.DISABLED_FIXES:\n continue\n\n creason = \" | %s%s%s\" % (colors.FILE,\n misspellings[lword].reason,\n colors.DISABLE)\n else:\n if quiet_level & QuietLevels.NON_AUTOMATIC_FIXES:\n continue\n\n creason = ''\n\n # If we get to this point (uncorrected error) we should change\n # our bad_count and thus return value\n bad_count += 1\n\n if filename != '-':\n print(\"%(FILENAME)s:%(LINE)s: %(WRONGWORD)s \"\n \" ==> %(RIGHTWORD)s%(REASON)s\"\n % {'FILENAME': cfilename, 'LINE': cline,\n 'WRONGWORD': cwrongword,\n 'RIGHTWORD': crightword, 'REASON': creason})\n else:\n print('%(LINE)s: %(STRLINE)s\\n\\t%(WRONGWORD)s '\n '==> %(RIGHTWORD)s%(REASON)s'\n % {'LINE': cline, 'STRLINE': line.strip(),\n 'WRONGWORD': cwrongword,\n 'RIGHTWORD': crightword, 'REASON': creason})\n\n if changed:\n if filename == '-':\n print(\"---\")\n for line in lines:\n print(line, end='')\n else:\n if not quiet_level & QuietLevels.FIXES:\n print(\"%sFIXED:%s %s\"\n % (colors.FWORD, colors.DISABLE, filename),\n file=sys.stderr)\n with 
codecs.open(filename, 'w', encoding=encoding) as f:\n f.writelines(lines)\n return bad_count\n\n\ndef main(*args):\n \"\"\"Contains flow control\"\"\"\n global options\n global quiet_level\n global file_opener\n\n options, args, parser = parse_options(args)\n\n dictionaries = options.dictionary or [default_dictionary]\n for dictionary in dictionaries:\n if dictionary is \"-\":\n dictionary = default_dictionary\n if not os.path.exists(dictionary):\n print('ERROR: cannot find dictionary file: %s' % dictionary,\n file=sys.stderr)\n parser.print_help()\n return 1\n build_dict(dictionary)\n \n colors = TermColors()\n if not options.colors:\n colors.disable()\n\n if options.summary:\n summary = Summary()\n else:\n summary = None\n\n if options.exclude_file:\n build_exclude_hashes(options.exclude_file)\n\n if options.quiet_level:\n quiet_level = options.quiet_level\n\n file_opener = FileOpener(options.hard_encoding_detection)\n\n glob_match = GlobMatch(options.skip)\n\n bad_count = 0\n for filename in args:\n # ignore hidden files\n if is_hidden(filename):\n continue\n\n if os.path.isdir(filename):\n for root, dirs, files in os.walk(filename):\n for file_ in files:\n fname = os.path.join(root, file_)\n if not os.path.isfile(fname) or not os.path.getsize(fname):\n continue\n if glob_match.match(root): # skips also match directories\n continue\n if glob_match.match(file_):\n continue\n bad_count += parse_file(fname, colors, summary)\n\n else:\n bad_count += parse_file(filename, colors, summary)\n\n if summary:\n print(\"\\n-------8<-------\\nSUMMARY:\")\n print(summary)\n return bad_count\n",
"path": "codespell_lib/_codespell.py"
}
] | [
{
"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; version 2 of the License.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see\n# http://www.gnu.org/licenses/old-licenses/gpl-2.0.html.\n\"\"\"\nCopyright (C) 2010-2011 Lucas De Marchi <[email protected]>\nCopyright (C) 2011 ProFUSION embedded systems\n\"\"\"\n\nfrom __future__ import print_function\n\nimport codecs\nimport sys\nimport re\nfrom optparse import OptionParser\nimport os\nimport fnmatch\n\nUSAGE = \"\"\"\n\\t%prog [OPTIONS] [file1 file2 ... fileN]\n\"\"\"\nVERSION = '1.10.0.dev0'\n\nmisspellings = {}\nexclude_lines = set()\noptions = None\nfile_opener = None\nquiet_level = 0\nencodings = ['utf-8', 'iso-8859-1']\nregex = re.compile(r\"[\\w\\-']+\")\n# Users might want to link this file into /usr/local/bin, so we resolve the\n# symbolic link path to the real path if necessary.\ndefault_dictionary = os.path.join(os.path.dirname(os.path.realpath(__file__)),\n 'data', 'dictionary.txt')\n\n# OPTIONS:\n#\n# ARGUMENTS:\n# dict_filename The file containing the dictionary of misspellings.\n# If set to '-', it will be read from stdin\n# file1 .. fileN Files to check spelling\n\n\nclass QuietLevels(object):\n NONE = 0\n ENCODING = 1\n BINARY_FILE = 2\n DISABLED_FIXES = 4\n NON_AUTOMATIC_FIXES = 8\n FIXES = 16\n\n\nclass GlobMatch(object):\n def __init__(self, pattern):\n if pattern:\n self.pattern_list = pattern.split(',')\n else:\n self.pattern_list = None\n\n def match(self, filename):\n if self.pattern_list is None:\n return False\n\n for p in self.pattern_list:\n if fnmatch.fnmatch(filename, p):\n return True\n\n return False\n\n\nclass Misspelling(object):\n def __init__(self, data, fix, reason):\n self.data = data\n self.fix = fix\n self.reason = reason\n\n\nclass TermColors(object):\n def __init__(self):\n self.FILE = '\\033[33m'\n self.WWORD = '\\033[31m'\n self.FWORD = '\\033[32m'\n self.DISABLE = '\\033[0m'\n\n def disable(self):\n self.FILE = ''\n self.WWORD = ''\n self.FWORD = ''\n self.DISABLE = ''\n\n\nclass Summary(object):\n def __init__(self):\n self.summary = {}\n\n def update(self, wrongword):\n if wrongword in self.summary:\n self.summary[wrongword] += 1\n else:\n self.summary[wrongword] = 1\n\n def __str__(self):\n keys = list(self.summary.keys())\n keys.sort()\n\n return \"\\n\".join([\"{0}{1:{width}}\".format(\n key,\n self.summary.get(key),\n width=15 - len(key)) for key in keys])\n\n\nclass FileOpener(object):\n def __init__(self, use_chardet):\n self.use_chardet = use_chardet\n if use_chardet:\n self.init_chardet()\n\n def init_chardet(self):\n try:\n from chardet.universaldetector import UniversalDetector\n except ImportError:\n raise ImportError(\"There's no chardet installed to import from. 
\"\n \"Please, install it and check your PYTHONPATH \"\n \"environment variable\")\n\n self.encdetector = UniversalDetector()\n\n def open(self, filename):\n if self.use_chardet:\n return self.open_with_chardet(filename)\n else:\n return self.open_with_internal(filename)\n\n def open_with_chardet(self, filename):\n self.encdetector.reset()\n with codecs.open(filename, 'rb') as f:\n for line in f:\n self.encdetector.feed(line)\n if self.encdetector.done:\n break\n self.encdetector.close()\n encoding = self.encdetector.result['encoding']\n\n try:\n f = codecs.open(filename, 'r', encoding=encoding)\n except UnicodeDecodeError:\n print('ERROR: Could not detect encoding: %s' % filename,\n file=sys.stderr)\n raise\n except LookupError:\n print('ERROR: %s -- Don\\'t know how to handle encoding %s'\n % (filename, encoding), file=sys.stderr)\n raise\n else:\n lines = f.readlines()\n f.close()\n\n return lines, encoding\n\n def open_with_internal(self, filename):\n curr = 0\n global encodings\n\n while True:\n try:\n f = codecs.open(filename, 'r', encoding=encodings[curr])\n except UnicodeDecodeError:\n if not quiet_level & QuietLevels.ENCODING:\n print('WARNING: Decoding file %s' % filename,\n file=sys.stderr)\n print('WARNING: using encoding=%s failed. '\n % encodings[curr], file=sys.stderr)\n try:\n print('WARNING: Trying next encoding: %s'\n % encodings[curr + 1], file=sys.stderr)\n except IndexError:\n pass\n\n curr += 1\n else:\n lines = f.readlines()\n f.close()\n break\n if not lines:\n raise Exception('Unknown encoding')\n\n encoding = encodings[curr]\n\n return lines, encoding\n\n# -.-:-.-:-.-:-.:-.-:-.-:-.-:-.-:-.:-.-:-.-:-.-:-.-:-.:-.-:-\n\n\ndef parse_options(args):\n parser = OptionParser(usage=USAGE, version=VERSION)\n\n parser.set_defaults(colors=sys.stdout.isatty())\n parser.add_option('-d', '--disable-colors',\n action='store_false', dest='colors',\n help='disable colors even when printing to terminal')\n parser.add_option('-c', '--enable-colors',\n action='store_true', dest='colors',\n help='enable colors even when not printing to terminal')\n parser.add_option('-w', '--write-changes',\n action='store_true', default=False,\n help='write changes in place if possible')\n parser.add_option('-D', '--dictionary',\n action='append', metavar='FILE',\n help='Custom dictionary file that contains spelling '\n 'corrections. If this flag is not specified or '\n 'equals \"-\" then default dictionary \"%s\" is used. '\n 'This option can be specified multiple times.' %\n default_dictionary)\n\n parser.add_option('-s', '--summary',\n action='store_true', default=False,\n help='print summary of fixes')\n\n parser.add_option('-S', '--skip',\n help='Comma-separated list of files to skip. It '\n 'accepts globs as well. E.g.: if you want '\n 'codespell to skip .eps and .txt files, '\n 'you\\'d give \"*.eps,*.txt\" to this option.')\n\n parser.add_option('-x', '--exclude-file',\n help='FILE with lines that should not be changed',\n metavar='FILE')\n\n parser.add_option('-i', '--interactive',\n action='store', type='int', default=0,\n help='Set interactive mode when writing changes. '\n '0 is the same of no interactivity; 1 makes '\n 'codespell ask confirmation; 2 ask user to '\n 'choose one fix when more than one is '\n 'available; 3 applies both 1 and 2')\n\n parser.add_option('-q', '--quiet-level',\n action='store', type='int', default=0,\n help='Bitmask that allows codespell to run quietly. '\n '0: the default, in which all messages are '\n 'printed. 1: disable warnings about wrong '\n 'encoding. 
2: disable warnings about binary '\n 'file. 4: shut down warnings about automatic '\n 'fixes that were disabled in dictionary. '\n '8: don\\'t print anything for non-automatic '\n 'fixes. 16: don\\'t print fixed files.')\n\n parser.add_option('-e', '--hard-encoding-detection',\n action='store_true', default=False,\n help='Use chardet to detect the encoding of each '\n 'file. This can slow down codespell, but is more '\n 'reliable in detecting encodings other than utf-8, '\n 'iso8859-1 and ascii.')\n\n (o, args) = parser.parse_args(list(args))\n\n if not args:\n args.append('.')\n\n return o, args, parser\n\n\ndef build_exclude_hashes(filename):\n with codecs.open(filename, 'r') as f:\n for line in f:\n exclude_lines.add(line)\n\n\ndef build_dict(filename):\n with codecs.open(filename, mode='r', buffering=1, encoding='utf-8') as f:\n for line in f:\n [key, data] = line.split('->')\n data = data.strip()\n fix = data.rfind(',')\n\n if fix < 0:\n fix = True\n reason = ''\n elif fix == (len(data) - 1):\n data = data[:fix]\n reason = ''\n fix = False\n else:\n reason = data[fix + 1:].strip()\n data = data[:fix]\n fix = False\n\n misspellings[key] = Misspelling(data, fix, reason)\n\n\ndef is_hidden(filename):\n bfilename = os.path.basename(filename)\n\n if bfilename != '' and bfilename != '.' and bfilename != '..' \\\n and bfilename[0] == '.':\n return True\n\n return False\n\n\ndef is_text_file(filename):\n with open(filename, mode='rb') as f:\n s = f.read(1024)\n if b'\\x00' in s:\n return False\n return True\n\n\ndef fix_case(word, fixword):\n if word == word.capitalize():\n return fixword.capitalize()\n elif word == word.upper():\n return fixword.upper()\n # they are both lower case\n # or we don't have any idea\n return fixword\n\n\ndef ask_for_word_fix(line, wrongword, misspelling, interactivity):\n if interactivity <= 0:\n return misspelling.fix, fix_case(wrongword, misspelling.data)\n\n if misspelling.fix and interactivity & 1:\n r = ''\n fixword = fix_case(wrongword, misspelling.data)\n while not r:\n print(\"%s\\t%s ==> %s (Y/n) \" % (line, wrongword, fixword), end='')\n r = sys.stdin.readline().strip().upper()\n if not r:\n r = 'Y'\n if r != 'Y' and r != 'N':\n print(\"Say 'y' or 'n'\")\n r = ''\n\n if r == 'N':\n misspelling.fix = False\n misspelling.fixword = ''\n\n elif (interactivity & 2) and not misspelling.reason:\n # if it is not disabled, i.e. 
it just has more than one possible fix,\n # we ask the user which word to use\n\n r = ''\n opt = list(map(lambda x: x.strip(), misspelling.data.split(',')))\n while not r:\n print(\"%s Choose an option (blank for none): \" % line, end='')\n for i in range(len(opt)):\n fixword = fix_case(wrongword, opt[i])\n print(\" %d) %s\" % (i, fixword), end='')\n print(\": \", end='')\n sys.stdout.flush()\n\n n = sys.stdin.readline().strip()\n if not n:\n break\n\n try:\n n = int(n)\n r = opt[n]\n except (ValueError, IndexError):\n print(\"Not a valid option\\n\")\n\n if r:\n misspelling.fix = True\n misspelling.data = r\n\n return misspelling.fix, fix_case(wrongword, misspelling.data)\n\n\ndef parse_file(filename, colors, summary):\n lines = None\n changed = False\n global misspellings\n global options\n global encodings\n global quiet_level\n\n encoding = encodings[0] # if not defined, use UTF-8\n\n if filename == '-':\n f = sys.stdin\n lines = f.readlines()\n else:\n # ignore binary files\n if not os.path.isfile(filename):\n return 0\n text = is_text_file(filename)\n if not text:\n if not quiet_level & QuietLevels.BINARY_FILE:\n print(\"WARNING: Binary file: %s \" % filename, file=sys.stderr)\n return 0\n try:\n lines, encoding = file_opener.open(filename)\n except Exception:\n return 0\n\n bad_count = 0\n for i, line in enumerate(lines):\n if line in exclude_lines:\n continue\n\n fixed_words = set()\n asked_for = set()\n\n for word in regex.findall(line):\n lword = word.lower()\n if lword in misspellings:\n fix = misspellings[lword].fix\n fixword = fix_case(word, misspellings[lword].data)\n\n if options.interactive and lword not in asked_for:\n fix, fixword = ask_for_word_fix(lines[i], word,\n misspellings[lword],\n options.interactive)\n asked_for.add(lword)\n\n if summary and fix:\n summary.update(lword)\n\n if word in fixed_words: # can skip because of re.sub below\n continue\n\n if options.write_changes and fix:\n changed = True\n lines[i] = re.sub(r'\\b%s\\b' % word, fixword, lines[i])\n fixed_words.add(word)\n continue\n\n # otherwise warning was explicitly set by interactive mode\n if (options.interactive & 2 and not fix and not\n misspellings[lword].reason):\n continue\n\n cfilename = \"%s%s%s\" % (colors.FILE, filename, colors.DISABLE)\n cline = \"%s%d%s\" % (colors.FILE, i + 1, colors.DISABLE)\n cwrongword = \"%s%s%s\" % (colors.WWORD, word, colors.DISABLE)\n crightword = \"%s%s%s\" % (colors.FWORD, fixword, colors.DISABLE)\n\n if misspellings[lword].reason:\n if quiet_level & QuietLevels.DISABLED_FIXES:\n continue\n\n creason = \" | %s%s%s\" % (colors.FILE,\n misspellings[lword].reason,\n colors.DISABLE)\n else:\n if quiet_level & QuietLevels.NON_AUTOMATIC_FIXES:\n continue\n\n creason = ''\n\n # If we get to this point (uncorrected error) we should change\n # our bad_count and thus return value\n bad_count += 1\n\n if filename != '-':\n print(\"%(FILENAME)s:%(LINE)s: %(WRONGWORD)s \"\n \" ==> %(RIGHTWORD)s%(REASON)s\"\n % {'FILENAME': cfilename, 'LINE': cline,\n 'WRONGWORD': cwrongword,\n 'RIGHTWORD': crightword, 'REASON': creason})\n else:\n print('%(LINE)s: %(STRLINE)s\\n\\t%(WRONGWORD)s '\n '==> %(RIGHTWORD)s%(REASON)s'\n % {'LINE': cline, 'STRLINE': line.strip(),\n 'WRONGWORD': cwrongword,\n 'RIGHTWORD': crightword, 'REASON': creason})\n\n if changed:\n if filename == '-':\n print(\"---\")\n for line in lines:\n print(line, end='')\n else:\n if not quiet_level & QuietLevels.FIXES:\n print(\"%sFIXED:%s %s\"\n % (colors.FWORD, colors.DISABLE, filename),\n file=sys.stderr)\n with 
codecs.open(filename, 'w', encoding=encoding) as f:\n f.writelines(lines)\n return bad_count\n\n\ndef main(*args):\n \"\"\"Contains flow control\"\"\"\n global options\n global quiet_level\n global file_opener\n\n options, args, parser = parse_options(args)\n\n dictionaries = options.dictionary or [default_dictionary]\n for dictionary in dictionaries:\n if dictionary is \"-\":\n dictionary = default_dictionary\n if not os.path.exists(dictionary):\n print('ERROR: cannot find dictionary file: %s' % dictionary,\n file=sys.stderr)\n parser.print_help()\n return 1\n build_dict(dictionary)\n colors = TermColors()\n if not options.colors:\n colors.disable()\n\n if options.summary:\n summary = Summary()\n else:\n summary = None\n\n if options.exclude_file:\n build_exclude_hashes(options.exclude_file)\n\n if options.quiet_level:\n quiet_level = options.quiet_level\n\n file_opener = FileOpener(options.hard_encoding_detection)\n\n glob_match = GlobMatch(options.skip)\n\n bad_count = 0\n for filename in args:\n # ignore hidden files\n if is_hidden(filename):\n continue\n\n if os.path.isdir(filename):\n for root, dirs, files in os.walk(filename):\n for file_ in files:\n fname = os.path.join(root, file_)\n if not os.path.isfile(fname) or not os.path.getsize(fname):\n continue\n if glob_match.match(root): # skips also match directories\n continue\n if glob_match.match(file_):\n continue\n bad_count += parse_file(fname, colors, summary)\n\n else:\n bad_count += parse_file(filename, colors, summary)\n\n if summary:\n print(\"\\n-------8<-------\\nSUMMARY:\")\n print(summary)\n return bad_count\n",
"path": "codespell_lib/_codespell.py"
}
] | diff --git a/Makefile b/Makefile
index b541dc91ec..9b0b051d3f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,28 +1,12 @@
-prefix ?= /usr/local
-bindir ?= ${prefix}/bin
-datadir ?= ${prefix}/share/codespell
-mandir ?= ${prefix}/share/man/man1
-
-_VERSION := $(shell grep -e "VERSION = '[0-9]\.[0-9]" codespell_lib/_codespell.py | cut -f 3 -d ' ')
-VERSION = $(subst ',,$(_VERSION))
-
SORT_ARGS := -f
-PHONY := all manpage check check-dictionary sort-dictionary install git-tag-release tar-sync clean
-
-all: codespell manpage
+PHONY := all check check-dictionary sort-dictionary clean
-codespell: codespell.py check-dictionary
- sed "s|^default_dictionary = .*|default_dictionary = '${datadir}/dictionary.txt'|" < $< > $@
- chmod 755 codespell
+all: check-dictionary codespell.1
-manpage: codespell codespell.1.include
- help2man ./codespell --include codespell.1.include --no-info --output codespell.1
+codespell.1: codespell.1.include bin/codespell
+ PYTHONPATH=. help2man ./bin/codespell --include codespell.1.include --no-info --output codespell.1
sed -i '/\.SS \"Usage/,+2d' codespell.1
- gzip -9 -f codespell.1
-
-check:
- test 1bfb1f089c3c7772f0898f66df089b9e = $$(./codespell.py example/ | md5sum | cut -f1 -d\ )
check-dictionary:
@if ! LANG=C sort ${SORT_ARGS} -c codespell_lib/data/dictionary.txt; then \
@@ -33,37 +17,8 @@ check-dictionary:
sort-dictionary:
LANG=C sort ${SORT_ARGS} -u -o codespell_lib/data/dictionary.txt codespell_lib/data/dictionary.txt
-install: codespell manpage
- install -d ${DESTDIR}${datadir} ${DESTDIR}${bindir} ${DESTDIR}${mandir}
- install -m644 -t ${DESTDIR}${datadir} data/dictionary.txt data/linux-kernel.exclude
- install -m755 -T codespell ${DESTDIR}${bindir}/codespell
- install -d ${DESTDIR}${mandir}
- install -m644 -t ${DESTDIR}${mandir} codespell.1.gz
-
-git-tag-release:
- git commit -a -m "codespell $(VERSION)"
- git tag -m "codespell $(VERSION)" -s v$(VERSION)
- git gc --prune=0
-
-codespell-$(VERSION).tar.xz.asc: codespell-$(VERSION).tar.xz
- gpg --armor --detach-sign --output $@ $^
-
-codespell-$(VERSION).tar.xz:
- git archive --format=tar --prefix codespell-$(VERSION)/ v$(VERSION) | xz > $@
-
-tar-sync: codespell-$(VERSION).tar.xz codespell-$(VERSION).tar.xz.asc
- github-release release --repo codespell --tag v$(VERSION) --name v$(VERSION)
- github-release upload --repo codespell --tag v$(VERSION) \
- --name codespell-$(VERSION).tar.xz \
- --file codespell-$(VERSION).tar.xz
- github-release upload --repo codespell --tag v$(VERSION) \
- --name codespell-$(VERSION).tar.xz.asc \
- --file codespell-$(VERSION).tar.xz.asc
-
pypi:
python setup.py sdist register upload
clean:
- rm -rf codespell
rm -rf codespell.1
- rm -rf codespell.1.gz
diff --git a/README.rst b/README.rst
index 44b4f0b749..f0ae2cb330 100644
--- a/README.rst
+++ b/README.rst
@@ -71,7 +71,8 @@ directly, but instead be manually inspected. E.g.:
License
-------
-The Python script ``codespell`` is available with the following terms:
+The Python script ``codespell`` with its library ``codespell_lib`` is available
+with the following terms:
(*tl;dr*: `GPL v2`_)
Copyright (C) 2010-2011 Lucas De Marchi <[email protected]>
diff --git a/codespell_lib/_codespell.py b/codespell_lib/_codespell.py
index 62bff4227f..4706ec8529 100755
--- a/codespell_lib/_codespell.py
+++ b/codespell_lib/_codespell.py
@@ -510,7 +510,6 @@ def main(*args):
parser.print_help()
return 1
build_dict(dictionary)
-
colors = TermColors()
if not options.colors:
colors.disable()
|
aws__aws-cli-2892 | - Support use of colorama up to 0.3.8
+ colorama bugfix release 0.3.8 is available and contains no incompatible
changes. There is no need to cap the dependency at 0.3.7.
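To make the version-range point concrete, a small check with the third-party `packaging` library (not used by this repository; shown purely for illustration) confirms that the bugfix release is rejected by the current pin but accepted once the upper bound is raised:

```python
from packaging.specifiers import SpecifierSet
from packaging.version import Version

old_spec = SpecifierSet(">=0.2.5,<=0.3.7")  # current pin in setup.py
new_spec = SpecifierSet(">=0.2.5,<=0.3.8")  # what this issue proposes

print(Version("0.3.8") in old_spec)  # False - 0.3.8 cannot be installed today
print(Version("0.3.8") in new_spec)  # True
```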
| [
{
"content": "#!/usr/bin/env python\nimport codecs\nimport os.path\nimport re\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read(*parts):\n return codecs.open(os.path.join(here, *parts), 'r').read()\n\n\ndef find_version(*file_paths):\n version_file = read(*file_paths)\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\nrequires = ['botocore==1.10.19',\n 'colorama>=0.2.5,<=0.3.7',\n 'docutils>=0.10',\n 'rsa>=3.1.2,<=3.5.0',\n 's3transfer>=0.1.12,<0.2.0',\n 'PyYAML>=3.10,<=3.12']\n\n\nif sys.version_info[:2] == (2, 6):\n # For python2.6 we have to require argparse since it\n # was not in stdlib until 2.7.\n requires.append('argparse>=1.1')\n\n\nsetup_options = dict(\n name='awscli',\n version=find_version(\"awscli\", \"__init__.py\"),\n description='Universal Command Line Environment for AWS.',\n long_description=open('README.rst').read(),\n author='Amazon Web Services',\n url='http://aws.amazon.com/cli/',\n scripts=['bin/aws', 'bin/aws.cmd',\n 'bin/aws_completer', 'bin/aws_zsh_completer.sh',\n 'bin/aws_bash_completer'],\n packages=find_packages(exclude=['tests*']),\n package_data={'awscli': ['data/*.json', 'examples/*/*.rst',\n 'examples/*/*/*.rst', 'topics/*.rst',\n 'topics/*.json']},\n install_requires=requires,\n extras_require={\n ':python_version==\"2.6\"': [\n 'argparse>=1.1',\n ]\n },\n license=\"Apache License 2.0\",\n classifiers=(\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ),\n)\n\nif 'py2exe' in sys.argv:\n # This will actually give us a py2exe command.\n import py2exe\n # And we have some py2exe specific options.\n setup_options['options'] = {\n 'py2exe': {\n 'optimize': 0,\n 'skip_archive': True,\n 'dll_excludes': ['crypt32.dll'],\n 'packages': ['docutils', 'urllib', 'httplib', 'HTMLParser',\n 'awscli', 'ConfigParser', 'xml.etree', 'pipes'],\n }\n }\n setup_options['console'] = ['bin/aws']\n\n\nsetup(**setup_options)\n",
"path": "setup.py"
}
] | [
{
"content": "#!/usr/bin/env python\nimport codecs\nimport os.path\nimport re\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read(*parts):\n return codecs.open(os.path.join(here, *parts), 'r').read()\n\n\ndef find_version(*file_paths):\n version_file = read(*file_paths)\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\nrequires = ['botocore==1.10.19',\n 'colorama>=0.2.5,<=0.3.9',\n 'docutils>=0.10',\n 'rsa>=3.1.2,<=3.5.0',\n 's3transfer>=0.1.12,<0.2.0',\n 'PyYAML>=3.10,<=3.12']\n\n\nif sys.version_info[:2] == (2, 6):\n # For python2.6 we have to require argparse since it\n # was not in stdlib until 2.7.\n requires.append('argparse>=1.1')\n\n\nsetup_options = dict(\n name='awscli',\n version=find_version(\"awscli\", \"__init__.py\"),\n description='Universal Command Line Environment for AWS.',\n long_description=open('README.rst').read(),\n author='Amazon Web Services',\n url='http://aws.amazon.com/cli/',\n scripts=['bin/aws', 'bin/aws.cmd',\n 'bin/aws_completer', 'bin/aws_zsh_completer.sh',\n 'bin/aws_bash_completer'],\n packages=find_packages(exclude=['tests*']),\n package_data={'awscli': ['data/*.json', 'examples/*/*.rst',\n 'examples/*/*/*.rst', 'topics/*.rst',\n 'topics/*.json']},\n install_requires=requires,\n extras_require={\n ':python_version==\"2.6\"': [\n 'argparse>=1.1',\n ]\n },\n license=\"Apache License 2.0\",\n classifiers=(\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ),\n)\n\nif 'py2exe' in sys.argv:\n # This will actually give us a py2exe command.\n import py2exe\n # And we have some py2exe specific options.\n setup_options['options'] = {\n 'py2exe': {\n 'optimize': 0,\n 'skip_archive': True,\n 'dll_excludes': ['crypt32.dll'],\n 'packages': ['docutils', 'urllib', 'httplib', 'HTMLParser',\n 'awscli', 'ConfigParser', 'xml.etree', 'pipes'],\n }\n }\n setup_options['console'] = ['bin/aws']\n\n\nsetup(**setup_options)\n",
"path": "setup.py"
}
] | diff --git a/.changes/next-release/enhancement-colorama-91382.json b/.changes/next-release/enhancement-colorama-91382.json
new file mode 100644
index 000000000000..95c802ae4eac
--- /dev/null
+++ b/.changes/next-release/enhancement-colorama-91382.json
@@ -0,0 +1,5 @@
+{
+ "type": "enhancement",
+ "category": "colorama",
+ "description": "Increased the upper bound on the colorama dependency to 0.3.9."
+}
diff --git a/requirements.txt b/requirements.txt
index 1cd3845af02f..2f887cc15547 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -7,7 +7,7 @@ docutils>=0.10
-e git://github.com/boto/s3transfer.git@develop#egg=s3transfer
-e git://github.com/boto/jmespath.git@develop#egg=jmespath
nose==1.3.7
-colorama>=0.2.5,<=0.3.7
+colorama>=0.2.5,<=0.3.9
mock==1.3.0
rsa>=3.1.2,<=3.5.0
wheel==0.24.0
diff --git a/setup.cfg b/setup.cfg
index 47fa9c8240a7..f39b54f14d87 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -5,7 +5,7 @@ universal = 1
[metadata]
requires-dist =
botocore==1.10.19
- colorama>=0.2.5,<=0.3.7
+ colorama>=0.2.5,<=0.3.9
docutils>=0.10
rsa>=3.1.2,<=3.5.0
PyYAML>=3.10,<=3.12
diff --git a/setup.py b/setup.py
index aa50140c6740..56341a9feb8b 100644
--- a/setup.py
+++ b/setup.py
@@ -24,7 +24,7 @@ def find_version(*file_paths):
requires = ['botocore==1.10.19',
- 'colorama>=0.2.5,<=0.3.7',
+ 'colorama>=0.2.5,<=0.3.9',
'docutils>=0.10',
'rsa>=3.1.2,<=3.5.0',
's3transfer>=0.1.12,<0.2.0',
|
cookiecutter__cookiecutter-django-4520 | Unused gulp dependency added to package.json when Webpack is chosen
## What happened?
package.json has a reference to `"gulp-concat": "^2.6.1"` even though gulp-concat is not used when Webpack has been chosen
## What should've happened instead?
No reference to gulp-concat at all when using Webpack
## Additional details
Host system configuration:
Cookiecutter 2.1.1
Ventura 13.5
Python 3.11.4
Docker version 24.0.2, build cb74dfc
Docker Compose version v2.19.1
| [
{
"content": "\"\"\"\nNOTE:\n the below code is to be maintained Python 2.x-compatible\n as the whole Cookiecutter Django project initialization\n can potentially be run in Python 2.x environment\n (at least so we presume in `pre_gen_project.py`).\n\nTODO: restrict Cookiecutter Django project initialization to\n Python 3.x environments only\n\"\"\"\nfrom __future__ import print_function\n\nimport json\nimport os\nimport random\nimport shutil\nimport string\n\ntry:\n # Inspired by\n # https://github.com/django/django/blob/master/django/utils/crypto.py\n random = random.SystemRandom()\n using_sysrandom = True\nexcept NotImplementedError:\n using_sysrandom = False\n\nTERMINATOR = \"\\x1b[0m\"\nWARNING = \"\\x1b[1;33m [WARNING]: \"\nINFO = \"\\x1b[1;33m [INFO]: \"\nHINT = \"\\x1b[3;33m\"\nSUCCESS = \"\\x1b[1;32m [SUCCESS]: \"\n\nDEBUG_VALUE = \"debug\"\n\n\ndef remove_open_source_files():\n file_names = [\"CONTRIBUTORS.txt\", \"LICENSE\"]\n for file_name in file_names:\n os.remove(file_name)\n\n\ndef remove_gplv3_files():\n file_names = [\"COPYING\"]\n for file_name in file_names:\n os.remove(file_name)\n\n\ndef remove_custom_user_manager_files():\n os.remove(\n os.path.join(\n \"{{cookiecutter.project_slug}}\",\n \"users\",\n \"managers.py\",\n )\n )\n os.remove(\n os.path.join(\n \"{{cookiecutter.project_slug}}\",\n \"users\",\n \"tests\",\n \"test_managers.py\",\n )\n )\n\n\ndef remove_pycharm_files():\n idea_dir_path = \".idea\"\n if os.path.exists(idea_dir_path):\n shutil.rmtree(idea_dir_path)\n\n docs_dir_path = os.path.join(\"docs\", \"pycharm\")\n if os.path.exists(docs_dir_path):\n shutil.rmtree(docs_dir_path)\n\n\ndef remove_docker_files():\n shutil.rmtree(\".devcontainer\")\n shutil.rmtree(\"compose\")\n\n file_names = [\"local.yml\", \"production.yml\", \".dockerignore\"]\n for file_name in file_names:\n os.remove(file_name)\n if \"{{ cookiecutter.editor }}\" == \"PyCharm\":\n file_names = [\"docker_compose_up_django.xml\", \"docker_compose_up_docs.xml\"]\n for file_name in file_names:\n os.remove(os.path.join(\".idea\", \"runConfigurations\", file_name))\n\n\ndef remove_utility_files():\n shutil.rmtree(\"utility\")\n\n\ndef remove_heroku_files():\n file_names = [\"Procfile\", \"runtime.txt\", \"requirements.txt\"]\n for file_name in file_names:\n if file_name == \"requirements.txt\" and \"{{ cookiecutter.ci_tool }}\".lower() == \"travis\":\n # don't remove the file if we are using travisci but not using heroku\n continue\n os.remove(file_name)\n shutil.rmtree(\"bin\")\n\n\ndef remove_sass_files():\n shutil.rmtree(os.path.join(\"{{cookiecutter.project_slug}}\", \"static\", \"sass\"))\n\n\ndef remove_gulp_files():\n file_names = [\"gulpfile.js\"]\n for file_name in file_names:\n os.remove(file_name)\n\n\ndef remove_webpack_files():\n shutil.rmtree(\"webpack\")\n remove_vendors_js()\n\n\ndef remove_vendors_js():\n vendors_js_path = os.path.join(\n \"{{ cookiecutter.project_slug }}\",\n \"static\",\n \"js\",\n \"vendors.js\",\n )\n if os.path.exists(vendors_js_path):\n os.remove(vendors_js_path)\n\n\ndef remove_packagejson_file():\n file_names = [\"package.json\"]\n for file_name in file_names:\n os.remove(file_name)\n\n\ndef update_package_json(remove_dev_deps=None, remove_keys=None, scripts=None):\n remove_dev_deps = remove_dev_deps or []\n remove_keys = remove_keys or []\n scripts = scripts or {}\n with open(\"package.json\", mode=\"r\") as fd:\n content = json.load(fd)\n for package_name in remove_dev_deps:\n content[\"devDependencies\"].pop(package_name)\n for key in remove_keys:\n 
content.pop(key)\n content[\"scripts\"].update(scripts)\n with open(\"package.json\", mode=\"w\") as fd:\n json.dump(content, fd, ensure_ascii=False, indent=2)\n fd.write(\"\\n\")\n\n\ndef handle_js_runner(choice, use_docker, use_async):\n if choice == \"Gulp\":\n update_package_json(\n remove_dev_deps=[\n \"@babel/core\",\n \"@babel/preset-env\",\n \"babel-loader\",\n \"concurrently\",\n \"css-loader\",\n \"mini-css-extract-plugin\",\n \"postcss-loader\",\n \"postcss-preset-env\",\n \"sass-loader\",\n \"webpack\",\n \"webpack-bundle-tracker\",\n \"webpack-cli\",\n \"webpack-dev-server\",\n \"webpack-merge\",\n ],\n remove_keys=[\"babel\"],\n scripts={\n \"dev\": \"gulp\",\n \"build\": \"gulp generate-assets\",\n },\n )\n remove_webpack_files()\n elif choice == \"Webpack\":\n scripts = {\n \"dev\": \"webpack serve --config webpack/dev.config.js\",\n \"build\": \"webpack --config webpack/prod.config.js\",\n }\n remove_dev_deps = [\n \"browser-sync\",\n \"cssnano\",\n \"gulp\",\n \"gulp-imagemin\",\n \"gulp-plumber\",\n \"gulp-postcss\",\n \"gulp-rename\",\n \"gulp-sass\",\n \"gulp-uglify-es\",\n ]\n if not use_docker:\n dev_django_cmd = (\n \"uvicorn config.asgi:application --reload\" if use_async else \"python manage.py runserver_plus\"\n )\n scripts.update(\n {\n \"dev\": \"concurrently npm:dev:*\",\n \"dev:webpack\": \"webpack serve --config webpack/dev.config.js\",\n \"dev:django\": dev_django_cmd,\n }\n )\n else:\n remove_dev_deps.append(\"concurrently\")\n update_package_json(remove_dev_deps=remove_dev_deps, scripts=scripts)\n remove_gulp_files()\n\n\ndef remove_prettier_pre_commit():\n with open(\".pre-commit-config.yaml\", \"r\") as fd:\n content = fd.readlines()\n\n removing = False\n new_lines = []\n for line in content:\n if removing and \"- repo:\" in line:\n removing = False\n if \"mirrors-prettier\" in line:\n removing = True\n if not removing:\n new_lines.append(line)\n\n with open(\".pre-commit-config.yaml\", \"w\") as fd:\n fd.writelines(new_lines)\n\n\ndef remove_celery_files():\n file_names = [\n os.path.join(\"config\", \"celery_app.py\"),\n os.path.join(\"{{ cookiecutter.project_slug }}\", \"users\", \"tasks.py\"),\n os.path.join(\"{{ cookiecutter.project_slug }}\", \"users\", \"tests\", \"test_tasks.py\"),\n ]\n for file_name in file_names:\n os.remove(file_name)\n\n\ndef remove_async_files():\n file_names = [\n os.path.join(\"config\", \"asgi.py\"),\n os.path.join(\"config\", \"websocket.py\"),\n ]\n for file_name in file_names:\n os.remove(file_name)\n\n\ndef remove_dottravisyml_file():\n os.remove(\".travis.yml\")\n\n\ndef remove_dotgitlabciyml_file():\n os.remove(\".gitlab-ci.yml\")\n\n\ndef remove_dotgithub_folder():\n shutil.rmtree(\".github\")\n\n\ndef remove_dotdrone_file():\n os.remove(\".drone.yml\")\n\n\ndef generate_random_string(length, using_digits=False, using_ascii_letters=False, using_punctuation=False):\n \"\"\"\n Example:\n opting out for 50 symbol-long, [a-z][A-Z][0-9] string\n would yield log_2((26+26+50)^50) ~= 334 bit strength.\n \"\"\"\n if not using_sysrandom:\n return None\n\n symbols = []\n if using_digits:\n symbols += string.digits\n if using_ascii_letters:\n symbols += string.ascii_letters\n if using_punctuation:\n all_punctuation = set(string.punctuation)\n # These symbols can cause issues in environment variables\n unsuitable = {\"'\", '\"', \"\\\\\", \"$\"}\n suitable = all_punctuation.difference(unsuitable)\n symbols += \"\".join(suitable)\n return \"\".join([random.choice(symbols) for _ in range(length)])\n\n\ndef set_flag(file_path, 
flag, value=None, formatted=None, *args, **kwargs):\n if value is None:\n random_string = generate_random_string(*args, **kwargs)\n if random_string is None:\n print(\n \"We couldn't find a secure pseudo-random number generator on your \"\n \"system. Please, make sure to manually {} later.\".format(flag)\n )\n random_string = flag\n if formatted is not None:\n random_string = formatted.format(random_string)\n value = random_string\n\n with open(file_path, \"r+\") as f:\n file_contents = f.read().replace(flag, value)\n f.seek(0)\n f.write(file_contents)\n f.truncate()\n\n return value\n\n\ndef set_django_secret_key(file_path):\n django_secret_key = set_flag(\n file_path,\n \"!!!SET DJANGO_SECRET_KEY!!!\",\n length=64,\n using_digits=True,\n using_ascii_letters=True,\n )\n return django_secret_key\n\n\ndef set_django_admin_url(file_path):\n django_admin_url = set_flag(\n file_path,\n \"!!!SET DJANGO_ADMIN_URL!!!\",\n formatted=\"{}/\",\n length=32,\n using_digits=True,\n using_ascii_letters=True,\n )\n return django_admin_url\n\n\ndef generate_random_user():\n return generate_random_string(length=32, using_ascii_letters=True)\n\n\ndef generate_postgres_user(debug=False):\n return DEBUG_VALUE if debug else generate_random_user()\n\n\ndef set_postgres_user(file_path, value):\n postgres_user = set_flag(file_path, \"!!!SET POSTGRES_USER!!!\", value=value)\n return postgres_user\n\n\ndef set_postgres_password(file_path, value=None):\n postgres_password = set_flag(\n file_path,\n \"!!!SET POSTGRES_PASSWORD!!!\",\n value=value,\n length=64,\n using_digits=True,\n using_ascii_letters=True,\n )\n return postgres_password\n\n\ndef set_celery_flower_user(file_path, value):\n celery_flower_user = set_flag(file_path, \"!!!SET CELERY_FLOWER_USER!!!\", value=value)\n return celery_flower_user\n\n\ndef set_celery_flower_password(file_path, value=None):\n celery_flower_password = set_flag(\n file_path,\n \"!!!SET CELERY_FLOWER_PASSWORD!!!\",\n value=value,\n length=64,\n using_digits=True,\n using_ascii_letters=True,\n )\n return celery_flower_password\n\n\ndef append_to_gitignore_file(ignored_line):\n with open(\".gitignore\", \"a\") as gitignore_file:\n gitignore_file.write(ignored_line)\n gitignore_file.write(\"\\n\")\n\n\ndef set_flags_in_envs(postgres_user, celery_flower_user, debug=False):\n local_django_envs_path = os.path.join(\".envs\", \".local\", \".django\")\n production_django_envs_path = os.path.join(\".envs\", \".production\", \".django\")\n local_postgres_envs_path = os.path.join(\".envs\", \".local\", \".postgres\")\n production_postgres_envs_path = os.path.join(\".envs\", \".production\", \".postgres\")\n\n set_django_secret_key(production_django_envs_path)\n set_django_admin_url(production_django_envs_path)\n\n set_postgres_user(local_postgres_envs_path, value=postgres_user)\n set_postgres_password(local_postgres_envs_path, value=DEBUG_VALUE if debug else None)\n set_postgres_user(production_postgres_envs_path, value=postgres_user)\n set_postgres_password(production_postgres_envs_path, value=DEBUG_VALUE if debug else None)\n\n set_celery_flower_user(local_django_envs_path, value=celery_flower_user)\n set_celery_flower_password(local_django_envs_path, value=DEBUG_VALUE if debug else None)\n set_celery_flower_user(production_django_envs_path, value=celery_flower_user)\n set_celery_flower_password(production_django_envs_path, value=DEBUG_VALUE if debug else None)\n\n\ndef set_flags_in_settings_files():\n set_django_secret_key(os.path.join(\"config\", \"settings\", \"local.py\"))\n 
set_django_secret_key(os.path.join(\"config\", \"settings\", \"test.py\"))\n\n\ndef remove_envs_and_associated_files():\n shutil.rmtree(\".envs\")\n os.remove(\"merge_production_dotenvs_in_dotenv.py\")\n shutil.rmtree(\"tests\")\n\n\ndef remove_celery_compose_dirs():\n shutil.rmtree(os.path.join(\"compose\", \"local\", \"django\", \"celery\"))\n shutil.rmtree(os.path.join(\"compose\", \"production\", \"django\", \"celery\"))\n\n\ndef remove_node_dockerfile():\n shutil.rmtree(os.path.join(\"compose\", \"local\", \"node\"))\n\n\ndef remove_aws_dockerfile():\n shutil.rmtree(os.path.join(\"compose\", \"production\", \"aws\"))\n\n\ndef remove_drf_starter_files():\n os.remove(os.path.join(\"config\", \"api_router.py\"))\n shutil.rmtree(os.path.join(\"{{cookiecutter.project_slug}}\", \"users\", \"api\"))\n os.remove(os.path.join(\"{{cookiecutter.project_slug}}\", \"users\", \"tests\", \"test_drf_urls.py\"))\n os.remove(os.path.join(\"{{cookiecutter.project_slug}}\", \"users\", \"tests\", \"test_drf_views.py\"))\n os.remove(os.path.join(\"{{cookiecutter.project_slug}}\", \"users\", \"tests\", \"test_swagger.py\"))\n\n\ndef remove_storages_module():\n os.remove(os.path.join(\"{{cookiecutter.project_slug}}\", \"utils\", \"storages.py\"))\n\n\ndef main():\n debug = \"{{ cookiecutter.debug }}\".lower() == \"y\"\n\n set_flags_in_envs(\n DEBUG_VALUE if debug else generate_random_user(),\n DEBUG_VALUE if debug else generate_random_user(),\n debug=debug,\n )\n set_flags_in_settings_files()\n\n if \"{{ cookiecutter.open_source_license }}\" == \"Not open source\":\n remove_open_source_files()\n if \"{{ cookiecutter.open_source_license}}\" != \"GPLv3\":\n remove_gplv3_files()\n\n if \"{{ cookiecutter.username_type }}\" == \"username\":\n remove_custom_user_manager_files()\n\n if \"{{ cookiecutter.editor }}\" != \"PyCharm\":\n remove_pycharm_files()\n\n if \"{{ cookiecutter.use_docker }}\".lower() == \"y\":\n remove_utility_files()\n else:\n remove_docker_files()\n\n if \"{{ cookiecutter.use_docker }}\".lower() == \"y\" and \"{{ cookiecutter.cloud_provider}}\" != \"AWS\":\n remove_aws_dockerfile()\n\n if \"{{ cookiecutter.use_heroku }}\".lower() == \"n\":\n remove_heroku_files()\n\n if \"{{ cookiecutter.use_docker }}\".lower() == \"n\" and \"{{ cookiecutter.use_heroku }}\".lower() == \"n\":\n if \"{{ cookiecutter.keep_local_envs_in_vcs }}\".lower() == \"y\":\n print(\n INFO + \".env(s) are only utilized when Docker Compose and/or \"\n \"Heroku support is enabled so keeping them does not make sense \"\n \"given your current setup.\" + TERMINATOR\n )\n remove_envs_and_associated_files()\n else:\n append_to_gitignore_file(\".env\")\n append_to_gitignore_file(\".envs/*\")\n if \"{{ cookiecutter.keep_local_envs_in_vcs }}\".lower() == \"y\":\n append_to_gitignore_file(\"!.envs/.local/\")\n\n if \"{{ cookiecutter.frontend_pipeline }}\" in [\"None\", \"Django Compressor\"]:\n remove_gulp_files()\n remove_webpack_files()\n remove_sass_files()\n remove_packagejson_file()\n remove_prettier_pre_commit()\n if \"{{ cookiecutter.use_docker }}\".lower() == \"y\":\n remove_node_dockerfile()\n else:\n handle_js_runner(\n \"{{ cookiecutter.frontend_pipeline }}\",\n use_docker=(\"{{ cookiecutter.use_docker }}\".lower() == \"y\"),\n use_async=(\"{{ cookiecutter.use_async }}\".lower() == \"y\"),\n )\n\n if \"{{ cookiecutter.cloud_provider }}\" == \"None\" and \"{{ cookiecutter.use_docker }}\".lower() == \"n\":\n print(\n WARNING + \"You chose to not use any cloud providers nor Docker, \"\n \"media files won't be served in 
production.\" + TERMINATOR\n )\n remove_storages_module()\n\n if \"{{ cookiecutter.use_celery }}\".lower() == \"n\":\n remove_celery_files()\n if \"{{ cookiecutter.use_docker }}\".lower() == \"y\":\n remove_celery_compose_dirs()\n\n if \"{{ cookiecutter.ci_tool }}\" != \"Travis\":\n remove_dottravisyml_file()\n\n if \"{{ cookiecutter.ci_tool }}\" != \"Gitlab\":\n remove_dotgitlabciyml_file()\n\n if \"{{ cookiecutter.ci_tool }}\" != \"Github\":\n remove_dotgithub_folder()\n\n if \"{{ cookiecutter.ci_tool }}\" != \"Drone\":\n remove_dotdrone_file()\n\n if \"{{ cookiecutter.use_drf }}\".lower() == \"n\":\n remove_drf_starter_files()\n\n if \"{{ cookiecutter.use_async }}\".lower() == \"n\":\n remove_async_files()\n\n print(SUCCESS + \"Project initialized, keep up the good work!\" + TERMINATOR)\n\n\nif __name__ == \"__main__\":\n main()\n",
"path": "hooks/post_gen_project.py"
}
] | [
{
"content": "\"\"\"\nNOTE:\n the below code is to be maintained Python 2.x-compatible\n as the whole Cookiecutter Django project initialization\n can potentially be run in Python 2.x environment\n (at least so we presume in `pre_gen_project.py`).\n\nTODO: restrict Cookiecutter Django project initialization to\n Python 3.x environments only\n\"\"\"\nfrom __future__ import print_function\n\nimport json\nimport os\nimport random\nimport shutil\nimport string\n\ntry:\n # Inspired by\n # https://github.com/django/django/blob/master/django/utils/crypto.py\n random = random.SystemRandom()\n using_sysrandom = True\nexcept NotImplementedError:\n using_sysrandom = False\n\nTERMINATOR = \"\\x1b[0m\"\nWARNING = \"\\x1b[1;33m [WARNING]: \"\nINFO = \"\\x1b[1;33m [INFO]: \"\nHINT = \"\\x1b[3;33m\"\nSUCCESS = \"\\x1b[1;32m [SUCCESS]: \"\n\nDEBUG_VALUE = \"debug\"\n\n\ndef remove_open_source_files():\n file_names = [\"CONTRIBUTORS.txt\", \"LICENSE\"]\n for file_name in file_names:\n os.remove(file_name)\n\n\ndef remove_gplv3_files():\n file_names = [\"COPYING\"]\n for file_name in file_names:\n os.remove(file_name)\n\n\ndef remove_custom_user_manager_files():\n os.remove(\n os.path.join(\n \"{{cookiecutter.project_slug}}\",\n \"users\",\n \"managers.py\",\n )\n )\n os.remove(\n os.path.join(\n \"{{cookiecutter.project_slug}}\",\n \"users\",\n \"tests\",\n \"test_managers.py\",\n )\n )\n\n\ndef remove_pycharm_files():\n idea_dir_path = \".idea\"\n if os.path.exists(idea_dir_path):\n shutil.rmtree(idea_dir_path)\n\n docs_dir_path = os.path.join(\"docs\", \"pycharm\")\n if os.path.exists(docs_dir_path):\n shutil.rmtree(docs_dir_path)\n\n\ndef remove_docker_files():\n shutil.rmtree(\".devcontainer\")\n shutil.rmtree(\"compose\")\n\n file_names = [\"local.yml\", \"production.yml\", \".dockerignore\"]\n for file_name in file_names:\n os.remove(file_name)\n if \"{{ cookiecutter.editor }}\" == \"PyCharm\":\n file_names = [\"docker_compose_up_django.xml\", \"docker_compose_up_docs.xml\"]\n for file_name in file_names:\n os.remove(os.path.join(\".idea\", \"runConfigurations\", file_name))\n\n\ndef remove_utility_files():\n shutil.rmtree(\"utility\")\n\n\ndef remove_heroku_files():\n file_names = [\"Procfile\", \"runtime.txt\", \"requirements.txt\"]\n for file_name in file_names:\n if file_name == \"requirements.txt\" and \"{{ cookiecutter.ci_tool }}\".lower() == \"travis\":\n # don't remove the file if we are using travisci but not using heroku\n continue\n os.remove(file_name)\n shutil.rmtree(\"bin\")\n\n\ndef remove_sass_files():\n shutil.rmtree(os.path.join(\"{{cookiecutter.project_slug}}\", \"static\", \"sass\"))\n\n\ndef remove_gulp_files():\n file_names = [\"gulpfile.js\"]\n for file_name in file_names:\n os.remove(file_name)\n\n\ndef remove_webpack_files():\n shutil.rmtree(\"webpack\")\n remove_vendors_js()\n\n\ndef remove_vendors_js():\n vendors_js_path = os.path.join(\n \"{{ cookiecutter.project_slug }}\",\n \"static\",\n \"js\",\n \"vendors.js\",\n )\n if os.path.exists(vendors_js_path):\n os.remove(vendors_js_path)\n\n\ndef remove_packagejson_file():\n file_names = [\"package.json\"]\n for file_name in file_names:\n os.remove(file_name)\n\n\ndef update_package_json(remove_dev_deps=None, remove_keys=None, scripts=None):\n remove_dev_deps = remove_dev_deps or []\n remove_keys = remove_keys or []\n scripts = scripts or {}\n with open(\"package.json\", mode=\"r\") as fd:\n content = json.load(fd)\n for package_name in remove_dev_deps:\n content[\"devDependencies\"].pop(package_name)\n for key in remove_keys:\n 
content.pop(key)\n content[\"scripts\"].update(scripts)\n with open(\"package.json\", mode=\"w\") as fd:\n json.dump(content, fd, ensure_ascii=False, indent=2)\n fd.write(\"\\n\")\n\n\ndef handle_js_runner(choice, use_docker, use_async):\n if choice == \"Gulp\":\n update_package_json(\n remove_dev_deps=[\n \"@babel/core\",\n \"@babel/preset-env\",\n \"babel-loader\",\n \"concurrently\",\n \"css-loader\",\n \"mini-css-extract-plugin\",\n \"postcss-loader\",\n \"postcss-preset-env\",\n \"sass-loader\",\n \"webpack\",\n \"webpack-bundle-tracker\",\n \"webpack-cli\",\n \"webpack-dev-server\",\n \"webpack-merge\",\n ],\n remove_keys=[\"babel\"],\n scripts={\n \"dev\": \"gulp\",\n \"build\": \"gulp generate-assets\",\n },\n )\n remove_webpack_files()\n elif choice == \"Webpack\":\n scripts = {\n \"dev\": \"webpack serve --config webpack/dev.config.js\",\n \"build\": \"webpack --config webpack/prod.config.js\",\n }\n remove_dev_deps = [\n \"browser-sync\",\n \"cssnano\",\n \"gulp\",\n \"gulp-concat\",\n \"gulp-imagemin\",\n \"gulp-plumber\",\n \"gulp-postcss\",\n \"gulp-rename\",\n \"gulp-sass\",\n \"gulp-uglify-es\",\n ]\n if not use_docker:\n dev_django_cmd = (\n \"uvicorn config.asgi:application --reload\" if use_async else \"python manage.py runserver_plus\"\n )\n scripts.update(\n {\n \"dev\": \"concurrently npm:dev:*\",\n \"dev:webpack\": \"webpack serve --config webpack/dev.config.js\",\n \"dev:django\": dev_django_cmd,\n }\n )\n else:\n remove_dev_deps.append(\"concurrently\")\n update_package_json(remove_dev_deps=remove_dev_deps, scripts=scripts)\n remove_gulp_files()\n\n\ndef remove_prettier_pre_commit():\n with open(\".pre-commit-config.yaml\", \"r\") as fd:\n content = fd.readlines()\n\n removing = False\n new_lines = []\n for line in content:\n if removing and \"- repo:\" in line:\n removing = False\n if \"mirrors-prettier\" in line:\n removing = True\n if not removing:\n new_lines.append(line)\n\n with open(\".pre-commit-config.yaml\", \"w\") as fd:\n fd.writelines(new_lines)\n\n\ndef remove_celery_files():\n file_names = [\n os.path.join(\"config\", \"celery_app.py\"),\n os.path.join(\"{{ cookiecutter.project_slug }}\", \"users\", \"tasks.py\"),\n os.path.join(\"{{ cookiecutter.project_slug }}\", \"users\", \"tests\", \"test_tasks.py\"),\n ]\n for file_name in file_names:\n os.remove(file_name)\n\n\ndef remove_async_files():\n file_names = [\n os.path.join(\"config\", \"asgi.py\"),\n os.path.join(\"config\", \"websocket.py\"),\n ]\n for file_name in file_names:\n os.remove(file_name)\n\n\ndef remove_dottravisyml_file():\n os.remove(\".travis.yml\")\n\n\ndef remove_dotgitlabciyml_file():\n os.remove(\".gitlab-ci.yml\")\n\n\ndef remove_dotgithub_folder():\n shutil.rmtree(\".github\")\n\n\ndef remove_dotdrone_file():\n os.remove(\".drone.yml\")\n\n\ndef generate_random_string(length, using_digits=False, using_ascii_letters=False, using_punctuation=False):\n \"\"\"\n Example:\n opting out for 50 symbol-long, [a-z][A-Z][0-9] string\n would yield log_2((26+26+50)^50) ~= 334 bit strength.\n \"\"\"\n if not using_sysrandom:\n return None\n\n symbols = []\n if using_digits:\n symbols += string.digits\n if using_ascii_letters:\n symbols += string.ascii_letters\n if using_punctuation:\n all_punctuation = set(string.punctuation)\n # These symbols can cause issues in environment variables\n unsuitable = {\"'\", '\"', \"\\\\\", \"$\"}\n suitable = all_punctuation.difference(unsuitable)\n symbols += \"\".join(suitable)\n return \"\".join([random.choice(symbols) for _ in range(length)])\n\n\ndef 
set_flag(file_path, flag, value=None, formatted=None, *args, **kwargs):\n if value is None:\n random_string = generate_random_string(*args, **kwargs)\n if random_string is None:\n print(\n \"We couldn't find a secure pseudo-random number generator on your \"\n \"system. Please, make sure to manually {} later.\".format(flag)\n )\n random_string = flag\n if formatted is not None:\n random_string = formatted.format(random_string)\n value = random_string\n\n with open(file_path, \"r+\") as f:\n file_contents = f.read().replace(flag, value)\n f.seek(0)\n f.write(file_contents)\n f.truncate()\n\n return value\n\n\ndef set_django_secret_key(file_path):\n django_secret_key = set_flag(\n file_path,\n \"!!!SET DJANGO_SECRET_KEY!!!\",\n length=64,\n using_digits=True,\n using_ascii_letters=True,\n )\n return django_secret_key\n\n\ndef set_django_admin_url(file_path):\n django_admin_url = set_flag(\n file_path,\n \"!!!SET DJANGO_ADMIN_URL!!!\",\n formatted=\"{}/\",\n length=32,\n using_digits=True,\n using_ascii_letters=True,\n )\n return django_admin_url\n\n\ndef generate_random_user():\n return generate_random_string(length=32, using_ascii_letters=True)\n\n\ndef generate_postgres_user(debug=False):\n return DEBUG_VALUE if debug else generate_random_user()\n\n\ndef set_postgres_user(file_path, value):\n postgres_user = set_flag(file_path, \"!!!SET POSTGRES_USER!!!\", value=value)\n return postgres_user\n\n\ndef set_postgres_password(file_path, value=None):\n postgres_password = set_flag(\n file_path,\n \"!!!SET POSTGRES_PASSWORD!!!\",\n value=value,\n length=64,\n using_digits=True,\n using_ascii_letters=True,\n )\n return postgres_password\n\n\ndef set_celery_flower_user(file_path, value):\n celery_flower_user = set_flag(file_path, \"!!!SET CELERY_FLOWER_USER!!!\", value=value)\n return celery_flower_user\n\n\ndef set_celery_flower_password(file_path, value=None):\n celery_flower_password = set_flag(\n file_path,\n \"!!!SET CELERY_FLOWER_PASSWORD!!!\",\n value=value,\n length=64,\n using_digits=True,\n using_ascii_letters=True,\n )\n return celery_flower_password\n\n\ndef append_to_gitignore_file(ignored_line):\n with open(\".gitignore\", \"a\") as gitignore_file:\n gitignore_file.write(ignored_line)\n gitignore_file.write(\"\\n\")\n\n\ndef set_flags_in_envs(postgres_user, celery_flower_user, debug=False):\n local_django_envs_path = os.path.join(\".envs\", \".local\", \".django\")\n production_django_envs_path = os.path.join(\".envs\", \".production\", \".django\")\n local_postgres_envs_path = os.path.join(\".envs\", \".local\", \".postgres\")\n production_postgres_envs_path = os.path.join(\".envs\", \".production\", \".postgres\")\n\n set_django_secret_key(production_django_envs_path)\n set_django_admin_url(production_django_envs_path)\n\n set_postgres_user(local_postgres_envs_path, value=postgres_user)\n set_postgres_password(local_postgres_envs_path, value=DEBUG_VALUE if debug else None)\n set_postgres_user(production_postgres_envs_path, value=postgres_user)\n set_postgres_password(production_postgres_envs_path, value=DEBUG_VALUE if debug else None)\n\n set_celery_flower_user(local_django_envs_path, value=celery_flower_user)\n set_celery_flower_password(local_django_envs_path, value=DEBUG_VALUE if debug else None)\n set_celery_flower_user(production_django_envs_path, value=celery_flower_user)\n set_celery_flower_password(production_django_envs_path, value=DEBUG_VALUE if debug else None)\n\n\ndef set_flags_in_settings_files():\n set_django_secret_key(os.path.join(\"config\", \"settings\", 
\"local.py\"))\n set_django_secret_key(os.path.join(\"config\", \"settings\", \"test.py\"))\n\n\ndef remove_envs_and_associated_files():\n shutil.rmtree(\".envs\")\n os.remove(\"merge_production_dotenvs_in_dotenv.py\")\n shutil.rmtree(\"tests\")\n\n\ndef remove_celery_compose_dirs():\n shutil.rmtree(os.path.join(\"compose\", \"local\", \"django\", \"celery\"))\n shutil.rmtree(os.path.join(\"compose\", \"production\", \"django\", \"celery\"))\n\n\ndef remove_node_dockerfile():\n shutil.rmtree(os.path.join(\"compose\", \"local\", \"node\"))\n\n\ndef remove_aws_dockerfile():\n shutil.rmtree(os.path.join(\"compose\", \"production\", \"aws\"))\n\n\ndef remove_drf_starter_files():\n os.remove(os.path.join(\"config\", \"api_router.py\"))\n shutil.rmtree(os.path.join(\"{{cookiecutter.project_slug}}\", \"users\", \"api\"))\n os.remove(os.path.join(\"{{cookiecutter.project_slug}}\", \"users\", \"tests\", \"test_drf_urls.py\"))\n os.remove(os.path.join(\"{{cookiecutter.project_slug}}\", \"users\", \"tests\", \"test_drf_views.py\"))\n os.remove(os.path.join(\"{{cookiecutter.project_slug}}\", \"users\", \"tests\", \"test_swagger.py\"))\n\n\ndef remove_storages_module():\n os.remove(os.path.join(\"{{cookiecutter.project_slug}}\", \"utils\", \"storages.py\"))\n\n\ndef main():\n debug = \"{{ cookiecutter.debug }}\".lower() == \"y\"\n\n set_flags_in_envs(\n DEBUG_VALUE if debug else generate_random_user(),\n DEBUG_VALUE if debug else generate_random_user(),\n debug=debug,\n )\n set_flags_in_settings_files()\n\n if \"{{ cookiecutter.open_source_license }}\" == \"Not open source\":\n remove_open_source_files()\n if \"{{ cookiecutter.open_source_license}}\" != \"GPLv3\":\n remove_gplv3_files()\n\n if \"{{ cookiecutter.username_type }}\" == \"username\":\n remove_custom_user_manager_files()\n\n if \"{{ cookiecutter.editor }}\" != \"PyCharm\":\n remove_pycharm_files()\n\n if \"{{ cookiecutter.use_docker }}\".lower() == \"y\":\n remove_utility_files()\n else:\n remove_docker_files()\n\n if \"{{ cookiecutter.use_docker }}\".lower() == \"y\" and \"{{ cookiecutter.cloud_provider}}\" != \"AWS\":\n remove_aws_dockerfile()\n\n if \"{{ cookiecutter.use_heroku }}\".lower() == \"n\":\n remove_heroku_files()\n\n if \"{{ cookiecutter.use_docker }}\".lower() == \"n\" and \"{{ cookiecutter.use_heroku }}\".lower() == \"n\":\n if \"{{ cookiecutter.keep_local_envs_in_vcs }}\".lower() == \"y\":\n print(\n INFO + \".env(s) are only utilized when Docker Compose and/or \"\n \"Heroku support is enabled so keeping them does not make sense \"\n \"given your current setup.\" + TERMINATOR\n )\n remove_envs_and_associated_files()\n else:\n append_to_gitignore_file(\".env\")\n append_to_gitignore_file(\".envs/*\")\n if \"{{ cookiecutter.keep_local_envs_in_vcs }}\".lower() == \"y\":\n append_to_gitignore_file(\"!.envs/.local/\")\n\n if \"{{ cookiecutter.frontend_pipeline }}\" in [\"None\", \"Django Compressor\"]:\n remove_gulp_files()\n remove_webpack_files()\n remove_sass_files()\n remove_packagejson_file()\n remove_prettier_pre_commit()\n if \"{{ cookiecutter.use_docker }}\".lower() == \"y\":\n remove_node_dockerfile()\n else:\n handle_js_runner(\n \"{{ cookiecutter.frontend_pipeline }}\",\n use_docker=(\"{{ cookiecutter.use_docker }}\".lower() == \"y\"),\n use_async=(\"{{ cookiecutter.use_async }}\".lower() == \"y\"),\n )\n\n if \"{{ cookiecutter.cloud_provider }}\" == \"None\" and \"{{ cookiecutter.use_docker }}\".lower() == \"n\":\n print(\n WARNING + \"You chose to not use any cloud providers nor Docker, \"\n \"media files won't be 
served in production.\" + TERMINATOR\n )\n remove_storages_module()\n\n if \"{{ cookiecutter.use_celery }}\".lower() == \"n\":\n remove_celery_files()\n if \"{{ cookiecutter.use_docker }}\".lower() == \"y\":\n remove_celery_compose_dirs()\n\n if \"{{ cookiecutter.ci_tool }}\" != \"Travis\":\n remove_dottravisyml_file()\n\n if \"{{ cookiecutter.ci_tool }}\" != \"Gitlab\":\n remove_dotgitlabciyml_file()\n\n if \"{{ cookiecutter.ci_tool }}\" != \"Github\":\n remove_dotgithub_folder()\n\n if \"{{ cookiecutter.ci_tool }}\" != \"Drone\":\n remove_dotdrone_file()\n\n if \"{{ cookiecutter.use_drf }}\".lower() == \"n\":\n remove_drf_starter_files()\n\n if \"{{ cookiecutter.use_async }}\".lower() == \"n\":\n remove_async_files()\n\n print(SUCCESS + \"Project initialized, keep up the good work!\" + TERMINATOR)\n\n\nif __name__ == \"__main__\":\n main()\n",
"path": "hooks/post_gen_project.py"
}
] | diff --git a/hooks/post_gen_project.py b/hooks/post_gen_project.py
index 8d1be5a165..37f96efc03 100644
--- a/hooks/post_gen_project.py
+++ b/hooks/post_gen_project.py
@@ -183,6 +183,7 @@ def handle_js_runner(choice, use_docker, use_async):
"browser-sync",
"cssnano",
"gulp",
+ "gulp-concat",
"gulp-imagemin",
"gulp-plumber",
"gulp-postcss",
|
microsoft__botbuilder-python-1932 | Bump MSAL to the latest version
**Is your feature request related to a problem? Please describe.**
An old version of MSAL is used in [botframework-connector](https://github.com/microsoft/botbuilder-python/blob/main/libraries/botframework-connector/requirements.txt#L6) (v1.6.0)
**Describe the solution you'd like**
Upgrade to the [latest version](https://github.com/AzureAD/microsoft-authentication-library-for-python/releases) (v1.13.0 is the latest at this moment).
**Describe alternatives you've considered**
No alternatives.
**Additional context**
Please also consider not pinning this dependency (#1467).
| [
{
"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nNAME = \"botframework-connector\"\nVERSION = os.environ[\"packageVersion\"] if \"packageVersion\" in os.environ else \"4.15.0\"\nREQUIRES = [\n \"msrest==0.6.19\",\n \"requests>=2.23.0,<2.26\",\n \"PyJWT>=1.5.3,<2.0.0\",\n \"botbuilder-schema==4.15.0\",\n \"msal==1.6.0\",\n]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=NAME,\n version=VERSION,\n description=\"Microsoft Bot Framework Bot Builder SDK for Python.\",\n author=\"Microsoft\",\n url=\"https://www.github.com/Microsoft/botbuilder-python\",\n keywords=[\"BotFrameworkConnector\", \"bots\", \"ai\", \"botframework\", \"botbuilder\"],\n install_requires=REQUIRES,\n packages=[\n \"botframework.connector\",\n \"botframework.connector.auth\",\n \"botframework.connector.async_mixin\",\n \"botframework.connector.operations\",\n \"botframework.connector.models\",\n \"botframework.connector.aio\",\n \"botframework.connector.aio.operations_async\",\n \"botframework.connector.skills\",\n \"botframework.connector.teams\",\n \"botframework.connector.teams.operations\",\n \"botframework.connector.token_api\",\n \"botframework.connector.token_api.aio\",\n \"botframework.connector.token_api.aio.operations_async\",\n \"botframework.connector.token_api.models\",\n \"botframework.connector.token_api.operations\",\n ],\n include_package_data=True,\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=\"MIT\",\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n",
"path": "libraries/botframework-connector/setup.py"
}
] | [
{
"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nNAME = \"botframework-connector\"\nVERSION = os.environ[\"packageVersion\"] if \"packageVersion\" in os.environ else \"4.15.0\"\nREQUIRES = [\n \"msrest==0.6.19\",\n \"requests>=2.23.0,<2.26\",\n \"PyJWT>=1.5.3,<2.0.0\",\n \"botbuilder-schema==4.15.0\",\n \"msal==1.17.0\",\n]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=NAME,\n version=VERSION,\n description=\"Microsoft Bot Framework Bot Builder SDK for Python.\",\n author=\"Microsoft\",\n url=\"https://www.github.com/Microsoft/botbuilder-python\",\n keywords=[\"BotFrameworkConnector\", \"bots\", \"ai\", \"botframework\", \"botbuilder\"],\n install_requires=REQUIRES,\n packages=[\n \"botframework.connector\",\n \"botframework.connector.auth\",\n \"botframework.connector.async_mixin\",\n \"botframework.connector.operations\",\n \"botframework.connector.models\",\n \"botframework.connector.aio\",\n \"botframework.connector.aio.operations_async\",\n \"botframework.connector.skills\",\n \"botframework.connector.teams\",\n \"botframework.connector.teams.operations\",\n \"botframework.connector.token_api\",\n \"botframework.connector.token_api.aio\",\n \"botframework.connector.token_api.aio.operations_async\",\n \"botframework.connector.token_api.models\",\n \"botframework.connector.token_api.operations\",\n ],\n include_package_data=True,\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=\"MIT\",\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n",
"path": "libraries/botframework-connector/setup.py"
}
] | diff --git a/libraries/botframework-connector/setup.py b/libraries/botframework-connector/setup.py
index 7e6bff958..84bb6dce9 100644
--- a/libraries/botframework-connector/setup.py
+++ b/libraries/botframework-connector/setup.py
@@ -11,7 +11,7 @@
"requests>=2.23.0,<2.26",
"PyJWT>=1.5.3,<2.0.0",
"botbuilder-schema==4.15.0",
- "msal==1.6.0",
+ "msal==1.17.0",
]
root = os.path.abspath(os.path.dirname(__file__))
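On the "Additional context" point above (not pinning the dependency, #1467): the merged diff keeps an exact pin, `msal==1.17.0`. Below is a minimal sketch of how the same `REQUIRES` entry could instead declare a compatible range; the `<2.0.0` upper bound is an illustrative assumption and is not taken from this PR.

```python
# Hypothetical alternative to the exact pin shown in the diff above.
# The ">=1.17.0,<2.0.0" range is an assumption for illustration only;
# the merged PR itself keeps "msal==1.17.0".
REQUIRES = [
    "msrest==0.6.19",
    "requests>=2.23.0,<2.26",
    "PyJWT>=1.5.3,<2.0.0",
    "botbuilder-schema==4.15.0",
    "msal>=1.17.0,<2.0.0",  # compatible range instead of an exact pin
]
```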
|
webkom__lego-1505 | Add end_time of an event when getting all events with a GET request
I want to be able to get the end time of an event when getting all events. I know I can get the end time when getting a specific event, but it is a bit cumbersome.
| [
{
"content": "from django.db import transaction\nfrom rest_framework import serializers\nfrom rest_framework.fields import BooleanField, CharField\n\nfrom lego.apps.comments.serializers import CommentSerializer\nfrom lego.apps.companies.fields import CompanyField\nfrom lego.apps.companies.models import Company\nfrom lego.apps.content.fields import ContentSerializerField\nfrom lego.apps.events.constants import PRESENT\nfrom lego.apps.events.fields import ActivationTimeField, SpotsLeftField\nfrom lego.apps.events.models import Event, Pool\nfrom lego.apps.events.serializers.pools import (\n PoolAdministrateSerializer,\n PoolCreateAndUpdateSerializer,\n PoolReadAuthSerializer,\n PoolReadSerializer,\n)\nfrom lego.apps.events.serializers.registrations import (\n RegistrationReadDetailedSerializer,\n RegistrationReadSerializer,\n)\nfrom lego.apps.files.fields import ImageField\nfrom lego.apps.tags.serializers import TagSerializerMixin\nfrom lego.apps.users.constants import GROUP_GRADE\nfrom lego.apps.users.fields import AbakusGroupField\nfrom lego.apps.users.models import AbakusGroup\nfrom lego.apps.users.serializers.users import PublicUserSerializer\nfrom lego.utils.serializers import BasisModelSerializer\n\n\nclass EventPublicSerializer(BasisModelSerializer):\n\n thumbnail = ImageField(\n source=\"cover\",\n required=False,\n options={\"height\": 500, \"width\": 500, \"smart\": True},\n )\n\n class Meta:\n model = Event\n fields = (\"id\", \"title\", \"description\", \"event_type\", \"location\", \"thumbnail\")\n read_only = True\n\n\nclass EventReadSerializer(TagSerializerMixin, BasisModelSerializer):\n company = CompanyField(queryset=Company.objects.all())\n cover = ImageField(required=False, options={\"height\": 500})\n thumbnail = ImageField(\n source=\"cover\",\n required=False,\n options={\"height\": 500, \"width\": 500, \"smart\": True},\n )\n activation_time = ActivationTimeField()\n\n class Meta:\n model = Event\n fields = (\n \"id\",\n \"title\",\n \"description\",\n \"cover\",\n \"event_type\",\n \"location\",\n \"start_time\",\n \"thumbnail\",\n \"total_capacity\",\n \"company\",\n \"registration_count\",\n \"tags\",\n \"activation_time\",\n )\n read_only = True\n\n\nclass EventReadDetailedSerializer(TagSerializerMixin, BasisModelSerializer):\n comments = CommentSerializer(read_only=True, many=True)\n comment_target = CharField(read_only=True)\n cover = ImageField(required=False, options={\"height\": 500})\n company = CompanyField(queryset=Company.objects.all())\n responsible_group = AbakusGroupField(\n queryset=AbakusGroup.objects.all(), required=False, allow_null=True\n )\n pools = PoolReadSerializer(many=True)\n active_capacity = serializers.ReadOnlyField()\n text = ContentSerializerField()\n created_by = PublicUserSerializer()\n\n registration_close_time = serializers.DateTimeField(read_only=True)\n\n class Meta:\n model = Event\n fields = (\n \"id\",\n \"title\",\n \"description\",\n \"cover\",\n \"text\",\n \"event_type\",\n \"location\",\n \"comments\",\n \"comment_target\",\n \"start_time\",\n \"end_time\",\n \"merge_time\",\n \"pools\",\n \"registration_close_time\",\n \"registration_deadline_hours\",\n \"unregistration_deadline\",\n \"company\",\n \"responsible_group\",\n \"active_capacity\",\n \"feedback_description\",\n \"feedback_required\",\n \"is_priced\",\n \"price_member\",\n \"price_guest\",\n \"use_stripe\",\n \"payment_due_date\",\n \"use_captcha\",\n \"waiting_registration_count\",\n \"tags\",\n \"is_merged\",\n \"heed_penalties\",\n \"created_by\",\n 
\"is_abakom_only\",\n \"registration_count\",\n \"survey\",\n \"use_consent\",\n )\n read_only = True\n\n\nclass EventForSurveySerializer(EventReadSerializer):\n attended_count = serializers.SerializerMethodField()\n\n class Meta:\n model = Event\n fields = EventReadSerializer.Meta.fields + (\n \"registration_count\",\n \"waiting_registration_count\",\n \"attended_count\",\n )\n read_only = True\n\n def get_attended_count(self, event):\n return event.registrations.filter(presence=PRESENT).count()\n\n\nclass EventUserRegSerializer(EventReadSerializer):\n user_reg = serializers.SerializerMethodField()\n\n class Meta:\n model = Event\n fields = EventReadSerializer.Meta.fields + (\"user_reg\",)\n read_only = True\n\n def get_user_reg(self, event):\n return RegistrationReadSerializer(event.user_reg[0]).data\n\n\nclass EventReadUserDetailedSerializer(EventReadDetailedSerializer):\n \"\"\" User specfic event serializer that appends data based on request.user \"\"\"\n\n activation_time = ActivationTimeField()\n spots_left = SpotsLeftField()\n price = serializers.SerializerMethodField()\n\n class Meta(EventReadDetailedSerializer.Meta):\n fields = EventReadDetailedSerializer.Meta.fields + (\n \"price\",\n \"activation_time\",\n \"spots_left\",\n )\n\n def get_price(self, obj):\n request = self.context.get(\"request\", None)\n if request:\n return obj.get_price(user=request.user)\n\n\nclass EventReadAuthUserDetailedSerializer(EventReadUserDetailedSerializer):\n pools = PoolReadAuthSerializer(many=True)\n waiting_registrations = RegistrationReadSerializer(many=True)\n unanswered_surveys = serializers.SerializerMethodField()\n\n class Meta(EventReadUserDetailedSerializer.Meta):\n fields = EventReadUserDetailedSerializer.Meta.fields + (\n \"waiting_registrations\",\n \"unanswered_surveys\",\n )\n\n def get_unanswered_surveys(self, obj):\n request = self.context.get(\"request\", None)\n return request.user.unanswered_surveys()\n\n\nclass EventAdministrateSerializer(EventReadSerializer):\n pools = PoolAdministrateSerializer(many=True)\n unregistered = RegistrationReadDetailedSerializer(many=True)\n waiting_registrations = RegistrationReadDetailedSerializer(many=True)\n\n class Meta(EventReadSerializer.Meta):\n fields = EventReadSerializer.Meta.fields + (\n \"pools\",\n \"unregistered\",\n \"waiting_registrations\",\n \"use_consent\",\n )\n\n\nclass EventCreateAndUpdateSerializer(TagSerializerMixin, BasisModelSerializer):\n cover = ImageField(required=False, options={\"height\": 500})\n responsible_group = AbakusGroupField(\n queryset=AbakusGroup.objects.all(), required=False, allow_null=True\n )\n pools = PoolCreateAndUpdateSerializer(many=True, required=False)\n text = ContentSerializerField()\n is_abakom_only = BooleanField(required=False, default=False)\n\n registration_close_time = serializers.DateTimeField(read_only=True)\n\n class Meta:\n model = Event\n fields = (\n \"id\",\n \"title\",\n \"cover\",\n \"description\",\n \"text\",\n \"company\",\n \"responsible_group\",\n \"feedback_description\",\n \"feedback_required\",\n \"event_type\",\n \"location\",\n \"is_priced\",\n \"price_member\",\n \"price_guest\",\n \"use_stripe\",\n \"payment_due_date\",\n \"start_time\",\n \"end_time\",\n \"merge_time\",\n \"use_captcha\",\n \"tags\",\n \"pools\",\n \"unregistration_deadline\",\n \"pinned\",\n \"use_consent\",\n \"heed_penalties\",\n \"is_abakom_only\",\n \"registration_deadline_hours\",\n \"registration_close_time\",\n )\n\n def validate(self, data):\n \"\"\"\n Check that start is before finish.\n 
\"\"\"\n if hasattr(data, \"start_time\") and hasattr(data, \"end_time\"):\n if data[\"start_time\"] > data[\"end_time\"]:\n raise serializers.ValidationError(\n {\n \"end_time\": \"User does not have the required permissions for time travel\"\n }\n )\n return data\n\n def create(self, validated_data):\n pools = validated_data.pop(\"pools\", [])\n is_abakom_only = validated_data.pop(\"is_abakom_only\", False)\n with transaction.atomic():\n event = super().create(validated_data)\n for pool in pools:\n permission_groups = pool.pop(\"permission_groups\")\n created_pool = Pool.objects.create(event=event, **pool)\n created_pool.permission_groups.set(permission_groups)\n event.set_abakom_only(is_abakom_only)\n return event\n\n def update(self, instance, validated_data):\n pools = validated_data.pop(\"pools\", None)\n is_abakom_only = validated_data.pop(\"is_abakom_only\", False)\n with transaction.atomic():\n if pools is not None:\n existing_pools = list(instance.pools.all().values_list(\"id\", flat=True))\n for pool in pools:\n pool_id = pool.get(\"id\", None)\n if pool_id in existing_pools:\n existing_pools.remove(pool_id)\n permission_groups = pool.pop(\"permission_groups\")\n created_pool = Pool.objects.update_or_create(\n event=instance,\n id=pool_id,\n defaults={\n \"name\": pool.get(\"name\"),\n \"capacity\": pool.get(\"capacity\", 0),\n \"activation_date\": pool.get(\"activation_date\"),\n },\n )[0]\n created_pool.permission_groups.set(permission_groups)\n for pool_id in existing_pools:\n Pool.objects.get(id=pool_id).delete()\n instance.set_abakom_only(is_abakom_only)\n return super().update(instance, validated_data)\n\n\nclass EventSearchSerializer(serializers.ModelSerializer):\n cover = ImageField(required=False, options={\"height\": 500})\n thumbnail = ImageField(\n source=\"cover\",\n required=False,\n options={\"height\": 500, \"width\": 500, \"smart\": True},\n )\n text = ContentSerializerField()\n activation_time = ActivationTimeField()\n\n class Meta:\n model = Event\n fields = (\n \"id\",\n \"title\",\n \"description\",\n \"cover\",\n \"text\",\n \"event_type\",\n \"location\",\n \"start_time\",\n \"thumbnail\",\n \"end_time\",\n \"total_capacity\",\n \"company\",\n \"registration_count\",\n \"tags\",\n \"activation_time\",\n \"pinned\",\n )\n read_only = True\n\n\ndef populate_event_registration_users_with_grade(event_dict):\n \"\"\"\n Populates every user in registrations in a serialized event with `grade`.\n Mainly used in the administrate endpoint\n :param event_dict:\n :return:\n \"\"\"\n grades = AbakusGroup.objects.filter(type=GROUP_GRADE).values(\"id\", \"name\")\n grade_dict = {item[\"id\"]: item for item in grades}\n for pool in event_dict.get(\"pools\", []):\n for registration in pool.get(\"registrations\", []):\n user = registration.get(\"user\", {})\n abakus_groups = user.get(\"abakus_groups\", [])\n user[\"grade\"] = None\n for id in abakus_groups:\n grade = grade_dict.get(id, None)\n if grade:\n user[\"grade\"] = grade\n return event_dict\n",
"path": "lego/apps/events/serializers/events.py"
}
] | [
{
"content": "from django.db import transaction\nfrom rest_framework import serializers\nfrom rest_framework.fields import BooleanField, CharField\n\nfrom lego.apps.comments.serializers import CommentSerializer\nfrom lego.apps.companies.fields import CompanyField\nfrom lego.apps.companies.models import Company\nfrom lego.apps.content.fields import ContentSerializerField\nfrom lego.apps.events.constants import PRESENT\nfrom lego.apps.events.fields import ActivationTimeField, SpotsLeftField\nfrom lego.apps.events.models import Event, Pool\nfrom lego.apps.events.serializers.pools import (\n PoolAdministrateSerializer,\n PoolCreateAndUpdateSerializer,\n PoolReadAuthSerializer,\n PoolReadSerializer,\n)\nfrom lego.apps.events.serializers.registrations import (\n RegistrationReadDetailedSerializer,\n RegistrationReadSerializer,\n)\nfrom lego.apps.files.fields import ImageField\nfrom lego.apps.tags.serializers import TagSerializerMixin\nfrom lego.apps.users.constants import GROUP_GRADE\nfrom lego.apps.users.fields import AbakusGroupField\nfrom lego.apps.users.models import AbakusGroup\nfrom lego.apps.users.serializers.users import PublicUserSerializer\nfrom lego.utils.serializers import BasisModelSerializer\n\n\nclass EventPublicSerializer(BasisModelSerializer):\n\n thumbnail = ImageField(\n source=\"cover\",\n required=False,\n options={\"height\": 500, \"width\": 500, \"smart\": True},\n )\n\n class Meta:\n model = Event\n fields = (\"id\", \"title\", \"description\", \"event_type\", \"location\", \"thumbnail\")\n read_only = True\n\n\nclass EventReadSerializer(TagSerializerMixin, BasisModelSerializer):\n company = CompanyField(queryset=Company.objects.all())\n cover = ImageField(required=False, options={\"height\": 500})\n thumbnail = ImageField(\n source=\"cover\",\n required=False,\n options={\"height\": 500, \"width\": 500, \"smart\": True},\n )\n activation_time = ActivationTimeField()\n\n class Meta:\n model = Event\n fields = (\n \"id\",\n \"title\",\n \"description\",\n \"cover\",\n \"event_type\",\n \"location\",\n \"start_time\",\n \"end_time\",\n \"thumbnail\",\n \"total_capacity\",\n \"company\",\n \"registration_count\",\n \"tags\",\n \"activation_time\",\n )\n read_only = True\n\n\nclass EventReadDetailedSerializer(TagSerializerMixin, BasisModelSerializer):\n comments = CommentSerializer(read_only=True, many=True)\n comment_target = CharField(read_only=True)\n cover = ImageField(required=False, options={\"height\": 500})\n company = CompanyField(queryset=Company.objects.all())\n responsible_group = AbakusGroupField(\n queryset=AbakusGroup.objects.all(), required=False, allow_null=True\n )\n pools = PoolReadSerializer(many=True)\n active_capacity = serializers.ReadOnlyField()\n text = ContentSerializerField()\n created_by = PublicUserSerializer()\n\n registration_close_time = serializers.DateTimeField(read_only=True)\n\n class Meta:\n model = Event\n fields = (\n \"id\",\n \"title\",\n \"description\",\n \"cover\",\n \"text\",\n \"event_type\",\n \"location\",\n \"comments\",\n \"comment_target\",\n \"start_time\",\n \"end_time\",\n \"merge_time\",\n \"pools\",\n \"registration_close_time\",\n \"registration_deadline_hours\",\n \"unregistration_deadline\",\n \"company\",\n \"responsible_group\",\n \"active_capacity\",\n \"feedback_description\",\n \"feedback_required\",\n \"is_priced\",\n \"price_member\",\n \"price_guest\",\n \"use_stripe\",\n \"payment_due_date\",\n \"use_captcha\",\n \"waiting_registration_count\",\n \"tags\",\n \"is_merged\",\n \"heed_penalties\",\n 
\"created_by\",\n \"is_abakom_only\",\n \"registration_count\",\n \"survey\",\n \"use_consent\",\n )\n read_only = True\n\n\nclass EventForSurveySerializer(EventReadSerializer):\n attended_count = serializers.SerializerMethodField()\n\n class Meta:\n model = Event\n fields = EventReadSerializer.Meta.fields + (\n \"registration_count\",\n \"waiting_registration_count\",\n \"attended_count\",\n )\n read_only = True\n\n def get_attended_count(self, event):\n return event.registrations.filter(presence=PRESENT).count()\n\n\nclass EventUserRegSerializer(EventReadSerializer):\n user_reg = serializers.SerializerMethodField()\n\n class Meta:\n model = Event\n fields = EventReadSerializer.Meta.fields + (\"user_reg\",)\n read_only = True\n\n def get_user_reg(self, event):\n return RegistrationReadSerializer(event.user_reg[0]).data\n\n\nclass EventReadUserDetailedSerializer(EventReadDetailedSerializer):\n \"\"\" User specfic event serializer that appends data based on request.user \"\"\"\n\n activation_time = ActivationTimeField()\n spots_left = SpotsLeftField()\n price = serializers.SerializerMethodField()\n\n class Meta(EventReadDetailedSerializer.Meta):\n fields = EventReadDetailedSerializer.Meta.fields + (\n \"price\",\n \"activation_time\",\n \"spots_left\",\n )\n\n def get_price(self, obj):\n request = self.context.get(\"request\", None)\n if request:\n return obj.get_price(user=request.user)\n\n\nclass EventReadAuthUserDetailedSerializer(EventReadUserDetailedSerializer):\n pools = PoolReadAuthSerializer(many=True)\n waiting_registrations = RegistrationReadSerializer(many=True)\n unanswered_surveys = serializers.SerializerMethodField()\n\n class Meta(EventReadUserDetailedSerializer.Meta):\n fields = EventReadUserDetailedSerializer.Meta.fields + (\n \"waiting_registrations\",\n \"unanswered_surveys\",\n )\n\n def get_unanswered_surveys(self, obj):\n request = self.context.get(\"request\", None)\n return request.user.unanswered_surveys()\n\n\nclass EventAdministrateSerializer(EventReadSerializer):\n pools = PoolAdministrateSerializer(many=True)\n unregistered = RegistrationReadDetailedSerializer(many=True)\n waiting_registrations = RegistrationReadDetailedSerializer(many=True)\n\n class Meta(EventReadSerializer.Meta):\n fields = EventReadSerializer.Meta.fields + (\n \"pools\",\n \"unregistered\",\n \"waiting_registrations\",\n \"use_consent\",\n )\n\n\nclass EventCreateAndUpdateSerializer(TagSerializerMixin, BasisModelSerializer):\n cover = ImageField(required=False, options={\"height\": 500})\n responsible_group = AbakusGroupField(\n queryset=AbakusGroup.objects.all(), required=False, allow_null=True\n )\n pools = PoolCreateAndUpdateSerializer(many=True, required=False)\n text = ContentSerializerField()\n is_abakom_only = BooleanField(required=False, default=False)\n\n registration_close_time = serializers.DateTimeField(read_only=True)\n\n class Meta:\n model = Event\n fields = (\n \"id\",\n \"title\",\n \"cover\",\n \"description\",\n \"text\",\n \"company\",\n \"responsible_group\",\n \"feedback_description\",\n \"feedback_required\",\n \"event_type\",\n \"location\",\n \"is_priced\",\n \"price_member\",\n \"price_guest\",\n \"use_stripe\",\n \"payment_due_date\",\n \"start_time\",\n \"end_time\",\n \"merge_time\",\n \"use_captcha\",\n \"tags\",\n \"pools\",\n \"unregistration_deadline\",\n \"pinned\",\n \"use_consent\",\n \"heed_penalties\",\n \"is_abakom_only\",\n \"registration_deadline_hours\",\n \"registration_close_time\",\n )\n\n def validate(self, data):\n \"\"\"\n Check that start is 
before finish.\n \"\"\"\n if hasattr(data, \"start_time\") and hasattr(data, \"end_time\"):\n if data[\"start_time\"] > data[\"end_time\"]:\n raise serializers.ValidationError(\n {\n \"end_time\": \"User does not have the required permissions for time travel\"\n }\n )\n return data\n\n def create(self, validated_data):\n pools = validated_data.pop(\"pools\", [])\n is_abakom_only = validated_data.pop(\"is_abakom_only\", False)\n with transaction.atomic():\n event = super().create(validated_data)\n for pool in pools:\n permission_groups = pool.pop(\"permission_groups\")\n created_pool = Pool.objects.create(event=event, **pool)\n created_pool.permission_groups.set(permission_groups)\n event.set_abakom_only(is_abakom_only)\n return event\n\n def update(self, instance, validated_data):\n pools = validated_data.pop(\"pools\", None)\n is_abakom_only = validated_data.pop(\"is_abakom_only\", False)\n with transaction.atomic():\n if pools is not None:\n existing_pools = list(instance.pools.all().values_list(\"id\", flat=True))\n for pool in pools:\n pool_id = pool.get(\"id\", None)\n if pool_id in existing_pools:\n existing_pools.remove(pool_id)\n permission_groups = pool.pop(\"permission_groups\")\n created_pool = Pool.objects.update_or_create(\n event=instance,\n id=pool_id,\n defaults={\n \"name\": pool.get(\"name\"),\n \"capacity\": pool.get(\"capacity\", 0),\n \"activation_date\": pool.get(\"activation_date\"),\n },\n )[0]\n created_pool.permission_groups.set(permission_groups)\n for pool_id in existing_pools:\n Pool.objects.get(id=pool_id).delete()\n instance.set_abakom_only(is_abakom_only)\n return super().update(instance, validated_data)\n\n\nclass EventSearchSerializer(serializers.ModelSerializer):\n cover = ImageField(required=False, options={\"height\": 500})\n thumbnail = ImageField(\n source=\"cover\",\n required=False,\n options={\"height\": 500, \"width\": 500, \"smart\": True},\n )\n text = ContentSerializerField()\n activation_time = ActivationTimeField()\n\n class Meta:\n model = Event\n fields = (\n \"id\",\n \"title\",\n \"description\",\n \"cover\",\n \"text\",\n \"event_type\",\n \"location\",\n \"start_time\",\n \"thumbnail\",\n \"end_time\",\n \"total_capacity\",\n \"company\",\n \"registration_count\",\n \"tags\",\n \"activation_time\",\n \"pinned\",\n )\n read_only = True\n\n\ndef populate_event_registration_users_with_grade(event_dict):\n \"\"\"\n Populates every user in registrations in a serialized event with `grade`.\n Mainly used in the administrate endpoint\n :param event_dict:\n :return:\n \"\"\"\n grades = AbakusGroup.objects.filter(type=GROUP_GRADE).values(\"id\", \"name\")\n grade_dict = {item[\"id\"]: item for item in grades}\n for pool in event_dict.get(\"pools\", []):\n for registration in pool.get(\"registrations\", []):\n user = registration.get(\"user\", {})\n abakus_groups = user.get(\"abakus_groups\", [])\n user[\"grade\"] = None\n for id in abakus_groups:\n grade = grade_dict.get(id, None)\n if grade:\n user[\"grade\"] = grade\n return event_dict\n",
"path": "lego/apps/events/serializers/events.py"
}
] | diff --git a/lego/apps/events/serializers/events.py b/lego/apps/events/serializers/events.py
index 95da047a2..7f81934ee 100644
--- a/lego/apps/events/serializers/events.py
+++ b/lego/apps/events/serializers/events.py
@@ -62,6 +62,7 @@ class Meta:
"event_type",
"location",
"start_time",
+ "end_time",
"thumbnail",
"total_capacity",
"company",
diff --git a/requirements/base.txt b/requirements/base.txt
index 33e73c150..fc7b53fe4 100644
--- a/requirements/base.txt
+++ b/requirements/base.txt
@@ -29,6 +29,8 @@ libthumbor==1.3.2
channels==2.1.7
channels_redis==2.3.3
daphne==2.2.5
+# Implicit dependency
+flatbuffers==1.10
djangorestframework==3.9.0
djangorestframework-jwt==1.11.0
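The functional part of the diff above is the one-line addition of `"end_time"` to `EventReadSerializer.Meta.fields`, the serializer behind the event list endpoint (the detail serializer already exposed `end_time`); the `requirements/base.txt` hunk only pins an implicit dependency and is unrelated to the serializer change. A trimmed sketch of the pattern follows, with the field list shortened for illustration; the real serializer mixes in tag and basis-model behaviour and lists many more fields.

```python
# Trimmed illustration of the change above: listing a model field in
# Meta.fields is all DRF needs to include it in the list-endpoint payload.
from rest_framework import serializers

from lego.apps.events.models import Event


class EventReadSerializer(serializers.ModelSerializer):
    class Meta:
        model = Event
        # "end_time" added alongside "start_time", as in the PR diff
        fields = ("id", "title", "location", "start_time", "end_time")
```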
|
aio-libs-abandoned__aioredis-py-1048 | [2.0] Type annotations break mypy
I tried porting an existing project to aioredis 2.0. I've got it almost working, but the type annotations that have been added are too strict (and in some cases just wrong) and break mypy. The main problem is that all the functions that take keys annotate them as `str`, when `bytes` (and I think several other types) are perfectly acceptable and are used in my code. The same applies to `register_script`.
The `min` and `max` arguments of `zrangebylex` and `zrevrangebylex` are annotated as int, but they're used for lexicographical sorting so are string-like.
Getting the type annotations right is a fairly large undertaking. If there is a desire to release 2.0 soon I'd suggest deleting `py.typed` so that mypy doesn't see this package as annotated. There are annotations for redis-py in typeshed; perhaps that would be a good place to start, although I've occasionally also had issues there.
| [
{
"content": "import os.path\nimport re\n\nfrom setuptools import find_packages, setup\n\n\ndef read(*parts):\n with open(os.path.join(*parts)) as f:\n return f.read().strip()\n\n\ndef read_version():\n regexp = re.compile(r\"^__version__\\W*=\\W*\\\"([\\d.abrc]+)\\\"\")\n init_py = os.path.join(os.path.dirname(__file__), \"aioredis\", \"__init__.py\")\n with open(init_py) as f:\n for line in f:\n match = regexp.match(line)\n if match is not None:\n return match.group(1)\n raise RuntimeError(f\"Cannot find version in {init_py}\")\n\n\nclassifiers = [\n \"License :: OSI Approved :: MIT License\",\n \"Development Status :: 4 - Beta\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Operating System :: POSIX\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"Topic :: Software Development\",\n \"Topic :: Software Development :: Libraries\",\n \"Framework :: AsyncIO\",\n]\n\nsetup(\n name=\"aioredis\",\n version=read_version(),\n description=\"asyncio (PEP 3156) Redis support\",\n long_description=\"\\n\\n\".join((read(\"README.md\"), read(\"CHANGELOG.md\"))),\n long_description_content_type=\"text/markdown\",\n classifiers=classifiers,\n platforms=[\"POSIX\"],\n url=\"https://github.com/aio-libs/aioredis\",\n license=\"MIT\",\n packages=find_packages(exclude=[\"tests\"]),\n install_requires=[\n \"async-timeout\",\n \"typing-extensions\",\n ],\n extras_require={\n \"hiredis\": 'hiredis>=1.0; implementation_name==\"cpython\"',\n },\n python_requires=\">=3.6\",\n include_package_data=True,\n)\n",
"path": "setup.py"
}
] | [
{
"content": "import os.path\nimport re\n\nfrom setuptools import find_packages, setup\n\n\ndef read(*parts):\n with open(os.path.join(*parts)) as f:\n return f.read().strip()\n\n\ndef read_version():\n regexp = re.compile(r\"^__version__\\W*=\\W*\\\"([\\d.abrc]+)\\\"\")\n init_py = os.path.join(os.path.dirname(__file__), \"aioredis\", \"__init__.py\")\n with open(init_py) as f:\n for line in f:\n match = regexp.match(line)\n if match is not None:\n return match.group(1)\n raise RuntimeError(f\"Cannot find version in {init_py}\")\n\n\nclassifiers = [\n \"License :: OSI Approved :: MIT License\",\n \"Development Status :: 4 - Beta\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Operating System :: POSIX\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"Topic :: Software Development\",\n \"Topic :: Software Development :: Libraries\",\n \"Framework :: AsyncIO\",\n]\n\nsetup(\n name=\"aioredis\",\n version=read_version(),\n description=\"asyncio (PEP 3156) Redis support\",\n long_description=\"\\n\\n\".join((read(\"README.md\"), read(\"CHANGELOG.md\"))),\n long_description_content_type=\"text/markdown\",\n classifiers=classifiers,\n platforms=[\"POSIX\"],\n url=\"https://github.com/aio-libs/aioredis\",\n license=\"MIT\",\n packages=find_packages(exclude=[\"tests\"]),\n install_requires=[\n \"async-timeout\",\n \"typing-extensions\",\n ],\n extras_require={\n \"hiredis\": 'hiredis>=1.0; implementation_name==\"cpython\"',\n },\n package_data={\"aioredis\": [\"py.typed\"]},\n python_requires=\">=3.6\",\n include_package_data=True,\n)\n",
"path": "setup.py"
}
] | diff --git a/CHANGES/1009.misc b/CHANGES/1009.misc
deleted file mode 100644
index e14ad9413..000000000
--- a/CHANGES/1009.misc
+++ /dev/null
@@ -1 +0,0 @@
-Temporarily remove py.typed because there are errors in the type annotations.
diff --git a/aioredis/py.typed b/aioredis/py.typed
new file mode 100644
index 000000000..e69de29bb
diff --git a/setup.py b/setup.py
index 942ed303e..6a2eb8ac7 100644
--- a/setup.py
+++ b/setup.py
@@ -54,6 +54,7 @@ def read_version():
extras_require={
"hiredis": 'hiredis>=1.0; implementation_name=="cpython"',
},
+ package_data={"aioredis": ["py.typed"]},
python_requires=">=3.6",
include_package_data=True,
)
|
fidals__shopelectro-346 | Do `gulp build` on image side
Currently, we have problems with the gulp build on the client side. See #344 for details.
Moreover, building static files on the container side is better practice.
| [
{
"content": "\"\"\"\nDjango settings for shopelectro project.\n\nGenerated by 'django-admin startproject' using Django 1.9.5.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.9/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.9/ref/settings/\n\"\"\"\n\nimport os\nfrom datetime import datetime\n\nimport dj_database_url\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(\n os.path.dirname(os.path.abspath(__file__))))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get('SECRET_KEY', 'so_secret_key')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\n# http://bit.ly/sorl-thumbnail-docs\nTHUMBNAIL_DEBUG = False\n\nALLOWED_HOSTS = ['*']\n\nif os.environ.get('TEST_ENV', False):\n # disable https in CI\n # https://docs.djangoproject.com/en/1.9/ref/settings/#secure-proxy-ssl-header\n SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'http')\n\n# Enable in frame loading for Ya.Metric\n# https://docs.djangoproject.com/es/1.10/ref/clickjacking/\n# https://yandex.ru/support/metrika/general/counter-webvisor.xml#download-page\nX_FRAME_OPTIONS = 'ALLOW-FROM http://webvisor.com'\n\n# Application definition\nINSTALLED_APPS = [\n # https://docs.djangoproject.com/en/1.9/ref/contrib/admin/#django.contrib.admin.autodiscover\n 'django.contrib.contenttypes',\n 'django.contrib.auth',\n 'django.contrib.messages',\n 'django.contrib.redirects',\n 'django.contrib.sessions',\n 'django.contrib.sitemaps',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n 'django_user_agents',\n 'generic_admin',\n 'django.contrib.admin.apps.SimpleAdminConfig',\n 'debug_toolbar',\n 'mptt',\n 'widget_tweaks',\n 'sorl.thumbnail',\n 'django_select2',\n 'images',\n 'pages',\n 'catalog',\n 'search',\n 'ecommerce',\n 'shopelectro',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.contrib.redirects.middleware.RedirectFallbackMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django_user_agents.middleware.UserAgentMiddleware',\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n]\n\nROOT_URLCONF = 'shopelectro.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 'templates')],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.media',\n 'django.template.context_processors.request',\n 'django.template.context_processors.static',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'ecommerce.context_processors.cart',\n 'shopelectro.context_processors.shop',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'shopelectro.wsgi.application'\n\n# Password validation\n# 
https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.9/topics/i18n/\n\nLOCALE_NAME = 'en_US'\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, 'shopelectro/locale')]\nFORMAT_MODULE_PATH = [\n 'shopelectro.formats',\n]\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.9/howto/static-files/\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nASSETS_DIR = os.path.join(BASE_DIR, 'assets')\n\nSTATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'\n\nSTATICFILES_DIRS = [\n os.path.join(BASE_DIR, 'front/build'),\n ASSETS_DIR,\n]\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n\n# @todo #142 Drop `dj_database_url` dependency.\n# This package helps to take postgres credentials from URI.\n# Now we assemble this creds to URI, then parse them with dj_database_url.\nDATABASE_URL = (\n f'postgres://{os.environ[\"POSTGRES_USER\"]}:{os.environ[\"POSTGRES_PASSWORD\"]}'\n f'@{os.environ[\"POSTGRES_URL\"]}/{os.environ[\"POSTGRES_DB\"]}'\n)\nDATABASES = {\n 'default': dj_database_url.parse(DATABASE_URL),\n}\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'pages': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'catalog': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'search': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'ecommerce': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'images': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'shopelectro': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n },\n}\n\nSELENIUM_URL = os.environ.get('SELENIUM_URL', 'http://selenium:4444/wd/hub')\n\nSITE_CREATED = datetime(2013, 1, 1)\n\nLOCALHOST = 'http://127.0.0.1:8000/'\nBASE_URL = 'https://www.shopelectro.ru'\n\nPLACEHOLDER_IMAGE = 'images/logo.png'\nPLACEHOLDER_ALT = 'Логотип компании Shopelectro'\n\n# Autocomplete and search settings\nSEARCH_SEE_ALL_LABEL = 'Смотреть все результаты'\n\n# For sitemaps and sites framework\nSITE_ID = 1\nSITE_DOMAIN_NAME = 'www.shopelectro.ru'\n\n# Used to retrieve instances in ecommerce.Cart\nCART_ID = 'cart'\n\n# Used to define choices attr in definition of Order.payment_type field\nPAYMENT_OPTIONS = (\n ('cash', 'Наличные'),\n ('cashless', 'Безналичные и денежные переводы'),\n ('AC', 'Банковская карта'),\n ('PC', 'Яндекс.Деньги'),\n ('GP', 'Связной (терминал)'),\n ('AB', 'Альфа-Клик'),\n)\n\n# It is fake-pass. 
Correct pass will be created on `docker-compose up` stage from `docker/.env`\nYANDEX_SHOP_PASS = os.environ.get('YANDEX_SHOP_PASS', 'so_secret_pass')\n\n# Used for order's email in ecommerce app\nFAKE_ORDER_NUMBER = 6000\n\n# Subjects for different types of emails sent from SE.\nEMAIL_SUBJECTS = {\n 'call': 'Обратный звонок',\n 'order': 'Заказ №{0.fake_order_number}',\n 'yandex_order': 'Заказ №{0.fake_order_number} | Яндекс.Касса',\n 'one_click': 'Заказ в один клик №{0.fake_order_number}',\n 'ya_feedback_request': 'Оцените нас на Яндекс.Маркете',\n}\n\n# Email configs\n# It is fake-pass. Correct pass will be created on `docker-compose up` stage from `docker/.env`\nEMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD', 'so_secret_pass')\nEMAIL_HOST_USER = '[email protected]'\nEMAIL_USE_TLS = True\nEMAIL_HOST = 'smtp.yandex.ru'\nEMAIL_PORT = 587\nEMAIL_SENDER = '[email protected]'\nEMAIL_RECIPIENT = '[email protected]'\nSHOP_EMAIL = '[email protected]'\n\n# FTP configs\nFTP_USER = os.environ.get('FTP_USER', 'user')\nFTP_PASS = os.environ.get('FTP_PASS', 'pass')\nFTP_IP = os.environ.get('FTP_IP', '0.0.0.0')\n\nENV_TYPE = os.environ.get('ENV_TYPE', 'PROD') # LOCAL | CI | PROD\n\n# 'Prod' <-> 'Product #1 of Category #0 of Category #1' = 0.17\n# About trigram similarity: https://goo.gl/uYFcxN\nTRIGRAM_MIN_SIMILARITY = 0.15\n\n# Used in admin image uploads\nMODEL_TYPES = {\n 'Product': {\n 'app_name': 'shopelectro',\n 'dir_name': 'products',\n },\n 'Category': {\n 'app_name': 'shopelectro',\n 'dir_name': 'categories',\n }\n}\n\n# This need for using {% debug %} variable in templates.\nINTERNAL_IPS = (\n '127.0.0.1',\n)\n\nTOP_PRODUCTS = [291, 438, 1137, 2166, 2725, 2838, 3288, 3884, 3959, 2764]\nCATEGORY_STEP_MULTIPLIERS = [12, 15, 24, 25, 48, 50, 60, 100]\n\n# Reduce retail product prices by PRICE_REDUCER.\n# It is required to make prices on shopelectro.ru and se78.ru unique.\nPRICE_REDUCER = 1\n\nSHOP = {\n 'id': '69886',\n 'scid': '64788',\n 'success_url': BASE_URL + '/shop/order-success/',\n 'fail_url': BASE_URL + '/',\n 'cps_phone': '+78124163200',\n 'cps_email': '[email protected]',\n 'local_delivery_cost': 300,\n 'local_delivery_cost_threshold': 5000,\n}\n\n# used in data-migrations and tests\nCUSTOM_PAGES = {\n 'index': {\n 'slug': '',\n 'name': 'Интернет-магазин элементов питания \"ShopElectro\"',\n 'menu_title': 'Главная',\n 'title': 'Интернет-магазин Элементов питания с доставкой по России',\n },\n 'sitemap': {\n 'slug': 'sitemap',\n 'h1': 'Карта сайта',\n 'name': 'Карта сайта',\n },\n 'order': {\n 'slug': 'order',\n 'name': 'Оформление заказа',\n 'title': 'Корзина Интернет-магазин shopelectro.ru Санкт-Петербург',\n },\n 'search': {\n 'slug': 'search',\n 'name': 'Результаты поиска',\n },\n 'catalog': {\n 'slug': 'catalog',\n 'name': 'Каталог товаров',\n 'menu_title': 'Каталог',\n },\n 'order_success': {\n 'slug': 'order-success',\n 'name': 'Заказ принят',\n }\n}\n\nTAGS_URL_DELIMITER = '-or-'\nTAG_GROUPS_URL_DELIMITER = '-and-'\n\nTAGS_TITLE_DELIMITER = ' или '\nTAG_GROUPS_TITLE_DELIMITER = ' и '\n\nTAGS_ORDER = ['group__position', 'group__name', 'position', 'name']\n\n# -- App business logic --\n# every product price will be multiplied on this value\n# during import from 1C.\n# Multipliers are related to prices in this order:\n# big/medium/small/retail. First three are wholesale prices.\nPRICE_MULTIPLIERS = 1.0, 1.0, 1.0, 1.0\n",
"path": "shopelectro/settings/base.py"
}
] | [
{
"content": "\"\"\"\nDjango settings for shopelectro project.\n\nGenerated by 'django-admin startproject' using Django 1.9.5.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.9/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.9/ref/settings/\n\"\"\"\n\nimport os\nfrom datetime import datetime\n\nimport dj_database_url\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(\n os.path.dirname(os.path.abspath(__file__))))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get('SECRET_KEY', 'so_secret_key')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\n# http://bit.ly/sorl-thumbnail-docs\nTHUMBNAIL_DEBUG = False\n\nALLOWED_HOSTS = ['*']\n\nif os.environ.get('TEST_ENV', False):\n # disable https in CI\n # https://docs.djangoproject.com/en/1.9/ref/settings/#secure-proxy-ssl-header\n SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'http')\n\n# Enable in frame loading for Ya.Metric\n# https://docs.djangoproject.com/es/1.10/ref/clickjacking/\n# https://yandex.ru/support/metrika/general/counter-webvisor.xml#download-page\nX_FRAME_OPTIONS = 'ALLOW-FROM http://webvisor.com'\n\n# Application definition\nINSTALLED_APPS = [\n # https://docs.djangoproject.com/en/1.9/ref/contrib/admin/#django.contrib.admin.autodiscover\n 'django.contrib.contenttypes',\n 'django.contrib.auth',\n 'django.contrib.messages',\n 'django.contrib.redirects',\n 'django.contrib.sessions',\n 'django.contrib.sitemaps',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n 'django_user_agents',\n 'generic_admin',\n 'django.contrib.admin.apps.SimpleAdminConfig',\n 'debug_toolbar',\n 'mptt',\n 'widget_tweaks',\n 'sorl.thumbnail',\n 'django_select2',\n 'images',\n 'pages',\n 'catalog',\n 'search',\n 'ecommerce',\n 'shopelectro',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.contrib.redirects.middleware.RedirectFallbackMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django_user_agents.middleware.UserAgentMiddleware',\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n]\n\nROOT_URLCONF = 'shopelectro.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 'templates')],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.media',\n 'django.template.context_processors.request',\n 'django.template.context_processors.static',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'ecommerce.context_processors.cart',\n 'shopelectro.context_processors.shop',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'shopelectro.wsgi.application'\n\n# Password validation\n# 
https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.9/topics/i18n/\n\nLOCALE_NAME = 'en_US'\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, 'shopelectro/locale')]\nFORMAT_MODULE_PATH = [\n 'shopelectro.formats',\n]\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.9/howto/static-files/\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nASSETS_DIR = os.path.join(BASE_DIR, 'assets')\n\nSTATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'\n\nSTATICFILES_DIRS = [\n os.environ['FRONT_BUILD_DIR'],\n ASSETS_DIR,\n]\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n\n# @todo #142 Drop `dj_database_url` dependency.\n# This package helps to take postgres credentials from URI.\n# Now we assemble this creds to URI, then parse them with dj_database_url.\nDATABASE_URL = (\n f'postgres://{os.environ[\"POSTGRES_USER\"]}:{os.environ[\"POSTGRES_PASSWORD\"]}'\n f'@{os.environ[\"POSTGRES_URL\"]}/{os.environ[\"POSTGRES_DB\"]}'\n)\nDATABASES = {\n 'default': dj_database_url.parse(DATABASE_URL),\n}\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'pages': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'catalog': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'search': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'ecommerce': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'images': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'shopelectro': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n },\n}\n\nSELENIUM_URL = os.environ.get('SELENIUM_URL', 'http://selenium:4444/wd/hub')\n\nSITE_CREATED = datetime(2013, 1, 1)\n\nLOCALHOST = 'http://127.0.0.1:8000/'\nBASE_URL = 'https://www.shopelectro.ru'\n\nPLACEHOLDER_IMAGE = 'images/logo.png'\nPLACEHOLDER_ALT = 'Логотип компании Shopelectro'\n\n# Autocomplete and search settings\nSEARCH_SEE_ALL_LABEL = 'Смотреть все результаты'\n\n# For sitemaps and sites framework\nSITE_ID = 1\nSITE_DOMAIN_NAME = 'www.shopelectro.ru'\n\n# Used to retrieve instances in ecommerce.Cart\nCART_ID = 'cart'\n\n# Used to define choices attr in definition of Order.payment_type field\nPAYMENT_OPTIONS = (\n ('cash', 'Наличные'),\n ('cashless', 'Безналичные и денежные переводы'),\n ('AC', 'Банковская карта'),\n ('PC', 'Яндекс.Деньги'),\n ('GP', 'Связной (терминал)'),\n ('AB', 'Альфа-Клик'),\n)\n\n# It is fake-pass. 
Correct pass will be created on `docker-compose up` stage from `docker/.env`\nYANDEX_SHOP_PASS = os.environ.get('YANDEX_SHOP_PASS', 'so_secret_pass')\n\n# Used for order's email in ecommerce app\nFAKE_ORDER_NUMBER = 6000\n\n# Subjects for different types of emails sent from SE.\nEMAIL_SUBJECTS = {\n 'call': 'Обратный звонок',\n 'order': 'Заказ №{0.fake_order_number}',\n 'yandex_order': 'Заказ №{0.fake_order_number} | Яндекс.Касса',\n 'one_click': 'Заказ в один клик №{0.fake_order_number}',\n 'ya_feedback_request': 'Оцените нас на Яндекс.Маркете',\n}\n\n# Email configs\n# It is fake-pass. Correct pass will be created on `docker-compose up` stage from `docker/.env`\nEMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD', 'so_secret_pass')\nEMAIL_HOST_USER = '[email protected]'\nEMAIL_USE_TLS = True\nEMAIL_HOST = 'smtp.yandex.ru'\nEMAIL_PORT = 587\nEMAIL_SENDER = '[email protected]'\nEMAIL_RECIPIENT = '[email protected]'\nSHOP_EMAIL = '[email protected]'\n\n# FTP configs\nFTP_USER = os.environ.get('FTP_USER', 'user')\nFTP_PASS = os.environ.get('FTP_PASS', 'pass')\nFTP_IP = os.environ.get('FTP_IP', '0.0.0.0')\n\nENV_TYPE = os.environ.get('ENV_TYPE', 'PROD') # LOCAL | CI | PROD\n\n# 'Prod' <-> 'Product #1 of Category #0 of Category #1' = 0.17\n# About trigram similarity: https://goo.gl/uYFcxN\nTRIGRAM_MIN_SIMILARITY = 0.15\n\n# Used in admin image uploads\nMODEL_TYPES = {\n 'Product': {\n 'app_name': 'shopelectro',\n 'dir_name': 'products',\n },\n 'Category': {\n 'app_name': 'shopelectro',\n 'dir_name': 'categories',\n }\n}\n\n# This need for using {% debug %} variable in templates.\nINTERNAL_IPS = (\n '127.0.0.1',\n)\n\nTOP_PRODUCTS = [291, 438, 1137, 2166, 2725, 2838, 3288, 3884, 3959, 2764]\nCATEGORY_STEP_MULTIPLIERS = [12, 15, 24, 25, 48, 50, 60, 100]\n\n# Reduce retail product prices by PRICE_REDUCER.\n# It is required to make prices on shopelectro.ru and se78.ru unique.\nPRICE_REDUCER = 1\n\nSHOP = {\n 'id': '69886',\n 'scid': '64788',\n 'success_url': BASE_URL + '/shop/order-success/',\n 'fail_url': BASE_URL + '/',\n 'cps_phone': '+78124163200',\n 'cps_email': '[email protected]',\n 'local_delivery_cost': 300,\n 'local_delivery_cost_threshold': 5000,\n}\n\n# used in data-migrations and tests\nCUSTOM_PAGES = {\n 'index': {\n 'slug': '',\n 'name': 'Интернет-магазин элементов питания \"ShopElectro\"',\n 'menu_title': 'Главная',\n 'title': 'Интернет-магазин Элементов питания с доставкой по России',\n },\n 'sitemap': {\n 'slug': 'sitemap',\n 'h1': 'Карта сайта',\n 'name': 'Карта сайта',\n },\n 'order': {\n 'slug': 'order',\n 'name': 'Оформление заказа',\n 'title': 'Корзина Интернет-магазин shopelectro.ru Санкт-Петербург',\n },\n 'search': {\n 'slug': 'search',\n 'name': 'Результаты поиска',\n },\n 'catalog': {\n 'slug': 'catalog',\n 'name': 'Каталог товаров',\n 'menu_title': 'Каталог',\n },\n 'order_success': {\n 'slug': 'order-success',\n 'name': 'Заказ принят',\n }\n}\n\nTAGS_URL_DELIMITER = '-or-'\nTAG_GROUPS_URL_DELIMITER = '-and-'\n\nTAGS_TITLE_DELIMITER = ' или '\nTAG_GROUPS_TITLE_DELIMITER = ' и '\n\nTAGS_ORDER = ['group__position', 'group__name', 'position', 'name']\n\n# -- App business logic --\n# every product price will be multiplied on this value\n# during import from 1C.\n# Multipliers are related to prices in this order:\n# big/medium/small/retail. First three are wholesale prices.\nPRICE_MULTIPLIERS = 1.0, 1.0, 1.0, 1.0\n",
"path": "shopelectro/settings/base.py"
}
] | diff --git a/.dockerignore b/.dockerignore
index cf35f273..ed67fd93 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -1,3 +1,4 @@
+node_modules/
media/
*.sqlite*
*.log
diff --git a/.drone.yml b/.drone.yml
index 1a762090..39dfb09d 100644
--- a/.drone.yml
+++ b/.drone.yml
@@ -13,24 +13,29 @@ pipeline:
event: [push, pull_request]
branch: master
+ # @todo #345:60m Use ready image for drone npm step. stb2
+ # Image `fidals/se-nodejs:dev` already contains built static and node_modules.
npm:
image: node:slim
environment:
- DEPS_DIR=/usr/app/deps
+ - FRONT_BUILD_DIR=/usr/app/front/build
commands:
- npm install
- npm install -g gulp-cli
- gulp build
volumes:
- - /tmp/cache/drone/shopelectro/node_modules:/drone/src/github.com/fidals/shopelectro/commit/${DRONE_COMMIT_SHA}/node_modules
- /tmp/cache/drone/shopelectro/site-packages/${DRONE_COMMIT_SHA}/site-packages:/usr/app/deps
+ - /tmp/cache/drone/shopelectro/front_build/${DRONE_COMMIT_SHA}:/usr/app/front/build
when:
event: [push, pull_request]
branch: master
+
test:
image: python
environment:
+ - FRONT_BUILD_DIR=/usr/app/front/build
- TEST_ENV=true
- DJANGO_SETTINGS_MODULE=shopelectro.settings.drone
- POSTGRES_USER=postgres
@@ -50,9 +55,10 @@ pipeline:
- python manage.py excel
- python manage.py price
- python manage.py collectstatic --noinput
- - python manage.py test --parallel --liveserver=test:8020-8030
+ - python manage.py test --parallel --liveserver=test:8021-8029
volumes:
- /tmp/cache/drone/shopelectro/site-packages/${DRONE_COMMIT_SHA}/site-packages:/usr/local/lib/python3.6/site-packages
+ - /tmp/cache/drone/shopelectro/front_build/${DRONE_COMMIT_SHA}:/usr/app/front/build
secrets: [ FTP_IP, FTP_USER, FTP_PASS ]
when:
event: [push, pull_request]
@@ -78,6 +84,9 @@ pipeline:
docker-build:
image: docker/compose:1.17.1
+ environment:
+ - DEPS_DIR=/usr/app/deps
+ - FRONT_BUILD_DIR=/usr/app/front/build
commands:
- cd docker
# Build python images with sources and static files
@@ -88,6 +97,9 @@ pipeline:
- /root/prog/shopelectro/docker/.env:/drone/src/github.com/fidals/shopelectro/commit/${DRONE_COMMIT_SHA}/docker/.env
# in case if "Pull Request Hooks" is enabled in Drone settings GUI
- /root/prog/shopelectro/docker/.env:/drone/src/github.com/fidals/shopelectro/pull/${DRONE_PULL_REQUEST}/docker/.env
+ # for nodejs build
+ - /tmp/cache/drone/shopelectro/site-packages/${DRONE_COMMIT_SHA}/site-packages:/usr/app/deps
+ - /tmp/cache/drone/shopelectro/front_build/${DRONE_COMMIT_SHA}:/usr/app/front/build
when:
event: push
branch: master
diff --git a/.env.prod b/.env.prod
deleted file mode 100644
index 7c47b15c..00000000
--- a/.env.prod
+++ /dev/null
@@ -1,48 +0,0 @@
-VIRTUAL_HOST_PORT=8000
-VIRTUAL_HOST_STAGE_PORT=8001
-VIRTUAL_HOST_LIVESERVER_PORT=8020-8030
-VIRTUAL_HOST_EXPOSE_PORT=8010
-VIRTUAL_HOST_STAGE_EXPOSE_PORT=8011
-
-DB_USER=postgres
-DB_PASS=oC7hY6qNiqeG4FnWS0dD
-DB_NAME=se_prod
-DB_DEV_NAME=se_dev
-
-DEPS_DIR=/usr/local/lib/python3.6/site-packages
-SRC_DIR=/usr/app/src
-
-RABBITMQ_DEFAULT_USER=se_rabbitmq_user
-RABBITMQ_DEFAULT_PASS=321CwIGuKKQJCcQOTk41Gw==
-
-DJANGO_SETTINGS_MODULE=shopelectro.settings.local
-SECRET_KEY=SE5&3h672i-gijy2ixibfjzp34pqpo7(iu6fv(wqu@=l&f+lqd0x
-
-EMAIL_HOST_PASSWORD=21b34b446a
-YANDEX_SHOP_PASS=0b782c87d61c9a9fa9dc
-
-FTP_PASS=e01ebbd06176f46b1f732bd0868ad6be
-FTP_USER=1c_exc
-FTP_IP=37.18.77.165
-
-REDIS_PASSWORD=ZGRiODViMDQ3ZmRmNzc5Y2U4ZGU2Njhm
-
-SLACK_REPORT_URL=https://hooks.slack.com/services/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
-
-DJANGO_LOG_LEVEL=INFO
-ENV_TYPE=PROD
-
-# TEST_ENV=false
-
-# SECRET_KEY=SE5&3h672i-gijy2ixibfjzp34pqpo7(iu6fv(wqu@=l&f+lqd0x
-# YANDEX_SHOP_PASS=0b782c87d61c9a9fa9dc
-# EMAIL_HOST_PASSWORD=21b34b446a
-# DB_PASS=oC7hY6qNiqeG4FnWS0dD
-# FTP_PASS=e01ebbd06176f46b1f732bd0868ad6be
-# FTP_USER=1c_exc
-# FTP_IP=37.18.77.165
-# REDIS_PASSWORD=ZGRiODViMDQ3ZmRmNzc5Y2U4ZGU2Njhm
-# RABBITMQ_DEFAULT_USER=se_rabbitmq_user
-# RABBITMQ_DEFAULT_PASS=321CwIGuKKQJCcQOTk41Gw==
-# SLACK_REPORT_URL=https://hooks.slack.com/services/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
-
diff --git a/docker/Makefile b/docker/Makefile
index 65482ac9..a0892ce7 100644
--- a/docker/Makefile
+++ b/docker/Makefile
@@ -29,7 +29,7 @@ excel:
create-env:
@bash ./create-env.sh
-create-env:
+create-config:
@bash ./create-config.sh
build-static:
@@ -54,25 +54,22 @@ lint:
restore:
@bash ../etc/stb-backup-restore.sh
-
-# ---------------------- Deploy section ----------------------
deploy-dev:
$(MAKE) create-env
$(MAKE) create-config
$(dc) pull
$(dc) up -d app
$(MAKE) build-static
+ $(MAKE) migrate
# Create admin user with login/pass: admin/asdfjkl;
+ $(MAKE) fixtures
# Launch "collectstatic" not in static recipe because ManifestStaticStorage writes to db
- $(dc) exec app bash -c "\
- python manage.py migrate \
- && python manage.py loaddata shopelectro/fixtures/admin.json \
- && python manage.py loaddata shopelectro/fixtures/dump.json \
- && python manage.py collectstatic --noinput \
- "
+ $(MAKE) collectstatic
# to make fresh collected static visible immediately
$(dc) stop app && $(dc) up -d app
+
+# ---------------------- Production deploy section ----------------------
backup:
$(dcp) run --rm backup-data sh /usr/bin/entrypoint.sh
diff --git a/docker/docker-compose-build.yml b/docker/docker-compose-build.yml
index 704b2a99..bdd5773b 100644
--- a/docker/docker-compose-build.yml
+++ b/docker/docker-compose-build.yml
@@ -18,6 +18,9 @@ services:
image: fidals/se-nodejs:dev
build:
context: ../
+ args:
+ - deps_dir=$DEPS_DIR
+ - front_build_dir=$FRONT_BUILD_DIR
dockerfile: docker/images/node/Dockerfile
nginx:
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
index d18a6740..c302812f 100644
--- a/docker/docker-compose.yml
+++ b/docker/docker-compose.yml
@@ -23,6 +23,8 @@ services:
networks:
- se-backend
- se-frontend
+ volumes_from:
+ - nodejs
volumes:
- ./../:$SRC_DIR
# contains refarm-site modules
@@ -33,8 +35,12 @@ services:
nodejs:
image: fidals/se-nodejs:dev
- volumes_from:
- - app
+ volumes:
+ - $FRONT_BUILD_DIR
+ # Volumes for refarm-site's front development
+ #- ../gulpfile.babel.js:/usr/app/src_front/gulpfile.babel.js
+ #- ../package.json:/usr/app/src_front/package.json
+ #- ../front:/usr/app/src_front/front
env_file:
- env_files/paths
@@ -76,7 +82,7 @@ services:
image: selenium/standalone-chrome-debug:3.10.0
restart: always
ports:
- - 4444:4444
+ - 4444
# VNC port. Password: secret
- 5900:5900
environment:
diff --git a/docker/env_files/paths.dist b/docker/env_files/paths.dist
index e772ea4f..06f4e194 100644
--- a/docker/env_files/paths.dist
+++ b/docker/env_files/paths.dist
@@ -1,5 +1,9 @@
# Identify the dependencies folder
DEPS_DIR=/usr/local/lib/python3.6/site-packages
+# Directory, where you cloned `refarm-site` repository
+REFARM_DIR=/path/to_my/refarm_site
# Identify the source folder
SRC_DIR=/usr/app/src
+# Set smth like `/var/fidals/se_db`, if you use VirtualBox
POSTGRES_DATA_DIR=./../database
+FRONT_BUILD_DIR=/usr/app/front/build
diff --git a/docker/images/node/Dockerfile b/docker/images/node/Dockerfile
index 7a9530f2..9621a27c 100644
--- a/docker/images/node/Dockerfile
+++ b/docker/images/node/Dockerfile
@@ -1,8 +1,24 @@
FROM node:slim
-WORKDIR /usr/app/src/
+ARG front_build_dir
+ARG deps_dir
+ENV DEPS_DIR=$deps_dir
+ENV FRONT_BUILD_DIR=$front_build_dir
+
+# Also this directory differs from $SRC_DIR to avoid `node_modules/` volumes conflicts.
+WORKDIR /usr/app/src_front/
+
+RUN apt-get update \
+ && apt-get install --no-install-recommends --no-install-suggests -y ssh git \
+ && git clone https://github.com/fidals/refarm-site.git $DEPS_DIR \
+ && apt-get remove --purge -y git \
+ && apt-get -y --purge autoremove \
+ && rm -rf /var/lib/apt/lists/*
COPY package.json package.json
+COPY gulpfile.babel.js gulpfile.babel.js
# we use `--no-optional` because some optional npm dependencies fail on install
-RUN npm install -g gulp-cli && npm install --no-optional
+RUN npm install -g gulp-cli
+RUN npm install --no-optional
+RUN gulp build
diff --git a/gulpfile.babel.js b/gulpfile.babel.js
index 424bac89..f13b34c4 100755
--- a/gulpfile.babel.js
+++ b/gulpfile.babel.js
@@ -50,7 +50,7 @@ const plugins = [
}),
];
-const buildDir = 'front/build';
+const buildDir = process.env.FRONT_BUILD_DIR;
const ecommercePaths = getAppSrcPaths('ecommerce');
const genericAdminPaths = getAppSrcPaths('generic_admin');
@@ -188,7 +188,7 @@ gulp.task('build', () => {
// ================================================================
// Clear : Clear destination directory.
// ================================================================
-gulp.task('clear', () => del(`${buildDir}/**/*`));
+gulp.task('clear', () => del(`${buildDir}/**/*`, { force: true }));
// ================================================================
// STYLES
diff --git a/shopelectro/settings/base.py b/shopelectro/settings/base.py
index 02a9b34b..2be7c4cc 100644
--- a/shopelectro/settings/base.py
+++ b/shopelectro/settings/base.py
@@ -152,7 +152,7 @@
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'
STATICFILES_DIRS = [
- os.path.join(BASE_DIR, 'front/build'),
+ os.environ['FRONT_BUILD_DIR'],
ASSETS_DIR,
]
|
web2py__web2py-2144 | gluon.utils.unlocalised_http_header_date returns wrong time
**Describe the bug**
In the function `unlocalised_http_header_date`, line 481:
`year_and_time = time.strftime("%Y %H:%M:%S GMT")`
should be:
`year_and_time = time.strftime("%Y %H:%M:%S GMT", data)`
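A quick illustration of why the second argument matters — per the standard `time` module, `strftime` formats the current local time when no struct_time is passed — using the RFC 7231 example timestamp:

```python
import time

# struct_time for the RFC 7231 example date: Sun, 06 Nov 1994 08:49:37 GMT.
data = time.gmtime(784111777)

# Bug: with no second argument, strftime silently formats time.localtime(),
# so the header is stamped with "now" instead of the datetime passed in.
wrong = time.strftime("%Y %H:%M:%S GMT")
# Fix: pass the struct_time explicitly -> "1994 08:49:37 GMT".
right = time.strftime("%Y %H:%M:%S GMT", data)
print(wrong, right)
```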
| [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#pylint: disable=invalid-name,redefined-builtin\n\n\"\"\"\n| This file is part of the web2py Web Framework\n| Copyrighted by Massimo Di Pierro <[email protected]>\n| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)\n\nThis file specifically includes utilities for security.\n--------------------------------------------------------\n\"\"\"\n\nimport threading\nimport struct\nimport uuid\nimport random\nimport inspect\nimport time\nimport os\nimport sys\nimport re\nimport logging\nimport socket\nimport base64\nimport zlib\nimport hashlib\nimport hmac\nfrom gluon._compat import basestring, pickle, PY2, xrange, to_bytes, to_native\n\n_struct_2_long_long = struct.Struct('=QQ')\n\ntry:\n from Crypto.Cipher import AES\n HAVE_AES = True\nexcept ImportError:\n import gluon.contrib.pyaes as PYAES\n HAVE_AES = False\n\n\nHAVE_COMPARE_DIGEST = False\nif hasattr(hmac, 'compare_digest'):\n HAVE_COMPARE_DIGEST = True\n\nlogger = logging.getLogger(\"web2py\")\n\n\ndef AES_new(key, IV=None):\n \"\"\"Return an AES cipher object and random IV if None specified.\"\"\"\n if IV is None:\n IV = fast_urandom16()\n if HAVE_AES:\n return AES.new(key, AES.MODE_CBC, IV), IV\n else:\n return PYAES.AESModeOfOperationCBC(key, iv=IV), IV\n\n\ndef AES_enc(cipher, data):\n \"\"\"Encrypt data with the cipher.\"\"\"\n if HAVE_AES:\n return cipher.encrypt(data)\n else:\n encrypter = PYAES.Encrypter(cipher)\n enc = encrypter.feed(data)\n enc += encrypter.feed()\n return enc\n\n\ndef AES_dec(cipher, data):\n \"\"\"Decrypt data with the cipher.\"\"\"\n if HAVE_AES:\n return cipher.decrypt(data)\n else:\n decrypter = PYAES.Decrypter(cipher)\n dec = decrypter.feed(data)\n dec += decrypter.feed()\n return dec\n\n\ndef compare(a, b):\n \"\"\" Compares two strings and not vulnerable to timing attacks \"\"\"\n if HAVE_COMPARE_DIGEST:\n return hmac.compare_digest(a, b)\n result = len(a) ^ len(b)\n for i in xrange(len(b)):\n result |= ord(a[i % len(a)]) ^ ord(b[i])\n return result == 0\n\n\ndef md5_hash(text):\n \"\"\"Generate an md5 hash with the given text.\"\"\"\n return hashlib.md5(to_bytes(text)).hexdigest()\n\n\ndef get_callable_argspec(fn):\n if inspect.isfunction(fn) or inspect.ismethod(fn):\n inspectable = fn\n elif inspect.isclass(fn):\n inspectable = fn.__init__\n elif hasattr(fn, '__call__'):\n inspectable = fn.__call__\n else:\n inspectable = fn\n return inspect.getargspec(inspectable)\n\n\ndef pad(s, n=32):\n \"\"\"does padding according to PKCS7v1.5 https://www.ietf.org/rfc/rfc2315.txt\"\"\"\n padlen = n - len(s) % n\n return s + bytes(bytearray(padlen * [padlen]))\n\n\ndef unpad(s, n=32):\n \"\"\"removed padding\"\"\"\n padlen = s[-1]\n if isinstance(padlen, str):\n padlen = ord(padlen) # python2\n if (padlen < 1) | (padlen > n): # avoid short-circuit\n # return garbage to minimize side channels\n return bytes(bytearray(len(s) * [0]))\n return s[:-padlen]\n\n\ndef secure_dumps(data, encryption_key, hash_key=None, compression_level=None):\n \"\"\"dumps data, followed by a signature\"\"\"\n dump = pickle.dumps(data, pickle.HIGHEST_PROTOCOL)\n if compression_level:\n dump = zlib.compress(dump, compression_level)\n encryption_key = to_bytes(encryption_key)\n if not hash_key:\n hash_key = hashlib.sha256(encryption_key).digest()\n cipher, IV = AES_new(pad(encryption_key)[:32])\n encrypted_data = base64.urlsafe_b64encode(IV + AES_enc(cipher, pad(dump)))\n signature = to_bytes(hmac.new(to_bytes(hash_key), encrypted_data, hashlib.sha256).hexdigest())\n return 
b'hmac256:' + signature + b':' + encrypted_data\n\n\ndef secure_loads(data, encryption_key, hash_key=None, compression_level=None):\n \"\"\"loads a signed data dump\"\"\"\n data = to_bytes(data)\n components = data.count(b':')\n if components == 1:\n return secure_loads_deprecated(data, encryption_key, hash_key, compression_level)\n if components != 2:\n return None\n version, signature, encrypted_data = data.split(b':', 2)\n if version != b'hmac256':\n return None\n encryption_key = to_bytes(encryption_key)\n if not hash_key:\n hash_key = hashlib.sha256(encryption_key).digest()\n actual_signature = hmac.new(to_bytes(hash_key), encrypted_data, hashlib.sha256).hexdigest()\n if not compare(to_native(signature), actual_signature):\n return None\n encrypted_data = base64.urlsafe_b64decode(encrypted_data)\n IV, encrypted_data = encrypted_data[:16], encrypted_data[16:]\n cipher, _ = AES_new(pad(encryption_key)[:32], IV=IV)\n try:\n data = unpad(AES_dec(cipher, encrypted_data))\n if compression_level:\n data = zlib.decompress(data)\n return pickle.loads(data)\n except Exception:\n return None\n\n\ndef __pad_deprecated(s, n=32, padchar=b' '):\n \"\"\"reprecated data, here for backward compatibility\"\"\"\n return s + (n - len(s) % n) * padchar\n\n\ndef secure_dumps_deprecated(data, encryption_key, hash_key=None, compression_level=None):\n \"\"\"dumps data with a signature (deprecated because of incorrect padding)\"\"\"\n encryption_key = to_bytes(encryption_key)\n if not hash_key:\n hash_key = hashlib.sha1(encryption_key).hexdigest()\n dump = pickle.dumps(data, pickle.HIGHEST_PROTOCOL)\n if compression_level:\n dump = zlib.compress(dump, compression_level)\n key = __pad_deprecated(encryption_key)[:32]\n cipher, IV = AES_new(key)\n encrypted_data = base64.urlsafe_b64encode(IV + AES_enc(cipher, pad(dump)))\n signature = to_bytes(hmac.new(to_bytes(hash_key), encrypted_data, hashlib.md5).hexdigest())\n return signature + b':' + encrypted_data\n\n\ndef secure_loads_deprecated(data, encryption_key, hash_key=None, compression_level=None):\n \"\"\"loads signed data (deprecated because of incorrect padding)\"\"\"\n encryption_key = to_bytes(encryption_key)\n data = to_native(data)\n if ':' not in data:\n return None\n if not hash_key:\n hash_key = hashlib.sha1(encryption_key).hexdigest()\n signature, encrypted_data = data.split(':', 1)\n encrypted_data = to_bytes(encrypted_data)\n actual_signature = hmac.new(to_bytes(hash_key), encrypted_data, hashlib.md5).hexdigest()\n if not compare(signature, actual_signature):\n return None\n key = __pad_deprecated(encryption_key)[:32]\n encrypted_data = base64.urlsafe_b64decode(encrypted_data)\n IV, encrypted_data = encrypted_data[:16], encrypted_data[16:]\n cipher, _ = AES_new(key, IV=IV)\n try:\n data = AES_dec(cipher, encrypted_data)\n data = data.rstrip(b' ')\n if compression_level:\n data = zlib.decompress(data)\n return pickle.loads(data)\n except Exception:\n return None\n\n### compute constant CTOKENS\n\n\ndef initialize_urandom():\n \"\"\"\n This function and the web2py_uuid follow from the following discussion:\n `http://groups.google.com/group/web2py-developers/browse_thread/thread/7fd5789a7da3f09`\n\n At startup web2py compute a unique ID that identifies the machine by adding\n uuid.getnode() + int(time.time() * 1e3)\n\n This is a 48-bit number. 
It converts the number into 16 8-bit tokens.\n It uses this value to initialize the entropy source ('/dev/urandom') and to seed random.\n\n If os.random() is not supported, it falls back to using random and issues a warning.\n \"\"\"\n node_id = uuid.getnode()\n microseconds = int(time.time() * 1e6)\n ctokens = [((node_id + microseconds) >> ((i % 6) * 8)) %\n 256 for i in range(16)]\n random.seed(node_id + microseconds)\n try:\n os.urandom(1)\n have_urandom = True\n if sys.platform != 'win32':\n try:\n # try to add process-specific entropy\n frandom = open('/dev/urandom', 'wb')\n try:\n if PY2:\n frandom.write(''.join(chr(t) for t in ctokens))\n else:\n frandom.write(bytes([]).join(bytes([t]) for t in ctokens))\n finally:\n frandom.close()\n except IOError:\n # works anyway\n pass\n except NotImplementedError:\n have_urandom = False\n logger.warning(\n \"\"\"Cryptographically secure session management is not possible on your system because\nyour system does not provide a cryptographically secure entropy source.\nThis is not specific to web2py; consider deploying on a different operating system.\"\"\")\n if PY2:\n packed = ''.join(chr(x) for x in ctokens)\n else:\n packed = bytes([]).join(bytes([x]) for x in ctokens)\n unpacked_ctokens = _struct_2_long_long.unpack(packed)\n return unpacked_ctokens, have_urandom\nUNPACKED_CTOKENS, HAVE_URANDOM = initialize_urandom()\n\n\ndef fast_urandom16(urandom=[], locker=threading.RLock()):\n \"\"\"\n This is 4x faster than calling os.urandom(16) and prevents\n the \"too many files open\" issue with concurrent access to os.urandom()\n \"\"\"\n try:\n return urandom.pop()\n except IndexError:\n try:\n locker.acquire()\n ur = os.urandom(16 * 1024)\n urandom += [ur[i:i + 16] for i in xrange(16, 1024 * 16, 16)]\n return ur[0:16]\n finally:\n locker.release()\n\n\ndef web2py_uuid(ctokens=UNPACKED_CTOKENS):\n \"\"\"\n This function follows from the following discussion:\n `http://groups.google.com/group/web2py-developers/browse_thread/thread/7fd5789a7da3f09`\n\n It works like uuid.uuid4 except that tries to use os.urandom() if possible\n and it XORs the output with the tokens uniquely associated with this machine.\n \"\"\"\n rand_longs = (random.getrandbits(64), random.getrandbits(64))\n if HAVE_URANDOM:\n urand_longs = _struct_2_long_long.unpack(fast_urandom16())\n byte_s = _struct_2_long_long.pack(rand_longs[0] ^ urand_longs[0] ^ ctokens[0],\n rand_longs[1] ^ urand_longs[1] ^ ctokens[1])\n else:\n byte_s = _struct_2_long_long.pack(rand_longs[0] ^ ctokens[0],\n rand_longs[1] ^ ctokens[1])\n return str(uuid.UUID(bytes=byte_s, version=4))\n\nREGEX_IPv4 = re.compile(r'(\\d+)\\.(\\d+)\\.(\\d+)\\.(\\d+)')\n\n\ndef is_valid_ip_address(address):\n \"\"\"\n Examples:\n Better than a thousand words::\n\n >>> is_valid_ip_address('127.0')\n False\n >>> is_valid_ip_address('127.0.0.1')\n True\n >>> is_valid_ip_address('2001:660::1')\n True\n \"\"\"\n # deal with special cases\n if address.lower() in ('127.0.0.1', 'localhost', '::1', '::ffff:127.0.0.1'):\n return True\n elif address.lower() in ('unknown', ''):\n return False\n elif address.count('.') == 3: # assume IPv4\n if address.startswith('::ffff:'):\n address = address[7:]\n if hasattr(socket, 'inet_aton'): # try validate using the OS\n try:\n socket.inet_aton(address)\n return True\n except socket.error: # invalid address\n return False\n else: # try validate using Regex\n match = REGEX_IPv4.match(address)\n if match and all(0 <= int(match.group(i)) < 256 for i in (1, 2, 3, 4)):\n return True\n return False\n elif 
hasattr(socket, 'inet_pton'): # assume IPv6, try using the OS\n try:\n socket.inet_pton(socket.AF_INET6, address)\n return True\n except socket.error: # invalid address\n return False\n else: # do not know what to do? assume it is a valid address\n return True\n\n\ndef is_loopback_ip_address(ip=None, addrinfo=None):\n \"\"\"\n Determines whether the address appears to be a loopback address.\n This assumes that the IP is valid.\n \"\"\"\n if addrinfo: # see socket.getaddrinfo() for layout of addrinfo tuple\n if addrinfo[0] == socket.AF_INET or addrinfo[0] == socket.AF_INET6:\n ip = addrinfo[4]\n if not isinstance(ip, basestring):\n return False\n # IPv4 or IPv6-embedded IPv4 or IPv4-compatible IPv6\n if ip.count('.') == 3:\n return ip.lower().startswith(('127', '::127', '0:0:0:0:0:0:127',\n '::ffff:127', '0:0:0:0:0:ffff:127'))\n return ip == '::1' or ip == '0:0:0:0:0:0:0:1' # IPv6 loopback\n\n\ndef getipaddrinfo(host):\n \"\"\"\n Filter out non-IP and bad IP addresses from getaddrinfo\n \"\"\"\n try:\n return [addrinfo for addrinfo in socket.getaddrinfo(host, None)\n if (addrinfo[0] == socket.AF_INET or\n addrinfo[0] == socket.AF_INET6)\n and isinstance(addrinfo[4][0], basestring)]\n except socket.error:\n return []\n\n\ndef unlocalised_http_header_date(data):\n \"\"\"\n Converts input datetime to format defined by RFC 7231, section 7.1.1.1\n\n Previously, %a and %b formats were used for weekday and month names, but\n those are not locale-safe. uWSGI requires latin1-encodable headers and\n for example in cs_CS locale, fourth day in week is not encodable in latin1,\n as it's \"Čt\".\n\n Example output: Sun, 06 Nov 1994 08:49:37 GMT\n \"\"\"\n\n short_weekday = {\n \"0\": \"Sun\",\n \"1\": \"Mon\",\n \"2\": \"Tue\",\n \"3\": \"Wed\",\n \"4\": \"Thu\",\n \"5\": \"Fri\",\n \"6\": \"Sat\",\n }.get(time.strftime(\"%w\", data))\n\n day_of_month = time.strftime(\"%d\", data)\n\n short_month = {\n \"01\": \"Jan\",\n \"02\": \"Feb\",\n \"03\": \"Mar\",\n \"04\": \"Apr\",\n \"05\": \"May\",\n \"06\": \"Jun\",\n \"07\": \"Jul\",\n \"08\": \"Aug\",\n \"09\": \"Sep\",\n \"10\": \"Oct\",\n \"11\": \"Nov\",\n \"12\": \"Dec\",\n }.get(time.strftime(\"%m\", data))\n\n year_and_time = time.strftime(\"%Y %H:%M:%S GMT\")\n\n return \"{}, {} {} {}\".format(\n short_weekday,\n day_of_month,\n short_month,\n year_and_time)\n",
"path": "gluon/utils.py"
}
] | [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#pylint: disable=invalid-name,redefined-builtin\n\n\"\"\"\n| This file is part of the web2py Web Framework\n| Copyrighted by Massimo Di Pierro <[email protected]>\n| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)\n\nThis file specifically includes utilities for security.\n--------------------------------------------------------\n\"\"\"\n\nimport threading\nimport struct\nimport uuid\nimport random\nimport inspect\nimport time\nimport os\nimport sys\nimport re\nimport logging\nimport socket\nimport base64\nimport zlib\nimport hashlib\nimport hmac\nfrom gluon._compat import basestring, pickle, PY2, xrange, to_bytes, to_native\n\n_struct_2_long_long = struct.Struct('=QQ')\n\ntry:\n from Crypto.Cipher import AES\n HAVE_AES = True\nexcept ImportError:\n import gluon.contrib.pyaes as PYAES\n HAVE_AES = False\n\n\nHAVE_COMPARE_DIGEST = False\nif hasattr(hmac, 'compare_digest'):\n HAVE_COMPARE_DIGEST = True\n\nlogger = logging.getLogger(\"web2py\")\n\n\ndef AES_new(key, IV=None):\n \"\"\"Return an AES cipher object and random IV if None specified.\"\"\"\n if IV is None:\n IV = fast_urandom16()\n if HAVE_AES:\n return AES.new(key, AES.MODE_CBC, IV), IV\n else:\n return PYAES.AESModeOfOperationCBC(key, iv=IV), IV\n\n\ndef AES_enc(cipher, data):\n \"\"\"Encrypt data with the cipher.\"\"\"\n if HAVE_AES:\n return cipher.encrypt(data)\n else:\n encrypter = PYAES.Encrypter(cipher)\n enc = encrypter.feed(data)\n enc += encrypter.feed()\n return enc\n\n\ndef AES_dec(cipher, data):\n \"\"\"Decrypt data with the cipher.\"\"\"\n if HAVE_AES:\n return cipher.decrypt(data)\n else:\n decrypter = PYAES.Decrypter(cipher)\n dec = decrypter.feed(data)\n dec += decrypter.feed()\n return dec\n\n\ndef compare(a, b):\n \"\"\" Compares two strings and not vulnerable to timing attacks \"\"\"\n if HAVE_COMPARE_DIGEST:\n return hmac.compare_digest(a, b)\n result = len(a) ^ len(b)\n for i in xrange(len(b)):\n result |= ord(a[i % len(a)]) ^ ord(b[i])\n return result == 0\n\n\ndef md5_hash(text):\n \"\"\"Generate an md5 hash with the given text.\"\"\"\n return hashlib.md5(to_bytes(text)).hexdigest()\n\n\ndef get_callable_argspec(fn):\n if inspect.isfunction(fn) or inspect.ismethod(fn):\n inspectable = fn\n elif inspect.isclass(fn):\n inspectable = fn.__init__\n elif hasattr(fn, '__call__'):\n inspectable = fn.__call__\n else:\n inspectable = fn\n return inspect.getargspec(inspectable)\n\n\ndef pad(s, n=32):\n \"\"\"does padding according to PKCS7v1.5 https://www.ietf.org/rfc/rfc2315.txt\"\"\"\n padlen = n - len(s) % n\n return s + bytes(bytearray(padlen * [padlen]))\n\n\ndef unpad(s, n=32):\n \"\"\"removed padding\"\"\"\n padlen = s[-1]\n if isinstance(padlen, str):\n padlen = ord(padlen) # python2\n if (padlen < 1) | (padlen > n): # avoid short-circuit\n # return garbage to minimize side channels\n return bytes(bytearray(len(s) * [0]))\n return s[:-padlen]\n\n\ndef secure_dumps(data, encryption_key, hash_key=None, compression_level=None):\n \"\"\"dumps data, followed by a signature\"\"\"\n dump = pickle.dumps(data, pickle.HIGHEST_PROTOCOL)\n if compression_level:\n dump = zlib.compress(dump, compression_level)\n encryption_key = to_bytes(encryption_key)\n if not hash_key:\n hash_key = hashlib.sha256(encryption_key).digest()\n cipher, IV = AES_new(pad(encryption_key)[:32])\n encrypted_data = base64.urlsafe_b64encode(IV + AES_enc(cipher, pad(dump)))\n signature = to_bytes(hmac.new(to_bytes(hash_key), encrypted_data, hashlib.sha256).hexdigest())\n return 
b'hmac256:' + signature + b':' + encrypted_data\n\n\ndef secure_loads(data, encryption_key, hash_key=None, compression_level=None):\n \"\"\"loads a signed data dump\"\"\"\n data = to_bytes(data)\n components = data.count(b':')\n if components == 1:\n return secure_loads_deprecated(data, encryption_key, hash_key, compression_level)\n if components != 2:\n return None\n version, signature, encrypted_data = data.split(b':', 2)\n if version != b'hmac256':\n return None\n encryption_key = to_bytes(encryption_key)\n if not hash_key:\n hash_key = hashlib.sha256(encryption_key).digest()\n actual_signature = hmac.new(to_bytes(hash_key), encrypted_data, hashlib.sha256).hexdigest()\n if not compare(to_native(signature), actual_signature):\n return None\n encrypted_data = base64.urlsafe_b64decode(encrypted_data)\n IV, encrypted_data = encrypted_data[:16], encrypted_data[16:]\n cipher, _ = AES_new(pad(encryption_key)[:32], IV=IV)\n try:\n data = unpad(AES_dec(cipher, encrypted_data))\n if compression_level:\n data = zlib.decompress(data)\n return pickle.loads(data)\n except Exception:\n return None\n\n\ndef __pad_deprecated(s, n=32, padchar=b' '):\n \"\"\"reprecated data, here for backward compatibility\"\"\"\n return s + (n - len(s) % n) * padchar\n\n\ndef secure_dumps_deprecated(data, encryption_key, hash_key=None, compression_level=None):\n \"\"\"dumps data with a signature (deprecated because of incorrect padding)\"\"\"\n encryption_key = to_bytes(encryption_key)\n if not hash_key:\n hash_key = hashlib.sha1(encryption_key).hexdigest()\n dump = pickle.dumps(data, pickle.HIGHEST_PROTOCOL)\n if compression_level:\n dump = zlib.compress(dump, compression_level)\n key = __pad_deprecated(encryption_key)[:32]\n cipher, IV = AES_new(key)\n encrypted_data = base64.urlsafe_b64encode(IV + AES_enc(cipher, pad(dump)))\n signature = to_bytes(hmac.new(to_bytes(hash_key), encrypted_data, hashlib.md5).hexdigest())\n return signature + b':' + encrypted_data\n\n\ndef secure_loads_deprecated(data, encryption_key, hash_key=None, compression_level=None):\n \"\"\"loads signed data (deprecated because of incorrect padding)\"\"\"\n encryption_key = to_bytes(encryption_key)\n data = to_native(data)\n if ':' not in data:\n return None\n if not hash_key:\n hash_key = hashlib.sha1(encryption_key).hexdigest()\n signature, encrypted_data = data.split(':', 1)\n encrypted_data = to_bytes(encrypted_data)\n actual_signature = hmac.new(to_bytes(hash_key), encrypted_data, hashlib.md5).hexdigest()\n if not compare(signature, actual_signature):\n return None\n key = __pad_deprecated(encryption_key)[:32]\n encrypted_data = base64.urlsafe_b64decode(encrypted_data)\n IV, encrypted_data = encrypted_data[:16], encrypted_data[16:]\n cipher, _ = AES_new(key, IV=IV)\n try:\n data = AES_dec(cipher, encrypted_data)\n data = data.rstrip(b' ')\n if compression_level:\n data = zlib.decompress(data)\n return pickle.loads(data)\n except Exception:\n return None\n\n### compute constant CTOKENS\n\n\ndef initialize_urandom():\n \"\"\"\n This function and the web2py_uuid follow from the following discussion:\n `http://groups.google.com/group/web2py-developers/browse_thread/thread/7fd5789a7da3f09`\n\n At startup web2py compute a unique ID that identifies the machine by adding\n uuid.getnode() + int(time.time() * 1e3)\n\n This is a 48-bit number. 
It converts the number into 16 8-bit tokens.\n It uses this value to initialize the entropy source ('/dev/urandom') and to seed random.\n\n If os.random() is not supported, it falls back to using random and issues a warning.\n \"\"\"\n node_id = uuid.getnode()\n microseconds = int(time.time() * 1e6)\n ctokens = [((node_id + microseconds) >> ((i % 6) * 8)) %\n 256 for i in range(16)]\n random.seed(node_id + microseconds)\n try:\n os.urandom(1)\n have_urandom = True\n if sys.platform != 'win32':\n try:\n # try to add process-specific entropy\n frandom = open('/dev/urandom', 'wb')\n try:\n if PY2:\n frandom.write(''.join(chr(t) for t in ctokens))\n else:\n frandom.write(bytes([]).join(bytes([t]) for t in ctokens))\n finally:\n frandom.close()\n except IOError:\n # works anyway\n pass\n except NotImplementedError:\n have_urandom = False\n logger.warning(\n \"\"\"Cryptographically secure session management is not possible on your system because\nyour system does not provide a cryptographically secure entropy source.\nThis is not specific to web2py; consider deploying on a different operating system.\"\"\")\n if PY2:\n packed = ''.join(chr(x) for x in ctokens)\n else:\n packed = bytes([]).join(bytes([x]) for x in ctokens)\n unpacked_ctokens = _struct_2_long_long.unpack(packed)\n return unpacked_ctokens, have_urandom\nUNPACKED_CTOKENS, HAVE_URANDOM = initialize_urandom()\n\n\ndef fast_urandom16(urandom=[], locker=threading.RLock()):\n \"\"\"\n This is 4x faster than calling os.urandom(16) and prevents\n the \"too many files open\" issue with concurrent access to os.urandom()\n \"\"\"\n try:\n return urandom.pop()\n except IndexError:\n try:\n locker.acquire()\n ur = os.urandom(16 * 1024)\n urandom += [ur[i:i + 16] for i in xrange(16, 1024 * 16, 16)]\n return ur[0:16]\n finally:\n locker.release()\n\n\ndef web2py_uuid(ctokens=UNPACKED_CTOKENS):\n \"\"\"\n This function follows from the following discussion:\n `http://groups.google.com/group/web2py-developers/browse_thread/thread/7fd5789a7da3f09`\n\n It works like uuid.uuid4 except that tries to use os.urandom() if possible\n and it XORs the output with the tokens uniquely associated with this machine.\n \"\"\"\n rand_longs = (random.getrandbits(64), random.getrandbits(64))\n if HAVE_URANDOM:\n urand_longs = _struct_2_long_long.unpack(fast_urandom16())\n byte_s = _struct_2_long_long.pack(rand_longs[0] ^ urand_longs[0] ^ ctokens[0],\n rand_longs[1] ^ urand_longs[1] ^ ctokens[1])\n else:\n byte_s = _struct_2_long_long.pack(rand_longs[0] ^ ctokens[0],\n rand_longs[1] ^ ctokens[1])\n return str(uuid.UUID(bytes=byte_s, version=4))\n\nREGEX_IPv4 = re.compile(r'(\\d+)\\.(\\d+)\\.(\\d+)\\.(\\d+)')\n\n\ndef is_valid_ip_address(address):\n \"\"\"\n Examples:\n Better than a thousand words::\n\n >>> is_valid_ip_address('127.0')\n False\n >>> is_valid_ip_address('127.0.0.1')\n True\n >>> is_valid_ip_address('2001:660::1')\n True\n \"\"\"\n # deal with special cases\n if address.lower() in ('127.0.0.1', 'localhost', '::1', '::ffff:127.0.0.1'):\n return True\n elif address.lower() in ('unknown', ''):\n return False\n elif address.count('.') == 3: # assume IPv4\n if address.startswith('::ffff:'):\n address = address[7:]\n if hasattr(socket, 'inet_aton'): # try validate using the OS\n try:\n socket.inet_aton(address)\n return True\n except socket.error: # invalid address\n return False\n else: # try validate using Regex\n match = REGEX_IPv4.match(address)\n if match and all(0 <= int(match.group(i)) < 256 for i in (1, 2, 3, 4)):\n return True\n return False\n elif 
hasattr(socket, 'inet_pton'): # assume IPv6, try using the OS\n try:\n socket.inet_pton(socket.AF_INET6, address)\n return True\n except socket.error: # invalid address\n return False\n else: # do not know what to do? assume it is a valid address\n return True\n\n\ndef is_loopback_ip_address(ip=None, addrinfo=None):\n \"\"\"\n Determines whether the address appears to be a loopback address.\n This assumes that the IP is valid.\n \"\"\"\n if addrinfo: # see socket.getaddrinfo() for layout of addrinfo tuple\n if addrinfo[0] == socket.AF_INET or addrinfo[0] == socket.AF_INET6:\n ip = addrinfo[4]\n if not isinstance(ip, basestring):\n return False\n # IPv4 or IPv6-embedded IPv4 or IPv4-compatible IPv6\n if ip.count('.') == 3:\n return ip.lower().startswith(('127', '::127', '0:0:0:0:0:0:127',\n '::ffff:127', '0:0:0:0:0:ffff:127'))\n return ip == '::1' or ip == '0:0:0:0:0:0:0:1' # IPv6 loopback\n\n\ndef getipaddrinfo(host):\n \"\"\"\n Filter out non-IP and bad IP addresses from getaddrinfo\n \"\"\"\n try:\n return [addrinfo for addrinfo in socket.getaddrinfo(host, None)\n if (addrinfo[0] == socket.AF_INET or\n addrinfo[0] == socket.AF_INET6)\n and isinstance(addrinfo[4][0], basestring)]\n except socket.error:\n return []\n\n\ndef unlocalised_http_header_date(data):\n \"\"\"\n Converts input datetime to format defined by RFC 7231, section 7.1.1.1\n\n Previously, %a and %b formats were used for weekday and month names, but\n those are not locale-safe. uWSGI requires latin1-encodable headers and\n for example in cs_CS locale, fourth day in week is not encodable in latin1,\n as it's \"Čt\".\n\n Example output: Sun, 06 Nov 1994 08:49:37 GMT\n \"\"\"\n\n short_weekday = {\n \"0\": \"Sun\",\n \"1\": \"Mon\",\n \"2\": \"Tue\",\n \"3\": \"Wed\",\n \"4\": \"Thu\",\n \"5\": \"Fri\",\n \"6\": \"Sat\",\n }.get(time.strftime(\"%w\", data))\n\n day_of_month = time.strftime(\"%d\", data)\n\n short_month = {\n \"01\": \"Jan\",\n \"02\": \"Feb\",\n \"03\": \"Mar\",\n \"04\": \"Apr\",\n \"05\": \"May\",\n \"06\": \"Jun\",\n \"07\": \"Jul\",\n \"08\": \"Aug\",\n \"09\": \"Sep\",\n \"10\": \"Oct\",\n \"11\": \"Nov\",\n \"12\": \"Dec\",\n }.get(time.strftime(\"%m\", data))\n\n year_and_time = time.strftime(\"%Y %H:%M:%S GMT\", data)\n\n return \"{}, {} {} {}\".format(\n short_weekday,\n day_of_month,\n short_month,\n year_and_time)\n",
"path": "gluon/utils.py"
}
] | diff --git a/gluon/utils.py b/gluon/utils.py
index bbaba4e53..e4771d9cb 100644
--- a/gluon/utils.py
+++ b/gluon/utils.py
@@ -411,7 +411,7 @@ def unlocalised_http_header_date(data):
"12": "Dec",
}.get(time.strftime("%m", data))
- year_and_time = time.strftime("%Y %H:%M:%S GMT")
+ year_and_time = time.strftime("%Y %H:%M:%S GMT", data)
return "{}, {} {} {}".format(
short_weekday,
|
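The one-line change above matters because `time.strftime` formats the *current* time when no time tuple is supplied, so the header date silently ignored the `data` argument. A minimal illustration of the difference (the timestamp below is only an example input, not taken from the patch):

```python
import time

# RFC 7231 example instant: Sun, 06 Nov 1994 08:49:37 GMT
data = time.gmtime(784111777)

# Before the fix: no time tuple, so strftime formats "now".
before = time.strftime("%Y %H:%M:%S GMT")

# After the fix: the supplied struct_time is formatted -> "1994 08:49:37 GMT"
after = time.strftime("%Y %H:%M:%S GMT", data)
```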
spyder-ide__spyder-16483 | Drag & drop error in the Help pane
## Description
### What steps will reproduce the problem?
1. Open Spyder.
2. Drag any Python file from the file explorer onto the Spyder window.
3. Spyder reports an internal problem.
### Traceback
```python-traceback
Traceback (most recent call last):
File "C:\Program Files\Spyder\pkgs\spyder\plugins\help\widgets.py", line 855, in handle_link_clicks
self.rich_text.load_url(url)
File "C:\Program Files\Spyder\pkgs\spyder\plugins\help\widgets.py", line 188, in load_url
self.load(qurl)
AttributeError: 'RichText' object has no attribute 'load'
```
## Versions
* Spyder version: 5.0.1
* Python version: 3.7.9
* Qt version: 5.12.10
* PyQt5 version: 5.12.3
* Operating System: Windows 10
### Dependencies
```
# Mandatory:
atomicwrites >=1.2.0 : 1.4.0 (OK)
chardet >=2.0.0 : 4.0.0 (OK)
cloudpickle >=0.5.0 : 1.6.0 (OK)
cookiecutter >=1.6.0 : 1.7.2 (OK)
diff_match_patch >=20181111 : 20200713 (OK)
intervaltree : None (OK)
IPython >=7.6.0 : 7.22.0 (OK)
jedi =0.17.2 : 0.17.2 (OK)
jsonschema >=3.2.0 : 3.2.0 (OK)
keyring >=17.0.0 : 23.0.1 (OK)
nbconvert >=4.0 : 6.0.7 (OK)
numpydoc >=0.6.0 : 1.1.0 (OK)
paramiko >=2.4.0 : 2.7.2 (OK)
parso =0.7.0 : 0.7.0 (OK)
pexpect >=4.4.0 : 4.8.0 (OK)
pickleshare >=0.4 : 0.7.5 (OK)
psutil >=5.3 : 5.8.0 (OK)
pygments >=2.0 : 2.8.1 (OK)
pylint >=1.0 : 2.7.4 (OK)
pyls >=0.36.2;<1.0.0 : 0.36.2 (OK)
pyls_black >=0.4.6 : 0.4.6 (OK)
pyls_spyder >=0.3.2 : 0.3.2 (OK)
qdarkstyle =3.0.2 : 3.0.2 (OK)
qstylizer >=0.1.10 : 0.1.10 (OK)
qtawesome >=1.0.2 : 1.0.2 (OK)
qtconsole >=5.0.3 : 5.0.3 (OK)
qtpy >=1.5.0 : 1.9.0 (OK)
rtree >=0.8.3 : 0.9.4 (OK)
setuptools >=39.0.0 : 56.0.0 (OK)
sphinx >=0.6.6 : 3.5.4 (OK)
spyder_kernels >=2.0.1;<2.1.0 : 2.0.1 (OK)
textdistance >=4.2.0 : 4.2.1 (OK)
three_merge >=0.1.1 : 0.1.1 (OK)
watchdog : 1.0.2 (OK)
zmq >=17 : 22.0.3 (OK)
# Optional:
cython >=0.21 : 0.29.23 (OK)
matplotlib >=2.0.0 : 3.4.1 (OK)
numpy >=1.7 : 1.19.3 (OK)
pandas >=1.1.1 : 1.2.4 (OK)
scipy >=0.17.0 : 1.6.2 (OK)
sympy >=0.7.3 : 1.8 (OK)
```
| [
{
"content": "# -*- coding: utf-8 -*-\n#\n# Copyright © Spyder Project Contributors\n# Licensed under the terms of the MIT License\n#\n\"\"\"\nHelp plugin widgets.\n\"\"\"\n\n# Standard library imports\nimport os\nimport re\nimport socket\nimport sys\n\n# Third party imports\nfrom qtpy.QtCore import Qt, QUrl, Signal, Slot, QPoint\nfrom qtpy.QtGui import QColor\nfrom qtpy.QtWebEngineWidgets import WEBENGINE, QWebEnginePage\nfrom qtpy.QtWidgets import (QActionGroup, QComboBox, QLabel, QLineEdit,\n QMessageBox, QSizePolicy, QStackedLayout,\n QVBoxLayout, QWidget)\n\n# Local imports\nfrom spyder.api.config.decorators import on_conf_change\nfrom spyder.api.translations import get_translation\nfrom spyder.api.widgets.main_widget import PluginMainWidget\nfrom spyder.api.widgets.mixins import SpyderWidgetMixin\nfrom spyder.config.base import get_module_source_path\nfrom spyder.plugins.help.utils.sphinxify import (CSS_PATH, generate_context,\n loading, usage, warning)\nfrom spyder.plugins.help.utils.sphinxthread import SphinxThread\nfrom spyder.py3compat import get_meth_class_inst, to_text_string\nfrom spyder.utils import programs\nfrom spyder.utils.image_path_manager import get_image_path\nfrom spyder.utils.palette import QStylePalette\nfrom spyder.utils.qthelpers import start_file\nfrom spyder.widgets.browser import FrameWebView\nfrom spyder.widgets.comboboxes import EditableComboBox\nfrom spyder.widgets.findreplace import FindReplace\nfrom spyder.widgets.simplecodeeditor import SimpleCodeEditor\n\n\n# Localization\n_ = get_translation('spyder')\n\n\n# --- Constants\n# ----------------------------------------------------------------------------\nMAIN_BG_COLOR = QStylePalette.COLOR_BACKGROUND_1\n\n\nclass HelpWidgetActions:\n # Toggles\n ToggleAutomaticImport = 'toggle_automatic_import_action'\n ToggleLocked = 'toggle_locked_action'\n TogglePlainMode = 'toggle_plain_mode_action'\n ToggleRichMode = 'toggle_rich_mode_action'\n ToggleShowSource = 'toggle_show_source_action'\n ToggleWrap = 'toggle_wrap_action'\n CopyAction = \"help_widget_copy_action\"\n SelectAll = \"select_all_action\",\n Home = 'home_action'\n\n\nclass HelpWidgetOptionsMenuSections:\n Display = 'display_section'\n Other = 'other_section'\n\n\nclass HelpWidgetMainToolbarSections:\n Main = 'main_section'\n\n\nclass HelpWidgetToolbarItems:\n SourceLabel = 'source_label'\n SourceCombo = 'source_combo'\n ObjectLabel = 'object_label'\n ObjectCombo = 'object_combo'\n ObjectEdit = 'object_edit'\n\n\n# --- Widgets\n# ----------------------------------------------------------------------------\nclass ObjectComboBox(EditableComboBox):\n \"\"\"\n QComboBox handling object names\n \"\"\"\n # Signals\n valid = Signal(bool, bool)\n\n def __init__(self, parent, id_=None):\n EditableComboBox.__init__(self, parent)\n self.help = parent\n self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)\n self.tips = {True: '', False: ''}\n\n if id_ is not None:\n self.ID = id_\n\n def is_valid(self, qstr=None):\n \"\"\"Return True if string is valid\"\"\"\n if not self.help.source_is_console():\n return True\n if qstr is None:\n qstr = self.currentText()\n if not re.search(r'^[a-zA-Z0-9_\\.]*$', str(qstr), 0):\n return False\n objtxt = to_text_string(qstr)\n shell_is_defined = False\n if self.help.get_conf('automatic_import'):\n shell = self.help.internal_shell\n if shell is not None:\n shell_is_defined = shell.is_defined(objtxt, force_import=True)\n if not shell_is_defined:\n shell = self.help.get_shell()\n if shell is not None:\n try:\n shell_is_defined 
= shell.is_defined(objtxt)\n except socket.error:\n shell = self.help.get_shell()\n try:\n shell_is_defined = shell.is_defined(objtxt)\n except socket.error:\n # Well... too bad!\n pass\n return shell_is_defined\n\n def validate_current_text(self):\n self.validate(self.currentText())\n\n def validate(self, qstr, editing=True):\n \"\"\"Reimplemented to avoid formatting actions\"\"\"\n valid = self.is_valid(qstr)\n if self.hasFocus() and valid is not None:\n if editing and not valid:\n # Combo box text is being modified: invalidate the entry\n self.show_tip(self.tips[valid])\n self.valid.emit(False, False)\n else:\n # A new item has just been selected\n if valid:\n self.selected()\n # See spyder-ide/spyder#9542.\n self.lineEdit().cursorWordForward(False)\n else:\n self.valid.emit(False, False)\n\n\nclass RichText(QWidget, SpyderWidgetMixin):\n \"\"\"\n WebView widget with find dialog\n \"\"\"\n sig_link_clicked = Signal(QUrl)\n\n def __init__(self, parent):\n super().__init__(parent, class_parent=parent)\n\n self.webview = FrameWebView(self)\n self.webview.setup()\n\n if WEBENGINE:\n self.webview.web_widget.page().setBackgroundColor(\n QColor(MAIN_BG_COLOR))\n else:\n self.webview.web_widget.setStyleSheet(\n \"background:{}\".format(MAIN_BG_COLOR))\n self.webview.page().setLinkDelegationPolicy(\n QWebEnginePage.DelegateAllLinks)\n\n self.find_widget = FindReplace(self)\n self.find_widget.set_editor(self.webview.web_widget)\n self.find_widget.hide()\n\n # Layout\n layout = QVBoxLayout()\n layout.setContentsMargins(0, 0, 0, 0)\n layout.addWidget(self.webview)\n layout.addWidget(self.find_widget)\n self.setLayout(layout)\n\n # Signals\n self.webview.linkClicked.connect(self.sig_link_clicked)\n\n def set_font(self, font, fixed_font=None):\n \"\"\"Set font\"\"\"\n self.webview.set_font(font, fixed_font=fixed_font)\n\n def set_html(self, html_text, base_url):\n \"\"\"Set html text\"\"\"\n self.webview.setHtml(html_text, base_url)\n\n def load_url(self, url):\n if isinstance(url, QUrl):\n qurl = url\n else:\n qurl = QUrl(url)\n\n self.load(qurl)\n\n def clear(self):\n self.set_html('', self.webview.url())\n\n\nclass PlainText(QWidget):\n \"\"\"\n Read-only editor widget with find dialog\n \"\"\"\n # Signals\n focus_changed = Signal()\n\n sig_custom_context_menu_requested = Signal(QPoint)\n\n def __init__(self, parent):\n QWidget.__init__(self, parent)\n self.editor = None\n\n # Read-only simple code editor\n self.editor = SimpleCodeEditor(self)\n self.editor.setup_editor(\n language='py',\n highlight_current_line=False,\n linenumbers=False,\n )\n self.editor.sig_focus_changed.connect(self.focus_changed)\n self.editor.setReadOnly(True)\n self.editor.setContextMenuPolicy(Qt.CustomContextMenu)\n\n # Find/replace widget\n self.find_widget = FindReplace(self)\n self.find_widget.set_editor(self.editor)\n self.find_widget.hide()\n\n layout = QVBoxLayout()\n layout.setContentsMargins(0, 0, 0, 0)\n layout.addWidget(self.editor)\n layout.addWidget(self.find_widget)\n self.setLayout(layout)\n\n self.editor.customContextMenuRequested.connect(\n self.sig_custom_context_menu_requested)\n\n def set_font(self, font, color_scheme=None):\n \"\"\"Set font\"\"\"\n self.editor.set_color_scheme(color_scheme)\n self.editor.set_font(font)\n\n def set_color_scheme(self, color_scheme):\n \"\"\"Set color scheme\"\"\"\n self.editor.set_color_scheme(color_scheme)\n\n def set_text(self, text, is_code):\n if is_code:\n self.editor.set_language('py')\n else:\n self.editor.set_language(None)\n\n self.editor.set_text(text)\n 
self.editor.set_cursor_position('sof')\n\n def clear(self):\n self.editor.clear()\n\n def set_wrap_mode(self, value):\n self.editor.toggle_wrap_mode(value)\n\n def copy(self):\n self.editor.copy()\n\n def select_all(self):\n self.editor.selectAll()\n\n\nclass HelpWidget(PluginMainWidget):\n\n ENABLE_SPINNER = True\n\n # Signals\n sig_item_found = Signal()\n \"\"\"This signal is emitted when an item is found.\"\"\"\n\n sig_render_started = Signal()\n \"\"\"This signal is emitted to inform a help text rendering has started.\"\"\"\n\n sig_render_finished = Signal()\n \"\"\"This signal is emitted to inform a help text rendering has finished.\"\"\"\n\n def __init__(self, name=None, plugin=None, parent=None):\n super().__init__(name, plugin, parent)\n\n # Attributes\n self._starting_up = True\n self._current_color_scheme = None\n self._last_texts = [None, None]\n self._last_editor_doc = None\n self._last_console_cb = None\n self._last_editor_cb = None\n self.css_path = self.get_conf('css_path', CSS_PATH, 'appearance')\n self.no_docs = _(\"No documentation available\")\n self.docstring = True # TODO: What is this used for?\n\n # Widgets\n self._sphinx_thread = SphinxThread(\n html_text_no_doc=warning(self.no_docs, css_path=self.css_path),\n css_path=self.css_path,\n )\n self.shell = None\n self.internal_console = None\n self.internal_shell = None\n self.plain_text = PlainText(self)\n self.rich_text = RichText(self)\n\n self.source_label = QLabel(_(\"Source\"))\n self.source_label.ID = HelpWidgetToolbarItems.SourceLabel\n\n self.source_combo = QComboBox(self)\n self.source_combo.ID = HelpWidgetToolbarItems.SourceCombo\n\n self.object_label = QLabel(_(\"Object\"))\n self.object_label.ID = HelpWidgetToolbarItems.ObjectLabel\n\n self.object_combo = ObjectComboBox(\n self, HelpWidgetToolbarItems.ObjectCombo)\n\n self.object_edit = QLineEdit(self)\n self.object_edit.ID = HelpWidgetToolbarItems.ObjectEdit\n\n # Setup\n self.object_edit.setReadOnly(True)\n self.object_combo.setMaxCount(self.get_conf('max_history_entries'))\n self.object_combo.setItemText(0, '')\n self.plain_text.set_wrap_mode(self.get_conf('wrap'))\n self.source_combo.addItems([_(\"Console\"), _(\"Editor\")])\n if (not programs.is_module_installed('rope') and\n not programs.is_module_installed('jedi', '>=0.11.0')):\n self.source_combo.hide()\n self.source_label.hide()\n\n # Layout\n self.stack_layout = layout = QStackedLayout()\n layout.addWidget(self.rich_text)\n layout.addWidget(self.plain_text)\n self.setLayout(layout)\n\n # Signals\n self._sphinx_thread.html_ready.connect(\n self._on_sphinx_thread_html_ready)\n self._sphinx_thread.error_msg.connect(\n self._on_sphinx_thread_error_msg)\n self.object_combo.valid.connect(self.force_refresh)\n self.rich_text.sig_link_clicked.connect(self.handle_link_clicks)\n self.source_combo.currentIndexChanged.connect(\n lambda x: self.source_changed())\n self.sig_render_started.connect(self.start_spinner)\n self.sig_render_finished.connect(self.stop_spinner)\n\n # --- PluginMainWidget API\n # ------------------------------------------------------------------------\n def get_title(self):\n return _('Help')\n\n def setup(self):\n self.wrap_action = self.create_action(\n name=HelpWidgetActions.ToggleWrap,\n text=_(\"Wrap lines\"),\n toggled=True,\n initial=self.get_conf('wrap'),\n option='wrap'\n )\n self.copy_action = self.create_action(\n name=HelpWidgetActions.CopyAction,\n text=_(\"Copy\"),\n triggered=lambda value: self.plain_text.copy(),\n register_shortcut=False,\n )\n self.select_all_action = 
self.create_action(\n name=HelpWidgetActions.SelectAll,\n text=_(\"Select All\"),\n triggered=lambda value: self.plain_text.select_all(),\n register_shortcut=False,\n )\n self.auto_import_action = self.create_action(\n name=HelpWidgetActions.ToggleAutomaticImport,\n text=_(\"Automatic import\"),\n toggled=True,\n initial=self.get_conf('automatic_import'),\n option='automatic_import'\n )\n self.show_source_action = self.create_action(\n name=HelpWidgetActions.ToggleShowSource,\n text=_(\"Show Source\"),\n toggled=True,\n option='show_source'\n )\n self.rich_text_action = self.create_action(\n name=HelpWidgetActions.ToggleRichMode,\n text=_(\"Rich Text\"),\n toggled=True,\n initial=self.get_conf('rich_mode'),\n option='rich_mode'\n )\n self.plain_text_action = self.create_action(\n name=HelpWidgetActions.TogglePlainMode,\n text=_(\"Plain Text\"),\n toggled=True,\n initial=self.get_conf('plain_mode'),\n option='plain_mode'\n )\n self.locked_action = self.create_action(\n name=HelpWidgetActions.ToggleLocked,\n text=_(\"Lock/Unlock\"),\n toggled=True,\n icon=self.create_icon('lock_open'),\n initial=self.get_conf('locked'),\n option='locked'\n )\n self.home_action = self.create_action(\n name=HelpWidgetActions.Home,\n text=_(\"Home\"),\n triggered=self.show_intro_message,\n icon=self.create_icon('home'),\n )\n\n # Add the help actions to an exclusive QActionGroup\n help_actions = QActionGroup(self)\n help_actions.setExclusive(True)\n help_actions.addAction(self.plain_text_action)\n help_actions.addAction(self.rich_text_action)\n\n # Menu\n menu = self.get_options_menu()\n for item in [self.rich_text_action, self.plain_text_action,\n self.show_source_action]:\n self.add_item_to_menu(\n item,\n menu=menu,\n section=HelpWidgetOptionsMenuSections.Display,\n )\n\n self.add_item_to_menu(\n self.auto_import_action,\n menu=menu,\n section=HelpWidgetOptionsMenuSections.Other,\n )\n\n # Plain text menu\n self._plain_text_context_menu = self.create_menu(\n \"plain_text_context_menu\")\n self.add_item_to_menu(\n self.copy_action,\n self._plain_text_context_menu,\n section=\"copy_section\",\n )\n self.add_item_to_menu(\n self.select_all_action,\n self._plain_text_context_menu,\n section=\"select_section\",\n )\n self.add_item_to_menu(\n self.wrap_action,\n self._plain_text_context_menu,\n section=\"wrap_section\",\n )\n\n # Toolbar\n toolbar = self.get_main_toolbar()\n for item in [self.source_label, self.source_combo, self.object_label,\n self.object_combo, self.object_edit, self.home_action,\n self.locked_action]:\n self.add_item_to_toolbar(\n item,\n toolbar=toolbar,\n section=HelpWidgetMainToolbarSections.Main,\n )\n\n self.source_changed()\n self.switch_to_rich_text()\n self.show_intro_message()\n\n # Signals\n self.plain_text.sig_custom_context_menu_requested.connect(\n self._show_plain_text_context_menu)\n\n def _should_display_welcome_page(self):\n \"\"\"Determine if the help welcome page should be displayed.\"\"\"\n return (self._last_editor_doc is None or\n self._last_console_cb is None or\n self._last_editor_cb is None)\n\n @on_conf_change(option='wrap')\n def on_wrap_option_update(self, value):\n self.plain_text.set_wrap_mode(value)\n\n @on_conf_change(option='locked')\n def on_lock_update(self, value):\n if value:\n icon = self.create_icon('lock')\n tip = _(\"Unlock\")\n else:\n icon = self.create_icon('lock_open')\n tip = _(\"Lock\")\n\n action = self.get_action(HelpWidgetActions.ToggleLocked)\n action.setIcon(icon)\n action.setToolTip(tip)\n\n @on_conf_change(option='automatic_import')\n def 
on_automatic_import_update(self, value):\n self.object_combo.validate_current_text()\n if self._should_display_welcome_page():\n self.show_intro_message()\n else:\n self.force_refresh()\n\n @on_conf_change(option='rich_mode')\n def on_rich_mode_update(self, value):\n if value:\n # Plain Text OFF / Rich text ON\n self.docstring = not value\n self.stack_layout.setCurrentWidget(self.rich_text)\n self.get_action(HelpWidgetActions.ToggleShowSource).setChecked(\n False)\n else:\n # Plain Text ON / Rich text OFF\n self.docstring = value\n self.stack_layout.setCurrentWidget(self.plain_text)\n\n if self._should_display_welcome_page():\n self.show_intro_message()\n else:\n self.force_refresh()\n\n @on_conf_change(option='show_source')\n def on_show_source_update(self, value):\n if value:\n self.switch_to_plain_text()\n self.get_action(HelpWidgetActions.ToggleRichMode).setChecked(\n False)\n\n self.docstring = not value\n if self._should_display_welcome_page():\n self.show_intro_message()\n else:\n self.force_refresh()\n\n def update_actions(self):\n for __, action in self.get_actions().items():\n # IMPORTANT: Since we are defining the main actions in here\n # and the context is WidgetWithChildrenShortcut we need to\n # assign the same actions to the children widgets in order\n # for shortcuts to work\n for widget in [self.plain_text,\n self.rich_text,\n self.source_combo,\n self.object_combo,\n self.object_edit]:\n if action not in widget.actions():\n widget.addAction(action)\n\n def get_focus_widget(self):\n self.object_combo.lineEdit().selectAll()\n return self.object_combo\n\n # --- Private API\n # ------------------------------------------------------------------------\n @Slot(QPoint)\n def _show_plain_text_context_menu(self, point):\n point = self.plain_text.mapToGlobal(point)\n self._plain_text_context_menu.popup(point)\n\n def _on_sphinx_thread_html_ready(self, html_text):\n \"\"\"\n Set our sphinx documentation based on thread result.\n\n Parameters\n ----------\n html_text: str\n Html results text.\n \"\"\"\n self._sphinx_thread.wait()\n self.set_rich_text_html(html_text, QUrl.fromLocalFile(self.css_path))\n self.sig_render_finished.emit()\n self.stop_spinner()\n\n def _on_sphinx_thread_error_msg(self, error_msg):\n \"\"\"\n Display error message on Sphinx rich text failure.\n\n Parameters\n ----------\n error_msg: str\n Error message text.\n \"\"\"\n self._sphinx_thread.wait()\n self.plain_text_action.setChecked(True)\n sphinx_ver = programs.get_module_version('sphinx')\n QMessageBox.critical(\n self,\n _('Help'),\n _(\"The following error occurred when calling \"\n \"<b>Sphinx %s</b>. 
<br>Incompatible Sphinx \"\n \"version or doc string decoding failed.\"\n \"<br><br>Error message:<br>%s\"\n ) % (sphinx_ver, error_msg),\n )\n self.sig_render_finished.emit()\n\n # --- Public API\n # ------------------------------------------------------------------------\n def source_is_console(self):\n \"\"\"Return True if source is Console.\"\"\"\n return self.source_combo.currentIndex() == 0\n\n def switch_to_editor_source(self):\n \"\"\"Switch to editor view of the help viewer.\"\"\"\n self.source_combo.setCurrentIndex(1)\n\n def switch_to_console_source(self):\n \"\"\"Switch to console view of the help viewer.\"\"\"\n self.source_combo.setCurrentIndex(0)\n\n def source_changed(self):\n \"\"\"Handle a source (plain/rich) change.\"\"\"\n is_console = self.source_is_console()\n if is_console:\n self.object_combo.show()\n self.object_edit.hide()\n else:\n # Editor\n self.object_combo.hide()\n self.object_edit.show()\n\n self.get_action(HelpWidgetActions.ToggleShowSource).setEnabled(\n is_console)\n self.get_action(HelpWidgetActions.ToggleAutomaticImport).setEnabled(\n is_console)\n self.restore_text()\n\n def save_text(self, callback):\n \"\"\"\n Save help text.\n\n Parameters\n ----------\n callback: callable\n Method to call on save.\n \"\"\"\n if self.source_is_console():\n self._last_console_cb = callback\n else:\n self._last_editor_cb = callback\n\n def restore_text(self):\n \"\"\"Restore last text using callback.\"\"\"\n if self.source_is_console():\n cb = self._last_console_cb\n else:\n cb = self._last_editor_cb\n\n if cb is None:\n if self.get_conf('plain_mode'):\n self.switch_to_plain_text()\n else:\n self.switch_to_rich_text()\n else:\n func = cb[0]\n args = cb[1:]\n func(*args)\n if get_meth_class_inst(func) is self.rich_text:\n self.switch_to_rich_text()\n else:\n self.switch_to_plain_text()\n\n @property\n def find_widget(self):\n \"\"\"Show find widget.\"\"\"\n if self.get_conf('plain_mode'):\n return self.plain_text.find_widget\n else:\n return self.rich_text.find_widget\n\n def switch_to_plain_text(self):\n \"\"\"Switch to plain text mode.\"\"\"\n self.get_action(HelpWidgetActions.TogglePlainMode).setChecked(True)\n\n def switch_to_rich_text(self):\n \"\"\"Switch to rich text mode.\"\"\"\n self.get_action(HelpWidgetActions.ToggleRichMode).setChecked(True)\n\n def set_plain_text(self, text, is_code):\n \"\"\"\n Set plain text docs.\n\n Parameters\n ----------\n text: str\n Text content.\n is_code: bool\n True if it is code text.\n\n Notes\n -----\n Text is coming from utils.dochelpers.getdoc\n \"\"\"\n if type(text) is dict:\n name = text['name']\n if name:\n rst_title = ''.join(['='*len(name), '\\n', name, '\\n',\n '='*len(name), '\\n\\n'])\n else:\n rst_title = ''\n try:\n if text['argspec']:\n definition = ''.join(\n ['Definition: ', name, text['argspec'], '\\n\\n'])\n else:\n definition = ''\n\n if text['note']:\n note = ''.join(['Type: ', text['note'], '\\n\\n----\\n\\n'])\n else:\n note = ''\n except TypeError:\n definition = self.no_docs\n note = ''\n\n full_text = ''.join([rst_title, definition, note,\n text['docstring']])\n else:\n full_text = text\n\n self.plain_text.set_text(full_text, is_code)\n self.save_text([self.plain_text.set_text, full_text, is_code])\n\n def set_rich_text_html(self, html_text, base_url):\n \"\"\"\n Set rich text.\n\n Parameters\n ----------\n html_text: str\n Html string.\n base_url: str\n Location of stylesheets and images to load in the page.\n \"\"\"\n self.rich_text.set_html(html_text, base_url)\n 
self.save_text([self.rich_text.set_html, html_text, base_url])\n\n def show_loading_message(self):\n \"\"\"Create html page to show while the documentation is generated.\"\"\"\n self.sig_render_started.emit()\n loading_message = _(\"Retrieving documentation\")\n loading_img = get_image_path('loading_sprites')\n if os.name == 'nt':\n loading_img = loading_img.replace('\\\\', '/')\n\n self.set_rich_text_html(\n loading(loading_message, loading_img, css_path=self.css_path),\n QUrl.fromLocalFile(self.css_path),\n )\n\n def show_intro_message(self):\n \"\"\"Show message on Help with the right shortcuts.\"\"\"\n intro_message_eq = _(\n \"Here you can get help of any object by pressing \"\n \"%s in front of it, either on the Editor or the \"\n \"Console.%s\")\n intro_message_dif = _(\n \"Here you can get help of any object by pressing \"\n \"%s in front of it on the Editor, or %s in front \"\n \"of it on the Console.%s\")\n intro_message_common = _(\n \"Help can also be shown automatically after writing \"\n \"a left parenthesis next to an object. You can \"\n \"activate this behavior in %s.\")\n prefs = _(\"Preferences > Help\")\n\n shortcut_editor = self.get_conf('editor/inspect current object',\n section='shortcuts')\n shortcut_console = self.get_conf('console/inspect current object',\n section='shortcuts')\n\n if sys.platform == 'darwin':\n shortcut_editor = shortcut_editor.replace('Ctrl', 'Cmd')\n shortcut_console = shortcut_console.replace('Ctrl', 'Cmd')\n\n if self.get_conf('rich_mode'):\n title = _(\"Usage\")\n tutorial_message = _(\"New to Spyder? Read our\")\n tutorial = _(\"tutorial\")\n if shortcut_editor == shortcut_console:\n intro_message = (intro_message_eq + intro_message_common) % (\n \"<b>\"+shortcut_editor+\"</b>\", \"<br><br>\",\n \"<i>\"+prefs+\"</i>\")\n else:\n intro_message = (intro_message_dif + intro_message_common) % (\n \"<b>\"+shortcut_editor+\"</b>\",\n \"<b>\"+shortcut_console+\"</b>\",\n \"<br><br>\", \"<i>\"+prefs+\"</i>\")\n\n self.set_rich_text_html(usage(title, intro_message,\n tutorial_message, tutorial,\n css_path=self.css_path),\n QUrl.fromLocalFile(self.css_path))\n else:\n install_sphinx = \"\\n\\n%s\" % _(\"Please consider installing Sphinx \"\n \"to get documentation rendered in \"\n \"rich text.\")\n if shortcut_editor == shortcut_console:\n intro_message = (intro_message_eq + intro_message_common) % (\n shortcut_editor, \"\\n\\n\", prefs)\n else:\n intro_message = (intro_message_dif + intro_message_common) % (\n shortcut_editor, shortcut_console, \"\\n\\n\", prefs)\n\n intro_message += install_sphinx\n self.set_plain_text(intro_message, is_code=False)\n\n def show_rich_text(self, text, collapse=False, img_path=''):\n \"\"\"\n Show text in rich mode.\n\n Parameters\n ----------\n text: str\n Plain text to display.\n collapse: bool, optional\n Show collapsable sections as collapsed/expanded. Default is False.\n img_path: str, optional\n Path to folder with additional images needed to correctly\n display the rich text help. 
Default is ''.\n \"\"\"\n self.switch_to_rich_text()\n context = generate_context(collapse=collapse, img_path=img_path,\n css_path=self.css_path)\n self.render_sphinx_doc(text, context)\n\n def show_plain_text(self, text):\n \"\"\"\n Show text in plain mode.\n\n Parameters\n ----------\n text: str\n Plain text to display.\n \"\"\"\n self.switch_to_plain_text()\n self.set_plain_text(text, is_code=False)\n\n @Slot()\n def show_tutorial(self):\n \"\"\"Show the Spyder tutorial.\"\"\"\n tutorial_path = get_module_source_path('spyder.plugins.help.utils')\n tutorial = os.path.join(tutorial_path, 'tutorial.rst')\n\n with open(tutorial, 'r') as fh:\n text = fh.read()\n\n self.show_rich_text(text, collapse=True)\n\n def handle_link_clicks(self, url):\n \"\"\"\n Handle how url links should be opened.\n\n Parameters\n ----------\n url: QUrl\n QUrl object containing the link to open.\n \"\"\"\n url = to_text_string(url.toString())\n if url == \"spy://tutorial\":\n self.show_tutorial()\n elif url.startswith('http'):\n start_file(url)\n else:\n self.rich_text.load_url(url)\n\n @Slot()\n @Slot(bool)\n @Slot(bool, bool)\n def force_refresh(self, valid=True, editing=True):\n \"\"\"\n Force a refresh/rerender of the help viewer content.\n\n Parameters\n ----------\n valid: bool, optional\n Default is True.\n editing: bool, optional\n Default is True.\n \"\"\"\n if valid:\n if self.source_is_console():\n self.set_object_text(None, force_refresh=True)\n elif self._last_editor_doc is not None:\n self.set_editor_doc(self._last_editor_doc, force_refresh=True)\n\n def set_object_text(self, text, force_refresh=False, ignore_unknown=False):\n \"\"\"\n Set object's name in Help's combobox.\n\n Parameters\n ----------\n text: str\n Object name.\n force_refresh: bool, optional\n Force a refresh with the rendering.\n ignore_unknown: bool, optional\n Ignore not found object names.\n\n See Also\n --------\n :py:meth:spyder.widgets.mixins.GetHelpMixin.show_object_info\n \"\"\"\n if self.get_conf('locked') and not force_refresh:\n return\n\n self.switch_to_console_source()\n add_to_combo = True\n if text is None:\n text = to_text_string(self.object_combo.currentText())\n add_to_combo = False\n\n found = self.show_help(text, ignore_unknown=ignore_unknown)\n if ignore_unknown and not found:\n return\n\n if add_to_combo:\n self.object_combo.add_text(text)\n\n if found:\n self.sig_item_found.emit()\n\n index = self.source_combo.currentIndex()\n self._last_texts[index] = text\n\n def set_editor_doc(self, help_data, force_refresh=False):\n \"\"\"\n Set content for help data sent from the editor.\n\n Parameters\n ----------\n help_data: dict\n Dictionary with editor introspection information.\n force_refresh: bool, optional\n Force a refresh with the rendering.\n\n Examples\n --------\n >>> help_data = {\n 'obj_text': str,\n 'name': str,\n 'argspec': str,\n 'note': str,\n 'docstring': str,\n 'path': str,\n }\n \"\"\"\n if self.get_conf('locked') and not force_refresh:\n return\n\n self.switch_to_editor_source()\n self._last_editor_doc = help_data\n self.object_edit.setText(help_data['obj_text'])\n\n if self.get_conf('rich_mode'):\n self.render_sphinx_doc(help_data)\n else:\n self.set_plain_text(help_data, is_code=False)\n\n index = self.source_combo.currentIndex()\n self._last_texts[index] = help_data['docstring']\n\n def set_shell(self, shell):\n \"\"\"\n Bind to shell.\n\n Parameters\n ----------\n shell: object\n internal shell or ipython console shell\n \"\"\"\n self.shell = shell\n\n def get_shell(self):\n \"\"\"\n Return 
shell which is currently bound to Help.\n \"\"\"\n if self.shell is None:\n self.shell = self.internal_shell\n\n return self.shell\n\n def render_sphinx_doc(self, help_data, context=None, css_path=CSS_PATH):\n \"\"\"\n Transform help_data dictionary to HTML and show it.\n\n Parameters\n ----------\n help_data: str or dict\n Dictionary with editor introspection information.\n context: dict\n Sphinx context.\n css_path: str\n Path to CSS file for styling.\n \"\"\"\n if isinstance(help_data, dict):\n path = help_data.pop('path', '')\n dname = os.path.dirname(path)\n else:\n dname = ''\n\n # Math rendering option could have changed\n self._sphinx_thread.render(help_data, context, self.get_conf('math'),\n dname, css_path=self.css_path)\n self.show_loading_message()\n\n def show_help(self, obj_text, ignore_unknown=False):\n \"\"\"\n Show help for an object's name.\n\n Parameters\n ----------\n obj_text: str\n Object's name.\n ignore_unknown: bool, optional\n Ignore unknown object's name.\n \"\"\"\n # TODO: This method makes active use of the shells. It would be better\n # to use signals and pass information this way for better decoupling.\n shell = self.get_shell()\n if shell is None:\n return\n\n obj_text = to_text_string(obj_text)\n\n if not shell.is_defined(obj_text):\n if (self.get_conf('automatic_import')\n and self.internal_shell.is_defined(obj_text,\n force_import=True)):\n shell = self.internal_shell\n else:\n shell = None\n doc = None\n source_text = None\n\n if shell is not None:\n doc = shell.get_doc(obj_text)\n source_text = shell.get_source(obj_text)\n\n is_code = False\n\n if self.get_conf('rich_mode'):\n self.render_sphinx_doc(doc, css_path=self.css_path)\n return doc is not None\n elif self.docstring:\n hlp_text = doc\n if hlp_text is None:\n hlp_text = source_text\n if hlp_text is None:\n return False\n else:\n hlp_text = source_text\n if hlp_text is None:\n hlp_text = doc\n if hlp_text is None:\n hlp_text = _(\"No source code available.\")\n if ignore_unknown:\n return False\n else:\n is_code = True\n\n self.set_plain_text(hlp_text, is_code=is_code)\n return True\n\n def set_rich_text_font(self, font, fixed_font):\n \"\"\"\n Set rich text mode font.\n\n Parameters\n ----------\n fixed_font: QFont\n The current rich text font to use.\n \"\"\"\n\n self.rich_text.set_font(font, fixed_font=fixed_font)\n\n def set_plain_text_font(self, font, color_scheme=None):\n \"\"\"\n Set plain text mode font.\n\n Parameters\n ----------\n font: QFont\n The current plain text font to use.\n color_scheme: str\n The selected color scheme.\n \"\"\"\n if color_scheme is None:\n color_scheme = self._current_color_scheme\n\n self.plain_text.set_font(font, color_scheme=color_scheme)\n\n def set_plain_text_color_scheme(self, color_scheme):\n \"\"\"\n Set plain text mode color scheme.\n\n Parameters\n ----------\n color_scheme: str\n The selected color scheme.\n \"\"\"\n self._current_color_scheme = color_scheme\n self.plain_text.set_color_scheme(color_scheme)\n\n def set_history(self, history):\n \"\"\"\n Set list of strings on object combo box.\n\n Parameters\n ----------\n history: list\n List of strings of objects.\n \"\"\"\n self.object_combo.addItems(history)\n\n def get_history(self):\n \"\"\"\n Return list of strings on object combo box.\n \"\"\"\n history = []\n for index in range(self.object_combo.count()):\n history.append(to_text_string(self.object_combo.itemText(index)))\n\n return history\n\n def set_internal_console(self, console):\n \"\"\"\n Set the internal console shell.\n\n 
Parameters\n ----------\n console: :py:class:spyder.plugins.console.plugin.Console\n Console plugin.\n \"\"\"\n self.internal_console = console\n self.internal_shell = console.get_widget().shell\n",
"path": "spyder/plugins/help/widgets.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n#\n# Copyright © Spyder Project Contributors\n# Licensed under the terms of the MIT License\n#\n\"\"\"\nHelp plugin widgets.\n\"\"\"\n\n# Standard library imports\nimport os\nimport re\nimport socket\nimport sys\n\n# Third party imports\nfrom qtpy.QtCore import Qt, QUrl, Signal, Slot, QPoint\nfrom qtpy.QtGui import QColor\nfrom qtpy.QtWebEngineWidgets import WEBENGINE, QWebEnginePage\nfrom qtpy.QtWidgets import (QActionGroup, QComboBox, QLabel, QLineEdit,\n QMessageBox, QSizePolicy, QStackedLayout,\n QVBoxLayout, QWidget)\n\n# Local imports\nfrom spyder.api.config.decorators import on_conf_change\nfrom spyder.api.translations import get_translation\nfrom spyder.api.widgets.main_widget import PluginMainWidget\nfrom spyder.api.widgets.mixins import SpyderWidgetMixin\nfrom spyder.config.base import get_module_source_path\nfrom spyder.plugins.help.utils.sphinxify import (CSS_PATH, generate_context,\n loading, usage, warning)\nfrom spyder.plugins.help.utils.sphinxthread import SphinxThread\nfrom spyder.py3compat import get_meth_class_inst, to_text_string\nfrom spyder.utils import programs\nfrom spyder.utils.image_path_manager import get_image_path\nfrom spyder.utils.palette import QStylePalette\nfrom spyder.utils.qthelpers import start_file\nfrom spyder.widgets.browser import FrameWebView\nfrom spyder.widgets.comboboxes import EditableComboBox\nfrom spyder.widgets.findreplace import FindReplace\nfrom spyder.widgets.simplecodeeditor import SimpleCodeEditor\n\n\n# Localization\n_ = get_translation('spyder')\n\n\n# --- Constants\n# ----------------------------------------------------------------------------\nMAIN_BG_COLOR = QStylePalette.COLOR_BACKGROUND_1\n\n\nclass HelpWidgetActions:\n # Toggles\n ToggleAutomaticImport = 'toggle_automatic_import_action'\n ToggleLocked = 'toggle_locked_action'\n TogglePlainMode = 'toggle_plain_mode_action'\n ToggleRichMode = 'toggle_rich_mode_action'\n ToggleShowSource = 'toggle_show_source_action'\n ToggleWrap = 'toggle_wrap_action'\n CopyAction = \"help_widget_copy_action\"\n SelectAll = \"select_all_action\",\n Home = 'home_action'\n\n\nclass HelpWidgetOptionsMenuSections:\n Display = 'display_section'\n Other = 'other_section'\n\n\nclass HelpWidgetMainToolbarSections:\n Main = 'main_section'\n\n\nclass HelpWidgetToolbarItems:\n SourceLabel = 'source_label'\n SourceCombo = 'source_combo'\n ObjectLabel = 'object_label'\n ObjectCombo = 'object_combo'\n ObjectEdit = 'object_edit'\n\n\n# --- Widgets\n# ----------------------------------------------------------------------------\nclass ObjectComboBox(EditableComboBox):\n \"\"\"\n QComboBox handling object names\n \"\"\"\n # Signals\n valid = Signal(bool, bool)\n\n def __init__(self, parent, id_=None):\n EditableComboBox.__init__(self, parent)\n self.help = parent\n self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)\n self.tips = {True: '', False: ''}\n\n if id_ is not None:\n self.ID = id_\n\n def is_valid(self, qstr=None):\n \"\"\"Return True if string is valid\"\"\"\n if not self.help.source_is_console():\n return True\n if qstr is None:\n qstr = self.currentText()\n if not re.search(r'^[a-zA-Z0-9_\\.]*$', str(qstr), 0):\n return False\n objtxt = to_text_string(qstr)\n shell_is_defined = False\n if self.help.get_conf('automatic_import'):\n shell = self.help.internal_shell\n if shell is not None:\n shell_is_defined = shell.is_defined(objtxt, force_import=True)\n if not shell_is_defined:\n shell = self.help.get_shell()\n if shell is not None:\n try:\n shell_is_defined 
= shell.is_defined(objtxt)\n except socket.error:\n shell = self.help.get_shell()\n try:\n shell_is_defined = shell.is_defined(objtxt)\n except socket.error:\n # Well... too bad!\n pass\n return shell_is_defined\n\n def validate_current_text(self):\n self.validate(self.currentText())\n\n def validate(self, qstr, editing=True):\n \"\"\"Reimplemented to avoid formatting actions\"\"\"\n valid = self.is_valid(qstr)\n if self.hasFocus() and valid is not None:\n if editing and not valid:\n # Combo box text is being modified: invalidate the entry\n self.show_tip(self.tips[valid])\n self.valid.emit(False, False)\n else:\n # A new item has just been selected\n if valid:\n self.selected()\n # See spyder-ide/spyder#9542.\n self.lineEdit().cursorWordForward(False)\n else:\n self.valid.emit(False, False)\n\n\nclass RichText(QWidget, SpyderWidgetMixin):\n \"\"\"\n WebView widget with find dialog\n \"\"\"\n sig_link_clicked = Signal(QUrl)\n\n def __init__(self, parent):\n super().__init__(parent, class_parent=parent)\n\n self.webview = FrameWebView(self)\n self.webview.setup()\n\n if WEBENGINE:\n self.webview.web_widget.page().setBackgroundColor(\n QColor(MAIN_BG_COLOR))\n else:\n self.webview.web_widget.setStyleSheet(\n \"background:{}\".format(MAIN_BG_COLOR))\n self.webview.page().setLinkDelegationPolicy(\n QWebEnginePage.DelegateAllLinks)\n\n self.find_widget = FindReplace(self)\n self.find_widget.set_editor(self.webview.web_widget)\n self.find_widget.hide()\n\n # Layout\n layout = QVBoxLayout()\n layout.setContentsMargins(0, 0, 0, 0)\n layout.addWidget(self.webview)\n layout.addWidget(self.find_widget)\n self.setLayout(layout)\n\n # Signals\n self.webview.linkClicked.connect(self.sig_link_clicked)\n\n def set_font(self, font, fixed_font=None):\n \"\"\"Set font\"\"\"\n self.webview.set_font(font, fixed_font=fixed_font)\n\n def set_html(self, html_text, base_url):\n \"\"\"Set html text\"\"\"\n self.webview.setHtml(html_text, base_url)\n\n def load_url(self, url):\n if isinstance(url, QUrl):\n qurl = url\n else:\n qurl = QUrl(url)\n self.webview.load(qurl)\n\n def clear(self):\n self.set_html('', self.webview.url())\n\n\nclass PlainText(QWidget):\n \"\"\"\n Read-only editor widget with find dialog\n \"\"\"\n # Signals\n focus_changed = Signal()\n\n sig_custom_context_menu_requested = Signal(QPoint)\n\n def __init__(self, parent):\n QWidget.__init__(self, parent)\n self.editor = None\n\n # Read-only simple code editor\n self.editor = SimpleCodeEditor(self)\n self.editor.setup_editor(\n language='py',\n highlight_current_line=False,\n linenumbers=False,\n )\n self.editor.sig_focus_changed.connect(self.focus_changed)\n self.editor.setReadOnly(True)\n self.editor.setContextMenuPolicy(Qt.CustomContextMenu)\n\n # Find/replace widget\n self.find_widget = FindReplace(self)\n self.find_widget.set_editor(self.editor)\n self.find_widget.hide()\n\n layout = QVBoxLayout()\n layout.setContentsMargins(0, 0, 0, 0)\n layout.addWidget(self.editor)\n layout.addWidget(self.find_widget)\n self.setLayout(layout)\n\n self.editor.customContextMenuRequested.connect(\n self.sig_custom_context_menu_requested)\n\n def set_font(self, font, color_scheme=None):\n \"\"\"Set font\"\"\"\n self.editor.set_color_scheme(color_scheme)\n self.editor.set_font(font)\n\n def set_color_scheme(self, color_scheme):\n \"\"\"Set color scheme\"\"\"\n self.editor.set_color_scheme(color_scheme)\n\n def set_text(self, text, is_code):\n if is_code:\n self.editor.set_language('py')\n else:\n self.editor.set_language(None)\n\n self.editor.set_text(text)\n 
self.editor.set_cursor_position('sof')\n\n def clear(self):\n self.editor.clear()\n\n def set_wrap_mode(self, value):\n self.editor.toggle_wrap_mode(value)\n\n def copy(self):\n self.editor.copy()\n\n def select_all(self):\n self.editor.selectAll()\n\n\nclass HelpWidget(PluginMainWidget):\n\n ENABLE_SPINNER = True\n\n # Signals\n sig_item_found = Signal()\n \"\"\"This signal is emitted when an item is found.\"\"\"\n\n sig_render_started = Signal()\n \"\"\"This signal is emitted to inform a help text rendering has started.\"\"\"\n\n sig_render_finished = Signal()\n \"\"\"This signal is emitted to inform a help text rendering has finished.\"\"\"\n\n def __init__(self, name=None, plugin=None, parent=None):\n super().__init__(name, plugin, parent)\n\n # Attributes\n self._starting_up = True\n self._current_color_scheme = None\n self._last_texts = [None, None]\n self._last_editor_doc = None\n self._last_console_cb = None\n self._last_editor_cb = None\n self.css_path = self.get_conf('css_path', CSS_PATH, 'appearance')\n self.no_docs = _(\"No documentation available\")\n self.docstring = True # TODO: What is this used for?\n\n # Widgets\n self._sphinx_thread = SphinxThread(\n html_text_no_doc=warning(self.no_docs, css_path=self.css_path),\n css_path=self.css_path,\n )\n self.shell = None\n self.internal_console = None\n self.internal_shell = None\n self.plain_text = PlainText(self)\n self.rich_text = RichText(self)\n\n self.source_label = QLabel(_(\"Source\"))\n self.source_label.ID = HelpWidgetToolbarItems.SourceLabel\n\n self.source_combo = QComboBox(self)\n self.source_combo.ID = HelpWidgetToolbarItems.SourceCombo\n\n self.object_label = QLabel(_(\"Object\"))\n self.object_label.ID = HelpWidgetToolbarItems.ObjectLabel\n\n self.object_combo = ObjectComboBox(\n self, HelpWidgetToolbarItems.ObjectCombo)\n\n self.object_edit = QLineEdit(self)\n self.object_edit.ID = HelpWidgetToolbarItems.ObjectEdit\n\n # Setup\n self.object_edit.setReadOnly(True)\n self.object_combo.setMaxCount(self.get_conf('max_history_entries'))\n self.object_combo.setItemText(0, '')\n self.plain_text.set_wrap_mode(self.get_conf('wrap'))\n self.source_combo.addItems([_(\"Console\"), _(\"Editor\")])\n if (not programs.is_module_installed('rope') and\n not programs.is_module_installed('jedi', '>=0.11.0')):\n self.source_combo.hide()\n self.source_label.hide()\n\n # Layout\n self.stack_layout = layout = QStackedLayout()\n layout.addWidget(self.rich_text)\n layout.addWidget(self.plain_text)\n self.setLayout(layout)\n\n # Signals\n self._sphinx_thread.html_ready.connect(\n self._on_sphinx_thread_html_ready)\n self._sphinx_thread.error_msg.connect(\n self._on_sphinx_thread_error_msg)\n self.object_combo.valid.connect(self.force_refresh)\n self.rich_text.sig_link_clicked.connect(self.handle_link_clicks)\n self.source_combo.currentIndexChanged.connect(\n lambda x: self.source_changed())\n self.sig_render_started.connect(self.start_spinner)\n self.sig_render_finished.connect(self.stop_spinner)\n\n # --- PluginMainWidget API\n # ------------------------------------------------------------------------\n def get_title(self):\n return _('Help')\n\n def setup(self):\n self.wrap_action = self.create_action(\n name=HelpWidgetActions.ToggleWrap,\n text=_(\"Wrap lines\"),\n toggled=True,\n initial=self.get_conf('wrap'),\n option='wrap'\n )\n self.copy_action = self.create_action(\n name=HelpWidgetActions.CopyAction,\n text=_(\"Copy\"),\n triggered=lambda value: self.plain_text.copy(),\n register_shortcut=False,\n )\n self.select_all_action = 
self.create_action(\n name=HelpWidgetActions.SelectAll,\n text=_(\"Select All\"),\n triggered=lambda value: self.plain_text.select_all(),\n register_shortcut=False,\n )\n self.auto_import_action = self.create_action(\n name=HelpWidgetActions.ToggleAutomaticImport,\n text=_(\"Automatic import\"),\n toggled=True,\n initial=self.get_conf('automatic_import'),\n option='automatic_import'\n )\n self.show_source_action = self.create_action(\n name=HelpWidgetActions.ToggleShowSource,\n text=_(\"Show Source\"),\n toggled=True,\n option='show_source'\n )\n self.rich_text_action = self.create_action(\n name=HelpWidgetActions.ToggleRichMode,\n text=_(\"Rich Text\"),\n toggled=True,\n initial=self.get_conf('rich_mode'),\n option='rich_mode'\n )\n self.plain_text_action = self.create_action(\n name=HelpWidgetActions.TogglePlainMode,\n text=_(\"Plain Text\"),\n toggled=True,\n initial=self.get_conf('plain_mode'),\n option='plain_mode'\n )\n self.locked_action = self.create_action(\n name=HelpWidgetActions.ToggleLocked,\n text=_(\"Lock/Unlock\"),\n toggled=True,\n icon=self.create_icon('lock_open'),\n initial=self.get_conf('locked'),\n option='locked'\n )\n self.home_action = self.create_action(\n name=HelpWidgetActions.Home,\n text=_(\"Home\"),\n triggered=self.show_intro_message,\n icon=self.create_icon('home'),\n )\n\n # Add the help actions to an exclusive QActionGroup\n help_actions = QActionGroup(self)\n help_actions.setExclusive(True)\n help_actions.addAction(self.plain_text_action)\n help_actions.addAction(self.rich_text_action)\n\n # Menu\n menu = self.get_options_menu()\n for item in [self.rich_text_action, self.plain_text_action,\n self.show_source_action]:\n self.add_item_to_menu(\n item,\n menu=menu,\n section=HelpWidgetOptionsMenuSections.Display,\n )\n\n self.add_item_to_menu(\n self.auto_import_action,\n menu=menu,\n section=HelpWidgetOptionsMenuSections.Other,\n )\n\n # Plain text menu\n self._plain_text_context_menu = self.create_menu(\n \"plain_text_context_menu\")\n self.add_item_to_menu(\n self.copy_action,\n self._plain_text_context_menu,\n section=\"copy_section\",\n )\n self.add_item_to_menu(\n self.select_all_action,\n self._plain_text_context_menu,\n section=\"select_section\",\n )\n self.add_item_to_menu(\n self.wrap_action,\n self._plain_text_context_menu,\n section=\"wrap_section\",\n )\n\n # Toolbar\n toolbar = self.get_main_toolbar()\n for item in [self.source_label, self.source_combo, self.object_label,\n self.object_combo, self.object_edit, self.home_action,\n self.locked_action]:\n self.add_item_to_toolbar(\n item,\n toolbar=toolbar,\n section=HelpWidgetMainToolbarSections.Main,\n )\n\n self.source_changed()\n self.switch_to_rich_text()\n self.show_intro_message()\n\n # Signals\n self.plain_text.sig_custom_context_menu_requested.connect(\n self._show_plain_text_context_menu)\n\n def _should_display_welcome_page(self):\n \"\"\"Determine if the help welcome page should be displayed.\"\"\"\n return (self._last_editor_doc is None or\n self._last_console_cb is None or\n self._last_editor_cb is None)\n\n @on_conf_change(option='wrap')\n def on_wrap_option_update(self, value):\n self.plain_text.set_wrap_mode(value)\n\n @on_conf_change(option='locked')\n def on_lock_update(self, value):\n if value:\n icon = self.create_icon('lock')\n tip = _(\"Unlock\")\n else:\n icon = self.create_icon('lock_open')\n tip = _(\"Lock\")\n\n action = self.get_action(HelpWidgetActions.ToggleLocked)\n action.setIcon(icon)\n action.setToolTip(tip)\n\n @on_conf_change(option='automatic_import')\n def 
on_automatic_import_update(self, value):\n self.object_combo.validate_current_text()\n if self._should_display_welcome_page():\n self.show_intro_message()\n else:\n self.force_refresh()\n\n @on_conf_change(option='rich_mode')\n def on_rich_mode_update(self, value):\n if value:\n # Plain Text OFF / Rich text ON\n self.docstring = not value\n self.stack_layout.setCurrentWidget(self.rich_text)\n self.get_action(HelpWidgetActions.ToggleShowSource).setChecked(\n False)\n else:\n # Plain Text ON / Rich text OFF\n self.docstring = value\n self.stack_layout.setCurrentWidget(self.plain_text)\n\n if self._should_display_welcome_page():\n self.show_intro_message()\n else:\n self.force_refresh()\n\n @on_conf_change(option='show_source')\n def on_show_source_update(self, value):\n if value:\n self.switch_to_plain_text()\n self.get_action(HelpWidgetActions.ToggleRichMode).setChecked(\n False)\n\n self.docstring = not value\n if self._should_display_welcome_page():\n self.show_intro_message()\n else:\n self.force_refresh()\n\n def update_actions(self):\n for __, action in self.get_actions().items():\n # IMPORTANT: Since we are defining the main actions in here\n # and the context is WidgetWithChildrenShortcut we need to\n # assign the same actions to the children widgets in order\n # for shortcuts to work\n for widget in [self.plain_text,\n self.rich_text,\n self.source_combo,\n self.object_combo,\n self.object_edit]:\n if action not in widget.actions():\n widget.addAction(action)\n\n def get_focus_widget(self):\n self.object_combo.lineEdit().selectAll()\n return self.object_combo\n\n # --- Private API\n # ------------------------------------------------------------------------\n @Slot(QPoint)\n def _show_plain_text_context_menu(self, point):\n point = self.plain_text.mapToGlobal(point)\n self._plain_text_context_menu.popup(point)\n\n def _on_sphinx_thread_html_ready(self, html_text):\n \"\"\"\n Set our sphinx documentation based on thread result.\n\n Parameters\n ----------\n html_text: str\n Html results text.\n \"\"\"\n self._sphinx_thread.wait()\n self.set_rich_text_html(html_text, QUrl.fromLocalFile(self.css_path))\n self.sig_render_finished.emit()\n self.stop_spinner()\n\n def _on_sphinx_thread_error_msg(self, error_msg):\n \"\"\"\n Display error message on Sphinx rich text failure.\n\n Parameters\n ----------\n error_msg: str\n Error message text.\n \"\"\"\n self._sphinx_thread.wait()\n self.plain_text_action.setChecked(True)\n sphinx_ver = programs.get_module_version('sphinx')\n QMessageBox.critical(\n self,\n _('Help'),\n _(\"The following error occurred when calling \"\n \"<b>Sphinx %s</b>. 
<br>Incompatible Sphinx \"\n \"version or doc string decoding failed.\"\n \"<br><br>Error message:<br>%s\"\n ) % (sphinx_ver, error_msg),\n )\n self.sig_render_finished.emit()\n\n # --- Public API\n # ------------------------------------------------------------------------\n def source_is_console(self):\n \"\"\"Return True if source is Console.\"\"\"\n return self.source_combo.currentIndex() == 0\n\n def switch_to_editor_source(self):\n \"\"\"Switch to editor view of the help viewer.\"\"\"\n self.source_combo.setCurrentIndex(1)\n\n def switch_to_console_source(self):\n \"\"\"Switch to console view of the help viewer.\"\"\"\n self.source_combo.setCurrentIndex(0)\n\n def source_changed(self):\n \"\"\"Handle a source (plain/rich) change.\"\"\"\n is_console = self.source_is_console()\n if is_console:\n self.object_combo.show()\n self.object_edit.hide()\n else:\n # Editor\n self.object_combo.hide()\n self.object_edit.show()\n\n self.get_action(HelpWidgetActions.ToggleShowSource).setEnabled(\n is_console)\n self.get_action(HelpWidgetActions.ToggleAutomaticImport).setEnabled(\n is_console)\n self.restore_text()\n\n def save_text(self, callback):\n \"\"\"\n Save help text.\n\n Parameters\n ----------\n callback: callable\n Method to call on save.\n \"\"\"\n if self.source_is_console():\n self._last_console_cb = callback\n else:\n self._last_editor_cb = callback\n\n def restore_text(self):\n \"\"\"Restore last text using callback.\"\"\"\n if self.source_is_console():\n cb = self._last_console_cb\n else:\n cb = self._last_editor_cb\n\n if cb is None:\n if self.get_conf('plain_mode'):\n self.switch_to_plain_text()\n else:\n self.switch_to_rich_text()\n else:\n func = cb[0]\n args = cb[1:]\n func(*args)\n if get_meth_class_inst(func) is self.rich_text:\n self.switch_to_rich_text()\n else:\n self.switch_to_plain_text()\n\n @property\n def find_widget(self):\n \"\"\"Show find widget.\"\"\"\n if self.get_conf('plain_mode'):\n return self.plain_text.find_widget\n else:\n return self.rich_text.find_widget\n\n def switch_to_plain_text(self):\n \"\"\"Switch to plain text mode.\"\"\"\n self.get_action(HelpWidgetActions.TogglePlainMode).setChecked(True)\n\n def switch_to_rich_text(self):\n \"\"\"Switch to rich text mode.\"\"\"\n self.get_action(HelpWidgetActions.ToggleRichMode).setChecked(True)\n\n def set_plain_text(self, text, is_code):\n \"\"\"\n Set plain text docs.\n\n Parameters\n ----------\n text: str\n Text content.\n is_code: bool\n True if it is code text.\n\n Notes\n -----\n Text is coming from utils.dochelpers.getdoc\n \"\"\"\n if type(text) is dict:\n name = text['name']\n if name:\n rst_title = ''.join(['='*len(name), '\\n', name, '\\n',\n '='*len(name), '\\n\\n'])\n else:\n rst_title = ''\n try:\n if text['argspec']:\n definition = ''.join(\n ['Definition: ', name, text['argspec'], '\\n\\n'])\n else:\n definition = ''\n\n if text['note']:\n note = ''.join(['Type: ', text['note'], '\\n\\n----\\n\\n'])\n else:\n note = ''\n except TypeError:\n definition = self.no_docs\n note = ''\n\n full_text = ''.join([rst_title, definition, note,\n text['docstring']])\n else:\n full_text = text\n\n self.plain_text.set_text(full_text, is_code)\n self.save_text([self.plain_text.set_text, full_text, is_code])\n\n def set_rich_text_html(self, html_text, base_url):\n \"\"\"\n Set rich text.\n\n Parameters\n ----------\n html_text: str\n Html string.\n base_url: str\n Location of stylesheets and images to load in the page.\n \"\"\"\n self.rich_text.set_html(html_text, base_url)\n 
self.save_text([self.rich_text.set_html, html_text, base_url])\n\n def show_loading_message(self):\n \"\"\"Create html page to show while the documentation is generated.\"\"\"\n self.sig_render_started.emit()\n loading_message = _(\"Retrieving documentation\")\n loading_img = get_image_path('loading_sprites')\n if os.name == 'nt':\n loading_img = loading_img.replace('\\\\', '/')\n\n self.set_rich_text_html(\n loading(loading_message, loading_img, css_path=self.css_path),\n QUrl.fromLocalFile(self.css_path),\n )\n\n def show_intro_message(self):\n \"\"\"Show message on Help with the right shortcuts.\"\"\"\n intro_message_eq = _(\n \"Here you can get help of any object by pressing \"\n \"%s in front of it, either on the Editor or the \"\n \"Console.%s\")\n intro_message_dif = _(\n \"Here you can get help of any object by pressing \"\n \"%s in front of it on the Editor, or %s in front \"\n \"of it on the Console.%s\")\n intro_message_common = _(\n \"Help can also be shown automatically after writing \"\n \"a left parenthesis next to an object. You can \"\n \"activate this behavior in %s.\")\n prefs = _(\"Preferences > Help\")\n\n shortcut_editor = self.get_conf('editor/inspect current object',\n section='shortcuts')\n shortcut_console = self.get_conf('console/inspect current object',\n section='shortcuts')\n\n if sys.platform == 'darwin':\n shortcut_editor = shortcut_editor.replace('Ctrl', 'Cmd')\n shortcut_console = shortcut_console.replace('Ctrl', 'Cmd')\n\n if self.get_conf('rich_mode'):\n title = _(\"Usage\")\n tutorial_message = _(\"New to Spyder? Read our\")\n tutorial = _(\"tutorial\")\n if shortcut_editor == shortcut_console:\n intro_message = (intro_message_eq + intro_message_common) % (\n \"<b>\"+shortcut_editor+\"</b>\", \"<br><br>\",\n \"<i>\"+prefs+\"</i>\")\n else:\n intro_message = (intro_message_dif + intro_message_common) % (\n \"<b>\"+shortcut_editor+\"</b>\",\n \"<b>\"+shortcut_console+\"</b>\",\n \"<br><br>\", \"<i>\"+prefs+\"</i>\")\n\n self.set_rich_text_html(usage(title, intro_message,\n tutorial_message, tutorial,\n css_path=self.css_path),\n QUrl.fromLocalFile(self.css_path))\n else:\n install_sphinx = \"\\n\\n%s\" % _(\"Please consider installing Sphinx \"\n \"to get documentation rendered in \"\n \"rich text.\")\n if shortcut_editor == shortcut_console:\n intro_message = (intro_message_eq + intro_message_common) % (\n shortcut_editor, \"\\n\\n\", prefs)\n else:\n intro_message = (intro_message_dif + intro_message_common) % (\n shortcut_editor, shortcut_console, \"\\n\\n\", prefs)\n\n intro_message += install_sphinx\n self.set_plain_text(intro_message, is_code=False)\n\n def show_rich_text(self, text, collapse=False, img_path=''):\n \"\"\"\n Show text in rich mode.\n\n Parameters\n ----------\n text: str\n Plain text to display.\n collapse: bool, optional\n Show collapsable sections as collapsed/expanded. Default is False.\n img_path: str, optional\n Path to folder with additional images needed to correctly\n display the rich text help. 
Default is ''.\n \"\"\"\n self.switch_to_rich_text()\n context = generate_context(collapse=collapse, img_path=img_path,\n css_path=self.css_path)\n self.render_sphinx_doc(text, context)\n\n def show_plain_text(self, text):\n \"\"\"\n Show text in plain mode.\n\n Parameters\n ----------\n text: str\n Plain text to display.\n \"\"\"\n self.switch_to_plain_text()\n self.set_plain_text(text, is_code=False)\n\n @Slot()\n def show_tutorial(self):\n \"\"\"Show the Spyder tutorial.\"\"\"\n tutorial_path = get_module_source_path('spyder.plugins.help.utils')\n tutorial = os.path.join(tutorial_path, 'tutorial.rst')\n\n with open(tutorial, 'r') as fh:\n text = fh.read()\n\n self.show_rich_text(text, collapse=True)\n\n def handle_link_clicks(self, url):\n \"\"\"\n Handle how url links should be opened.\n\n Parameters\n ----------\n url: QUrl\n QUrl object containing the link to open.\n \"\"\"\n url = to_text_string(url.toString())\n if url == \"spy://tutorial\":\n self.show_tutorial()\n elif url.startswith('http'):\n start_file(url)\n else:\n self.rich_text.load_url(url)\n\n @Slot()\n @Slot(bool)\n @Slot(bool, bool)\n def force_refresh(self, valid=True, editing=True):\n \"\"\"\n Force a refresh/rerender of the help viewer content.\n\n Parameters\n ----------\n valid: bool, optional\n Default is True.\n editing: bool, optional\n Default is True.\n \"\"\"\n if valid:\n if self.source_is_console():\n self.set_object_text(None, force_refresh=True)\n elif self._last_editor_doc is not None:\n self.set_editor_doc(self._last_editor_doc, force_refresh=True)\n\n def set_object_text(self, text, force_refresh=False, ignore_unknown=False):\n \"\"\"\n Set object's name in Help's combobox.\n\n Parameters\n ----------\n text: str\n Object name.\n force_refresh: bool, optional\n Force a refresh with the rendering.\n ignore_unknown: bool, optional\n Ignore not found object names.\n\n See Also\n --------\n :py:meth:spyder.widgets.mixins.GetHelpMixin.show_object_info\n \"\"\"\n if self.get_conf('locked') and not force_refresh:\n return\n\n self.switch_to_console_source()\n add_to_combo = True\n if text is None:\n text = to_text_string(self.object_combo.currentText())\n add_to_combo = False\n\n found = self.show_help(text, ignore_unknown=ignore_unknown)\n if ignore_unknown and not found:\n return\n\n if add_to_combo:\n self.object_combo.add_text(text)\n\n if found:\n self.sig_item_found.emit()\n\n index = self.source_combo.currentIndex()\n self._last_texts[index] = text\n\n def set_editor_doc(self, help_data, force_refresh=False):\n \"\"\"\n Set content for help data sent from the editor.\n\n Parameters\n ----------\n help_data: dict\n Dictionary with editor introspection information.\n force_refresh: bool, optional\n Force a refresh with the rendering.\n\n Examples\n --------\n >>> help_data = {\n 'obj_text': str,\n 'name': str,\n 'argspec': str,\n 'note': str,\n 'docstring': str,\n 'path': str,\n }\n \"\"\"\n if self.get_conf('locked') and not force_refresh:\n return\n\n self.switch_to_editor_source()\n self._last_editor_doc = help_data\n self.object_edit.setText(help_data['obj_text'])\n\n if self.get_conf('rich_mode'):\n self.render_sphinx_doc(help_data)\n else:\n self.set_plain_text(help_data, is_code=False)\n\n index = self.source_combo.currentIndex()\n self._last_texts[index] = help_data['docstring']\n\n def set_shell(self, shell):\n \"\"\"\n Bind to shell.\n\n Parameters\n ----------\n shell: object\n internal shell or ipython console shell\n \"\"\"\n self.shell = shell\n\n def get_shell(self):\n \"\"\"\n Return 
shell which is currently bound to Help.\n \"\"\"\n if self.shell is None:\n self.shell = self.internal_shell\n\n return self.shell\n\n def render_sphinx_doc(self, help_data, context=None, css_path=CSS_PATH):\n \"\"\"\n Transform help_data dictionary to HTML and show it.\n\n Parameters\n ----------\n help_data: str or dict\n Dictionary with editor introspection information.\n context: dict\n Sphinx context.\n css_path: str\n Path to CSS file for styling.\n \"\"\"\n if isinstance(help_data, dict):\n path = help_data.pop('path', '')\n dname = os.path.dirname(path)\n else:\n dname = ''\n\n # Math rendering option could have changed\n self._sphinx_thread.render(help_data, context, self.get_conf('math'),\n dname, css_path=self.css_path)\n self.show_loading_message()\n\n def show_help(self, obj_text, ignore_unknown=False):\n \"\"\"\n Show help for an object's name.\n\n Parameters\n ----------\n obj_text: str\n Object's name.\n ignore_unknown: bool, optional\n Ignore unknown object's name.\n \"\"\"\n # TODO: This method makes active use of the shells. It would be better\n # to use signals and pass information this way for better decoupling.\n shell = self.get_shell()\n if shell is None:\n return\n\n obj_text = to_text_string(obj_text)\n\n if not shell.is_defined(obj_text):\n if (self.get_conf('automatic_import')\n and self.internal_shell.is_defined(obj_text,\n force_import=True)):\n shell = self.internal_shell\n else:\n shell = None\n doc = None\n source_text = None\n\n if shell is not None:\n doc = shell.get_doc(obj_text)\n source_text = shell.get_source(obj_text)\n\n is_code = False\n\n if self.get_conf('rich_mode'):\n self.render_sphinx_doc(doc, css_path=self.css_path)\n return doc is not None\n elif self.docstring:\n hlp_text = doc\n if hlp_text is None:\n hlp_text = source_text\n if hlp_text is None:\n return False\n else:\n hlp_text = source_text\n if hlp_text is None:\n hlp_text = doc\n if hlp_text is None:\n hlp_text = _(\"No source code available.\")\n if ignore_unknown:\n return False\n else:\n is_code = True\n\n self.set_plain_text(hlp_text, is_code=is_code)\n return True\n\n def set_rich_text_font(self, font, fixed_font):\n \"\"\"\n Set rich text mode font.\n\n Parameters\n ----------\n fixed_font: QFont\n The current rich text font to use.\n \"\"\"\n\n self.rich_text.set_font(font, fixed_font=fixed_font)\n\n def set_plain_text_font(self, font, color_scheme=None):\n \"\"\"\n Set plain text mode font.\n\n Parameters\n ----------\n font: QFont\n The current plain text font to use.\n color_scheme: str\n The selected color scheme.\n \"\"\"\n if color_scheme is None:\n color_scheme = self._current_color_scheme\n\n self.plain_text.set_font(font, color_scheme=color_scheme)\n\n def set_plain_text_color_scheme(self, color_scheme):\n \"\"\"\n Set plain text mode color scheme.\n\n Parameters\n ----------\n color_scheme: str\n The selected color scheme.\n \"\"\"\n self._current_color_scheme = color_scheme\n self.plain_text.set_color_scheme(color_scheme)\n\n def set_history(self, history):\n \"\"\"\n Set list of strings on object combo box.\n\n Parameters\n ----------\n history: list\n List of strings of objects.\n \"\"\"\n self.object_combo.addItems(history)\n\n def get_history(self):\n \"\"\"\n Return list of strings on object combo box.\n \"\"\"\n history = []\n for index in range(self.object_combo.count()):\n history.append(to_text_string(self.object_combo.itemText(index)))\n\n return history\n\n def set_internal_console(self, console):\n \"\"\"\n Set the internal console shell.\n\n 
Parameters\n ----------\n console: :py:class:spyder.plugins.console.plugin.Console\n Console plugin.\n \"\"\"\n self.internal_console = console\n self.internal_shell = console.get_widget().shell\n",
"path": "spyder/plugins/help/widgets.py"
}
] | diff --git a/spyder/plugins/help/widgets.py b/spyder/plugins/help/widgets.py
index 17ed408c570..12bc411b5d8 100644
--- a/spyder/plugins/help/widgets.py
+++ b/spyder/plugins/help/widgets.py
@@ -195,8 +195,7 @@ def load_url(self, url):
qurl = url
else:
qurl = QUrl(url)
-
- self.load(qurl)
+ self.webview.load(qurl)
def clear(self):
self.set_html('', self.webview.url())
|
ansible-collections__community.general-2419 | svr4pkg on Solaris 11.4: TypeError: a bytes-like object is required, not 'str'
### Summary
When you try to install a package on Solaris 11.4 with the svr4pkg module, you get an error:
TypeError: a bytes-like object is required, not 'str'
Fix:
```
--- svr4pkg.py.orig 2021-04-29 08:28:55.110835528 -0400
+++ svr4pkg.py 2021-04-29 08:27:49.567089417 -0400
@@ -121,7 +121,7 @@
def create_admin_file():
(desc, filename) = tempfile.mkstemp(prefix='ansible_svr4pkg', text=True)
- fullauto = '''
+ fullauto = b'''
mail=
instance=unique
partial=nocheck
```
After the fix it still works on Solaris 11.4 SRU15, Solaris 11.4 SRU31, and Solaris 10 1/13.
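For context, a minimal sketch (not part of the module, plain Python 3 with no Ansible involved) of why the `b'''` prefix matters: `os.write()` only accepts bytes-like objects, so passing a plain `str` raises exactly this `TypeError`.
```python
import os
import tempfile

# Minimal sketch of the failure mode: os.write() takes a bytes-like object, not str.
desc, filename = tempfile.mkstemp(prefix='demo_admin', text=True)
try:
    os.write(desc, b"mail=\ninstance=unique\n")   # bytes literal: works
    # os.write(desc, "mail=\n")                   # str: TypeError on Python 3
finally:
    os.close(desc)
    os.unlink(filename)
```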
### Issue Type
Bug Report
### Component Name
community.general.svr4pkg
### Ansible Version
```console (paste below)
$ ansible --version
[DEPRECATION WARNING]: Ansible will require Python 3.8 or newer on the controller starting with Ansible 2.12. Current version: 3.6.8 (default, Aug 18 2020, 08:33:21)
[GCC 8.3.1 20191121 (Red Hat 8.3.1-5)]. This feature will be removed from ansible-core in version 2.12. Deprecation warnings can be disabled by setting
deprecation_warnings=False in ansible.cfg.
[WARNING]: You are running the development version of Ansible. You should only run Ansible from "devel" if you are modifying the Ansible engine, or trying out
features under development. This is a rapidly changing source of code and can become unstable at any point.
ansible [core 2.12.0.dev0] (devel 60adf8e1ee) last updated 2021/04/29 08:21:55 (GMT -400)
config file = None
configured module search path = ['/home/srml/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/srml/ansible/lib/ansible
ansible collection location = /home/srml/.ansible/collections:/usr/share/ansible/collections
executable location = /home/srml/ansible/bin/ansible
python version = 3.6.8 (default, Aug 18 2020, 08:33:21) [GCC 8.3.1 20191121 (Red Hat 8.3.1-5)]
jinja version = 2.11.3
libyaml = True
```
### Configuration
```console (paste below)
$ ansible-config dump --only-changed
[DEPRECATION WARNING]: Ansible will require Python 3.8 or newer on the controller starting with Ansible 2.12. Current version: 3.6.8 (default, Aug 18 2020, 08:33:21)
[GCC 8.3.1 20191121 (Red Hat 8.3.1-5)]. This feature will be removed from ansible-core in version 2.12. Deprecation warnings can be disabled by setting
deprecation_warnings=False in ansible.cfg.
[WARNING]: You are running the development version of Ansible. You should only run Ansible from "devel" if you are modifying the Ansible engine, or trying out
features under development. This is a rapidly changing source of code and can become unstable at any point.
```
### OS / Environment
RHEL 8.3
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
```yaml (paste below)
---
- hosts: all
become: yes
tasks:
- name: install svr4 package
community.general.svr4pkg:
name: CSWntop
state: present
src: /var/tmp/XYZsome.pkg
```
### Expected Results
Package should be installed
### Actual Results
```console (paste below)
$ ansible-playbook -i inventory -l sol11 svr4pkg.yml
[DEPRECATION WARNING]: Ansible will require Python 3.8 or newer on the controller starting with Ansible 2.12. Current version: 3.6.8 (default, Aug 18 2020, 08:33:21)
[GCC 8.3.1 20191121 (Red Hat 8.3.1-5)]. This feature will be removed from ansible-core in version 2.12. Deprecation warnings can be disabled by setting
deprecation_warnings=False in ansible.cfg.
[WARNING]: You are running the development version of Ansible. You should only run Ansible from "devel" if you are modifying the Ansible engine, or trying out
features under development. This is a rapidly changing source of code and can become unstable at any point.
PLAY [all] ***********************************************************************************************************************************************************
TASK [Gathering Facts] ***********************************************************************************************************************************************[WARNING]: Platform sunos on host sol11 is using the discovered Python interpreter at /usr/bin/python, but future installation of another Python interpreter could
change the meaning of that path. See https://docs.ansible.com/ansible/devel/reference_appendices/interpreter_discovery.html for more information.
ok: [sol11]
TASK [install svr4 package] ******************************************************************************************************************************************An exception occurred during task execution. To see the full traceback, use -vvv. The error was: TypeError: a bytes-like object is required, not 'str'
fatal: [sol11]: FAILED! => {"changed": false, "module_stderr": "Shared connection to 10.0.75.109 closed.\r\n", "module_stdout": "Traceback (most recent call last):\r\n File \"/export/home/srml/.ansible/tmp/ansible-tmp-1619699186.3019922-33970-236219862995078/AnsiballZ_svr4pkg.py\", line 100, in <module>\r\n _ansiballz_main()\r\n File \"/export/home/srml/.ansible/tmp/ansible-tmp-1619699186.3019922-33970-236219862995078/AnsiballZ_svr4pkg.py\", line 92, in _ansiballz_main\r\n invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS)\r\n File \"/export/home/srml/.ansible/tmp/ansible-tmp-1619699186.3019922-33970-236219862995078/AnsiballZ_svr4pkg.py\", line 41, in invoke_module\r\n run_name='__main__', alter_sys=True)\r\n File \"/usr/lib/python3.5/runpy.py\", line 205, in run_module\r\n return _run_module_code(code, init_globals, run_name, mod_spec)\r\n File \"/usr/lib/python3.5/runpy.py\", line 96, in _run_module_code\r\n mod_name, mod_spec, pkg_name, script_name)\r\n File \"/usr/lib/python3.5/runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"/tmp/ansible_community.general.svr4pkg_payload_ndukwobh/ansible_community.general.svr4pkg_payload.zip/ansible_collections/community/general/plugins/modules/svr4pkg.py\", line 262, in <module>\r\n File \"/tmp/ansible_community.general.svr4pkg_payload_ndukwobh/ansible_community.general.svr4pkg_payload.zip/ansible_collections/community/general/plugins/modules/svr4pkg.py\", line 216, in main\r\n File \"/tmp/ansible_community.general.svr4pkg_payload_ndukwobh/ansible_community.general.svr4pkg_payload.zip/ansible_collections/community/general/plugins/modules/svr4pkg.py\", line 154, in package_install\r\n File \"/tmp/ansible_community.general.svr4pkg_payload_ndukwobh/ansible_community.general.svr4pkg_payload.zip/ansible_collections/community/general/plugins/modules/svr4pkg.py\", line 142, in create_admin_file\r\nTypeError: a bytes-like object is required, not 'str'\r\n", "msg": "MODULE FAILURE\nSee stdout/stderr for the exact error", "rc": 1}
PLAY RECAP ***********************************************************************************************************************************************************sol11 : ok=1 changed=0 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0
The full traceback is:
Traceback (most recent call last):
File "/export/home/srml/.ansible/tmp/ansible-tmp-1619699820.2843351-34415-58061845298388/AnsiballZ_svr4pkg.py", line 100, in <module>
_ansiballz_main()
File "/export/home/srml/.ansible/tmp/ansible-tmp-1619699820.2843351-34415-58061845298388/AnsiballZ_svr4pkg.py", line 92, in _ansiballz_main
invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS)
File "/export/home/srml/.ansible/tmp/ansible-tmp-1619699820.2843351-34415-58061845298388/AnsiballZ_svr4pkg.py", line 41, in invoke_module
run_name='__main__', alter_sys=True)
File "/usr/lib/python3.5/runpy.py", line 205, in run_module
return _run_module_code(code, init_globals, run_name, mod_spec)
File "/usr/lib/python3.5/runpy.py", line 96, in _run_module_code
mod_name, mod_spec, pkg_name, script_name)
File "/usr/lib/python3.5/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/tmp/ansible_community.general.svr4pkg_payload_n2ffzlfd/ansible_community.general.svr4pkg_payload.zip/ansible_collections/community/general/plugins/modules/svr4pkg.py", line 262, in <module>
File "/tmp/ansible_community.general.svr4pkg_payload_n2ffzlfd/ansible_community.general.svr4pkg_payload.zip/ansible_collections/community/general/plugins/modules/svr4pkg.py", line 216, in main
File "/tmp/ansible_community.general.svr4pkg_payload_n2ffzlfd/ansible_community.general.svr4pkg_payload.zip/ansible_collections/community/general/plugins/modules/svr4pkg.py", line 154, in package_install
File "/tmp/ansible_community.general.svr4pkg_payload_n2ffzlfd/ansible_community.general.svr4pkg_payload.zip/ansible_collections/community/general/plugins/modules/svr4pkg.py", line 142, in create_admin_file
TypeError: a bytes-like object is required, not 'str'
fatal: [sol11]: FAILED! => {
"changed": false,
"module_stderr": "Shared connection to 10.0.75.109 closed.\r\n",
"module_stdout": "Traceback (most recent call last):\r\n File \"/export/home/srml/.ansible/tmp/ansible-tmp-1619699820.2843351-34415-58061845298388/AnsiballZ_svr4pkg.py\", line 100, in <module>\r\n _ansiballz_main()\r\n File \"/export/home/srml/.ansible/tmp/ansible-tmp-1619699820.2843351-34415-58061845298388/AnsiballZ_svr4pkg.py\", line 92, in _ansiballz_main\r\n invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS)\r\n File \"/export/home/srml/.ansible/tmp/ansible-tmp-1619699820.2843351-34415-58061845298388/AnsiballZ_svr4pkg.py\", line 41, in invoke_module\r\n run_name='__main__', alter_sys=True)\r\n File \"/usr/lib/python3.5/runpy.py\", line 205, in run_module\r\n return _run_module_code(code, init_globals, run_name, mod_spec)\r\n File \"/usr/lib/python3.5/runpy.py\", line 96, in _run_module_code\r\n mod_name, mod_spec, pkg_name, script_name)\r\n File \"/usr/lib/python3.5/runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"/tmp/ansible_community.general.svr4pkg_payload_n2ffzlfd/ansible_community.general.svr4pkg_payload.zip/ansible_collections/community/general/plugins/modules/svr4pkg.py\", line 262, in <module>\r\n File \"/tmp/ansible_community.general.svr4pkg_payload_n2ffzlfd/ansible_community.general.svr4pkg_payload.zip/ansible_collections/community/general/plugins/modules/svr4pkg.py\", line 216, in main\r\n File \"/tmp/ansible_community.general.svr4pkg_payload_n2ffzlfd/ansible_community.general.svr4pkg_payload.zip/ansible_collections/community/general/plugins/modules/svr4pkg.py\", line 154, in package_install\r\n File \"/tmp/ansible_community.general.svr4pkg_payload_n2ffzlfd/ansible_community.general.svr4pkg_payload.zip/ansible_collections/community/general/plugins/modules/svr4pkg.py\", line 142, in create_admin_file\r\nTypeError: a bytes-like object is required, not 'str'\r\n",
"msg": "MODULE FAILURE\nSee stdout/stderr for the exact error",
"rc": 1
}
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
| [
{
"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2012, Boyd Adamson <boyd () boydadamson.com>\n#\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = '''\n---\nmodule: svr4pkg\nshort_description: Manage Solaris SVR4 packages\ndescription:\n - Manages SVR4 packages on Solaris 10 and 11.\n - These were the native packages on Solaris <= 10 and are available\n as a legacy feature in Solaris 11.\n - Note that this is a very basic packaging system. It will not enforce\n dependencies on install or remove.\nauthor: \"Boyd Adamson (@brontitall)\"\noptions:\n name:\n description:\n - Package name, e.g. C(SUNWcsr)\n required: true\n type: str\n\n state:\n description:\n - Whether to install (C(present)), or remove (C(absent)) a package.\n - If the package is to be installed, then I(src) is required.\n - The SVR4 package system doesn't provide an upgrade operation. You need to uninstall the old, then install the new package.\n required: true\n choices: [\"present\", \"absent\"]\n type: str\n\n src:\n description:\n - Specifies the location to install the package from. Required when C(state=present).\n - \"Can be any path acceptable to the C(pkgadd) command's C(-d) option. e.g.: C(somefile.pkg), C(/dir/with/pkgs), C(http:/server/mypkgs.pkg).\"\n - If using a file or directory, they must already be accessible by the host. See the M(ansible.builtin.copy) module for a way to get them there.\n type: str\n proxy:\n description:\n - HTTP[s] proxy to be used if C(src) is a URL.\n type: str\n response_file:\n description:\n - Specifies the location of a response file to be used if package expects input on install. 
(added in Ansible 1.4)\n required: false\n type: str\n zone:\n description:\n - Whether to install the package only in the current zone, or install it into all zones.\n - The installation into all zones works only if you are working with the global zone.\n required: false\n default: \"all\"\n choices: [\"current\", \"all\"]\n type: str\n category:\n description:\n - Install/Remove category instead of a single package.\n required: false\n type: bool\n default: false\n'''\n\nEXAMPLES = '''\n- name: Install a package from an already copied file\n community.general.svr4pkg:\n name: CSWcommon\n src: /tmp/cswpkgs.pkg\n state: present\n\n- name: Install a package directly from an http site\n community.general.svr4pkg:\n name: CSWpkgutil\n src: 'http://get.opencsw.org/now'\n state: present\n zone: current\n\n- name: Install a package with a response file\n community.general.svr4pkg:\n name: CSWggrep\n src: /tmp/third-party.pkg\n response_file: /tmp/ggrep.response\n state: present\n\n- name: Ensure that a package is not installed\n community.general.svr4pkg:\n name: SUNWgnome-sound-recorder\n state: absent\n\n- name: Ensure that a category is not installed\n community.general.svr4pkg:\n name: FIREFOX\n state: absent\n category: true\n'''\n\n\nimport os\nimport tempfile\n\nfrom ansible.module_utils.basic import AnsibleModule\n\n\ndef package_installed(module, name, category):\n cmd = [module.get_bin_path('pkginfo', True), '-q']\n if category:\n cmd.append('-c')\n cmd.append(name)\n rc, out, err = module.run_command(' '.join(cmd))\n if rc == 0:\n return True\n else:\n return False\n\n\ndef create_admin_file():\n (desc, filename) = tempfile.mkstemp(prefix='ansible_svr4pkg', text=True)\n fullauto = '''\nmail=\ninstance=unique\npartial=nocheck\nrunlevel=quit\nidepend=nocheck\nrdepend=nocheck\nspace=quit\nsetuid=nocheck\nconflict=nocheck\naction=nocheck\nnetworktimeout=60\nnetworkretries=3\nauthentication=quit\nkeystore=/var/sadm/security\nproxy=\nbasedir=default\n'''\n os.write(desc, fullauto)\n os.close(desc)\n return filename\n\n\ndef run_command(module, cmd):\n progname = cmd[0]\n cmd[0] = module.get_bin_path(progname, True)\n return module.run_command(cmd)\n\n\ndef package_install(module, name, src, proxy, response_file, zone, category):\n adminfile = create_admin_file()\n cmd = ['pkgadd', '-n']\n if zone == 'current':\n cmd += ['-G']\n cmd += ['-a', adminfile, '-d', src]\n if proxy is not None:\n cmd += ['-x', proxy]\n if response_file is not None:\n cmd += ['-r', response_file]\n if category:\n cmd += ['-Y']\n cmd.append(name)\n (rc, out, err) = run_command(module, cmd)\n os.unlink(adminfile)\n return (rc, out, err)\n\n\ndef package_uninstall(module, name, src, category):\n adminfile = create_admin_file()\n if category:\n cmd = ['pkgrm', '-na', adminfile, '-Y', name]\n else:\n cmd = ['pkgrm', '-na', adminfile, name]\n (rc, out, err) = run_command(module, cmd)\n os.unlink(adminfile)\n return (rc, out, err)\n\n\ndef main():\n module = AnsibleModule(\n argument_spec=dict(\n name=dict(required=True),\n state=dict(required=True, choices=['present', 'absent']),\n src=dict(default=None),\n proxy=dict(default=None),\n response_file=dict(default=None),\n zone=dict(required=False, default='all', choices=['current', 'all']),\n category=dict(default=False, type='bool')\n ),\n supports_check_mode=True\n )\n state = module.params['state']\n name = module.params['name']\n src = module.params['src']\n proxy = module.params['proxy']\n response_file = module.params['response_file']\n zone = module.params['zone']\n 
category = module.params['category']\n rc = None\n out = ''\n err = ''\n result = {}\n result['name'] = name\n result['state'] = state\n\n if state == 'present':\n if src is None:\n module.fail_json(name=name,\n msg=\"src is required when state=present\")\n if not package_installed(module, name, category):\n if module.check_mode:\n module.exit_json(changed=True)\n (rc, out, err) = package_install(module, name, src, proxy, response_file, zone, category)\n # Stdout is normally empty but for some packages can be\n # very long and is not often useful\n if len(out) > 75:\n out = out[:75] + '...'\n\n elif state == 'absent':\n if package_installed(module, name, category):\n if module.check_mode:\n module.exit_json(changed=True)\n (rc, out, err) = package_uninstall(module, name, src, category)\n out = out[:75]\n\n # Returncodes as per pkgadd(1m)\n # 0 Successful completion\n # 1 Fatal error.\n # 2 Warning.\n # 3 Interruption.\n # 4 Administration.\n # 5 Administration. Interaction is required. Do not use pkgadd -n.\n # 10 Reboot after installation of all packages.\n # 20 Reboot after installation of this package.\n # 99 (observed) pkgadd: ERROR: could not process datastream from </tmp/pkgutil.pkg>\n if rc in (0, 2, 3, 10, 20):\n result['changed'] = True\n # no install nor uninstall, or failed\n else:\n result['changed'] = False\n\n # rc will be none when the package already was installed and no action took place\n # Only return failed=False when the returncode is known to be good as there may be more\n # undocumented failure return codes\n if rc not in (None, 0, 2, 10, 20):\n result['failed'] = True\n else:\n result['failed'] = False\n\n if out:\n result['stdout'] = out\n if err:\n result['stderr'] = err\n\n module.exit_json(**result)\n\n\nif __name__ == '__main__':\n main()\n",
"path": "plugins/modules/packaging/os/svr4pkg.py"
}
] | [
{
"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2012, Boyd Adamson <boyd () boydadamson.com>\n#\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = '''\n---\nmodule: svr4pkg\nshort_description: Manage Solaris SVR4 packages\ndescription:\n - Manages SVR4 packages on Solaris 10 and 11.\n - These were the native packages on Solaris <= 10 and are available\n as a legacy feature in Solaris 11.\n - Note that this is a very basic packaging system. It will not enforce\n dependencies on install or remove.\nauthor: \"Boyd Adamson (@brontitall)\"\noptions:\n name:\n description:\n - Package name, e.g. C(SUNWcsr)\n required: true\n type: str\n\n state:\n description:\n - Whether to install (C(present)), or remove (C(absent)) a package.\n - If the package is to be installed, then I(src) is required.\n - The SVR4 package system doesn't provide an upgrade operation. You need to uninstall the old, then install the new package.\n required: true\n choices: [\"present\", \"absent\"]\n type: str\n\n src:\n description:\n - Specifies the location to install the package from. Required when C(state=present).\n - \"Can be any path acceptable to the C(pkgadd) command's C(-d) option. e.g.: C(somefile.pkg), C(/dir/with/pkgs), C(http:/server/mypkgs.pkg).\"\n - If using a file or directory, they must already be accessible by the host. See the M(ansible.builtin.copy) module for a way to get them there.\n type: str\n proxy:\n description:\n - HTTP[s] proxy to be used if C(src) is a URL.\n type: str\n response_file:\n description:\n - Specifies the location of a response file to be used if package expects input on install. 
(added in Ansible 1.4)\n required: false\n type: str\n zone:\n description:\n - Whether to install the package only in the current zone, or install it into all zones.\n - The installation into all zones works only if you are working with the global zone.\n required: false\n default: \"all\"\n choices: [\"current\", \"all\"]\n type: str\n category:\n description:\n - Install/Remove category instead of a single package.\n required: false\n type: bool\n default: false\n'''\n\nEXAMPLES = '''\n- name: Install a package from an already copied file\n community.general.svr4pkg:\n name: CSWcommon\n src: /tmp/cswpkgs.pkg\n state: present\n\n- name: Install a package directly from an http site\n community.general.svr4pkg:\n name: CSWpkgutil\n src: 'http://get.opencsw.org/now'\n state: present\n zone: current\n\n- name: Install a package with a response file\n community.general.svr4pkg:\n name: CSWggrep\n src: /tmp/third-party.pkg\n response_file: /tmp/ggrep.response\n state: present\n\n- name: Ensure that a package is not installed\n community.general.svr4pkg:\n name: SUNWgnome-sound-recorder\n state: absent\n\n- name: Ensure that a category is not installed\n community.general.svr4pkg:\n name: FIREFOX\n state: absent\n category: true\n'''\n\n\nimport os\nimport tempfile\n\nfrom ansible.module_utils.basic import AnsibleModule\n\n\ndef package_installed(module, name, category):\n cmd = [module.get_bin_path('pkginfo', True), '-q']\n if category:\n cmd.append('-c')\n cmd.append(name)\n rc, out, err = module.run_command(' '.join(cmd))\n if rc == 0:\n return True\n else:\n return False\n\n\ndef create_admin_file():\n (desc, filename) = tempfile.mkstemp(prefix='ansible_svr4pkg', text=True)\n fullauto = b'''\nmail=\ninstance=unique\npartial=nocheck\nrunlevel=quit\nidepend=nocheck\nrdepend=nocheck\nspace=quit\nsetuid=nocheck\nconflict=nocheck\naction=nocheck\nnetworktimeout=60\nnetworkretries=3\nauthentication=quit\nkeystore=/var/sadm/security\nproxy=\nbasedir=default\n'''\n os.write(desc, fullauto)\n os.close(desc)\n return filename\n\n\ndef run_command(module, cmd):\n progname = cmd[0]\n cmd[0] = module.get_bin_path(progname, True)\n return module.run_command(cmd)\n\n\ndef package_install(module, name, src, proxy, response_file, zone, category):\n adminfile = create_admin_file()\n cmd = ['pkgadd', '-n']\n if zone == 'current':\n cmd += ['-G']\n cmd += ['-a', adminfile, '-d', src]\n if proxy is not None:\n cmd += ['-x', proxy]\n if response_file is not None:\n cmd += ['-r', response_file]\n if category:\n cmd += ['-Y']\n cmd.append(name)\n (rc, out, err) = run_command(module, cmd)\n os.unlink(adminfile)\n return (rc, out, err)\n\n\ndef package_uninstall(module, name, src, category):\n adminfile = create_admin_file()\n if category:\n cmd = ['pkgrm', '-na', adminfile, '-Y', name]\n else:\n cmd = ['pkgrm', '-na', adminfile, name]\n (rc, out, err) = run_command(module, cmd)\n os.unlink(adminfile)\n return (rc, out, err)\n\n\ndef main():\n module = AnsibleModule(\n argument_spec=dict(\n name=dict(required=True),\n state=dict(required=True, choices=['present', 'absent']),\n src=dict(default=None),\n proxy=dict(default=None),\n response_file=dict(default=None),\n zone=dict(required=False, default='all', choices=['current', 'all']),\n category=dict(default=False, type='bool')\n ),\n supports_check_mode=True\n )\n state = module.params['state']\n name = module.params['name']\n src = module.params['src']\n proxy = module.params['proxy']\n response_file = module.params['response_file']\n zone = module.params['zone']\n 
category = module.params['category']\n rc = None\n out = ''\n err = ''\n result = {}\n result['name'] = name\n result['state'] = state\n\n if state == 'present':\n if src is None:\n module.fail_json(name=name,\n msg=\"src is required when state=present\")\n if not package_installed(module, name, category):\n if module.check_mode:\n module.exit_json(changed=True)\n (rc, out, err) = package_install(module, name, src, proxy, response_file, zone, category)\n # Stdout is normally empty but for some packages can be\n # very long and is not often useful\n if len(out) > 75:\n out = out[:75] + '...'\n\n elif state == 'absent':\n if package_installed(module, name, category):\n if module.check_mode:\n module.exit_json(changed=True)\n (rc, out, err) = package_uninstall(module, name, src, category)\n out = out[:75]\n\n # Returncodes as per pkgadd(1m)\n # 0 Successful completion\n # 1 Fatal error.\n # 2 Warning.\n # 3 Interruption.\n # 4 Administration.\n # 5 Administration. Interaction is required. Do not use pkgadd -n.\n # 10 Reboot after installation of all packages.\n # 20 Reboot after installation of this package.\n # 99 (observed) pkgadd: ERROR: could not process datastream from </tmp/pkgutil.pkg>\n if rc in (0, 2, 3, 10, 20):\n result['changed'] = True\n # no install nor uninstall, or failed\n else:\n result['changed'] = False\n\n # rc will be none when the package already was installed and no action took place\n # Only return failed=False when the returncode is known to be good as there may be more\n # undocumented failure return codes\n if rc not in (None, 0, 2, 10, 20):\n result['failed'] = True\n else:\n result['failed'] = False\n\n if out:\n result['stdout'] = out\n if err:\n result['stderr'] = err\n\n module.exit_json(**result)\n\n\nif __name__ == '__main__':\n main()\n",
"path": "plugins/modules/packaging/os/svr4pkg.py"
}
] | diff --git a/changelogs/fragments/2373-svr4pkg-fix-typeerror.yml b/changelogs/fragments/2373-svr4pkg-fix-typeerror.yml
new file mode 100644
index 00000000000..d0b35808895
--- /dev/null
+++ b/changelogs/fragments/2373-svr4pkg-fix-typeerror.yml
@@ -0,0 +1,3 @@
+---
+bugfixes:
+ - svr4pkg - convert string to a bytes-like object to avoid ``TypeError`` with Python 3 (https://github.com/ansible-collections/community.general/issues/2373).
diff --git a/plugins/modules/packaging/os/svr4pkg.py b/plugins/modules/packaging/os/svr4pkg.py
index ea3cd7d4683..aa7a5c2e523 100644
--- a/plugins/modules/packaging/os/svr4pkg.py
+++ b/plugins/modules/packaging/os/svr4pkg.py
@@ -121,7 +121,7 @@ def package_installed(module, name, category):
def create_admin_file():
(desc, filename) = tempfile.mkstemp(prefix='ansible_svr4pkg', text=True)
- fullauto = '''
+ fullauto = b'''
mail=
instance=unique
partial=nocheck
|
internetarchive__openlibrary-4551 | Add February carousel to top of homepage
@seabelis has created a collection for February! Let's add it to the top of the home page as "Books for February". We want:
- [ ] A search query that summarizes the collection so that books appear on the homepage (here's the literal aggregate of all the carousels: https://openlibrary.org/search?mode=everything&q=subject%3A%22february%22+OR++subject%3A%22groundhog+day%22+OR++subject%3A%22valentines+day%22+OR++subject%3A%22heart%22+OR++subject%3A%22black+history%22+OR++key%3A%28%2Fworks%2FOL3912087W+OR+%2Fworks%2FOL19728320W+OR+%2Fworks%2FOL19666828W+OR+%2Fworks%2FOL3459949W+OR+%2Fworks%2FOL66728W+OR+%2Fworks%2FOL17768453W+OR+%2Fworks%2FOL16190571W+OR+%2Fworks%2FOL15160873W+OR+%2Fworks%2FOL8275668W+OR+%2Fworks%2FOL17211582W+OR+%2Fworks%2FOL17628545W+OR+%2Fworks%2FOL20163236W+OR+%2Fworks%2FOL20153225W++OR+%2Fworks%2FOL17371695W%29+OR++subject%3A%22canned+food%22+OR++subject%3A%22friendship%22+OR++subject%3A%22pie%22+OR++subject%3A%22libraries%22+OR++subject%3A%22baking%22+OR++title%3A%22bird+feeding%22+OR++title%3A%22cat+health%22+OR++subject%3A%22cherries%22+OR++subject%3A%22Childrens+Dental+Health%22+OR++title%3A%22Childrens+Dental+Health%22+OR++subject%3A%22Embroidery%22+OR++title%3A%22Grapefruit%22+OR++subject%3A%22hot+breakfast%22+OR++title%3A%22hot+breakfast%22+OR++subject%3A%22snack+food%22+OR++title%3A%22Youth+Leadership%22+OR++title%3A%22Teen+Dating+Violence%22&has_fulltext=true , but the results aren't super great. Maybe @seabelis can come up with one?)
- [ ] The carousel should link to the collection: https://openlibrary.org/collections/february
### Describe the problem that you'd like solved
Showcase the February collection :)
### Proposal & Constraints
- Note: We may need extra work to make sure the carousel's query results are cached (I don't believe they are cached by default)
### Additional context
- https://github.com/internetarchive/openlibrary/blob/ce81c3986dae8bce9df8e4d81b17578f30454d1b/openlibrary/templates/home/index.html#L21
### Stakeholders
@seabelis @Sabreen-Parveen
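As a purely illustrative sketch (not the actual implementation), the existing `cache.memcache_memoize` helper could keep the carousel query from hitting search on every homepage render; the function name, cache key, and query below are assumptions:
```python
from openlibrary.core import cache

# Hypothetical example; names and query are assumptions, not the real code.
FEBRUARY_QUERY = 'subject:"february" OR subject:"valentines day" OR subject:"black history"'

def _get_february_books(limit=20):
    # Placeholder standing in for the search call the homepage carousel would use.
    return {"q": FEBRUARY_QUERY, "limit": limit}

# Cache results for an hour so the homepage render stays cheap.
get_february_books = cache.memcache_memoize(
    _get_february_books, key_prefix="home.february_books", timeout=60 * 60)
```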
| [
{
"content": "import web\nimport json\nimport babel\nimport babel.core\nimport babel.dates\nfrom collections import defaultdict\nimport re\nimport random\nimport xml.etree.ElementTree as etree\nimport datetime\nimport logging\n\nimport six\nfrom six import PY3\nfrom six.moves import urllib\nfrom six.moves.collections_abc import MutableMapping\n\nfrom infogami import config\nfrom infogami.utils import view, delegate, stats\nfrom infogami.utils.view import render, get_template, public, query_param\nfrom infogami.utils.macro import macro\nfrom infogami.utils.context import context\nfrom infogami.infobase.client import Thing, Changeset, storify\n\nfrom openlibrary.core.helpers import commify, parse_datetime\nfrom openlibrary.core.middleware import GZipMiddleware\nfrom openlibrary.core import cache, ab\n\nclass MultiDict(MutableMapping):\n \"\"\"Ordered Dictionary that can store multiple values.\n\n >>> d = MultiDict()\n >>> d['x'] = 1\n >>> d['x'] = 2\n >>> d['y'] = 3\n >>> d['x']\n 2\n >>> d['y']\n 3\n >>> d['z']\n Traceback (most recent call last):\n ...\n KeyError: 'z'\n >>> list(d)\n ['x', 'x', 'y']\n >>> list(d.items())\n [('x', 1), ('x', 2), ('y', 3)]\n >>> list(d.multi_items())\n [('x', [1, 2]), ('y', [3])]\n \"\"\"\n def __init__(self, items=(), **kw):\n self._items = []\n\n for k, v in items:\n self[k] = v\n self.update(kw)\n\n def __getitem__(self, key):\n values = self.getall(key)\n if values:\n return values[-1]\n else:\n raise KeyError(key)\n\n def __setitem__(self, key, value):\n self._items.append((key, value))\n\n def __delitem__(self, key):\n self._items = [(k, v) for k, v in self._items if k != key]\n\n def __iter__(self):\n for key in self.keys():\n yield key\n\n def __len__(self):\n return len(list(self.keys()))\n\n def getall(self, key):\n return [v for k, v in self._items if k == key]\n\n def keys(self):\n return [k for k, v in self._items]\n\n def values(self):\n return [v for k, v in self._items]\n\n def items(self):\n return self._items[:]\n\n def multi_items(self):\n \"\"\"Returns items as tuple of key and a list of values.\"\"\"\n items = []\n d = {}\n\n for k, v in self._items:\n if k not in d:\n d[k] = []\n items.append((k, d[k]))\n d[k].append(v)\n return items\n\n@macro\n@public\ndef render_template(name, *a, **kw):\n if \".\" in name:\n name = name.rsplit(\".\", 1)[0]\n return render[name](*a, **kw)\n\n\ndef kebab_case(upper_camel_case):\n \"\"\"\n :param str upper_camel_case: Text in upper camel case (e.g. \"HelloWorld\")\n :return: text in kebab case (e.g. 
'hello-world')\n\n >>> kebab_case('HelloWorld')\n 'hello-world'\n >>> kebab_case(\"MergeUI\")\n 'merge-u-i'\n \"\"\"\n parts = re.findall(r'[A-Z][^A-Z]*', upper_camel_case)\n return '-'.join(parts).lower()\n\n\n@public\ndef render_component(name, attrs=None, json_encode=True):\n \"\"\"\n :param str name: Name of the component (excluding extension)\n :param dict attrs: attributes to add to the component element\n \"\"\"\n from openlibrary.plugins.upstream.code import static_url\n\n attrs = attrs or {}\n attrs_str = ''\n for (key, val) in attrs.items():\n if json_encode and isinstance(val, dict) or isinstance(val, list):\n val = json.dumps(val)\n attrs_str += ' %s=\"%s\"' % (key, val.replace('\"', \"'\"))\n\n html = ''\n included = web.ctx.setdefault(\"included-components\", [])\n\n if len(included) == 0:\n # Need to include Vue\n html += '<script src=\"%s\"></script>' % static_url('build/vue.js')\n\n if name not in included:\n url = static_url('build/components/production/ol-%s.min.js' % name)\n if query_param('debug'):\n url = static_url('build/components/development/ol-%s.js' % name)\n html += '<script src=\"%s\"></script>' % url\n included.append(name)\n\n html += '<ol-%(name)s %(attrs)s></ol-%(name)s>' % {\n 'name': kebab_case(name),\n 'attrs': attrs_str,\n }\n return html\n\n\n@public\ndef get_error(name, *args):\n \"\"\"Return error with the given name from errors.tmpl template.\"\"\"\n return get_message_from_template(\"errors\", name, args)\n\n@public\ndef get_message(name, *args):\n \"\"\"Return message with given name from messages.tmpl template\"\"\"\n return get_message_from_template(\"messages\", name, args)\n\ndef get_message_from_template(template_name, name, args):\n d = render_template(template_name).get(\"messages\", {})\n msg = d.get(name) or name.lower().replace(\"_\", \" \")\n\n if msg and args:\n return msg % args\n else:\n return msg\n\n@public\ndef list_recent_pages(path, limit=100, offset=0):\n \"\"\"Lists all pages with name path/* in the order of last_modified.\"\"\"\n q = {}\n\n q['key~' ] = path + '/*'\n # don't show /type/delete and /type/redirect\n q['a:type!='] = '/type/delete'\n q['b:type!='] = '/type/redirect'\n\n q['sort'] = 'key'\n q['limit'] = limit\n q['offset'] = offset\n q['sort'] = '-last_modified'\n # queries are very slow with != conditions\n # q['type'] != '/type/delete'\n return web.ctx.site.get_many(web.ctx.site.things(q))\n\n@public\ndef json_encode(d):\n return json.dumps(d)\n\ndef unflatten(d, seperator=\"--\"):\n \"\"\"Convert flattened data into nested form.\n\n >>> unflatten({\"a\": 1, \"b--x\": 2, \"b--y\": 3, \"c--0\": 4, \"c--1\": 5})\n {'a': 1, 'c': [4, 5], 'b': {'y': 3, 'x': 2}}\n >>> unflatten({\"a--0--x\": 1, \"a--0--y\": 2, \"a--1--x\": 3, \"a--1--y\": 4})\n {'a': [{'x': 1, 'y': 2}, {'x': 3, 'y': 4}]}\n\n \"\"\"\n def isint(k):\n try:\n int(k)\n return True\n except ValueError:\n return False\n\n def setvalue(data, k, v):\n if '--' in k:\n k, k2 = k.split(seperator, 1)\n setvalue(data.setdefault(k, {}), k2, v)\n else:\n data[k] = v\n\n def makelist(d):\n \"\"\"Convert d into a list if all the keys of d are integers.\"\"\"\n if isinstance(d, dict):\n if all(isint(k) for k in d):\n return [makelist(d[k]) for k in sorted(d, key=int)]\n else:\n return web.storage((k, makelist(v)) for k, v in d.items())\n else:\n return d\n\n d2 = {}\n for k, v in d.items():\n setvalue(d2, k, v)\n return makelist(d2)\n\n\ndef fuzzy_find(value, options, stopwords=None):\n stopwords = stopwords or []\n \"\"\"Try find the option nearest to the value.\n\n 
>>> fuzzy_find(\"O'Reilly\", [\"O'Reilly Inc\", \"Addison-Wesley\"])\n \"O'Reilly Inc\"\n \"\"\"\n if not options:\n return value\n\n rx = web.re_compile(r\"[-_\\.&, ]+\")\n\n # build word frequency\n d = defaultdict(list)\n for option in options:\n for t in rx.split(option):\n d[t].append(option)\n\n # find score for each option\n score = defaultdict(lambda: 0)\n for t in rx.split(value):\n if t.lower() in stopwords:\n continue\n for option in d[t]:\n score[option] += 1\n\n # take the option with maximum score\n return max(options, key=score.__getitem__)\n\n@public\ndef radio_input(checked=False, **params):\n params['type'] = 'radio'\n if checked:\n params['checked'] = \"checked\"\n return \"<input %s />\" % \" \".join(['%s=\"%s\"' % (k, web.websafe(v)) for k, v in params.items()])\n\n@public\ndef radio_list(name, args, value):\n html = []\n for arg in args:\n if isinstance(arg, tuple):\n arg, label = arg\n else:\n label = arg\n html.append(radio_input())\n\n@public\ndef get_coverstore_url():\n return config.get('coverstore_url', 'https://covers.openlibrary.org').rstrip('/')\n\n\n@public\ndef get_the_best_book_on_url():\n return config.get('tbbo_url')\n\n\ndef _get_changes_v1_raw(query, revision=None):\n \"\"\"Returns the raw versions response.\n\n Revision is taken as argument to make sure a new cache entry is used when a new revision of the page is created.\n \"\"\"\n if 'env' not in web.ctx:\n delegate.fakeload()\n\n versions = web.ctx.site.versions(query)\n\n for v in versions:\n v.created = v.created.isoformat()\n v.author = v.author and v.author.key\n\n # XXX-Anand: hack to avoid too big data to be stored in memcache.\n # v.changes is not used and it contrinutes to memcache bloat in a big way.\n v.changes = '[]'\n\n return versions\n\ndef get_changes_v1(query, revision=None):\n # uses the cached function _get_changes_v1_raw to get the raw data\n # and processes to before returning.\n def process(v):\n v = web.storage(v)\n v.created = parse_datetime(v.created)\n v.author = v.author and web.ctx.site.get(v.author, lazy=True)\n return v\n\n return [process(v) for v in _get_changes_v1_raw(query, revision)]\n\ndef _get_changes_v2_raw(query, revision=None):\n \"\"\"Returns the raw recentchanges response.\n\n Revision is taken as argument to make sure a new cache entry is used when a new revision of the page is created.\n \"\"\"\n if 'env' not in web.ctx:\n delegate.fakeload()\n\n changes = web.ctx.site.recentchanges(query)\n return [c.dict() for c in changes]\n\n# XXX-Anand: disabled temporarily to avoid too much memcache usage.\n#_get_changes_v2_raw = cache.memcache_memoize(_get_changes_v2_raw, key_prefix=\"upstream._get_changes_v2_raw\", timeout=10*60)\n\ndef get_changes_v2(query, revision=None):\n page = web.ctx.site.get(query['key'])\n\n def first(seq, default=None):\n try:\n return next(seq)\n except StopIteration:\n return default\n\n def process_change(change):\n change = Changeset.create(web.ctx.site, storify(change))\n change.thing = page\n change.key = page.key\n change.revision = first(c.revision for c in change.changes if c.key == page.key)\n change.created = change.timestamp\n\n change.get = change.__dict__.get\n change.get_comment = lambda: get_comment(change)\n change.machine_comment = change.data.get(\"machine_comment\")\n\n return change\n\n def get_comment(change):\n t = get_template(\"recentchanges/\" + change.kind + \"/comment\") or get_template(\"recentchanges/default/comment\")\n return t(change, page)\n\n query['key'] = page.key\n changes = _get_changes_v2_raw(query, 
revision=page.revision)\n return [process_change(c) for c in changes]\n\ndef get_changes(query, revision=None):\n return get_changes_v2(query, revision=revision)\n\n@public\ndef get_history(page):\n h = web.storage(revision=page.revision, lastest_revision=page.revision, created=page.created)\n if h.revision < 5:\n h.recent = get_changes({\"key\": page.key, \"limit\": 5}, revision=page.revision)\n h.initial = h.recent[-1:]\n h.recent = h.recent[:-1]\n else:\n h.initial = get_changes({\"key\": page.key, \"limit\": 1, \"offset\": h.revision-1}, revision=page.revision)\n h.recent = get_changes({\"key\": page.key, \"limit\": 4}, revision=page.revision)\n\n return h\n\n@public\ndef get_version(key, revision):\n try:\n return web.ctx.site.versions({\"key\": key, \"revision\": revision, \"limit\": 1})[0]\n except IndexError:\n return None\n\n@public\ndef get_recent_author(doc):\n versions = get_changes_v1({'key': doc.key, 'limit': 1, \"offset\": 0}, revision=doc.revision)\n if versions:\n return versions[0].author\n\n@public\ndef get_recent_accounts(limit=5, offset=0):\n versions = web.ctx.site.versions({'type': '/type/user', 'revision': 1, 'limit': limit, 'offset': offset})\n return web.ctx.site.get_many([v.key for v in versions])\n\ndef get_locale():\n try:\n return babel.Locale(web.ctx.get(\"lang\") or \"en\")\n except babel.core.UnknownLocaleError:\n return babel.Locale(\"en\")\n\n@public\ndef process_version(v):\n \"\"\"Looks at the version and adds machine_comment required for showing \"View MARC\" link.\"\"\"\n comments = [\n \"found a matching marc record\",\n \"add publisher and source\",\n ]\n if v.key.startswith('/books/') and not v.get('machine_comment'):\n thing = v.get('thing') or web.ctx.site.get(v.key, v.revision)\n if thing.source_records and v.revision == 1 or (v.comment and v.comment.lower() in comments):\n marc = thing.source_records[-1]\n if marc.startswith('marc:'):\n v.machine_comment = marc[len(\"marc:\"):]\n else:\n v.machine_comment = marc\n return v\n\n@public\ndef is_thing(t):\n return isinstance(t, Thing)\n\n@public\ndef putctx(key, value):\n \"\"\"Save a value in the context.\"\"\"\n context[key] = value\n return \"\"\n\nclass Metatag:\n def __init__(self, tag=\"meta\", **attrs):\n self.tag = tag\n self.attrs = attrs\n\n def __str__(self):\n attrs = ' '.join(\n '%s=\"%s\"' % (k, websafe(v) if PY3 else websafe(v).encode('utf8'))\n for k, v in self.attrs.items())\n return '<%s %s />' % (self.tag, attrs)\n\n def __repr__(self):\n return 'Metatag(%s)' % str(self)\n\n@public\ndef add_metatag(tag=\"meta\", **attrs):\n context.setdefault('metatags', [])\n context.metatags.append(Metatag(tag, **attrs))\n\n@public\ndef url_quote(text):\n if isinstance(text, six.text_type):\n text = text.encode('utf8')\n return urllib.parse.quote_plus(text)\n\n\n@public\ndef urlencode(dict_or_list_of_tuples):\n \"\"\"\n You probably want to use this, if you're looking to urlencode parameters. 
This will\n encode things to utf8 that would otherwise cause urlencode to error.\n :param dict or list dict_or_list_of_tuples:\n :rtype: basestring\n \"\"\"\n from six.moves.urllib.parse import urlencode as og_urlencode\n tuples = dict_or_list_of_tuples\n if isinstance(dict_or_list_of_tuples, dict):\n tuples = dict_or_list_of_tuples.items()\n params = [\n (k, v.encode('utf-8') if isinstance(v, six.text_type) else v)\n for (k, v) in tuples\n ]\n return og_urlencode(params)\n\n\n@public\ndef entity_decode(text):\n try:\n return six.moves.html_parser.unescape(text)\n except AttributeError:\n return six.moves.html_parser.HTMLParser().unescape(text)\n\n@public\ndef set_share_links(url='#', title='', view_context=None):\n \"\"\"\n Constructs list share links for social platforms and assigns to view context attribute\n\n Args (all required):\n url (str or unicode) - complete canonical url to page being shared\n title (str or unicode) - title of page being shared\n view_context (object that has/can-have share_links attribute)\n \"\"\"\n encoded_url = url_quote(url)\n text = url_quote(\"Check this out: \" + entity_decode(title))\n links = [\n {'text': 'Facebook', 'url': 'https://www.facebook.com/sharer/sharer.php?u=' + encoded_url},\n {'text': 'Twitter', 'url': 'https://twitter.com/intent/tweet?url=%s&via=openlibrary&text=%s' % (encoded_url, text)},\n {'text': 'Pinterest', 'url': 'https://pinterest.com/pin/create/link/?url=%s&description=%s' % (encoded_url, text)}\n ]\n view_context.share_links = links\n\ndef pad(seq, size, e=None):\n \"\"\"\n >>> pad([1, 2], 4, 0)\n [1, 2, 0, 0]\n \"\"\"\n seq = seq[:]\n while len(seq) < size:\n seq.append(e)\n return seq\n\ndef parse_toc_row(line):\n \"\"\"Parse one row of table of contents.\n\n >>> def f(text):\n ... d = parse_toc_row(text)\n ... return (d['level'], d['label'], d['title'], d['pagenum'])\n ...\n >>> f(\"* chapter 1 | Welcome to the real world! | 2\")\n (1, 'chapter 1', 'Welcome to the real world!', '2')\n >>> f(\"Welcome to the real world!\")\n (0, '', 'Welcome to the real world!', '')\n >>> f(\"** | Welcome to the real world! | 2\")\n (2, '', 'Welcome to the real world!', '2')\n >>> f(\"|Preface | 1\")\n (0, '', 'Preface', '1')\n >>> f(\"1.1 | Apple\")\n (0, '1.1', 'Apple', '')\n \"\"\"\n RE_LEVEL = web.re_compile(r\"(\\**)(.*)\")\n level, text = RE_LEVEL.match(line.strip()).groups()\n\n if \"|\" in text:\n tokens = text.split(\"|\", 2)\n label, title, page = pad(tokens, 3, '')\n else:\n title = text\n label = page = \"\"\n\n return web.storage(level=len(level), label=label.strip(), title=title.strip(), pagenum=page.strip())\n\ndef parse_toc(text):\n \"\"\"Parses each line of toc\"\"\"\n if text is None:\n return []\n return [parse_toc_row(line) for line in text.splitlines() if line.strip(\" |\")]\n\n_languages = None\n\n@public\ndef get_languages():\n global _languages\n if _languages is None:\n keys = web.ctx.site.things({\"type\": \"/type/language\", \"key~\": \"/languages/*\", \"limit\": 1000})\n _languages = sorted([web.storage(name=d.name, code=d.code, key=d.key) for d in web.ctx.site.get_many(keys)], key=lambda d: d.name.lower())\n return _languages\n\n@public\ndef get_edition_config():\n return _get_edition_config()\n\[email protected]\ndef _get_edition_config():\n \"\"\"Returns the edition config.\n\n The results are cached on the first invocation. 
Any changes to /config/edition page require restarting the app.\n\n This is is cached because fetching and creating the Thing object was taking about 20ms of time for each book request.\n \"\"\"\n thing = web.ctx.site.get('/config/edition')\n classifications = [web.storage(t.dict()) for t in thing.classifications if 'name' in t]\n identifiers = [web.storage(t.dict()) for t in thing.identifiers if 'name' in t]\n roles = thing.roles\n return web.storage(classifications=classifications, identifiers=identifiers, roles=roles)\n\nfrom openlibrary.core.olmarkdown import OLMarkdown\ndef get_markdown(text, safe_mode=False):\n md = OLMarkdown(source=text, safe_mode=safe_mode)\n view._register_mdx_extensions(md)\n md.postprocessors += view.wiki_processors\n return md\n\n\nclass HTML(six.text_type):\n def __init__(self, html):\n six.text_type.__init__(self, web.safeunicode(html))\n\n def __repr__(self):\n return \"<html: %s>\" % six.text_type.__repr__(self)\n\n_websafe = web.websafe\ndef websafe(text):\n if isinstance(text, HTML):\n return text\n elif isinstance(text, web.template.TemplateResult):\n return web.safestr(text)\n else:\n return _websafe(text)\n\n\nfrom openlibrary.plugins.upstream import adapter\nfrom openlibrary.utils.olcompress import OLCompressor\nfrom openlibrary.utils import olmemcache\nimport memcache\n\nclass UpstreamMemcacheClient:\n \"\"\"Wrapper to memcache Client to handle upstream specific conversion and OL specific compression.\n Compatible with memcache Client API.\n \"\"\"\n def __init__(self, servers):\n self._client = memcache.Client(servers)\n compressor = OLCompressor()\n self.compress = compressor.compress\n def decompress(*args, **kw):\n d = json.loads(compressor.decompress(*args, **kw))\n return json.dumps(adapter.unconvert_dict(d))\n self.decompress = decompress\n\n def get(self, key):\n key = adapter.convert_key(key)\n if key is None:\n return None\n\n try:\n value = self._client.get(web.safestr(key))\n except memcache.Client.MemcachedKeyError:\n return None\n\n return value and self.decompress(value)\n\n def get_multi(self, keys):\n keys = [adapter.convert_key(k) for k in keys]\n keys = [web.safestr(k) for k in keys]\n\n d = self._client.get_multi(keys)\n return dict((web.safeunicode(adapter.unconvert_key(k)), self.decompress(v)) for k, v in d.items())\n\nif config.get('upstream_memcache_servers'):\n olmemcache.Client = UpstreamMemcacheClient\n # set config.memcache_servers only after olmemcache.Client is updated\n config.memcache_servers = config.upstream_memcache_servers\n\ndef _get_recent_changes():\n site = web.ctx.get('site') or delegate.create_site()\n web.ctx.setdefault(\"ip\", \"127.0.0.1\")\n\n # The recentchanges can have multiple revisions for a document if it has been modified more than once.\n # Take only the most recent revision in that case.\n visited = set()\n def is_visited(key):\n if key in visited:\n return True\n else:\n visited.add(key)\n return False\n\n # ignore reverts\n re_revert = web.re_compile(r\"reverted to revision \\d+\")\n def is_revert(r):\n return re_revert.match(r.comment or \"\")\n\n # take the 100 recent changes, filter them and take the first 50\n q = {\"bot\": False, \"limit\": 100}\n result = site.versions(q)\n result = [r for r in result if not is_visited(r.key) and not is_revert(r)]\n result = result[:50]\n\n def process_thing(thing):\n t = web.storage()\n for k in [\"key\", \"title\", \"name\", \"displayname\"]:\n t[k] = thing[k]\n t['type'] = web.storage(key=thing.type.key)\n return t\n\n for r in result:\n r.author = 
r.author and process_thing(r.author)\n r.thing = process_thing(site.get(r.key, r.revision))\n\n return result\n\ndef _get_recent_changes2():\n \"\"\"New recent changes for around the library.\n\n This function returns the message to display for each change.\n The message is get by calling `recentchanges/$kind/message.html` template.\n\n If `$var ignore=True` is set by the message template, the change is ignored.\n \"\"\"\n if 'env' not in web.ctx:\n delegate.fakeload()\n\n q = {\"bot\": False, \"limit\": 100}\n changes = web.ctx.site.recentchanges(q)\n\n def is_ignored(c):\n return (\n # c.kind=='update' allow us to ignore update recent changes on people\n c.kind == 'update' or\n # ignore change if author has been deleted (e.g. spammer)\n (c.author and c.author.type.key == '/type/delete'))\n\n def render(c):\n t = get_template(\"recentchanges/\" + c.kind + \"/message\") or get_template(\"recentchanges/default/message\")\n return t(c)\n\n messages = [render(c) for c in changes if not is_ignored(c)]\n messages = [m for m in messages if str(m.get(\"ignore\", \"false\")).lower() != \"true\"]\n return messages\n\n_get_recent_changes = web.memoize(_get_recent_changes, expires=5*60, background=True)\n_get_recent_changes2 = web.memoize(_get_recent_changes2, expires=5*60, background=True)\n\n@public\ndef get_random_recent_changes(n):\n if \"recentchanges_v2\" in web.ctx.get(\"features\", []):\n changes = _get_recent_changes2()\n else:\n changes = _get_recent_changes()\n\n _changes = random.sample(changes, n) if len(changes) > n else changes\n for i, change in enumerate(_changes):\n _changes[i]['__body__'] = _changes[i]['__body__'].replace('<script>', '')\\\n .replace('</script>', '')\n return _changes\n\ndef _get_blog_feeds():\n url = \"http://blog.openlibrary.org/feed/\"\n try:\n stats.begin(\"get_blog_feeds\", url=url)\n tree = etree.parse(urllib.request.urlopen(url))\n except Exception:\n # Handle error gracefully.\n logging.getLogger(\"openlibrary\").error(\"Failed to fetch blog feeds\", exc_info=True)\n return []\n finally:\n stats.end()\n\n def parse_item(item):\n pubdate = datetime.datetime.strptime(item.find(\"pubDate\").text, '%a, %d %b %Y %H:%M:%S +0000').isoformat()\n return dict(\n title=item.find(\"title\").text,\n link=item.find(\"link\").text,\n pubdate=pubdate\n )\n return [parse_item(item) for item in tree.findall(\"//item\")]\n\n_get_blog_feeds = cache.memcache_memoize(_get_blog_feeds, key_prefix=\"upstream.get_blog_feeds\", timeout=5*60)\n\ndef get_donation_include(include):\n web_input = web.input()\n\n # The following allows archive.org staff to test banners without\n # needing to reload openlibrary services:\n dev_host = web_input.pop(\"dev_host\", \"\") # e.g. `www-user`\n if dev_host and re.match('^[a-zA-Z0-9-.]+$', dev_host):\n dev_host += \".\" # e.g. 
`www-user.`\n script_src = \"https://%sarchive.org/includes/donate.js\" % dev_host\n if 'ymd' in web_input:\n script_src += '?ymd=' + web_input.ymd\n\n html = \"\"\"\n <div id=\"donato\"></div>\n <script src=\"%s\" data-platform=\"ol\"></script>\n \"\"\" % script_src\n return html\n\n#get_donation_include = cache.memcache_memoize(get_donation_include, key_prefix=\"upstream.get_donation_include\", timeout=60)\n\n@public\ndef item_image(image_path, default=None):\n if image_path is None:\n return default\n if image_path.startswith('https:'):\n return image_path\n return \"https:\" + image_path\n\n@public\ndef get_blog_feeds():\n def process(post):\n post = web.storage(post)\n post.pubdate = parse_datetime(post.pubdate)\n return post\n return [process(post) for post in _get_blog_feeds()]\n\nclass Request:\n path = property(lambda self: web.ctx.path)\n home = property(lambda self: web.ctx.home)\n domain = property(lambda self: web.ctx.host)\n\n @property\n def canonical_url(self):\n \"\"\"Returns the https:// version of the URL.\n\n Used for adding <meta rel=\"canonical\" ..> tag in all web pages.\n Required to make OL retain the page rank after https migration.\n \"\"\"\n readable_path = web.ctx.get('readable_path', web.ctx.path) or ''\n query = web.ctx.query or ''\n host = web.ctx.host or ''\n url = (host + readable_path + query)\n return (\"https://\" + url) if url else ''\n\n\n@public\ndef render_once(key):\n rendered = web.ctx.setdefault('render_once', {})\n if key in rendered:\n return False\n else:\n rendered[key] = True\n return True\n\n\ndef setup():\n \"\"\"Do required initialization\"\"\"\n # monkey-patch get_markdown to use OL Flavored Markdown\n view.get_markdown = get_markdown\n\n # Provide alternate implementations for websafe and commify\n web.websafe = websafe\n web.template.Template.FILTERS['.html'] = websafe\n web.template.Template.FILTERS['.xml'] = websafe\n\n web.commify = commify\n\n web.template.Template.globals.update({\n 'HTML': HTML,\n 'request': Request(),\n 'logger': logging.getLogger(\"openlibrary.template\"),\n 'sum': sum,\n 'get_donation_include': get_donation_include,\n 'websafe': web.websafe,\n })\n\n from openlibrary.core import helpers as h\n web.template.Template.globals.update(h.helpers)\n\n if config.get('use_gzip') == True:\n config.middleware.append(GZipMiddleware)\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n",
"path": "openlibrary/plugins/upstream/utils.py"
}
] | [
{
"content": "import web\nimport json\nimport babel\nimport babel.core\nimport babel.dates\nfrom collections import defaultdict\nimport re\nimport random\nimport xml.etree.ElementTree as etree\nimport datetime\nimport logging\n\nimport six\nfrom six import PY3\nfrom six.moves import urllib\nfrom six.moves.collections_abc import MutableMapping\n\nfrom infogami import config\nfrom infogami.utils import view, delegate, stats\nfrom infogami.utils.view import render, get_template, public, query_param\nfrom infogami.utils.macro import macro\nfrom infogami.utils.context import context\nfrom infogami.infobase.client import Thing, Changeset, storify\n\nfrom openlibrary.core.helpers import commify, parse_datetime\nfrom openlibrary.core.middleware import GZipMiddleware\nfrom openlibrary.core import cache, ab\n\nclass MultiDict(MutableMapping):\n \"\"\"Ordered Dictionary that can store multiple values.\n\n >>> d = MultiDict()\n >>> d['x'] = 1\n >>> d['x'] = 2\n >>> d['y'] = 3\n >>> d['x']\n 2\n >>> d['y']\n 3\n >>> d['z']\n Traceback (most recent call last):\n ...\n KeyError: 'z'\n >>> list(d)\n ['x', 'x', 'y']\n >>> list(d.items())\n [('x', 1), ('x', 2), ('y', 3)]\n >>> list(d.multi_items())\n [('x', [1, 2]), ('y', [3])]\n \"\"\"\n def __init__(self, items=(), **kw):\n self._items = []\n\n for k, v in items:\n self[k] = v\n self.update(kw)\n\n def __getitem__(self, key):\n values = self.getall(key)\n if values:\n return values[-1]\n else:\n raise KeyError(key)\n\n def __setitem__(self, key, value):\n self._items.append((key, value))\n\n def __delitem__(self, key):\n self._items = [(k, v) for k, v in self._items if k != key]\n\n def __iter__(self):\n for key in self.keys():\n yield key\n\n def __len__(self):\n return len(list(self.keys()))\n\n def getall(self, key):\n return [v for k, v in self._items if k == key]\n\n def keys(self):\n return [k for k, v in self._items]\n\n def values(self):\n return [v for k, v in self._items]\n\n def items(self):\n return self._items[:]\n\n def multi_items(self):\n \"\"\"Returns items as tuple of key and a list of values.\"\"\"\n items = []\n d = {}\n\n for k, v in self._items:\n if k not in d:\n d[k] = []\n items.append((k, d[k]))\n d[k].append(v)\n return items\n\n@macro\n@public\ndef render_template(name, *a, **kw):\n if \".\" in name:\n name = name.rsplit(\".\", 1)[0]\n return render[name](*a, **kw)\n\n\ndef kebab_case(upper_camel_case):\n \"\"\"\n :param str upper_camel_case: Text in upper camel case (e.g. \"HelloWorld\")\n :return: text in kebab case (e.g. 
'hello-world')\n\n >>> kebab_case('HelloWorld')\n 'hello-world'\n >>> kebab_case(\"MergeUI\")\n 'merge-u-i'\n \"\"\"\n parts = re.findall(r'[A-Z][^A-Z]*', upper_camel_case)\n return '-'.join(parts).lower()\n\n\n@public\ndef render_component(name, attrs=None, json_encode=True):\n \"\"\"\n :param str name: Name of the component (excluding extension)\n :param dict attrs: attributes to add to the component element\n \"\"\"\n from openlibrary.plugins.upstream.code import static_url\n\n attrs = attrs or {}\n attrs_str = ''\n for (key, val) in attrs.items():\n if json_encode and isinstance(val, dict) or isinstance(val, list):\n val = json.dumps(val)\n attrs_str += ' %s=\"%s\"' % (key, val.replace('\"', \"'\"))\n\n html = ''\n included = web.ctx.setdefault(\"included-components\", [])\n\n if len(included) == 0:\n # Need to include Vue\n html += '<script src=\"%s\"></script>' % static_url('build/vue.js')\n\n if name not in included:\n url = static_url('build/components/production/ol-%s.min.js' % name)\n if query_param('debug'):\n url = static_url('build/components/development/ol-%s.js' % name)\n html += '<script src=\"%s\"></script>' % url\n included.append(name)\n\n html += '<ol-%(name)s %(attrs)s></ol-%(name)s>' % {\n 'name': kebab_case(name),\n 'attrs': attrs_str,\n }\n return html\n\n\n@public\ndef get_error(name, *args):\n \"\"\"Return error with the given name from errors.tmpl template.\"\"\"\n return get_message_from_template(\"errors\", name, args)\n\n@public\ndef get_message(name, *args):\n \"\"\"Return message with given name from messages.tmpl template\"\"\"\n return get_message_from_template(\"messages\", name, args)\n\ndef get_message_from_template(template_name, name, args):\n d = render_template(template_name).get(\"messages\", {})\n msg = d.get(name) or name.lower().replace(\"_\", \" \")\n\n if msg and args:\n return msg % args\n else:\n return msg\n\n@public\ndef list_recent_pages(path, limit=100, offset=0):\n \"\"\"Lists all pages with name path/* in the order of last_modified.\"\"\"\n q = {}\n\n q['key~' ] = path + '/*'\n # don't show /type/delete and /type/redirect\n q['a:type!='] = '/type/delete'\n q['b:type!='] = '/type/redirect'\n\n q['sort'] = 'key'\n q['limit'] = limit\n q['offset'] = offset\n q['sort'] = '-last_modified'\n # queries are very slow with != conditions\n # q['type'] != '/type/delete'\n return web.ctx.site.get_many(web.ctx.site.things(q))\n\n@public\ndef json_encode(d):\n return json.dumps(d)\n\ndef unflatten(d, seperator=\"--\"):\n \"\"\"Convert flattened data into nested form.\n\n >>> unflatten({\"a\": 1, \"b--x\": 2, \"b--y\": 3, \"c--0\": 4, \"c--1\": 5})\n {'a': 1, 'c': [4, 5], 'b': {'y': 3, 'x': 2}}\n >>> unflatten({\"a--0--x\": 1, \"a--0--y\": 2, \"a--1--x\": 3, \"a--1--y\": 4})\n {'a': [{'x': 1, 'y': 2}, {'x': 3, 'y': 4}]}\n\n \"\"\"\n def isint(k):\n try:\n int(k)\n return True\n except ValueError:\n return False\n\n def setvalue(data, k, v):\n if '--' in k:\n k, k2 = k.split(seperator, 1)\n setvalue(data.setdefault(k, {}), k2, v)\n else:\n data[k] = v\n\n def makelist(d):\n \"\"\"Convert d into a list if all the keys of d are integers.\"\"\"\n if isinstance(d, dict):\n if all(isint(k) for k in d):\n return [makelist(d[k]) for k in sorted(d, key=int)]\n else:\n return web.storage((k, makelist(v)) for k, v in d.items())\n else:\n return d\n\n d2 = {}\n for k, v in d.items():\n setvalue(d2, k, v)\n return makelist(d2)\n\n\ndef fuzzy_find(value, options, stopwords=None):\n stopwords = stopwords or []\n \"\"\"Try find the option nearest to the value.\n\n 
>>> fuzzy_find(\"O'Reilly\", [\"O'Reilly Inc\", \"Addison-Wesley\"])\n \"O'Reilly Inc\"\n \"\"\"\n if not options:\n return value\n\n rx = web.re_compile(r\"[-_\\.&, ]+\")\n\n # build word frequency\n d = defaultdict(list)\n for option in options:\n for t in rx.split(option):\n d[t].append(option)\n\n # find score for each option\n score = defaultdict(lambda: 0)\n for t in rx.split(value):\n if t.lower() in stopwords:\n continue\n for option in d[t]:\n score[option] += 1\n\n # take the option with maximum score\n return max(options, key=score.__getitem__)\n\n@public\ndef radio_input(checked=False, **params):\n params['type'] = 'radio'\n if checked:\n params['checked'] = \"checked\"\n return \"<input %s />\" % \" \".join(['%s=\"%s\"' % (k, web.websafe(v)) for k, v in params.items()])\n\n@public\ndef radio_list(name, args, value):\n html = []\n for arg in args:\n if isinstance(arg, tuple):\n arg, label = arg\n else:\n label = arg\n html.append(radio_input())\n\n@public\ndef get_coverstore_url():\n return config.get('coverstore_url', 'https://covers.openlibrary.org').rstrip('/')\n\n\n@public\ndef get_the_best_book_on_url():\n return config.get('tbbo_url')\n\n\ndef _get_changes_v1_raw(query, revision=None):\n \"\"\"Returns the raw versions response.\n\n Revision is taken as argument to make sure a new cache entry is used when a new revision of the page is created.\n \"\"\"\n if 'env' not in web.ctx:\n delegate.fakeload()\n\n versions = web.ctx.site.versions(query)\n\n for v in versions:\n v.created = v.created.isoformat()\n v.author = v.author and v.author.key\n\n # XXX-Anand: hack to avoid too big data to be stored in memcache.\n # v.changes is not used and it contrinutes to memcache bloat in a big way.\n v.changes = '[]'\n\n return versions\n\ndef get_changes_v1(query, revision=None):\n # uses the cached function _get_changes_v1_raw to get the raw data\n # and processes to before returning.\n def process(v):\n v = web.storage(v)\n v.created = parse_datetime(v.created)\n v.author = v.author and web.ctx.site.get(v.author, lazy=True)\n return v\n\n return [process(v) for v in _get_changes_v1_raw(query, revision)]\n\ndef _get_changes_v2_raw(query, revision=None):\n \"\"\"Returns the raw recentchanges response.\n\n Revision is taken as argument to make sure a new cache entry is used when a new revision of the page is created.\n \"\"\"\n if 'env' not in web.ctx:\n delegate.fakeload()\n\n changes = web.ctx.site.recentchanges(query)\n return [c.dict() for c in changes]\n\n# XXX-Anand: disabled temporarily to avoid too much memcache usage.\n#_get_changes_v2_raw = cache.memcache_memoize(_get_changes_v2_raw, key_prefix=\"upstream._get_changes_v2_raw\", timeout=10*60)\n\ndef get_changes_v2(query, revision=None):\n page = web.ctx.site.get(query['key'])\n\n def first(seq, default=None):\n try:\n return next(seq)\n except StopIteration:\n return default\n\n def process_change(change):\n change = Changeset.create(web.ctx.site, storify(change))\n change.thing = page\n change.key = page.key\n change.revision = first(c.revision for c in change.changes if c.key == page.key)\n change.created = change.timestamp\n\n change.get = change.__dict__.get\n change.get_comment = lambda: get_comment(change)\n change.machine_comment = change.data.get(\"machine_comment\")\n\n return change\n\n def get_comment(change):\n t = get_template(\"recentchanges/\" + change.kind + \"/comment\") or get_template(\"recentchanges/default/comment\")\n return t(change, page)\n\n query['key'] = page.key\n changes = _get_changes_v2_raw(query, 
revision=page.revision)\n return [process_change(c) for c in changes]\n\ndef get_changes(query, revision=None):\n return get_changes_v2(query, revision=revision)\n\n@public\ndef get_history(page):\n h = web.storage(revision=page.revision, lastest_revision=page.revision, created=page.created)\n if h.revision < 5:\n h.recent = get_changes({\"key\": page.key, \"limit\": 5}, revision=page.revision)\n h.initial = h.recent[-1:]\n h.recent = h.recent[:-1]\n else:\n h.initial = get_changes({\"key\": page.key, \"limit\": 1, \"offset\": h.revision-1}, revision=page.revision)\n h.recent = get_changes({\"key\": page.key, \"limit\": 4}, revision=page.revision)\n\n return h\n\n@public\ndef get_version(key, revision):\n try:\n return web.ctx.site.versions({\"key\": key, \"revision\": revision, \"limit\": 1})[0]\n except IndexError:\n return None\n\n@public\ndef get_recent_author(doc):\n versions = get_changes_v1({'key': doc.key, 'limit': 1, \"offset\": 0}, revision=doc.revision)\n if versions:\n return versions[0].author\n\n@public\ndef get_recent_accounts(limit=5, offset=0):\n versions = web.ctx.site.versions({'type': '/type/user', 'revision': 1, 'limit': limit, 'offset': offset})\n return web.ctx.site.get_many([v.key for v in versions])\n\ndef get_locale():\n try:\n return babel.Locale(web.ctx.get(\"lang\") or \"en\")\n except babel.core.UnknownLocaleError:\n return babel.Locale(\"en\")\n\n@public\ndef process_version(v):\n \"\"\"Looks at the version and adds machine_comment required for showing \"View MARC\" link.\"\"\"\n comments = [\n \"found a matching marc record\",\n \"add publisher and source\",\n ]\n if v.key.startswith('/books/') and not v.get('machine_comment'):\n thing = v.get('thing') or web.ctx.site.get(v.key, v.revision)\n if thing.source_records and v.revision == 1 or (v.comment and v.comment.lower() in comments):\n marc = thing.source_records[-1]\n if marc.startswith('marc:'):\n v.machine_comment = marc[len(\"marc:\"):]\n else:\n v.machine_comment = marc\n return v\n\n@public\ndef is_thing(t):\n return isinstance(t, Thing)\n\n@public\ndef putctx(key, value):\n \"\"\"Save a value in the context.\"\"\"\n context[key] = value\n return \"\"\n\nclass Metatag:\n def __init__(self, tag=\"meta\", **attrs):\n self.tag = tag\n self.attrs = attrs\n\n def __str__(self):\n attrs = ' '.join(\n '%s=\"%s\"' % (k, websafe(v) if PY3 else websafe(v).encode('utf8'))\n for k, v in self.attrs.items())\n return '<%s %s />' % (self.tag, attrs)\n\n def __repr__(self):\n return 'Metatag(%s)' % str(self)\n\n@public\ndef add_metatag(tag=\"meta\", **attrs):\n context.setdefault('metatags', [])\n context.metatags.append(Metatag(tag, **attrs))\n\n@public\ndef url_quote(text):\n if isinstance(text, six.text_type):\n text = text.encode('utf8')\n return urllib.parse.quote_plus(text)\n\n\n@public\ndef urlencode(dict_or_list_of_tuples):\n \"\"\"\n You probably want to use this, if you're looking to urlencode parameters. 
This will\n encode things to utf8 that would otherwise cause urlencode to error.\n :param dict or list dict_or_list_of_tuples:\n :rtype: basestring\n \"\"\"\n from six.moves.urllib.parse import urlencode as og_urlencode\n tuples = dict_or_list_of_tuples\n if isinstance(dict_or_list_of_tuples, dict):\n tuples = dict_or_list_of_tuples.items()\n params = [\n (k, v.encode('utf-8') if isinstance(v, six.text_type) else v)\n for (k, v) in tuples\n ]\n return og_urlencode(params)\n\n\n@public\ndef entity_decode(text):\n try:\n return six.moves.html_parser.unescape(text)\n except AttributeError:\n return six.moves.html_parser.HTMLParser().unescape(text)\n\n@public\ndef set_share_links(url='#', title='', view_context=None):\n \"\"\"\n Constructs list share links for social platforms and assigns to view context attribute\n\n Args (all required):\n url (str or unicode) - complete canonical url to page being shared\n title (str or unicode) - title of page being shared\n view_context (object that has/can-have share_links attribute)\n \"\"\"\n encoded_url = url_quote(url)\n text = url_quote(\"Check this out: \" + entity_decode(title))\n links = [\n {'text': 'Facebook', 'url': 'https://www.facebook.com/sharer/sharer.php?u=' + encoded_url},\n {'text': 'Twitter', 'url': 'https://twitter.com/intent/tweet?url=%s&via=openlibrary&text=%s' % (encoded_url, text)},\n {'text': 'Pinterest', 'url': 'https://pinterest.com/pin/create/link/?url=%s&description=%s' % (encoded_url, text)}\n ]\n view_context.share_links = links\n\ndef pad(seq, size, e=None):\n \"\"\"\n >>> pad([1, 2], 4, 0)\n [1, 2, 0, 0]\n \"\"\"\n seq = seq[:]\n while len(seq) < size:\n seq.append(e)\n return seq\n\ndef parse_toc_row(line):\n \"\"\"Parse one row of table of contents.\n\n >>> def f(text):\n ... d = parse_toc_row(text)\n ... return (d['level'], d['label'], d['title'], d['pagenum'])\n ...\n >>> f(\"* chapter 1 | Welcome to the real world! | 2\")\n (1, 'chapter 1', 'Welcome to the real world!', '2')\n >>> f(\"Welcome to the real world!\")\n (0, '', 'Welcome to the real world!', '')\n >>> f(\"** | Welcome to the real world! | 2\")\n (2, '', 'Welcome to the real world!', '2')\n >>> f(\"|Preface | 1\")\n (0, '', 'Preface', '1')\n >>> f(\"1.1 | Apple\")\n (0, '1.1', 'Apple', '')\n \"\"\"\n RE_LEVEL = web.re_compile(r\"(\\**)(.*)\")\n level, text = RE_LEVEL.match(line.strip()).groups()\n\n if \"|\" in text:\n tokens = text.split(\"|\", 2)\n label, title, page = pad(tokens, 3, '')\n else:\n title = text\n label = page = \"\"\n\n return web.storage(level=len(level), label=label.strip(), title=title.strip(), pagenum=page.strip())\n\ndef parse_toc(text):\n \"\"\"Parses each line of toc\"\"\"\n if text is None:\n return []\n return [parse_toc_row(line) for line in text.splitlines() if line.strip(\" |\")]\n\n_languages = None\n\n@public\ndef get_languages():\n global _languages\n if _languages is None:\n keys = web.ctx.site.things({\"type\": \"/type/language\", \"key~\": \"/languages/*\", \"limit\": 1000})\n _languages = sorted([web.storage(name=d.name, code=d.code, key=d.key) for d in web.ctx.site.get_many(keys)], key=lambda d: d.name.lower())\n return _languages\n\n@public\ndef get_edition_config():\n return _get_edition_config()\n\[email protected]\ndef _get_edition_config():\n \"\"\"Returns the edition config.\n\n The results are cached on the first invocation. 
Any changes to /config/edition page require restarting the app.\n\n This is is cached because fetching and creating the Thing object was taking about 20ms of time for each book request.\n \"\"\"\n thing = web.ctx.site.get('/config/edition')\n classifications = [web.storage(t.dict()) for t in thing.classifications if 'name' in t]\n identifiers = [web.storage(t.dict()) for t in thing.identifiers if 'name' in t]\n roles = thing.roles\n return web.storage(classifications=classifications, identifiers=identifiers, roles=roles)\n\nfrom openlibrary.core.olmarkdown import OLMarkdown\ndef get_markdown(text, safe_mode=False):\n md = OLMarkdown(source=text, safe_mode=safe_mode)\n view._register_mdx_extensions(md)\n md.postprocessors += view.wiki_processors\n return md\n\n\nclass HTML(six.text_type):\n def __init__(self, html):\n six.text_type.__init__(self, web.safeunicode(html))\n\n def __repr__(self):\n return \"<html: %s>\" % six.text_type.__repr__(self)\n\n_websafe = web.websafe\ndef websafe(text):\n if isinstance(text, HTML):\n return text\n elif isinstance(text, web.template.TemplateResult):\n return web.safestr(text)\n else:\n return _websafe(text)\n\n\nfrom openlibrary.plugins.upstream import adapter\nfrom openlibrary.utils.olcompress import OLCompressor\nfrom openlibrary.utils import olmemcache\nimport memcache\n\nclass UpstreamMemcacheClient:\n \"\"\"Wrapper to memcache Client to handle upstream specific conversion and OL specific compression.\n Compatible with memcache Client API.\n \"\"\"\n def __init__(self, servers):\n self._client = memcache.Client(servers)\n compressor = OLCompressor()\n self.compress = compressor.compress\n def decompress(*args, **kw):\n d = json.loads(compressor.decompress(*args, **kw))\n return json.dumps(adapter.unconvert_dict(d))\n self.decompress = decompress\n\n def get(self, key):\n key = adapter.convert_key(key)\n if key is None:\n return None\n\n try:\n value = self._client.get(web.safestr(key))\n except memcache.Client.MemcachedKeyError:\n return None\n\n return value and self.decompress(value)\n\n def get_multi(self, keys):\n keys = [adapter.convert_key(k) for k in keys]\n keys = [web.safestr(k) for k in keys]\n\n d = self._client.get_multi(keys)\n return dict((web.safeunicode(adapter.unconvert_key(k)), self.decompress(v)) for k, v in d.items())\n\nif config.get('upstream_memcache_servers'):\n olmemcache.Client = UpstreamMemcacheClient\n # set config.memcache_servers only after olmemcache.Client is updated\n config.memcache_servers = config.upstream_memcache_servers\n\ndef _get_recent_changes():\n site = web.ctx.get('site') or delegate.create_site()\n web.ctx.setdefault(\"ip\", \"127.0.0.1\")\n\n # The recentchanges can have multiple revisions for a document if it has been modified more than once.\n # Take only the most recent revision in that case.\n visited = set()\n def is_visited(key):\n if key in visited:\n return True\n else:\n visited.add(key)\n return False\n\n # ignore reverts\n re_revert = web.re_compile(r\"reverted to revision \\d+\")\n def is_revert(r):\n return re_revert.match(r.comment or \"\")\n\n # take the 100 recent changes, filter them and take the first 50\n q = {\"bot\": False, \"limit\": 100}\n result = site.versions(q)\n result = [r for r in result if not is_visited(r.key) and not is_revert(r)]\n result = result[:50]\n\n def process_thing(thing):\n t = web.storage()\n for k in [\"key\", \"title\", \"name\", \"displayname\"]:\n t[k] = thing[k]\n t['type'] = web.storage(key=thing.type.key)\n return t\n\n for r in result:\n r.author = 
r.author and process_thing(r.author)\n r.thing = process_thing(site.get(r.key, r.revision))\n\n return result\n\ndef _get_recent_changes2():\n \"\"\"New recent changes for around the library.\n\n This function returns the message to display for each change.\n The message is get by calling `recentchanges/$kind/message.html` template.\n\n If `$var ignore=True` is set by the message template, the change is ignored.\n \"\"\"\n if 'env' not in web.ctx:\n delegate.fakeload()\n\n q = {\"bot\": False, \"limit\": 100}\n changes = web.ctx.site.recentchanges(q)\n\n def is_ignored(c):\n return (\n # c.kind=='update' allow us to ignore update recent changes on people\n c.kind == 'update' or\n # ignore change if author has been deleted (e.g. spammer)\n (c.author and c.author.type.key == '/type/delete'))\n\n def render(c):\n t = get_template(\"recentchanges/\" + c.kind + \"/message\") or get_template(\"recentchanges/default/message\")\n return t(c)\n\n messages = [render(c) for c in changes if not is_ignored(c)]\n messages = [m for m in messages if str(m.get(\"ignore\", \"false\")).lower() != \"true\"]\n return messages\n\n_get_recent_changes = web.memoize(_get_recent_changes, expires=5*60, background=True)\n_get_recent_changes2 = web.memoize(_get_recent_changes2, expires=5*60, background=True)\n\n@public\ndef get_random_recent_changes(n):\n if \"recentchanges_v2\" in web.ctx.get(\"features\", []):\n changes = _get_recent_changes2()\n else:\n changes = _get_recent_changes()\n\n _changes = random.sample(changes, n) if len(changes) > n else changes\n for i, change in enumerate(_changes):\n _changes[i]['__body__'] = _changes[i]['__body__'].replace('<script>', '')\\\n .replace('</script>', '')\n return _changes\n\ndef _get_blog_feeds():\n url = \"http://blog.openlibrary.org/feed/\"\n try:\n stats.begin(\"get_blog_feeds\", url=url)\n tree = etree.parse(urllib.request.urlopen(url))\n except Exception:\n # Handle error gracefully.\n logging.getLogger(\"openlibrary\").error(\"Failed to fetch blog feeds\", exc_info=True)\n return []\n finally:\n stats.end()\n\n def parse_item(item):\n pubdate = datetime.datetime.strptime(item.find(\"pubDate\").text, '%a, %d %b %Y %H:%M:%S +0000').isoformat()\n return dict(\n title=item.find(\"title\").text,\n link=item.find(\"link\").text,\n pubdate=pubdate\n )\n return [parse_item(item) for item in tree.findall(\"//item\")]\n\n_get_blog_feeds = cache.memcache_memoize(_get_blog_feeds, key_prefix=\"upstream.get_blog_feeds\", timeout=5*60)\n\ndef get_donation_include(include):\n web_input = web.input()\n\n # The following allows archive.org staff to test banners without\n # needing to reload openlibrary services:\n dev_host = web_input.pop(\"dev_host\", \"\") # e.g. `www-user`\n if dev_host and re.match('^[a-zA-Z0-9-.]+$', dev_host):\n dev_host += \".\" # e.g. 
`www-user.`\n script_src = \"https://%sarchive.org/includes/donate.js\" % dev_host\n if 'ymd' in web_input:\n script_src += '?ymd=' + web_input.ymd\n\n html = \"\"\"\n <div id=\"donato\"></div>\n <script src=\"%s\" data-platform=\"ol\"></script>\n \"\"\" % script_src\n return html\n\n#get_donation_include = cache.memcache_memoize(get_donation_include, key_prefix=\"upstream.get_donation_include\", timeout=60)\n\n@public\ndef item_image(image_path, default=None):\n if image_path is None:\n return default\n if image_path.startswith('https:'):\n return image_path\n return \"https:\" + image_path\n\n@public\ndef get_blog_feeds():\n def process(post):\n post = web.storage(post)\n post.pubdate = parse_datetime(post.pubdate)\n return post\n return [process(post) for post in _get_blog_feeds()]\n\nclass Request:\n path = property(lambda self: web.ctx.path)\n home = property(lambda self: web.ctx.home)\n domain = property(lambda self: web.ctx.host)\n\n @property\n def canonical_url(self):\n \"\"\"Returns the https:// version of the URL.\n\n Used for adding <meta rel=\"canonical\" ..> tag in all web pages.\n Required to make OL retain the page rank after https migration.\n \"\"\"\n readable_path = web.ctx.get('readable_path', web.ctx.path) or ''\n query = web.ctx.query or ''\n host = web.ctx.host or ''\n url = (host + readable_path + query)\n return (\"https://\" + url) if url else ''\n\n\n@public\ndef render_once(key):\n rendered = web.ctx.setdefault('render_once', {})\n if key in rendered:\n return False\n else:\n rendered[key] = True\n return True\n\n\n@public\ndef today():\n return datetime.datetime.today()\n\n\ndef setup():\n \"\"\"Do required initialization\"\"\"\n # monkey-patch get_markdown to use OL Flavored Markdown\n view.get_markdown = get_markdown\n\n # Provide alternate implementations for websafe and commify\n web.websafe = websafe\n web.template.Template.FILTERS['.html'] = websafe\n web.template.Template.FILTERS['.xml'] = websafe\n\n web.commify = commify\n\n web.template.Template.globals.update({\n 'HTML': HTML,\n 'request': Request(),\n 'logger': logging.getLogger(\"openlibrary.template\"),\n 'sum': sum,\n 'get_donation_include': get_donation_include,\n 'websafe': web.websafe,\n })\n\n from openlibrary.core import helpers as h\n web.template.Template.globals.update(h.helpers)\n\n if config.get('use_gzip') == True:\n config.middleware.append(GZipMiddleware)\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n",
"path": "openlibrary/plugins/upstream/utils.py"
}
] | diff --git a/openlibrary/macros/QueryCarousel.html b/openlibrary/macros/QueryCarousel.html
index 55ad3a3f842..e9998acc3c9 100644
--- a/openlibrary/macros/QueryCarousel.html
+++ b/openlibrary/macros/QueryCarousel.html
@@ -1,4 +1,4 @@
-$def with(query, title=None, sort='new', key='', limit=20, search=False, has_fulltext_only=True)
+$def with(query, title=None, sort='new', key='', limit=20, search=False, has_fulltext_only=True, url=None)
$# Takes following parameters
$# * query (str) -- Any arbitrary Open Library search query, e.g. subject:"Textbooks"
@@ -20,6 +20,7 @@
$code:
params = { 'q': query }
+ url = url or "/search?" + urlencode(params)
if has_fulltext_only:
params['has_fulltext'] = 'true'
@@ -27,4 +28,4 @@
books = [storage(b) for b in (results.get('docs', []))]
load_more = {"url": "/search.json?" + urlencode(params), "limit": limit }
-$:render_template("books/custom_carousel", books=books, title=title, url="/search?" + urlencode(params), key=key, load_more=load_more)
+$:render_template("books/custom_carousel", books=books, title=title, url=url, key=key, load_more=load_more)
diff --git a/openlibrary/plugins/upstream/utils.py b/openlibrary/plugins/upstream/utils.py
index 5daf8d7e00f..e29cc83e347 100644
--- a/openlibrary/plugins/upstream/utils.py
+++ b/openlibrary/plugins/upstream/utils.py
@@ -818,6 +818,11 @@ def render_once(key):
return True
+@public
+def today():
+ return datetime.datetime.today()
+
+
def setup():
"""Do required initialization"""
# monkey-patch get_markdown to use OL Flavored Markdown
diff --git a/openlibrary/templates/home/index.html b/openlibrary/templates/home/index.html
index 60c1f523d5a..6c16e7a5121 100644
--- a/openlibrary/templates/home/index.html
+++ b/openlibrary/templates/home/index.html
@@ -15,11 +15,17 @@
$add_metatag(name="twitter:image:alt", content="Open Library Logo")
$add_metatag(name="twitter:card", content="homepage_summary")
+$code:
+ FEB_READS = 'key:(/works/OL18181363W OR /works/OL3481095W OR /works/OL4360244W OR /works/OL20017931W OR /works/OL20615204W OR /works/OL2363176W OR /works/OL17869588W OR /works/OL17784026W OR /works/OL21179764W OR /works/OL8870595W OR /works/OL21054973W OR /works/OL21673730W OR /works/OL20548582W OR /works/OL15279153W OR /works/OL19992836W OR /works/OL15691480W OR /works/OL16305795W OR /works/OL19923407W OR /works/OL16529029W OR /works/OL9242636W OR /works/OL17529769W OR /works/OL3345332W OR /works/OL20013209W OR /works/OL20015483W OR /works/OL19987474W OR /works/OL19992114W OR /works/OL17893900W OR /works/OL18435803W OR /works/OL17314666W OR /works/OL17358927W OR /works/OL15933199W OR /works/OL17858931W OR /works/OL18187603W OR /works/OL16853133W OR /works/OL16894393W OR /works/OL19976062W OR /works/OL20037832W OR /works/OL16885033W OR /works/OL19708155W OR /works/OL17921756W OR /works/OL21037237W OR /works/OL17786027W OR /works/OL17345141W OR /works/OL21294275W OR /works/OL9582417W OR /works/OL9357555W OR /works/OL20907853W OR /works/OL20005568W OR /works/OL3296483W OR /works/OL11983310W OR /works/OL7159886W OR /works/OL1662667W OR /works/OL19990553W OR /works/OL15285884W OR /works/OL6888879W OR /works/OL17900435W OR /works/OL5706069W OR /works/OL2977589W OR /works/OL1593701W OR /works/OL16451688W OR /works/OL16910779W OR /works/OL18215336W OR /works/OL17371695W OR /works/OL3521634W OR /works/OL17355199W OR /works/OL5739152W OR /works/OL20016962W OR /works/OL3191599W OR /works/OL20896695W OR /works/OL19752490W OR /works/OL18335154W OR /works/OL4582875W OR /works/OL16515210W OR /works/OL16868407W OR /works/OL3459949W OR /works/OL16025481W OR /works/OL1928280W OR /works/OL6208302W OR /works/OL17566265W OR /works/OL20652811W OR /works/OL22059158W OR /works/OL4370955W OR /works/OL19998526W OR /works/OL6218060W OR /works/OL16813953W OR /works/OL21179974W OR /works/OL7213898W OR /works/OL17872185W OR /works/OL17340085W OR /works/OL21584979W OR /works/OL21078916W OR /works/OL158519W OR /works/OL4114499W OR /works/OL19638041W OR /works/OL16844793W OR /works/OL20940485W OR /works/OL17392121W OR /works/OL20030448W OR /works/OL15920474W OR /works/OL20544657W)'
+
<div id="contentBody">
$:render_template("home/categories", test=test)
$:render_template("books/custom_carousel", books=readonline_carousel(), title=_('Classic Books'), url="/read", key="public_domain", test=test)
+ $if today().month == 2 and not test:
+ $:macros.QueryCarousel(query=FEB_READS, title=_('Books For February'), key="monthly_reads", url="/collections/february", sort='editions')
+
$:render_template("home/custom_ia_carousel", title=_('Books We Love'), key="staff_picks", query='languageSorter:("English")', subject="openlibrary_staff_picks", sorts=["lending___last_browse desc"], limit=18, test=test)
$:render_template("home/custom_ia_carousel", title=_('Recently Returned'), key="recently_returned", sorts=["lending___last_browse desc"], limit=18, test=test)
|
pwndbg__pwndbg-760 | `find_fake_fast` fails when providing a size argument
### Description
Providing a size argument to the `find_fake_fast` command causes a TypeError at [heap.py:519](https://github.com/pwndbg/pwndbg/blob/dev/pwndbg/commands/heap.py#L519).
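
For context, the failing path runs the user-supplied size through `allocator.fastbin_index()` and the candidate-scanning loop. Below is a minimal, purely illustrative sketch of the kind of coercion that would avoid the crash, under the assumption that the argument reaches `find_fake_fast` as a string or `gdb.Value` rather than a Python `int`; it is a hypothetical helper, not pwndbg's actual patch:

```python
# Hypothetical helper (not part of pwndbg): normalise the optional `size`
# argument before it is used in fastbin_index()/range() arithmetic.
def coerce_size(size):
    if size is None:
        return None
    if isinstance(size, str):
        # base=0 accepts "0x7f" as well as plain decimal strings
        return int(size, 0)
    return int(size)

# Quick self-check of the sketch:
assert coerce_size("0x7f") == 0x7f
assert coerce_size(127) == 127
assert coerce_size(None) is None
```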
### Steps to reproduce
1. Run gdb on a program that utilizes the heap
2. Once the heap is initialized, run `find_fake_fast &__malloc_hook 0x7f`
`find_fake_fast` was working correctly as of commit 1158a3086d2eaa137e3ce30810539c1aa578e87a:

The same command on the same program, after updating to commit 609284cee279de345dcb0706e11a0b56abe349f4:

### My setup
Gdb: 7.11.1
Python: 3.5.2 (default, Oct 8 2019, 13:06:37) [GCC 5.4.0 20160609]
Pwndbg: 1.1.0 build: 609284c
Capstone: 4.0.1024
Unicorn: 1.0.1
| [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport argparse\nimport ctypes\nimport struct\n\nimport gdb\nimport six\n\nimport pwndbg.color.context as C\nimport pwndbg.color.memory as M\nimport pwndbg.commands\nimport pwndbg.typeinfo\nfrom pwndbg.color import generateColorFunction\nfrom pwndbg.color import message\n\n\ndef read_chunk(addr):\n \"\"\"Read a chunk's metadata.\"\"\"\n # In GLIBC versions <= 2.24 the `mchunk_[prev_]size` field was named `[prev_]size`.\n # To support both versions, change the new names to the old ones here so that\n # the rest of the code can deal with uniform names.\n renames = {\n \"mchunk_size\": \"size\",\n \"mchunk_prev_size\": \"prev_size\",\n }\n val = pwndbg.typeinfo.read_gdbvalue(\"struct malloc_chunk\", addr)\n return dict({ renames.get(key, key): int(val[key]) for key in val.type.keys() })\n\n\ndef format_bin(bins, verbose=False, offset=None):\n allocator = pwndbg.heap.current\n if offset is None:\n offset = allocator.chunk_key_offset('fd')\n\n result = []\n bins_type = bins.pop('type')\n\n for size in bins:\n b = bins[size]\n count, is_chain_corrupted = None, False\n\n # fastbins consists of only single linked list\n if bins_type == 'fastbins':\n chain_fd = b\n # tcachebins consists of single linked list and entries count\n elif bins_type == 'tcachebins':\n chain_fd, count = b\n # normal bins consists of double linked list and may be corrupted (we can detect corruption)\n else: # normal bin\n chain_fd, chain_bk, is_chain_corrupted = b\n\n if not verbose and (chain_fd == [0] and not count) and not is_chain_corrupted:\n continue\n\n if bins_type == 'tcachebins':\n limit = 8\n if count <= 7:\n limit = count + 1\n formatted_chain = pwndbg.chain.format(chain_fd[0], offset=offset, limit=limit)\n else:\n formatted_chain = pwndbg.chain.format(chain_fd[0], offset=offset)\n\n\n if isinstance(size, int):\n size = hex(size)\n\n if is_chain_corrupted:\n line = message.hint(size) + message.error(' [corrupted]') + '\\n'\n line += message.hint('FD: ') + formatted_chain + '\\n'\n line += message.hint('BK: ') + pwndbg.chain.format(chain_bk[0], offset=allocator.chunk_key_offset('bk'))\n else:\n if count is not None:\n line = (message.hint(size) + message.hint(' [%3d]' % count) + ': ').ljust(13)\n else:\n line = (message.hint(size) + ': ').ljust(13)\n line += formatted_chain\n\n result.append(line)\n\n if not result:\n result.append(message.hint('empty'))\n\n return result\n\n\nparser = argparse.ArgumentParser()\nparser.description = \"Iteratively print chunks on a heap, default to the current thread's active heap.\"\nparser.add_argument(\"addr\", nargs=\"?\", type=int, default=None, help=\"Address of the first chunk (malloc_chunk struct start, prev_size field).\")\nparser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", help=\"Print all chunk fields, even unused ones.\")\nparser.add_argument(\"-s\", \"--simple\", action=\"store_true\", help=\"Simply print malloc_chunk struct's contents.\")\[email protected](parser)\[email protected]\[email protected]\[email protected]\ndef heap(addr=None, verbose=False, simple=False):\n \"\"\"Iteratively print chunks on a heap, default to the current thread's\n active heap.\n \"\"\"\n allocator = pwndbg.heap.current\n heap_region = allocator.get_heap_boundaries(addr)\n arena = allocator.get_arena_for_chunk(addr) if addr else allocator.get_arena()\n top_chunk 
= arena['top']\n ptr_size = allocator.size_sz\n\n # Calculate where to start printing; if an address was supplied, use that,\n # if this heap belongs to the main arena, start at the beginning of the\n # heap's mapping, otherwise, compensate for the presence of a heap_info\n # struct and possibly an arena.\n if addr:\n cursor = int(addr)\n elif arena == allocator.main_arena:\n cursor = heap_region.start\n else:\n cursor = heap_region.start + allocator.heap_info.sizeof\n if pwndbg.vmmap.find(allocator.get_heap(heap_region.start)['ar_ptr']) == heap_region:\n # Round up to a 2-machine-word alignment after an arena to\n # compensate for the presence of the have_fastchunks variable\n # in GLIBC versions >= 2.27.\n cursor += (allocator.malloc_state.sizeof + ptr_size) & ~allocator.malloc_align_mask\n\n # i686 alignment heuristic\n first_chunk_size = pwndbg.arch.unpack(pwndbg.memory.read(cursor + ptr_size, ptr_size))\n if first_chunk_size == 0:\n cursor += ptr_size * 2\n\n while cursor in heap_region:\n malloc_chunk(cursor, verbose=verbose, simple=simple)\n\n if cursor == top_chunk:\n break\n\n size_field = pwndbg.memory.u(cursor + allocator.chunk_key_offset('size'))\n real_size = size_field & ~allocator.malloc_align_mask\n cursor += real_size\n\n # Avoid an infinite loop when a chunk's size is 0.\n if real_size == 0:\n break\n\n\nparser = argparse.ArgumentParser()\nparser.description = \"Print the contents of an arena, default to the current thread's arena.\"\nparser.add_argument(\"addr\", nargs=\"?\", type=int, default=None, help=\"Address of the arena.\")\[email protected](parser)\[email protected]\[email protected]\[email protected]\ndef arena(addr=None):\n \"\"\"Print the contents of an arena, default to the current thread's arena.\"\"\"\n allocator = pwndbg.heap.current\n arena = allocator.get_arena(addr)\n print(arena)\n\n\nparser = argparse.ArgumentParser()\nparser.description = \"List this process's arenas.\"\[email protected](parser)\[email protected]\[email protected]\[email protected]\ndef arenas():\n \"\"\"Lists this process's arenas.\"\"\"\n allocator = pwndbg.heap.current\n for ar in allocator.arenas:\n print(ar)\n\n\nparser = argparse.ArgumentParser()\nparser.description = \"Print a thread's tcache contents, default to the current thread's tcache.\"\nparser.add_argument(\"addr\", nargs=\"?\", type=int, default=None, help=\"Address of the tcache.\")\[email protected](parser)\[email protected]\[email protected]\[email protected]\[email protected]\ndef tcache(addr=None):\n \"\"\"Print a thread's tcache contents, default to the current thread's\n tcache.\n \"\"\"\n allocator = pwndbg.heap.current\n tcache = allocator.get_tcache(addr)\n print(tcache)\n\n\nparser = argparse.ArgumentParser()\nparser.description = \"Print the mp_ struct's contents.\"\[email protected](parser)\[email protected]\[email protected]\[email protected]\ndef mp():\n \"\"\"Print the mp_ struct's contents.\"\"\"\n allocator = pwndbg.heap.current\n print(allocator.mp)\n\n\nparser = argparse.ArgumentParser()\nparser.description = \"Print relevant information about an arena's top chunk, default to current thread's arena.\"\nparser.add_argument(\"addr\", nargs=\"?\", type=int, default=None, help=\"Address of the arena.\")\[email protected](parser)\[email protected]\[email protected]\[email protected]\ndef top_chunk(addr=None):\n \"\"\"Print relevant information about an arena's top chunk, default to the\n current thread's arena.\n \"\"\"\n allocator = pwndbg.heap.current\n arena = allocator.get_arena(addr)\n address = 
arena['top']\n size = pwndbg.memory.u(int(address) + allocator.chunk_key_offset('size'))\n\n out = message.off(\"Top chunk\\n\") + \"Addr: {}\\nSize: 0x{:02x}\".format(M.get(address), size)\n print(out)\n\n\nparser = argparse.ArgumentParser()\nparser.description = \"Print a chunk.\"\nparser.add_argument(\"addr\", type=int, help=\"Address of the chunk (malloc_chunk struct start, prev_size field).\")\nparser.add_argument(\"-f\", \"--fake\", action=\"store_true\", help=\"Is this a fake chunk?\")\nparser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", help=\"Print all chunk fields, even unused ones.\")\nparser.add_argument(\"-s\", \"--simple\", action=\"store_true\", help=\"Simply print malloc_chunk struct's contents.\")\[email protected](parser)\[email protected]\[email protected]\[email protected]\ndef malloc_chunk(addr, fake=False, verbose=False, simple=False):\n \"\"\"Print a malloc_chunk struct's contents.\"\"\"\n # points to the real start of the chunk\n cursor = int(addr)\n\n allocator = pwndbg.heap.current\n ptr_size = allocator.size_sz\n\n size_field = pwndbg.memory.u(cursor + allocator.chunk_key_offset('size'))\n real_size = size_field & ~allocator.malloc_align_mask\n\n headers_to_print = [] # both state (free/allocated) and flags\n fields_to_print = set() # in addition to addr and size\n out_fields = \"Addr: {}\\n\".format(M.get(cursor))\n\n arena = allocator.get_arena_for_chunk(cursor)\n arena_address = None\n\n if fake:\n headers_to_print.append(message.on(\"Fake chunk\"))\n verbose = True # print all fields for fake chunks\n\n if simple:\n chunk = read_chunk(cursor)\n\n if not headers_to_print:\n headers_to_print.append(message.hint(M.get(cursor)))\n\n prev_inuse, is_mmapped, non_main_arena = allocator.chunk_flags(int(chunk['size']))\n if prev_inuse:\n headers_to_print.append(message.hint('PREV_INUSE'))\n if is_mmapped:\n headers_to_print.append(message.hint('IS_MMAPED'))\n if non_main_arena:\n headers_to_print.append(message.hint('NON_MAIN_ARENA'))\n\n print(' | '.join(headers_to_print))\n for key, val in chunk.items():\n print(message.system(key) + \": 0x{:02x}\".format(int(val)))\n print('')\n return\n\n is_top = False\n if arena:\n arena_address = arena.address\n top_chunk = arena['top']\n if cursor == top_chunk:\n headers_to_print.append(message.off(\"Top chunk\"))\n is_top = True\n\n if not is_top:\n fastbins = allocator.fastbins(arena_address) or {}\n smallbins = allocator.smallbins(arena_address) or {}\n largebins = allocator.largebins(arena_address) or {}\n unsortedbin = allocator.unsortedbin(arena_address) or {}\n if allocator.has_tcache():\n tcachebins = allocator.tcachebins(None)\n\n if real_size in fastbins.keys() and cursor in fastbins[real_size]:\n headers_to_print.append(message.on(\"Free chunk (fastbins)\"))\n if not verbose:\n fields_to_print.add('fd')\n\n elif real_size in smallbins.keys() and cursor in bin_addrs(smallbins[real_size], \"smallbins\"):\n headers_to_print.append(message.on(\"Free chunk (smallbins)\"))\n if not verbose:\n fields_to_print.update(['fd', 'bk'])\n\n elif real_size >= list(largebins.items())[0][0] and cursor in bin_addrs(largebins[(list(largebins.items())[allocator.largebin_index(real_size) - 64][0])], \"largebins\"):\n headers_to_print.append(message.on(\"Free chunk (largebins)\"))\n if not verbose:\n fields_to_print.update(['fd', 'bk', 'fd_nextsize', 'bk_nextsize'])\n \n elif cursor in bin_addrs(unsortedbin['all'], \"unsortedbin\"):\n headers_to_print.append(message.on(\"Free chunk (unsortedbin)\"))\n if not verbose:\n 
fields_to_print.update(['fd', 'bk'])\n\n elif allocator.has_tcache() and real_size in tcachebins.keys() and cursor + ptr_size*2 in bin_addrs(tcachebins[real_size], \"tcachebins\"):\n headers_to_print.append(message.on(\"Free chunk (tcache)\"))\n if not verbose:\n fields_to_print.add('fd')\n\n else:\n headers_to_print.append(message.hint(\"Allocated chunk\"))\n\n if verbose:\n fields_to_print.update(['prev_size', 'size', 'fd', 'bk', 'fd_nextsize', 'bk_nextsize'])\n else:\n out_fields += \"Size: 0x{:02x}\\n\".format(size_field)\n\n prev_inuse, is_mmapped, non_main_arena = allocator.chunk_flags(size_field)\n if prev_inuse:\n headers_to_print.append(message.hint('PREV_INUSE'))\n if is_mmapped:\n headers_to_print.append(message.hint('IS_MMAPED'))\n if non_main_arena:\n headers_to_print.append(message.hint('NON_MAIN_ARENA'))\n\n fields_ordered = ['prev_size', 'size', 'fd', 'bk', 'fd_nextsize', 'bk_nextsize']\n for field_to_print in fields_ordered:\n if field_to_print in fields_to_print:\n out_fields += message.system(field_to_print) + \": 0x{:02x}\\n\".format(pwndbg.memory.u(cursor + allocator.chunk_key_offset(field_to_print)))\n\n print(' | '.join(headers_to_print) + \"\\n\" + out_fields)\n\n\nparser = argparse.ArgumentParser()\nparser.description = \"Print the contents of all an arena's bins and a thread's tcache, default to the current thread's arena and tcache.\"\nparser.add_argument(\"addr\", nargs=\"?\", type=int, default=None, help=\"Address of the arena.\")\nparser.add_argument(\"tcache_addr\", nargs=\"?\", type=int, default=None, help=\"Address of the tcache.\")\[email protected](parser)\[email protected]\[email protected]\[email protected]\ndef bins(addr=None, tcache_addr=None):\n \"\"\"Print the contents of all an arena's bins and a thread's tcache,\n default to the current thread's arena and tcache.\n \"\"\"\n if pwndbg.heap.current.has_tcache():\n tcachebins(tcache_addr)\n fastbins(addr)\n unsortedbin(addr)\n smallbins(addr)\n largebins(addr)\n\n\nparser = argparse.ArgumentParser()\nparser.description = \"Print the contents of an arena's fastbins, default to the current thread's arena.\"\nparser.add_argument(\"addr\", nargs=\"?\", type=int, default=None, help=\"Address of the arena.\")\nparser.add_argument(\"verbose\", nargs=\"?\", type=bool, default=True, help=\"Show extra detail.\")\[email protected](parser)\[email protected]\[email protected]\[email protected]\ndef fastbins(addr=None, verbose=True):\n \"\"\"Print the contents of an arena's fastbins, default to the current\n thread's arena.\n \"\"\"\n allocator = pwndbg.heap.current\n fastbins = allocator.fastbins(addr)\n\n if fastbins is None:\n return\n\n formatted_bins = format_bin(fastbins, verbose)\n\n print(C.banner('fastbins'))\n for node in formatted_bins:\n print(node)\n\n\nparser = argparse.ArgumentParser()\nparser.description = \"Print the contents of an arena's unsortedbin, default to the current thread's arena.\"\nparser.add_argument(\"addr\", nargs=\"?\", type=int, default=None, help=\"Address of the arena.\")\nparser.add_argument(\"verbose\", nargs=\"?\", type=bool, default=True, help=\"Show extra detail.\")\[email protected](parser)\[email protected]\[email protected]\[email protected]\ndef unsortedbin(addr=None, verbose=True):\n \"\"\"Print the contents of an arena's unsortedbin, default to the current\n thread's arena.\n \"\"\"\n allocator = pwndbg.heap.current\n unsortedbin = allocator.unsortedbin(addr)\n\n if unsortedbin is None:\n return\n\n formatted_bins = format_bin(unsortedbin, verbose)\n\n 
print(C.banner('unsortedbin'))\n for node in formatted_bins:\n print(node)\n\n\nparser = argparse.ArgumentParser()\nparser.description = \"Print the contents of an arena's smallbins, default to the current thread's arena.\"\nparser.add_argument(\"addr\", nargs=\"?\", type=int, default=None, help=\"Address of the arena.\")\nparser.add_argument(\"verbose\", nargs=\"?\", type=bool, default=False, help=\"Show extra detail.\")\[email protected](parser)\[email protected]\[email protected]\[email protected]\ndef smallbins(addr=None, verbose=False):\n \"\"\"Print the contents of an arena's smallbins, default to the current\n thread's arena.\n \"\"\"\n allocator = pwndbg.heap.current\n smallbins = allocator.smallbins(addr)\n\n if smallbins is None:\n return\n\n formatted_bins = format_bin(smallbins, verbose)\n\n print(C.banner('smallbins'))\n for node in formatted_bins:\n print(node)\n\n\nparser = argparse.ArgumentParser()\nparser.description = \"Print the contents of an arena's largebins, default to the current thread's arena.\"\nparser.add_argument(\"addr\", nargs=\"?\", type=int, default=None, help=\"Address of the arena.\")\nparser.add_argument(\"verbose\", nargs=\"?\", type=bool, default=False, help=\"Show extra detail.\")\[email protected](parser)\[email protected]\[email protected]\[email protected]\ndef largebins(addr=None, verbose=False):\n \"\"\"Print the contents of an arena's largebins, default to the current\n thread's arena.\n \"\"\"\n allocator = pwndbg.heap.current\n largebins = allocator.largebins(addr)\n\n if largebins is None:\n return\n\n formatted_bins = format_bin(largebins, verbose)\n\n print(C.banner('largebins'))\n for node in formatted_bins:\n print(node)\n\n\nparser = argparse.ArgumentParser()\nparser.description = \"Print the contents of a tcache, default to the current thread's tcache.\"\nparser.add_argument(\"addr\", nargs=\"?\", type=int, default=None, help=\"The address of the tcache bins.\")\nparser.add_argument(\"verbose\", nargs=\"?\", type=bool, default=False, help=\"Whether to show more details or not.\")\[email protected](parser)\[email protected]\[email protected]\[email protected]\[email protected]\ndef tcachebins(addr=None, verbose=False):\n \"\"\"Print the contents of a tcache, default to the current thread's tcache.\"\"\"\n allocator = pwndbg.heap.current\n tcachebins = allocator.tcachebins(addr)\n\n if tcachebins is None:\n return\n\n formatted_bins = format_bin(tcachebins, verbose, offset = allocator.tcache_next_offset)\n\n print(C.banner('tcachebins'))\n for node in formatted_bins:\n print(node)\n\n\nparser = argparse.ArgumentParser()\nparser.description = \"Find candidate fake fast chunks overlapping the specified address.\"\nparser.add_argument(\"addr\", type=int, help=\"Address of the word-sized value to overlap.\")\nparser.add_argument(\"size\", nargs=\"?\", type=int, default=None, help=\"Size of fake chunks to find.\")\[email protected](parser)\[email protected]\[email protected]\[email protected]\ndef find_fake_fast(addr, size=None):\n \"\"\"Find candidate fake fast chunks overlapping the specified address.\"\"\"\n psize = pwndbg.arch.ptrsize\n allocator = pwndbg.heap.current\n align = allocator.malloc_alignment\n min_fast = allocator.min_chunk_size\n max_fast = allocator.global_max_fast\n max_fastbin = allocator.fastbin_index(max_fast)\n start = int(addr) - max_fast + psize\n mem = pwndbg.memory.read(start, max_fast - psize, partial=True)\n\n fmt = {\n 'little': '<',\n 'big': '>'\n }[pwndbg.arch.endian] + {\n 4: 'I',\n 8: 'Q'\n }[psize]\n\n if 
size is None:\n sizes = range(min_fast, max_fast + 1, align)\n else:\n sizes = [size]\n\n print(C.banner(\"FAKE CHUNKS\"))\n for size in sizes:\n fastbin = allocator.fastbin_index(size)\n for offset in range((max_fastbin - fastbin) * align, max_fast - align + 1):\n candidate = mem[offset : offset + psize]\n if len(candidate) == psize:\n value = struct.unpack(fmt, candidate)[0]\n if allocator.fastbin_index(value) == fastbin:\n malloc_chunk(start+offset-psize, fake=True)\n\n\nparser = argparse.ArgumentParser()\nparser.description = \"Visualize chunks on a heap, default to the current arena's active heap.\"\nparser.add_argument(\"count\", nargs=\"?\", type=lambda n:max(int(n, 0),1), default=10, help=\"Number of chunks to visualize.\")\nparser.add_argument(\"addr\", nargs=\"?\", default=None, help=\"Address of the first chunk.\")\nparser.add_argument(\"--naive\", \"-n\", action=\"store_true\", default=False, help=\"Attempt to keep printing beyond the top chunk.\")\[email protected](parser)\[email protected]\[email protected]\[email protected]\ndef vis_heap_chunks(addr=None, count=None, naive=None):\n \"\"\"Visualize chunks on a heap, default to the current arena's active heap.\"\"\"\n allocator = pwndbg.heap.current\n heap_region = allocator.get_heap_boundaries(addr)\n arena = allocator.get_arena_for_chunk(addr) if addr else allocator.get_arena()\n\n top_chunk = arena['top']\n ptr_size = allocator.size_sz\n\n # Build a list of addresses that delimit each chunk.\n chunk_delims = []\n if addr:\n cursor = int(addr)\n elif arena == allocator.main_arena:\n cursor = heap_region.start\n else:\n cursor = heap_region.start + allocator.heap_info.sizeof\n if pwndbg.vmmap.find(allocator.get_heap(heap_region.start)['ar_ptr']) == heap_region:\n # Round up to a 2-machine-word alignment after an arena to\n # compensate for the presence of the have_fastchunks variable\n # in GLIBC versions >= 2.27.\n cursor += (allocator.malloc_state.sizeof + ptr_size) & ~allocator.malloc_align_mask\n\n # Check if there is an alignment at the start of the heap, adjust if necessary.\n if not addr:\n first_chunk_size = pwndbg.arch.unpack(pwndbg.memory.read(cursor + ptr_size, ptr_size))\n if first_chunk_size == 0:\n cursor += ptr_size * 2\n\n cursor_backup = cursor\n\n for _ in range(count + 1):\n # Don't read beyond the heap mapping if --naive or corrupted heap.\n if cursor not in heap_region:\n chunk_delims.append(heap_region.end)\n break\n\n size_field = pwndbg.memory.u(cursor + ptr_size)\n real_size = size_field & ~allocator.malloc_align_mask\n prev_inuse = allocator.chunk_flags(size_field)[0]\n\n # Don't repeatedly operate on the same address (e.g. 
chunk size of 0).\n if cursor in chunk_delims or cursor + ptr_size in chunk_delims:\n break\n\n if prev_inuse:\n chunk_delims.append(cursor + ptr_size)\n else:\n chunk_delims.append(cursor)\n\n if (cursor == top_chunk and not naive) or (cursor == heap_region.end - ptr_size*2):\n chunk_delims.append(cursor + ptr_size*2)\n break\n\n cursor += real_size\n\n # Build the output buffer, changing color at each chunk delimiter.\n # TODO: maybe print free chunks in bold or underlined\n color_funcs = [\n generateColorFunction(\"yellow\"),\n generateColorFunction(\"cyan\"),\n generateColorFunction(\"purple\"),\n generateColorFunction(\"green\"),\n generateColorFunction(\"blue\"),\n ]\n\n bin_collections = [\n allocator.fastbins(arena.address),\n allocator.unsortedbin(arena.address),\n allocator.smallbins(arena.address),\n allocator.largebins(arena.address),\n ]\n if allocator.has_tcache():\n # Only check for tcache entries belonging to the current thread,\n # it's difficult (impossible?) to find all the thread caches for a\n # specific heap.\n bin_collections.insert(0, allocator.tcachebins(None))\n\n printed = 0\n out = ''\n asc = ''\n labels = []\n\n cursor = cursor_backup\n\n for c, stop in enumerate(chunk_delims):\n color_func = color_funcs[c % len(color_funcs)]\n\n while cursor != stop:\n if printed % 2 == 0:\n out += \"\\n0x%x\" % cursor\n\n cell = pwndbg.arch.unpack(pwndbg.memory.read(cursor, ptr_size))\n cell_hex = '\\t0x{:0{n}x}'.format(cell, n=ptr_size*2)\n\n out += color_func(cell_hex)\n printed += 1\n\n labels.extend(bin_labels(cursor, bin_collections))\n if cursor == top_chunk:\n labels.append('Top chunk')\n\n asc += bin_ascii(pwndbg.memory.read(cursor, ptr_size))\n if printed % 2 == 0:\n out += '\\t' + color_func(asc) + ('\\t <-- ' + ', '.join(labels) if len(labels) else '')\n asc = ''\n labels = []\n\n cursor += ptr_size\n\n print(out)\n\n\ndef bin_ascii(bs):\n from string import printable\n valid_chars = list(map(ord, set(printable) - set('\\t\\r\\n\\x0c')))\n return ''.join(chr(c) if c in valid_chars else '.'for c in bs)\n\n\ndef bin_labels(addr, collections):\n labels = []\n for bins in collections:\n bins_type = bins.get('type', None)\n if not bins_type:\n continue\n\n for size in filter(lambda x: x != 'type', bins.keys()):\n b = bins[size]\n if isinstance(size, int):\n size = hex(size)\n count = '/{:d}'.format(b[1]) if bins_type == 'tcachebins' else None\n chunks = bin_addrs(b, bins_type)\n for chunk_addr in chunks:\n if addr == chunk_addr:\n labels.append('{:s}[{:s}][{:d}{}]'.format(bins_type, size, chunks.index(addr), count or ''))\n\n return labels\n\n\ndef bin_addrs(b, bins_type):\n addrs = []\n if bins_type == 'fastbins':\n return b\n # tcachebins consists of single linked list and entries count\n elif bins_type == 'tcachebins':\n addrs, _ = b\n # normal bins consists of double linked list and may be corrupted (we can detect corruption)\n else: # normal bin\n addrs, _, _ = b\n return addrs\n\n\ntry_free_parser = argparse.ArgumentParser(description='Check what would happen if free was called with given address')\ntry_free_parser.add_argument('addr', nargs='?', help='Address passed to free')\[email protected](try_free_parser)\[email protected]\[email protected]\ndef try_free(addr):\n addr = int(addr)\n\n # check hook\n free_hook = pwndbg.symbol.address('__free_hook')\n if free_hook is not None:\n if pwndbg.memory.pvoid(free_hook) != 0:\n message.success('__libc_free: will execute __free_hook')\n\n # free(0) has no effect\n if addr == 0:\n message.success('__libc_free: addr is 0, 
nothing to do')\n return\n\n # constants\n allocator = pwndbg.heap.current\n arena = allocator.get_arena()\n\n aligned_lsb = allocator.malloc_align_mask.bit_length()\n size_sz = allocator.size_sz\n malloc_alignment = allocator.malloc_alignment\n malloc_align_mask = allocator.malloc_align_mask\n chunk_minsize = allocator.minsize\n\n ptr_size = pwndbg.arch.ptrsize\n\n def unsigned_size(size):\n # read_chunk()['size'] is signed in pwndbg ;/\n # there may be better way to handle that\n if ptr_size < 8:\n return ctypes.c_uint32(size).value\n x = ctypes.c_uint64(size).value\n return x\n\n def chunksize(chunk_size):\n # maybe move this to ptmalloc.py\n return chunk_size & (~7)\n\n def finalize(errors_found, returned_before_error):\n print('-'*10)\n if returned_before_error:\n print(message.success('Free should succeed!'))\n elif errors_found > 0:\n print(message.error('Errors found!'))\n else:\n print(message.success('All checks passed!'))\n\n\n # mem2chunk\n addr -= 2 * size_sz\n\n # try to get the chunk\n try:\n chunk = read_chunk(addr)\n except gdb.MemoryError as e:\n print(message.error('Can\\'t read chunk at address 0x{:x}, memory error'.format(addr)))\n return\n\n chunk_size = unsigned_size(chunk['size'])\n chunk_size_unmasked = chunksize(chunk_size)\n _, is_mmapped, _ = allocator.chunk_flags(chunk_size)\n\n if is_mmapped:\n print(message.notice('__libc_free: Doing munmap_chunk'))\n return\n\n errors_found = False\n returned_before_error = False\n\n # chunk doesn't overlap memory\n print(message.notice('General checks'))\n max_mem = (1 << (ptr_size*8)) - 1\n if addr + chunk_size >= max_mem:\n err = 'free(): invalid pointer -> &chunk + chunk->size > max memory\\n'\n err += ' 0x{:x} + 0x{:x} > 0x{:x}'\n err = err.format(addr, chunk_size, max_mem)\n print(message.error(err))\n errors_found += 1\n\n # chunk address is aligned\n addr_tmp = addr\n if malloc_alignment != 2 * size_sz:\n addr_tmp = addr + 2 * size_sz\n\n if addr_tmp & malloc_align_mask != 0:\n err = 'free(): invalid pointer -> misaligned chunk\\n'\n err += ' LSB of 0x{:x} are 0b{}, should be 0b{}'\n if addr_tmp != addr:\n err += ' (0x{:x} was added to the address)'.format(2*size_sz)\n err = err.format(addr_tmp, bin(addr_tmp)[-aligned_lsb:], '0'*aligned_lsb)\n print(message.error(err))\n errors_found += 1\n\n # chunk's size is big enough\n if chunk_size_unmasked < chunk_minsize:\n err = 'free(): invalid size -> chunk\\'s size smaller than MINSIZE\\n'\n err += ' size is 0x{:x}, MINSIZE is 0x{:x}'\n err = err.format(chunk_size_unmasked, chunk_minsize)\n print(message.error(err))\n errors_found += 1\n\n # chunk's size is aligned\n if chunk_size_unmasked & malloc_align_mask != 0:\n err = 'free(): invalid size -> chunk\\'s size is not aligned\\n'\n err += ' LSB of size 0x{:x} are 0b{}, should be 0b{}'\n err = err.format(chunk_size_unmasked, bin(chunk_size_unmasked)[-aligned_lsb:], '0'*aligned_lsb)\n print(message.error(err))\n errors_found += 1\n\n # tcache\n if allocator.has_tcache() and 'key' in allocator.tcache_entry.keys():\n tc_idx = (chunk_size_unmasked - chunk_minsize + malloc_alignment - 1) // malloc_alignment\n if tc_idx < allocator.mp['tcache_bins']:\n print(message.notice('Tcache checks'))\n e = addr + 2*size_sz\n e += allocator.tcache_entry.keys().index('key') * ptr_size\n e = pwndbg.memory.pvoid(e)\n tcache_addr = int(allocator.thread_cache.address)\n if e == tcache_addr:\n # todo, actually do checks\n print(message.error('Will do checks for tcache double-free (memory_tcache_double_free)'))\n errors_found += 1\n\n if 
int(allocator.get_tcache()['counts'][tc_idx]) < int(allocator.mp['tcache_count']):\n print(message.success('Using tcache_put'))\n if errors_found == 0:\n returned_before_error = True\n\n if errors_found > 0:\n finalize(errors_found, returned_before_error)\n return\n\n # is fastbin\n if chunk_size_unmasked <= allocator.global_max_fast:\n print(message.notice('Fastbin checks'))\n chunk_fastbin_idx = allocator.fastbin_index(chunk_size_unmasked)\n fastbin_list = allocator.fastbins(int(arena.address))[(chunk_fastbin_idx+2)*(ptr_size*2)]\n\n try:\n next_chunk = read_chunk(addr + chunk_size_unmasked)\n except gdb.MemoryError as e:\n print(message.error('Can\\'t read next chunk at address 0x{:x}, memory error'.format(chunk + chunk_size_unmasked)))\n finalize(errors_found, returned_before_error)\n return\n\n # next chunk's size is big enough and small enough\n next_chunk_size = unsigned_size(next_chunk['size'])\n if next_chunk_size <= 2*size_sz or chunksize(next_chunk_size) >= int(arena['system_mem']):\n err = 'free(): invalid next size (fast) -> next chunk\\'s size not in [2*size_sz; av->system_mem]\\n'\n err += ' next chunk\\'s size is 0x{:x}, 2*size_sz is 0x{:x}, system_mem is 0x{:x}'\n err = err.format(next_chunk_size, 2*size_sz, int(arena['system_mem']))\n print(message.error(err))\n errors_found += 1\n\n # chunk is not the same as the one on top of fastbin[idx]\n if int(fastbin_list[0]) == addr:\n err = 'double free or corruption (fasttop) -> chunk already is on top of fastbin list\\n'\n err += ' fastbin idx == {}'\n err = err.format(chunk_fastbin_idx)\n print(message.error(err))\n errors_found += 1\n\n # chunk's size is ~same as top chunk's size\n fastbin_top_chunk = int(fastbin_list[0])\n if fastbin_top_chunk != 0:\n try:\n fastbin_top_chunk = read_chunk(fastbin_top_chunk)\n except gdb.MemoryError as e:\n print(message.error('Can\\'t read top fastbin chunk at address 0x{:x}, memory error'.format(fastbin_top_chunk)))\n finalize(errors_found, returned_before_error)\n return\n\n fastbin_top_chunk_size = chunksize(unsigned_size(fastbin_top_chunk['size']))\n if chunk_fastbin_idx != allocator.fastbin_index(fastbin_top_chunk_size):\n err = 'invalid fastbin entry (free) -> chunk\\'s size is not near top chunk\\'s size\\n'\n err += ' chunk\\'s size == {}, idx == {}\\n'\n err += ' top chunk\\'s size == {}, idx == {}'\n err += ' if `have_lock` is false then the error is invalid'\n err = err.format(chunk['size'], chunk_fastbin_idx,\n fastbin_top_chunk_size, allocator.fastbin_index(fastbin_top_chunk_size))\n print(message.error(err))\n errors_found += 1\n\n # is not mapped\n elif is_mmapped == 0:\n print(message.notice('Not mapped checks'))\n\n # chunks is not top chunk\n if addr == int(arena['top']):\n err = 'double free or corruption (top) -> chunk is top chunk'\n print(message.error(err))\n errors_found += 1\n\n # next chunk is not beyond the boundaries of the arena\n NONCONTIGUOUS_BIT = 2\n top_chunk_addr = (int(arena['top']))\n top_chunk = read_chunk(top_chunk_addr)\n next_chunk_addr = addr + chunk_size_unmasked\n\n # todo: in libc, addition may overflow\n if (arena['flags'] & NONCONTIGUOUS_BIT == 0) and next_chunk_addr >= top_chunk_addr + chunksize(top_chunk['size']):\n err = 'double free or corruption (out) -> next chunk is beyond arena and arena is contiguous\\n'\n err += 'next chunk at 0x{:x}, end of arena at 0x{:x}'\n err = err.format(next_chunk_addr, top_chunk_addr + chunksize(unsigned_size(top_chunk['size'])))\n print(message.error(err))\n errors_found += 1\n\n # now we need to dereference 
chunk\n try :\n next_chunk = read_chunk(next_chunk_addr)\n next_chunk_size = chunksize(unsigned_size(next_chunk['size']))\n except (OverflowError, gdb.MemoryError) as e:\n print(message.error('Can\\'t read next chunk at address 0x{:x}'.format(next_chunk_addr)))\n finalize(errors_found, returned_before_error)\n return\n\n # next chunk's P bit is set\n prev_inuse,_,_ = allocator.chunk_flags(next_chunk['size'])\n if prev_inuse == 0:\n err = 'double free or corruption (!prev) -> next chunk\\'s previous-in-use bit is 0\\n'\n print(message.error(err))\n errors_found += 1\n\n # next chunk's size is big enough and small enough\n if next_chunk_size <= 2*size_sz or next_chunk_size >= int(arena['system_mem']):\n err = 'free(): invalid next size (normal) -> next chunk\\'s size not in [2*size_sz; system_mem]\\n'\n err += 'next chunk\\'s size is 0x{:x}, 2*size_sz is 0x{:x}, system_mem is 0x{:x}'\n err = err.format(next_chunk_size, 2*size_sz, int(arena['system_mem']))\n print(message.error(err))\n errors_found += 1\n\n # consolidate backward\n prev_inuse,_,_ = allocator.chunk_flags(chunk['size'])\n if prev_inuse == 0:\n print(message.notice('Backward consolidation'))\n prev_size = chunksize(unsigned_size(chunk['prev_size']))\n prev_chunk_addr = addr - prev_size\n\n try :\n prev_chunk = read_chunk(prev_chunk_addr)\n prev_chunk_size = chunksize(unsigned_size(prev_chunk['size']))\n except (OverflowError, gdb.MemoryError) as e:\n print(message.error('Can\\'t read next chunk at address 0x{:x}'.format(prev_chunk_addr)))\n finalize(errors_found, returned_before_error)\n return\n\n if unsigned_size(prev_chunk['size']) != prev_size:\n err = 'corrupted size vs. prev_size while consolidating\\n'\n err += 'prev_size field is 0x{:x}, prev chunk at 0x{:x}, prev chunk size is 0x{:x}'\n err = err.format(prev_size, prev_chunk_addr, unsigned_size(prev_chunk['size']))\n print(message.error(err))\n errors_found += 1\n else:\n addr = prev_chunk_addr\n chunk_size += prev_size\n chunk_size_unmasked += prev_size\n try_unlink(addr)\n\n # consolidate forward\n if next_chunk_addr != top_chunk_addr:\n print(message.notice('Next chunk is not top chunk'))\n try :\n next_next_chunk_addr = next_chunk_addr + next_chunk_size\n next_next_chunk = read_chunk(next_next_chunk_addr)\n except (OverflowError, gdb.MemoryError) as e:\n print(message.error('Can\\'t read next chunk at address 0x{:x}'.format(next_next_chunk_addr)))\n finalize(errors_found, returned_before_error)\n return\n \n prev_inuse,_,_ = allocator.chunk_flags(next_next_chunk['size'])\n if prev_inuse == 0:\n print(message.notice('Forward consolidation'))\n try_unlink(next_chunk_addr)\n chunk_size += next_chunk_size\n chunk_size_unmasked += next_chunk_size\n else:\n print(message.notice('Clearing next chunk\\'s P bit'))\n\n # unsorted bin fd->bk should be unsorted bean\n unsorted_addr = int(arena['bins']) - 2*ptr_size\n try:\n unsorted = read_chunk(unsorted_addr)\n try:\n if read_chunk(unsorted['fd'])['bk'] != unsorted_addr:\n err = 'free(): corrupted unsorted chunks -> unsorted_chunk->fd->bk != unsorted_chunk\\n'\n err += 'unsorted at 0x{:x}, unsorted->fd == 0x{:x}, unsorted->fd->bk == 0x{:x}'\n err = err.format(unsorted_addr, unsorted['fd'], read_chunk(unsorted['fd'])['bk'])\n print(message.error(err))\n errors_found += 1\n except (OverflowError, gdb.MemoryError) as e:\n print(message.error('Can\\'t read chunk at 0x{:x}, it is unsorted bin fd'.format(unsorted['fd'])))\n errors_found += 1\n except (OverflowError, gdb.MemoryError) as e:\n print(message.error('Can\\'t read unsorted 
bin chunk at 0x{:x}'.format(unsorted_addr)))\n errors_found += 1\n\n else:\n print(message.notice('Next chunk is top chunk'))\n chunk_size += next_chunk_size\n chunk_size_unmasked += next_chunk_size\n\n # todo: this may vary strongly\n FASTBIN_CONSOLIDATION_THRESHOLD = 65536\n if chunk_size_unmasked >= FASTBIN_CONSOLIDATION_THRESHOLD:\n print(message.notice('Doing malloc_consolidate and systrim/heap_trim'))\n\n #is mapped\n else:\n message.notice('Doing munmap_chunk')\n\n finalize(errors_found, returned_before_error)\n\n\ndef try_unlink(addr):\n pass\n\n",
"path": "pwndbg/commands/heap.py"
}
] | [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport argparse\nimport ctypes\nimport struct\n\nimport gdb\nimport six\n\nimport pwndbg.color.context as C\nimport pwndbg.color.memory as M\nimport pwndbg.commands\nimport pwndbg.typeinfo\nfrom pwndbg.color import generateColorFunction\nfrom pwndbg.color import message\n\n\ndef read_chunk(addr):\n \"\"\"Read a chunk's metadata.\"\"\"\n # In GLIBC versions <= 2.24 the `mchunk_[prev_]size` field was named `[prev_]size`.\n # To support both versions, change the new names to the old ones here so that\n # the rest of the code can deal with uniform names.\n renames = {\n \"mchunk_size\": \"size\",\n \"mchunk_prev_size\": \"prev_size\",\n }\n val = pwndbg.typeinfo.read_gdbvalue(\"struct malloc_chunk\", addr)\n return dict({ renames.get(key, key): int(val[key]) for key in val.type.keys() })\n\n\ndef format_bin(bins, verbose=False, offset=None):\n allocator = pwndbg.heap.current\n if offset is None:\n offset = allocator.chunk_key_offset('fd')\n\n result = []\n bins_type = bins.pop('type')\n\n for size in bins:\n b = bins[size]\n count, is_chain_corrupted = None, False\n\n # fastbins consists of only single linked list\n if bins_type == 'fastbins':\n chain_fd = b\n # tcachebins consists of single linked list and entries count\n elif bins_type == 'tcachebins':\n chain_fd, count = b\n # normal bins consists of double linked list and may be corrupted (we can detect corruption)\n else: # normal bin\n chain_fd, chain_bk, is_chain_corrupted = b\n\n if not verbose and (chain_fd == [0] and not count) and not is_chain_corrupted:\n continue\n\n if bins_type == 'tcachebins':\n limit = 8\n if count <= 7:\n limit = count + 1\n formatted_chain = pwndbg.chain.format(chain_fd[0], offset=offset, limit=limit)\n else:\n formatted_chain = pwndbg.chain.format(chain_fd[0], offset=offset)\n\n\n if isinstance(size, int):\n size = hex(size)\n\n if is_chain_corrupted:\n line = message.hint(size) + message.error(' [corrupted]') + '\\n'\n line += message.hint('FD: ') + formatted_chain + '\\n'\n line += message.hint('BK: ') + pwndbg.chain.format(chain_bk[0], offset=allocator.chunk_key_offset('bk'))\n else:\n if count is not None:\n line = (message.hint(size) + message.hint(' [%3d]' % count) + ': ').ljust(13)\n else:\n line = (message.hint(size) + ': ').ljust(13)\n line += formatted_chain\n\n result.append(line)\n\n if not result:\n result.append(message.hint('empty'))\n\n return result\n\n\nparser = argparse.ArgumentParser()\nparser.description = \"Iteratively print chunks on a heap, default to the current thread's active heap.\"\nparser.add_argument(\"addr\", nargs=\"?\", type=int, default=None, help=\"Address of the first chunk (malloc_chunk struct start, prev_size field).\")\nparser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", help=\"Print all chunk fields, even unused ones.\")\nparser.add_argument(\"-s\", \"--simple\", action=\"store_true\", help=\"Simply print malloc_chunk struct's contents.\")\[email protected](parser)\[email protected]\[email protected]\[email protected]\ndef heap(addr=None, verbose=False, simple=False):\n \"\"\"Iteratively print chunks on a heap, default to the current thread's\n active heap.\n \"\"\"\n allocator = pwndbg.heap.current\n heap_region = allocator.get_heap_boundaries(addr)\n arena = allocator.get_arena_for_chunk(addr) if addr else allocator.get_arena()\n top_chunk 
= arena['top']\n ptr_size = allocator.size_sz\n\n # Calculate where to start printing; if an address was supplied, use that,\n # if this heap belongs to the main arena, start at the beginning of the\n # heap's mapping, otherwise, compensate for the presence of a heap_info\n # struct and possibly an arena.\n if addr:\n cursor = int(addr)\n elif arena == allocator.main_arena:\n cursor = heap_region.start\n else:\n cursor = heap_region.start + allocator.heap_info.sizeof\n if pwndbg.vmmap.find(allocator.get_heap(heap_region.start)['ar_ptr']) == heap_region:\n # Round up to a 2-machine-word alignment after an arena to\n # compensate for the presence of the have_fastchunks variable\n # in GLIBC versions >= 2.27.\n cursor += (allocator.malloc_state.sizeof + ptr_size) & ~allocator.malloc_align_mask\n\n # i686 alignment heuristic\n first_chunk_size = pwndbg.arch.unpack(pwndbg.memory.read(cursor + ptr_size, ptr_size))\n if first_chunk_size == 0:\n cursor += ptr_size * 2\n\n while cursor in heap_region:\n malloc_chunk(cursor, verbose=verbose, simple=simple)\n\n if cursor == top_chunk:\n break\n\n size_field = pwndbg.memory.u(cursor + allocator.chunk_key_offset('size'))\n real_size = size_field & ~allocator.malloc_align_mask\n cursor += real_size\n\n # Avoid an infinite loop when a chunk's size is 0.\n if real_size == 0:\n break\n\n\nparser = argparse.ArgumentParser()\nparser.description = \"Print the contents of an arena, default to the current thread's arena.\"\nparser.add_argument(\"addr\", nargs=\"?\", type=int, default=None, help=\"Address of the arena.\")\[email protected](parser)\[email protected]\[email protected]\[email protected]\ndef arena(addr=None):\n \"\"\"Print the contents of an arena, default to the current thread's arena.\"\"\"\n allocator = pwndbg.heap.current\n arena = allocator.get_arena(addr)\n print(arena)\n\n\nparser = argparse.ArgumentParser()\nparser.description = \"List this process's arenas.\"\[email protected](parser)\[email protected]\[email protected]\[email protected]\ndef arenas():\n \"\"\"Lists this process's arenas.\"\"\"\n allocator = pwndbg.heap.current\n for ar in allocator.arenas:\n print(ar)\n\n\nparser = argparse.ArgumentParser()\nparser.description = \"Print a thread's tcache contents, default to the current thread's tcache.\"\nparser.add_argument(\"addr\", nargs=\"?\", type=int, default=None, help=\"Address of the tcache.\")\[email protected](parser)\[email protected]\[email protected]\[email protected]\[email protected]\ndef tcache(addr=None):\n \"\"\"Print a thread's tcache contents, default to the current thread's\n tcache.\n \"\"\"\n allocator = pwndbg.heap.current\n tcache = allocator.get_tcache(addr)\n print(tcache)\n\n\nparser = argparse.ArgumentParser()\nparser.description = \"Print the mp_ struct's contents.\"\[email protected](parser)\[email protected]\[email protected]\[email protected]\ndef mp():\n \"\"\"Print the mp_ struct's contents.\"\"\"\n allocator = pwndbg.heap.current\n print(allocator.mp)\n\n\nparser = argparse.ArgumentParser()\nparser.description = \"Print relevant information about an arena's top chunk, default to current thread's arena.\"\nparser.add_argument(\"addr\", nargs=\"?\", type=int, default=None, help=\"Address of the arena.\")\[email protected](parser)\[email protected]\[email protected]\[email protected]\ndef top_chunk(addr=None):\n \"\"\"Print relevant information about an arena's top chunk, default to the\n current thread's arena.\n \"\"\"\n allocator = pwndbg.heap.current\n arena = allocator.get_arena(addr)\n address = 
arena['top']\n size = pwndbg.memory.u(int(address) + allocator.chunk_key_offset('size'))\n\n out = message.off(\"Top chunk\\n\") + \"Addr: {}\\nSize: 0x{:02x}\".format(M.get(address), size)\n print(out)\n\n\nparser = argparse.ArgumentParser()\nparser.description = \"Print a chunk.\"\nparser.add_argument(\"addr\", type=int, help=\"Address of the chunk (malloc_chunk struct start, prev_size field).\")\nparser.add_argument(\"-f\", \"--fake\", action=\"store_true\", help=\"Is this a fake chunk?\")\nparser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", help=\"Print all chunk fields, even unused ones.\")\nparser.add_argument(\"-s\", \"--simple\", action=\"store_true\", help=\"Simply print malloc_chunk struct's contents.\")\[email protected](parser)\[email protected]\[email protected]\[email protected]\ndef malloc_chunk(addr, fake=False, verbose=False, simple=False):\n \"\"\"Print a malloc_chunk struct's contents.\"\"\"\n # points to the real start of the chunk\n cursor = int(addr)\n\n allocator = pwndbg.heap.current\n ptr_size = allocator.size_sz\n\n size_field = pwndbg.memory.u(cursor + allocator.chunk_key_offset('size'))\n real_size = size_field & ~allocator.malloc_align_mask\n\n headers_to_print = [] # both state (free/allocated) and flags\n fields_to_print = set() # in addition to addr and size\n out_fields = \"Addr: {}\\n\".format(M.get(cursor))\n\n arena = allocator.get_arena_for_chunk(cursor)\n arena_address = None\n\n if fake:\n headers_to_print.append(message.on(\"Fake chunk\"))\n verbose = True # print all fields for fake chunks\n\n if simple:\n chunk = read_chunk(cursor)\n\n if not headers_to_print:\n headers_to_print.append(message.hint(M.get(cursor)))\n\n prev_inuse, is_mmapped, non_main_arena = allocator.chunk_flags(int(chunk['size']))\n if prev_inuse:\n headers_to_print.append(message.hint('PREV_INUSE'))\n if is_mmapped:\n headers_to_print.append(message.hint('IS_MMAPED'))\n if non_main_arena:\n headers_to_print.append(message.hint('NON_MAIN_ARENA'))\n\n print(' | '.join(headers_to_print))\n for key, val in chunk.items():\n print(message.system(key) + \": 0x{:02x}\".format(int(val)))\n print('')\n return\n\n is_top = False\n if arena:\n arena_address = arena.address\n top_chunk = arena['top']\n if cursor == top_chunk:\n headers_to_print.append(message.off(\"Top chunk\"))\n is_top = True\n\n if not is_top:\n fastbins = allocator.fastbins(arena_address) or {}\n smallbins = allocator.smallbins(arena_address) or {}\n largebins = allocator.largebins(arena_address) or {}\n unsortedbin = allocator.unsortedbin(arena_address) or {}\n if allocator.has_tcache():\n tcachebins = allocator.tcachebins(None)\n\n if real_size in fastbins.keys() and cursor in fastbins[real_size]:\n headers_to_print.append(message.on(\"Free chunk (fastbins)\"))\n if not verbose:\n fields_to_print.add('fd')\n\n elif real_size in smallbins.keys() and cursor in bin_addrs(smallbins[real_size], \"smallbins\"):\n headers_to_print.append(message.on(\"Free chunk (smallbins)\"))\n if not verbose:\n fields_to_print.update(['fd', 'bk'])\n\n elif real_size >= list(largebins.items())[0][0] and cursor in bin_addrs(largebins[(list(largebins.items())[allocator.largebin_index(real_size) - 64][0])], \"largebins\"):\n headers_to_print.append(message.on(\"Free chunk (largebins)\"))\n if not verbose:\n fields_to_print.update(['fd', 'bk', 'fd_nextsize', 'bk_nextsize'])\n \n elif cursor in bin_addrs(unsortedbin['all'], \"unsortedbin\"):\n headers_to_print.append(message.on(\"Free chunk (unsortedbin)\"))\n if not verbose:\n 
fields_to_print.update(['fd', 'bk'])\n\n elif allocator.has_tcache() and real_size in tcachebins.keys() and cursor + ptr_size*2 in bin_addrs(tcachebins[real_size], \"tcachebins\"):\n headers_to_print.append(message.on(\"Free chunk (tcache)\"))\n if not verbose:\n fields_to_print.add('fd')\n\n else:\n headers_to_print.append(message.hint(\"Allocated chunk\"))\n\n if verbose:\n fields_to_print.update(['prev_size', 'size', 'fd', 'bk', 'fd_nextsize', 'bk_nextsize'])\n else:\n out_fields += \"Size: 0x{:02x}\\n\".format(size_field)\n\n prev_inuse, is_mmapped, non_main_arena = allocator.chunk_flags(size_field)\n if prev_inuse:\n headers_to_print.append(message.hint('PREV_INUSE'))\n if is_mmapped:\n headers_to_print.append(message.hint('IS_MMAPED'))\n if non_main_arena:\n headers_to_print.append(message.hint('NON_MAIN_ARENA'))\n\n fields_ordered = ['prev_size', 'size', 'fd', 'bk', 'fd_nextsize', 'bk_nextsize']\n for field_to_print in fields_ordered:\n if field_to_print in fields_to_print:\n out_fields += message.system(field_to_print) + \": 0x{:02x}\\n\".format(pwndbg.memory.u(cursor + allocator.chunk_key_offset(field_to_print)))\n\n print(' | '.join(headers_to_print) + \"\\n\" + out_fields)\n\n\nparser = argparse.ArgumentParser()\nparser.description = \"Print the contents of all an arena's bins and a thread's tcache, default to the current thread's arena and tcache.\"\nparser.add_argument(\"addr\", nargs=\"?\", type=int, default=None, help=\"Address of the arena.\")\nparser.add_argument(\"tcache_addr\", nargs=\"?\", type=int, default=None, help=\"Address of the tcache.\")\[email protected](parser)\[email protected]\[email protected]\[email protected]\ndef bins(addr=None, tcache_addr=None):\n \"\"\"Print the contents of all an arena's bins and a thread's tcache,\n default to the current thread's arena and tcache.\n \"\"\"\n if pwndbg.heap.current.has_tcache():\n tcachebins(tcache_addr)\n fastbins(addr)\n unsortedbin(addr)\n smallbins(addr)\n largebins(addr)\n\n\nparser = argparse.ArgumentParser()\nparser.description = \"Print the contents of an arena's fastbins, default to the current thread's arena.\"\nparser.add_argument(\"addr\", nargs=\"?\", type=int, default=None, help=\"Address of the arena.\")\nparser.add_argument(\"verbose\", nargs=\"?\", type=bool, default=True, help=\"Show extra detail.\")\[email protected](parser)\[email protected]\[email protected]\[email protected]\ndef fastbins(addr=None, verbose=True):\n \"\"\"Print the contents of an arena's fastbins, default to the current\n thread's arena.\n \"\"\"\n allocator = pwndbg.heap.current\n fastbins = allocator.fastbins(addr)\n\n if fastbins is None:\n return\n\n formatted_bins = format_bin(fastbins, verbose)\n\n print(C.banner('fastbins'))\n for node in formatted_bins:\n print(node)\n\n\nparser = argparse.ArgumentParser()\nparser.description = \"Print the contents of an arena's unsortedbin, default to the current thread's arena.\"\nparser.add_argument(\"addr\", nargs=\"?\", type=int, default=None, help=\"Address of the arena.\")\nparser.add_argument(\"verbose\", nargs=\"?\", type=bool, default=True, help=\"Show extra detail.\")\[email protected](parser)\[email protected]\[email protected]\[email protected]\ndef unsortedbin(addr=None, verbose=True):\n \"\"\"Print the contents of an arena's unsortedbin, default to the current\n thread's arena.\n \"\"\"\n allocator = pwndbg.heap.current\n unsortedbin = allocator.unsortedbin(addr)\n\n if unsortedbin is None:\n return\n\n formatted_bins = format_bin(unsortedbin, verbose)\n\n 
print(C.banner('unsortedbin'))\n for node in formatted_bins:\n print(node)\n\n\nparser = argparse.ArgumentParser()\nparser.description = \"Print the contents of an arena's smallbins, default to the current thread's arena.\"\nparser.add_argument(\"addr\", nargs=\"?\", type=int, default=None, help=\"Address of the arena.\")\nparser.add_argument(\"verbose\", nargs=\"?\", type=bool, default=False, help=\"Show extra detail.\")\[email protected](parser)\[email protected]\[email protected]\[email protected]\ndef smallbins(addr=None, verbose=False):\n \"\"\"Print the contents of an arena's smallbins, default to the current\n thread's arena.\n \"\"\"\n allocator = pwndbg.heap.current\n smallbins = allocator.smallbins(addr)\n\n if smallbins is None:\n return\n\n formatted_bins = format_bin(smallbins, verbose)\n\n print(C.banner('smallbins'))\n for node in formatted_bins:\n print(node)\n\n\nparser = argparse.ArgumentParser()\nparser.description = \"Print the contents of an arena's largebins, default to the current thread's arena.\"\nparser.add_argument(\"addr\", nargs=\"?\", type=int, default=None, help=\"Address of the arena.\")\nparser.add_argument(\"verbose\", nargs=\"?\", type=bool, default=False, help=\"Show extra detail.\")\[email protected](parser)\[email protected]\[email protected]\[email protected]\ndef largebins(addr=None, verbose=False):\n \"\"\"Print the contents of an arena's largebins, default to the current\n thread's arena.\n \"\"\"\n allocator = pwndbg.heap.current\n largebins = allocator.largebins(addr)\n\n if largebins is None:\n return\n\n formatted_bins = format_bin(largebins, verbose)\n\n print(C.banner('largebins'))\n for node in formatted_bins:\n print(node)\n\n\nparser = argparse.ArgumentParser()\nparser.description = \"Print the contents of a tcache, default to the current thread's tcache.\"\nparser.add_argument(\"addr\", nargs=\"?\", type=int, default=None, help=\"The address of the tcache bins.\")\nparser.add_argument(\"verbose\", nargs=\"?\", type=bool, default=False, help=\"Whether to show more details or not.\")\[email protected](parser)\[email protected]\[email protected]\[email protected]\[email protected]\ndef tcachebins(addr=None, verbose=False):\n \"\"\"Print the contents of a tcache, default to the current thread's tcache.\"\"\"\n allocator = pwndbg.heap.current\n tcachebins = allocator.tcachebins(addr)\n\n if tcachebins is None:\n return\n\n formatted_bins = format_bin(tcachebins, verbose, offset = allocator.tcache_next_offset)\n\n print(C.banner('tcachebins'))\n for node in formatted_bins:\n print(node)\n\n\nparser = argparse.ArgumentParser()\nparser.description = \"Find candidate fake fast chunks overlapping the specified address.\"\nparser.add_argument(\"addr\", type=int, help=\"Address of the word-sized value to overlap.\")\nparser.add_argument(\"size\", nargs=\"?\", type=int, default=None, help=\"Size of fake chunks to find.\")\[email protected](parser)\[email protected]\[email protected]\[email protected]\ndef find_fake_fast(addr, size=None):\n \"\"\"Find candidate fake fast chunks overlapping the specified address.\"\"\"\n psize = pwndbg.arch.ptrsize\n allocator = pwndbg.heap.current\n align = allocator.malloc_alignment\n min_fast = allocator.min_chunk_size\n max_fast = allocator.global_max_fast\n max_fastbin = allocator.fastbin_index(max_fast)\n start = int(addr) - max_fast + psize\n mem = pwndbg.memory.read(start, max_fast - psize, partial=True)\n\n fmt = {\n 'little': '<',\n 'big': '>'\n }[pwndbg.arch.endian] + {\n 4: 'I',\n 8: 'Q'\n }[psize]\n\n if 
size is None:\n sizes = range(min_fast, max_fast + 1, align)\n else:\n sizes = [int(size)]\n\n print(C.banner(\"FAKE CHUNKS\"))\n for size in sizes:\n fastbin = allocator.fastbin_index(size)\n for offset in range((max_fastbin - fastbin) * align, max_fast - align + 1):\n candidate = mem[offset : offset + psize]\n if len(candidate) == psize:\n value = struct.unpack(fmt, candidate)[0]\n if allocator.fastbin_index(value) == fastbin:\n malloc_chunk(start+offset-psize, fake=True)\n\n\nparser = argparse.ArgumentParser()\nparser.description = \"Visualize chunks on a heap, default to the current arena's active heap.\"\nparser.add_argument(\"count\", nargs=\"?\", type=lambda n:max(int(n, 0),1), default=10, help=\"Number of chunks to visualize.\")\nparser.add_argument(\"addr\", nargs=\"?\", default=None, help=\"Address of the first chunk.\")\nparser.add_argument(\"--naive\", \"-n\", action=\"store_true\", default=False, help=\"Attempt to keep printing beyond the top chunk.\")\[email protected](parser)\[email protected]\[email protected]\[email protected]\ndef vis_heap_chunks(addr=None, count=None, naive=None):\n \"\"\"Visualize chunks on a heap, default to the current arena's active heap.\"\"\"\n allocator = pwndbg.heap.current\n heap_region = allocator.get_heap_boundaries(addr)\n arena = allocator.get_arena_for_chunk(addr) if addr else allocator.get_arena()\n\n top_chunk = arena['top']\n ptr_size = allocator.size_sz\n\n # Build a list of addresses that delimit each chunk.\n chunk_delims = []\n if addr:\n cursor = int(addr)\n elif arena == allocator.main_arena:\n cursor = heap_region.start\n else:\n cursor = heap_region.start + allocator.heap_info.sizeof\n if pwndbg.vmmap.find(allocator.get_heap(heap_region.start)['ar_ptr']) == heap_region:\n # Round up to a 2-machine-word alignment after an arena to\n # compensate for the presence of the have_fastchunks variable\n # in GLIBC versions >= 2.27.\n cursor += (allocator.malloc_state.sizeof + ptr_size) & ~allocator.malloc_align_mask\n\n # Check if there is an alignment at the start of the heap, adjust if necessary.\n if not addr:\n first_chunk_size = pwndbg.arch.unpack(pwndbg.memory.read(cursor + ptr_size, ptr_size))\n if first_chunk_size == 0:\n cursor += ptr_size * 2\n\n cursor_backup = cursor\n\n for _ in range(count + 1):\n # Don't read beyond the heap mapping if --naive or corrupted heap.\n if cursor not in heap_region:\n chunk_delims.append(heap_region.end)\n break\n\n size_field = pwndbg.memory.u(cursor + ptr_size)\n real_size = size_field & ~allocator.malloc_align_mask\n prev_inuse = allocator.chunk_flags(size_field)[0]\n\n # Don't repeatedly operate on the same address (e.g. 
chunk size of 0).\n if cursor in chunk_delims or cursor + ptr_size in chunk_delims:\n break\n\n if prev_inuse:\n chunk_delims.append(cursor + ptr_size)\n else:\n chunk_delims.append(cursor)\n\n if (cursor == top_chunk and not naive) or (cursor == heap_region.end - ptr_size*2):\n chunk_delims.append(cursor + ptr_size*2)\n break\n\n cursor += real_size\n\n # Build the output buffer, changing color at each chunk delimiter.\n # TODO: maybe print free chunks in bold or underlined\n color_funcs = [\n generateColorFunction(\"yellow\"),\n generateColorFunction(\"cyan\"),\n generateColorFunction(\"purple\"),\n generateColorFunction(\"green\"),\n generateColorFunction(\"blue\"),\n ]\n\n bin_collections = [\n allocator.fastbins(arena.address),\n allocator.unsortedbin(arena.address),\n allocator.smallbins(arena.address),\n allocator.largebins(arena.address),\n ]\n if allocator.has_tcache():\n # Only check for tcache entries belonging to the current thread,\n # it's difficult (impossible?) to find all the thread caches for a\n # specific heap.\n bin_collections.insert(0, allocator.tcachebins(None))\n\n printed = 0\n out = ''\n asc = ''\n labels = []\n\n cursor = cursor_backup\n\n for c, stop in enumerate(chunk_delims):\n color_func = color_funcs[c % len(color_funcs)]\n\n while cursor != stop:\n if printed % 2 == 0:\n out += \"\\n0x%x\" % cursor\n\n cell = pwndbg.arch.unpack(pwndbg.memory.read(cursor, ptr_size))\n cell_hex = '\\t0x{:0{n}x}'.format(cell, n=ptr_size*2)\n\n out += color_func(cell_hex)\n printed += 1\n\n labels.extend(bin_labels(cursor, bin_collections))\n if cursor == top_chunk:\n labels.append('Top chunk')\n\n asc += bin_ascii(pwndbg.memory.read(cursor, ptr_size))\n if printed % 2 == 0:\n out += '\\t' + color_func(asc) + ('\\t <-- ' + ', '.join(labels) if len(labels) else '')\n asc = ''\n labels = []\n\n cursor += ptr_size\n\n print(out)\n\n\ndef bin_ascii(bs):\n from string import printable\n valid_chars = list(map(ord, set(printable) - set('\\t\\r\\n\\x0c')))\n return ''.join(chr(c) if c in valid_chars else '.'for c in bs)\n\n\ndef bin_labels(addr, collections):\n labels = []\n for bins in collections:\n bins_type = bins.get('type', None)\n if not bins_type:\n continue\n\n for size in filter(lambda x: x != 'type', bins.keys()):\n b = bins[size]\n if isinstance(size, int):\n size = hex(size)\n count = '/{:d}'.format(b[1]) if bins_type == 'tcachebins' else None\n chunks = bin_addrs(b, bins_type)\n for chunk_addr in chunks:\n if addr == chunk_addr:\n labels.append('{:s}[{:s}][{:d}{}]'.format(bins_type, size, chunks.index(addr), count or ''))\n\n return labels\n\n\ndef bin_addrs(b, bins_type):\n addrs = []\n if bins_type == 'fastbins':\n return b\n # tcachebins consists of single linked list and entries count\n elif bins_type == 'tcachebins':\n addrs, _ = b\n # normal bins consists of double linked list and may be corrupted (we can detect corruption)\n else: # normal bin\n addrs, _, _ = b\n return addrs\n\n\ntry_free_parser = argparse.ArgumentParser(description='Check what would happen if free was called with given address')\ntry_free_parser.add_argument('addr', nargs='?', help='Address passed to free')\[email protected](try_free_parser)\[email protected]\[email protected]\ndef try_free(addr):\n addr = int(addr)\n\n # check hook\n free_hook = pwndbg.symbol.address('__free_hook')\n if free_hook is not None:\n if pwndbg.memory.pvoid(free_hook) != 0:\n message.success('__libc_free: will execute __free_hook')\n\n # free(0) has no effect\n if addr == 0:\n message.success('__libc_free: addr is 0, 
nothing to do')\n return\n\n # constants\n allocator = pwndbg.heap.current\n arena = allocator.get_arena()\n\n aligned_lsb = allocator.malloc_align_mask.bit_length()\n size_sz = allocator.size_sz\n malloc_alignment = allocator.malloc_alignment\n malloc_align_mask = allocator.malloc_align_mask\n chunk_minsize = allocator.minsize\n\n ptr_size = pwndbg.arch.ptrsize\n\n def unsigned_size(size):\n # read_chunk()['size'] is signed in pwndbg ;/\n # there may be better way to handle that\n if ptr_size < 8:\n return ctypes.c_uint32(size).value\n x = ctypes.c_uint64(size).value\n return x\n\n def chunksize(chunk_size):\n # maybe move this to ptmalloc.py\n return chunk_size & (~7)\n\n def finalize(errors_found, returned_before_error):\n print('-'*10)\n if returned_before_error:\n print(message.success('Free should succeed!'))\n elif errors_found > 0:\n print(message.error('Errors found!'))\n else:\n print(message.success('All checks passed!'))\n\n\n # mem2chunk\n addr -= 2 * size_sz\n\n # try to get the chunk\n try:\n chunk = read_chunk(addr)\n except gdb.MemoryError as e:\n print(message.error('Can\\'t read chunk at address 0x{:x}, memory error'.format(addr)))\n return\n\n chunk_size = unsigned_size(chunk['size'])\n chunk_size_unmasked = chunksize(chunk_size)\n _, is_mmapped, _ = allocator.chunk_flags(chunk_size)\n\n if is_mmapped:\n print(message.notice('__libc_free: Doing munmap_chunk'))\n return\n\n errors_found = False\n returned_before_error = False\n\n # chunk doesn't overlap memory\n print(message.notice('General checks'))\n max_mem = (1 << (ptr_size*8)) - 1\n if addr + chunk_size >= max_mem:\n err = 'free(): invalid pointer -> &chunk + chunk->size > max memory\\n'\n err += ' 0x{:x} + 0x{:x} > 0x{:x}'\n err = err.format(addr, chunk_size, max_mem)\n print(message.error(err))\n errors_found += 1\n\n # chunk address is aligned\n addr_tmp = addr\n if malloc_alignment != 2 * size_sz:\n addr_tmp = addr + 2 * size_sz\n\n if addr_tmp & malloc_align_mask != 0:\n err = 'free(): invalid pointer -> misaligned chunk\\n'\n err += ' LSB of 0x{:x} are 0b{}, should be 0b{}'\n if addr_tmp != addr:\n err += ' (0x{:x} was added to the address)'.format(2*size_sz)\n err = err.format(addr_tmp, bin(addr_tmp)[-aligned_lsb:], '0'*aligned_lsb)\n print(message.error(err))\n errors_found += 1\n\n # chunk's size is big enough\n if chunk_size_unmasked < chunk_minsize:\n err = 'free(): invalid size -> chunk\\'s size smaller than MINSIZE\\n'\n err += ' size is 0x{:x}, MINSIZE is 0x{:x}'\n err = err.format(chunk_size_unmasked, chunk_minsize)\n print(message.error(err))\n errors_found += 1\n\n # chunk's size is aligned\n if chunk_size_unmasked & malloc_align_mask != 0:\n err = 'free(): invalid size -> chunk\\'s size is not aligned\\n'\n err += ' LSB of size 0x{:x} are 0b{}, should be 0b{}'\n err = err.format(chunk_size_unmasked, bin(chunk_size_unmasked)[-aligned_lsb:], '0'*aligned_lsb)\n print(message.error(err))\n errors_found += 1\n\n # tcache\n if allocator.has_tcache() and 'key' in allocator.tcache_entry.keys():\n tc_idx = (chunk_size_unmasked - chunk_minsize + malloc_alignment - 1) // malloc_alignment\n if tc_idx < allocator.mp['tcache_bins']:\n print(message.notice('Tcache checks'))\n e = addr + 2*size_sz\n e += allocator.tcache_entry.keys().index('key') * ptr_size\n e = pwndbg.memory.pvoid(e)\n tcache_addr = int(allocator.thread_cache.address)\n if e == tcache_addr:\n # todo, actually do checks\n print(message.error('Will do checks for tcache double-free (memory_tcache_double_free)'))\n errors_found += 1\n\n if 
int(allocator.get_tcache()['counts'][tc_idx]) < int(allocator.mp['tcache_count']):\n print(message.success('Using tcache_put'))\n if errors_found == 0:\n returned_before_error = True\n\n if errors_found > 0:\n finalize(errors_found, returned_before_error)\n return\n\n # is fastbin\n if chunk_size_unmasked <= allocator.global_max_fast:\n print(message.notice('Fastbin checks'))\n chunk_fastbin_idx = allocator.fastbin_index(chunk_size_unmasked)\n fastbin_list = allocator.fastbins(int(arena.address))[(chunk_fastbin_idx+2)*(ptr_size*2)]\n\n try:\n next_chunk = read_chunk(addr + chunk_size_unmasked)\n except gdb.MemoryError as e:\n print(message.error('Can\\'t read next chunk at address 0x{:x}, memory error'.format(chunk + chunk_size_unmasked)))\n finalize(errors_found, returned_before_error)\n return\n\n # next chunk's size is big enough and small enough\n next_chunk_size = unsigned_size(next_chunk['size'])\n if next_chunk_size <= 2*size_sz or chunksize(next_chunk_size) >= int(arena['system_mem']):\n err = 'free(): invalid next size (fast) -> next chunk\\'s size not in [2*size_sz; av->system_mem]\\n'\n err += ' next chunk\\'s size is 0x{:x}, 2*size_sz is 0x{:x}, system_mem is 0x{:x}'\n err = err.format(next_chunk_size, 2*size_sz, int(arena['system_mem']))\n print(message.error(err))\n errors_found += 1\n\n # chunk is not the same as the one on top of fastbin[idx]\n if int(fastbin_list[0]) == addr:\n err = 'double free or corruption (fasttop) -> chunk already is on top of fastbin list\\n'\n err += ' fastbin idx == {}'\n err = err.format(chunk_fastbin_idx)\n print(message.error(err))\n errors_found += 1\n\n # chunk's size is ~same as top chunk's size\n fastbin_top_chunk = int(fastbin_list[0])\n if fastbin_top_chunk != 0:\n try:\n fastbin_top_chunk = read_chunk(fastbin_top_chunk)\n except gdb.MemoryError as e:\n print(message.error('Can\\'t read top fastbin chunk at address 0x{:x}, memory error'.format(fastbin_top_chunk)))\n finalize(errors_found, returned_before_error)\n return\n\n fastbin_top_chunk_size = chunksize(unsigned_size(fastbin_top_chunk['size']))\n if chunk_fastbin_idx != allocator.fastbin_index(fastbin_top_chunk_size):\n err = 'invalid fastbin entry (free) -> chunk\\'s size is not near top chunk\\'s size\\n'\n err += ' chunk\\'s size == {}, idx == {}\\n'\n err += ' top chunk\\'s size == {}, idx == {}'\n err += ' if `have_lock` is false then the error is invalid'\n err = err.format(chunk['size'], chunk_fastbin_idx,\n fastbin_top_chunk_size, allocator.fastbin_index(fastbin_top_chunk_size))\n print(message.error(err))\n errors_found += 1\n\n # is not mapped\n elif is_mmapped == 0:\n print(message.notice('Not mapped checks'))\n\n # chunks is not top chunk\n if addr == int(arena['top']):\n err = 'double free or corruption (top) -> chunk is top chunk'\n print(message.error(err))\n errors_found += 1\n\n # next chunk is not beyond the boundaries of the arena\n NONCONTIGUOUS_BIT = 2\n top_chunk_addr = (int(arena['top']))\n top_chunk = read_chunk(top_chunk_addr)\n next_chunk_addr = addr + chunk_size_unmasked\n\n # todo: in libc, addition may overflow\n if (arena['flags'] & NONCONTIGUOUS_BIT == 0) and next_chunk_addr >= top_chunk_addr + chunksize(top_chunk['size']):\n err = 'double free or corruption (out) -> next chunk is beyond arena and arena is contiguous\\n'\n err += 'next chunk at 0x{:x}, end of arena at 0x{:x}'\n err = err.format(next_chunk_addr, top_chunk_addr + chunksize(unsigned_size(top_chunk['size'])))\n print(message.error(err))\n errors_found += 1\n\n # now we need to dereference 
chunk\n try :\n next_chunk = read_chunk(next_chunk_addr)\n next_chunk_size = chunksize(unsigned_size(next_chunk['size']))\n except (OverflowError, gdb.MemoryError) as e:\n print(message.error('Can\\'t read next chunk at address 0x{:x}'.format(next_chunk_addr)))\n finalize(errors_found, returned_before_error)\n return\n\n # next chunk's P bit is set\n prev_inuse,_,_ = allocator.chunk_flags(next_chunk['size'])\n if prev_inuse == 0:\n err = 'double free or corruption (!prev) -> next chunk\\'s previous-in-use bit is 0\\n'\n print(message.error(err))\n errors_found += 1\n\n # next chunk's size is big enough and small enough\n if next_chunk_size <= 2*size_sz or next_chunk_size >= int(arena['system_mem']):\n err = 'free(): invalid next size (normal) -> next chunk\\'s size not in [2*size_sz; system_mem]\\n'\n err += 'next chunk\\'s size is 0x{:x}, 2*size_sz is 0x{:x}, system_mem is 0x{:x}'\n err = err.format(next_chunk_size, 2*size_sz, int(arena['system_mem']))\n print(message.error(err))\n errors_found += 1\n\n # consolidate backward\n prev_inuse,_,_ = allocator.chunk_flags(chunk['size'])\n if prev_inuse == 0:\n print(message.notice('Backward consolidation'))\n prev_size = chunksize(unsigned_size(chunk['prev_size']))\n prev_chunk_addr = addr - prev_size\n\n try :\n prev_chunk = read_chunk(prev_chunk_addr)\n prev_chunk_size = chunksize(unsigned_size(prev_chunk['size']))\n except (OverflowError, gdb.MemoryError) as e:\n print(message.error('Can\\'t read next chunk at address 0x{:x}'.format(prev_chunk_addr)))\n finalize(errors_found, returned_before_error)\n return\n\n if unsigned_size(prev_chunk['size']) != prev_size:\n err = 'corrupted size vs. prev_size while consolidating\\n'\n err += 'prev_size field is 0x{:x}, prev chunk at 0x{:x}, prev chunk size is 0x{:x}'\n err = err.format(prev_size, prev_chunk_addr, unsigned_size(prev_chunk['size']))\n print(message.error(err))\n errors_found += 1\n else:\n addr = prev_chunk_addr\n chunk_size += prev_size\n chunk_size_unmasked += prev_size\n try_unlink(addr)\n\n # consolidate forward\n if next_chunk_addr != top_chunk_addr:\n print(message.notice('Next chunk is not top chunk'))\n try :\n next_next_chunk_addr = next_chunk_addr + next_chunk_size\n next_next_chunk = read_chunk(next_next_chunk_addr)\n except (OverflowError, gdb.MemoryError) as e:\n print(message.error('Can\\'t read next chunk at address 0x{:x}'.format(next_next_chunk_addr)))\n finalize(errors_found, returned_before_error)\n return\n \n prev_inuse,_,_ = allocator.chunk_flags(next_next_chunk['size'])\n if prev_inuse == 0:\n print(message.notice('Forward consolidation'))\n try_unlink(next_chunk_addr)\n chunk_size += next_chunk_size\n chunk_size_unmasked += next_chunk_size\n else:\n print(message.notice('Clearing next chunk\\'s P bit'))\n\n # unsorted bin fd->bk should be unsorted bean\n unsorted_addr = int(arena['bins']) - 2*ptr_size\n try:\n unsorted = read_chunk(unsorted_addr)\n try:\n if read_chunk(unsorted['fd'])['bk'] != unsorted_addr:\n err = 'free(): corrupted unsorted chunks -> unsorted_chunk->fd->bk != unsorted_chunk\\n'\n err += 'unsorted at 0x{:x}, unsorted->fd == 0x{:x}, unsorted->fd->bk == 0x{:x}'\n err = err.format(unsorted_addr, unsorted['fd'], read_chunk(unsorted['fd'])['bk'])\n print(message.error(err))\n errors_found += 1\n except (OverflowError, gdb.MemoryError) as e:\n print(message.error('Can\\'t read chunk at 0x{:x}, it is unsorted bin fd'.format(unsorted['fd'])))\n errors_found += 1\n except (OverflowError, gdb.MemoryError) as e:\n print(message.error('Can\\'t read unsorted 
bin chunk at 0x{:x}'.format(unsorted_addr)))\n errors_found += 1\n\n else:\n print(message.notice('Next chunk is top chunk'))\n chunk_size += next_chunk_size\n chunk_size_unmasked += next_chunk_size\n\n # todo: this may vary strongly\n FASTBIN_CONSOLIDATION_THRESHOLD = 65536\n if chunk_size_unmasked >= FASTBIN_CONSOLIDATION_THRESHOLD:\n print(message.notice('Doing malloc_consolidate and systrim/heap_trim'))\n\n #is mapped\n else:\n message.notice('Doing munmap_chunk')\n\n finalize(errors_found, returned_before_error)\n\n\ndef try_unlink(addr):\n pass\n\n",
"path": "pwndbg/commands/heap.py"
}
] | diff --git a/pwndbg/commands/heap.py b/pwndbg/commands/heap.py
index 743d55d4bee..ae28b14852f 100755
--- a/pwndbg/commands/heap.py
+++ b/pwndbg/commands/heap.py
@@ -511,7 +511,7 @@ def find_fake_fast(addr, size=None):
if size is None:
sizes = range(min_fast, max_fast + 1, align)
else:
- sizes = [size]
+ sizes = [int(size)]
print(C.banner("FAKE CHUNKS"))
for size in sizes:
|
ivy-llc__ivy-17162 | is_integer
| [
{
"content": "# global\nimport ivy\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@to_ivy_arrays_and_back\ndef is_complex(x):\n return ivy.is_complex_dtype(x)\n\n\n@to_ivy_arrays_and_back\ndef is_floating_point(x):\n return ivy.is_float_dtype(x)\n",
"path": "ivy/functional/frontends/paddle/tensor/attribute.py"
}
] | [
{
"content": "# global\nimport ivy\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@to_ivy_arrays_and_back\ndef is_complex(x):\n return ivy.is_complex_dtype(x)\n\n\n@to_ivy_arrays_and_back\ndef is_integer(x):\n return ivy.is_int_dtype(x)\n\n\n@to_ivy_arrays_and_back\ndef is_floating_point(x):\n return ivy.is_float_dtype(x)\n",
"path": "ivy/functional/frontends/paddle/tensor/attribute.py"
}
] | diff --git a/ivy/functional/frontends/paddle/tensor/attribute.py b/ivy/functional/frontends/paddle/tensor/attribute.py
index 9520930395f91..cc5d69066978b 100644
--- a/ivy/functional/frontends/paddle/tensor/attribute.py
+++ b/ivy/functional/frontends/paddle/tensor/attribute.py
@@ -10,6 +10,11 @@ def is_complex(x):
return ivy.is_complex_dtype(x)
+@to_ivy_arrays_and_back
+def is_integer(x):
+ return ivy.is_int_dtype(x)
+
+
@to_ivy_arrays_and_back
def is_floating_point(x):
return ivy.is_float_dtype(x)
diff --git a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_attribute.py b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_attribute.py
index 5e6573d4acd69..5f25f247e137b 100644
--- a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_attribute.py
+++ b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_attribute.py
@@ -30,6 +30,31 @@ def test_paddle_is_complex(
)
+@handle_frontend_test(
+ fn_tree="paddle.tensor.attribute.is_integer",
+ dtype_and_x=helpers.dtype_and_values(
+ available_dtypes=helpers.get_dtypes("valid"),
+ ),
+)
+def test_paddle_is_integer(
+ *,
+ dtype_and_x,
+ on_device,
+ fn_tree,
+ frontend,
+ test_flags,
+):
+ input_dtype, input = dtype_and_x
+ helpers.test_frontend_function(
+ input_dtypes=input_dtype,
+ frontend=frontend,
+ test_flags=test_flags,
+ fn_tree=fn_tree,
+ on_device=on_device,
+ x=input[0],
+ )
+
+
@handle_frontend_test(
fn_tree="paddle.tensor.attribute.is_floating_point",
dtype_and_x=helpers.dtype_and_values(
|
mitmproxy__mitmproxy-1510 | Divide by Zero error
It's in `netlib/strutils.py`.
The offending expression, at around line 126:
``` python
return sum(
    i < 9 or 13 < i < 32 or 126 < i
    for i in six.iterbytes(s[:100])
) / len(s[:100]) > 0.3
```
If `s` is empty, this raises a ZeroDivisionError in mitmproxy (it doesn't crash outright, though, presumably thanks to recent error-handling improvements in mitmproxy).
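
A minimal sketch of one way to guard against the empty-input case (illustration only — not necessarily the fix that was merged; it assumes the module's existing `six`-based byte iteration and `from __future__ import division`):

```python
from __future__ import division  # match the module's float-division semantics

import six


def is_mostly_bin(s):
    # type: (bytes) -> bool
    # Empty input would make len(s[:100]) == 0 and the division below
    # would raise ZeroDivisionError, so bail out early.
    if not s:
        return False
    sample = s[:100]
    # Treat the data as "mostly binary" if more than 30% of the sampled
    # bytes fall outside the printable/whitespace ASCII ranges.
    return sum(
        i < 9 or 13 < i < 32 or 126 < i
        for i in six.iterbytes(sample)
    ) / len(sample) > 0.3
```

With a guard like this, `is_mostly_bin(b"")` simply returns `False` instead of raising.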
| [
{
"content": "from __future__ import absolute_import, print_function, division\nimport re\nimport codecs\n\nimport six\n\n\ndef always_bytes(unicode_or_bytes, *encode_args):\n if isinstance(unicode_or_bytes, six.text_type):\n return unicode_or_bytes.encode(*encode_args)\n return unicode_or_bytes\n\n\ndef native(s, *encoding_opts):\n \"\"\"\n Convert :py:class:`bytes` or :py:class:`unicode` to the native\n :py:class:`str` type, using latin1 encoding if conversion is necessary.\n\n https://www.python.org/dev/peps/pep-3333/#a-note-on-string-types\n \"\"\"\n if not isinstance(s, (six.binary_type, six.text_type)):\n raise TypeError(\"%r is neither bytes nor unicode\" % s)\n if six.PY2:\n if isinstance(s, six.text_type):\n return s.encode(*encoding_opts)\n else:\n if isinstance(s, six.binary_type):\n return s.decode(*encoding_opts)\n return s\n\n\n# Translate control characters to \"safe\" characters. This implementation initially\n# replaced them with the matching control pictures (http://unicode.org/charts/PDF/U2400.pdf),\n# but that turned out to render badly with monospace fonts. We are back to \".\" therefore.\n_control_char_trans = {\n x: ord(\".\") # x + 0x2400 for unicode control group pictures\n for x in range(32)\n}\n_control_char_trans[127] = ord(\".\") # 0x2421\n_control_char_trans_newline = _control_char_trans.copy()\nfor x in (\"\\r\", \"\\n\", \"\\t\"):\n del _control_char_trans_newline[ord(x)]\n\n\nif six.PY2:\n pass\nelse:\n _control_char_trans = str.maketrans(_control_char_trans)\n _control_char_trans_newline = str.maketrans(_control_char_trans_newline)\n\n\ndef escape_control_characters(text, keep_spacing=True):\n \"\"\"\n Replace all unicode C1 control characters from the given text with a single \".\"\n\n Args:\n keep_spacing: If True, tabs and newlines will not be replaced.\n \"\"\"\n # type: (six.string_types) -> six.text_type\n if not isinstance(text, six.string_types):\n raise ValueError(\"text type must be unicode but is {}\".format(type(text).__name__))\n\n trans = _control_char_trans_newline if keep_spacing else _control_char_trans\n if six.PY2:\n return u\"\".join(\n six.unichr(trans.get(ord(ch), ord(ch)))\n for ch in text\n )\n return text.translate(trans)\n\n\ndef bytes_to_escaped_str(data, keep_spacing=False, escape_single_quotes=False):\n \"\"\"\n Take bytes and return a safe string that can be displayed to the user.\n\n Single quotes are always escaped, double quotes are never escaped:\n \"'\" + bytes_to_escaped_str(...) 
+ \"'\"\n gives a valid Python string.\n\n Args:\n keep_spacing: If True, tabs and newlines will not be escaped.\n \"\"\"\n\n if not isinstance(data, bytes):\n raise ValueError(\"data must be bytes, but is {}\".format(data.__class__.__name__))\n # We always insert a double-quote here so that we get a single-quoted string back\n # https://stackoverflow.com/questions/29019340/why-does-python-use-different-quotes-for-representing-strings-depending-on-their\n ret = repr(b'\"' + data).lstrip(\"b\")[2:-1]\n if not escape_single_quotes:\n ret = re.sub(r\"(?<!\\\\)(\\\\\\\\)*\\\\'\", lambda m: (m.group(1) or \"\") + \"'\", ret)\n if keep_spacing:\n ret = re.sub(\n r\"(?<!\\\\)(\\\\\\\\)*\\\\([nrt])\",\n lambda m: (m.group(1) or \"\") + dict(n=\"\\n\", r=\"\\r\", t=\"\\t\")[m.group(2)],\n ret\n )\n return ret\n\n\ndef escaped_str_to_bytes(data):\n \"\"\"\n Take an escaped string and return the unescaped bytes equivalent.\n\n Raises:\n ValueError, if the escape sequence is invalid.\n \"\"\"\n if not isinstance(data, six.string_types):\n if six.PY2:\n raise ValueError(\"data must be str or unicode, but is {}\".format(data.__class__.__name__))\n raise ValueError(\"data must be str, but is {}\".format(data.__class__.__name__))\n\n if six.PY2:\n if isinstance(data, unicode):\n data = data.encode(\"utf8\")\n return data.decode(\"string-escape\")\n\n # This one is difficult - we use an undocumented Python API here\n # as per http://stackoverflow.com/a/23151714/934719\n return codecs.escape_decode(data)[0]\n\n\ndef is_mostly_bin(s):\n # type: (bytes) -> bool\n return sum(\n i < 9 or 13 < i < 32 or 126 < i\n for i in six.iterbytes(s[:100])\n ) / len(s[:100]) > 0.3\n\n\ndef is_xml(s):\n # type: (bytes) -> bool\n return s.strip().startswith(b\"<\")\n\n\ndef clean_hanging_newline(t):\n \"\"\"\n Many editors will silently add a newline to the final line of a\n document (I'm looking at you, Vim). This function fixes this common\n problem at the risk of removing a hanging newline in the rare cases\n where the user actually intends it.\n \"\"\"\n if t and t[-1] == \"\\n\":\n return t[:-1]\n return t\n\n\ndef hexdump(s):\n \"\"\"\n Returns:\n A generator of (offset, hex, str) tuples\n \"\"\"\n for i in range(0, len(s), 16):\n offset = \"{:0=10x}\".format(i)\n part = s[i:i + 16]\n x = \" \".join(\"{:0=2x}\".format(i) for i in six.iterbytes(part))\n x = x.ljust(47) # 16*2 + 15\n part_repr = native(escape_control_characters(\n part.decode(\"ascii\", \"replace\").replace(u\"\\ufffd\", u\".\"),\n False\n ))\n yield (offset, x, part_repr)\n",
"path": "netlib/strutils.py"
}
] | [
{
"content": "from __future__ import absolute_import, print_function, division\nimport re\nimport codecs\n\nimport six\n\n\ndef always_bytes(unicode_or_bytes, *encode_args):\n if isinstance(unicode_or_bytes, six.text_type):\n return unicode_or_bytes.encode(*encode_args)\n return unicode_or_bytes\n\n\ndef native(s, *encoding_opts):\n \"\"\"\n Convert :py:class:`bytes` or :py:class:`unicode` to the native\n :py:class:`str` type, using latin1 encoding if conversion is necessary.\n\n https://www.python.org/dev/peps/pep-3333/#a-note-on-string-types\n \"\"\"\n if not isinstance(s, (six.binary_type, six.text_type)):\n raise TypeError(\"%r is neither bytes nor unicode\" % s)\n if six.PY2:\n if isinstance(s, six.text_type):\n return s.encode(*encoding_opts)\n else:\n if isinstance(s, six.binary_type):\n return s.decode(*encoding_opts)\n return s\n\n\n# Translate control characters to \"safe\" characters. This implementation initially\n# replaced them with the matching control pictures (http://unicode.org/charts/PDF/U2400.pdf),\n# but that turned out to render badly with monospace fonts. We are back to \".\" therefore.\n_control_char_trans = {\n x: ord(\".\") # x + 0x2400 for unicode control group pictures\n for x in range(32)\n}\n_control_char_trans[127] = ord(\".\") # 0x2421\n_control_char_trans_newline = _control_char_trans.copy()\nfor x in (\"\\r\", \"\\n\", \"\\t\"):\n del _control_char_trans_newline[ord(x)]\n\n\nif six.PY2:\n pass\nelse:\n _control_char_trans = str.maketrans(_control_char_trans)\n _control_char_trans_newline = str.maketrans(_control_char_trans_newline)\n\n\ndef escape_control_characters(text, keep_spacing=True):\n \"\"\"\n Replace all unicode C1 control characters from the given text with a single \".\"\n\n Args:\n keep_spacing: If True, tabs and newlines will not be replaced.\n \"\"\"\n # type: (six.string_types) -> six.text_type\n if not isinstance(text, six.string_types):\n raise ValueError(\"text type must be unicode but is {}\".format(type(text).__name__))\n\n trans = _control_char_trans_newline if keep_spacing else _control_char_trans\n if six.PY2:\n return u\"\".join(\n six.unichr(trans.get(ord(ch), ord(ch)))\n for ch in text\n )\n return text.translate(trans)\n\n\ndef bytes_to_escaped_str(data, keep_spacing=False, escape_single_quotes=False):\n \"\"\"\n Take bytes and return a safe string that can be displayed to the user.\n\n Single quotes are always escaped, double quotes are never escaped:\n \"'\" + bytes_to_escaped_str(...) 
+ \"'\"\n gives a valid Python string.\n\n Args:\n keep_spacing: If True, tabs and newlines will not be escaped.\n \"\"\"\n\n if not isinstance(data, bytes):\n raise ValueError(\"data must be bytes, but is {}\".format(data.__class__.__name__))\n # We always insert a double-quote here so that we get a single-quoted string back\n # https://stackoverflow.com/questions/29019340/why-does-python-use-different-quotes-for-representing-strings-depending-on-their\n ret = repr(b'\"' + data).lstrip(\"b\")[2:-1]\n if not escape_single_quotes:\n ret = re.sub(r\"(?<!\\\\)(\\\\\\\\)*\\\\'\", lambda m: (m.group(1) or \"\") + \"'\", ret)\n if keep_spacing:\n ret = re.sub(\n r\"(?<!\\\\)(\\\\\\\\)*\\\\([nrt])\",\n lambda m: (m.group(1) or \"\") + dict(n=\"\\n\", r=\"\\r\", t=\"\\t\")[m.group(2)],\n ret\n )\n return ret\n\n\ndef escaped_str_to_bytes(data):\n \"\"\"\n Take an escaped string and return the unescaped bytes equivalent.\n\n Raises:\n ValueError, if the escape sequence is invalid.\n \"\"\"\n if not isinstance(data, six.string_types):\n if six.PY2:\n raise ValueError(\"data must be str or unicode, but is {}\".format(data.__class__.__name__))\n raise ValueError(\"data must be str, but is {}\".format(data.__class__.__name__))\n\n if six.PY2:\n if isinstance(data, unicode):\n data = data.encode(\"utf8\")\n return data.decode(\"string-escape\")\n\n # This one is difficult - we use an undocumented Python API here\n # as per http://stackoverflow.com/a/23151714/934719\n return codecs.escape_decode(data)[0]\n\n\ndef is_mostly_bin(s):\n # type: (bytes) -> bool\n if not s or len(s) == 0:\n return False\n\n return sum(\n i < 9 or 13 < i < 32 or 126 < i\n for i in six.iterbytes(s[:100])\n ) / len(s[:100]) > 0.3\n\n\ndef is_xml(s):\n # type: (bytes) -> bool\n return s.strip().startswith(b\"<\")\n\n\ndef clean_hanging_newline(t):\n \"\"\"\n Many editors will silently add a newline to the final line of a\n document (I'm looking at you, Vim). This function fixes this common\n problem at the risk of removing a hanging newline in the rare cases\n where the user actually intends it.\n \"\"\"\n if t and t[-1] == \"\\n\":\n return t[:-1]\n return t\n\n\ndef hexdump(s):\n \"\"\"\n Returns:\n A generator of (offset, hex, str) tuples\n \"\"\"\n for i in range(0, len(s), 16):\n offset = \"{:0=10x}\".format(i)\n part = s[i:i + 16]\n x = \" \".join(\"{:0=2x}\".format(i) for i in six.iterbytes(part))\n x = x.ljust(47) # 16*2 + 15\n part_repr = native(escape_control_characters(\n part.decode(\"ascii\", \"replace\").replace(u\"\\ufffd\", u\".\"),\n False\n ))\n yield (offset, x, part_repr)\n",
"path": "netlib/strutils.py"
}
] | diff --git a/netlib/strutils.py b/netlib/strutils.py
index 4a46b6b1ff..4cb3b80560 100644
--- a/netlib/strutils.py
+++ b/netlib/strutils.py
@@ -121,6 +121,9 @@ def escaped_str_to_bytes(data):
def is_mostly_bin(s):
# type: (bytes) -> bool
+ if not s or len(s) == 0:
+ return False
+
return sum(
i < 9 or 13 < i < 32 or 126 < i
for i in six.iterbytes(s[:100])
diff --git a/test/netlib/test_strutils.py b/test/netlib/test_strutils.py
index 52299e5991..5be254a3e1 100644
--- a/test/netlib/test_strutils.py
+++ b/test/netlib/test_strutils.py
@@ -85,6 +85,7 @@ def test_escaped_str_to_bytes():
def test_is_mostly_bin():
assert not strutils.is_mostly_bin(b"foo\xFF")
assert strutils.is_mostly_bin(b"foo" + b"\xFF" * 10)
+ assert not strutils.is_mostly_bin("")
def test_is_xml():
|
learningequality__kolibri-5237 | running kolibri pex on mac in background mode causes a seg fault
### Observed behavior
Trying to run the kolibri pex in background mode causes a hard Python crash with no script-level errors:

Running with `--foreground` does not have this issue.
### Expected behavior
no crash
### User-facing consequences
frustration
### Errors and logs
This is the top of the report
```
Crashed Thread: 2
Exception Type: EXC_BAD_ACCESS (SIGSEGV)
Exception Codes: KERN_INVALID_ADDRESS at 0x000000011207bb36
Exception Note: EXC_CORPSE_NOTIFY
Termination Signal: Segmentation fault: 11
Termination Reason: Namespace SIGNAL, Code 0xb
Terminating Process: exc handler [54294]
```
I can supply additional info
### Steps to reproduce
```kolibri start```
### Context
0.12
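
For illustration, a minimal workaround sketch that skips daemonizing on macOS. The helper name `resolve_daemon_flag` is hypothetical; only the `--foreground` flag and the idea of forcing foreground mode on darwin are taken from this report and its fix:

```python
import sys


def resolve_daemon_flag(foreground):
    # Hypothetical helper: decide whether "kolibri start" should fork to the
    # background, given the --foreground CLI flag.
    daemon = not foreground
    # Workaround sketch: forking the already-initialized process segfaults on
    # macOS, so stay in the foreground on darwin.
    if sys.platform == "darwin":
        daemon = False
    return daemon


# e.g. resolve_daemon_flag(foreground=False) -> False on macOS, True elsewhere
```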
| [
{
"content": "from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport importlib\nimport logging\nimport os\nimport signal\nimport sys\nfrom sqlite3 import DatabaseError as SQLite3DatabaseError\n\nimport django\nfrom django.core.exceptions import AppRegistryNotReady\nfrom django.core.management import call_command\nfrom django.db import connections\nfrom django.db.utils import DatabaseError\nfrom docopt import docopt\n\nimport kolibri\nfrom .debian_check import check_debian_user\n# Check if the current user is the kolibri user when running kolibri from .deb.\n# Putting it here because importing server module creates KOLIBRI_HOME directory.\ncheck_debian_user()\n\nfrom . import server # noqa\nfrom .conf import OPTIONS # noqa\nfrom .sanity_checks import check_content_directory_exists_and_writable # noqa\nfrom .sanity_checks import check_other_kolibri_running # noqa\nfrom .system import become_daemon # noqa\nfrom kolibri.core.deviceadmin.utils import IncompatibleDatabase # noqa\nfrom kolibri.utils import conf # noqa\n\n\nUSAGE = \"\"\"\nKolibri\n\nSupported by Foundation for Learning Equality\nwww.learningequality.org\n\nUsage:\n kolibri start [--foreground] [--port=<port>] [options]\n kolibri stop [options]\n kolibri restart [options]\n kolibri status [options]\n kolibri shell [options]\n kolibri manage COMMAND [DJANGO_OPTIONS ...]\n kolibri manage COMMAND [options] [-- DJANGO_OPTIONS ...]\n kolibri diagnose [options]\n kolibri plugin [options] PLUGIN (enable | disable)\n kolibri language setdefault <langcode>\n kolibri plugin --list\n kolibri -h | --help\n kolibri --version\n\nOptions:\n -h --help Show this screen.\n --version Show version.\n --debug Output debug messages (for development)\n COMMAND The name of any available django manage command. For\n help, type `kolibri manage help`\n DJANGO_OPTIONS Command options are passed on to the django manage\n command. Notice that all django options must appear\n *last* and should not be mixed with other options.\n\nExamples:\n kolibri start Start Kolibri\n kolibri stop Stop Kolibri\n kolibri status How is Kolibri doing?\n kolibri url Tell me the address of Kolibri\n kolibri shell Display a Django shell\n kolibri manage help Show the Django management usage dialogue\n kolibri manage runserver Runs Django's development server\n kolibri diagnose Show system information for debugging\n\n\nEnvironment:\n\n DJANGO_SETTINGS_MODULE\n - The Django settings module to load. 
Useful if you are deploying Kolibri\n in a specific setup such as your own web server.\n - Default: \"kolibri.deployment.default.settings.base\"\n\n KOLIBRI_HOME\n - Where Kolibri will store its data and configuration files.\n\n KOLIBRI_HTTP_PORT\n - Default: 8080\n\n\"\"\"\n\n__doc__ = \"\"\"\nKolibri Command Line Interface (CLI)\n====================================\n\nAuto-generated usage instructions from ``kolibri -h``::\n\n{usage:s}\n\n\"\"\".format(usage=\"\\n\".join(map(lambda x: \" \" + x, USAGE.split(\"\\n\"))))\n\nlogger = logging.getLogger(__name__)\n\n\nclass PluginDoesNotExist(Exception):\n \"\"\"\n This exception is local to the CLI environment in case actions are performed\n on a plugin that cannot be loaded.\n \"\"\"\n\n\nclass PluginBaseLoadsApp(Exception):\n \"\"\"\n An exception raised in case a kolibri_plugin.py results in loading of the\n Django app stack.\n \"\"\"\n pass\n\n\ndef version_file():\n \"\"\"\n During test runtime, this path may differ because KOLIBRI_HOME is\n regenerated\n \"\"\"\n from .conf import KOLIBRI_HOME\n return os.path.join(KOLIBRI_HOME, '.data_version')\n\n\ndef should_back_up(kolibri_version, version_file_contents):\n change_version = kolibri_version != version_file_contents\n return change_version and 'dev' not in version_file_contents and 'dev' not in kolibri_version\n\n\ndef initialize(debug=False):\n \"\"\"\n Currently, always called before running commands. This may change in case\n commands that conflict with this behavior show up.\n\n :param: debug: Tells initialization to setup logging etc.\n \"\"\"\n if not os.path.isfile(version_file()):\n django.setup()\n\n setup_logging(debug=debug)\n\n _first_run()\n else:\n # Do this here so that we can fix any issues with our configuration file before\n # we attempt to set up django.\n from .conf import autoremove_unavailable_plugins, enable_default_plugins\n autoremove_unavailable_plugins()\n\n version = open(version_file(), \"r\").read()\n version = version.strip() if version else \"\"\n\n if should_back_up(kolibri.__version__, version):\n # dbbackup will load settings.INSTALLED_APPS.\n # we need to ensure plugins are correct in conf.config before\n enable_default_plugins()\n # Version changed, make a backup no matter what.\n from kolibri.core.deviceadmin.utils import dbbackup\n try:\n backup = dbbackup(version)\n logger.info(\n \"Backed up database to: {path}\".format(path=backup))\n except IncompatibleDatabase:\n logger.warning(\n \"Skipped automatic database backup, not compatible with \"\n \"this DB engine.\")\n\n django.setup()\n\n setup_logging(debug=debug)\n\n if kolibri.__version__ != version:\n logger.info(\n \"Version was {old}, new version: {new}\".format(\n old=version,\n new=kolibri.__version__\n )\n )\n update()\n\n\ndef _migrate_databases():\n \"\"\"\n Try to migrate all active databases. 
This should not be called unless Django has\n been initialized.\n \"\"\"\n from django.conf import settings\n for database in settings.DATABASES:\n call_command(\"migrate\", interactive=False, database=database)\n\n # load morango fixtures needed for certificate related operations\n call_command(\"loaddata\", \"scopedefinitions\")\n\n\ndef _first_run():\n \"\"\"\n Called once at least.\n \"\"\"\n if os.path.exists(version_file()):\n logger.error(\n \"_first_run() called, but Kolibri is already initialized.\"\n )\n return\n logger.info(\"Kolibri running for the first time.\")\n logger.info(\n \"We don't yet use pre-migrated database seeds, so you're going to have \"\n \"to wait a bit while we create a blank database...\\n\\n\"\n )\n\n from kolibri.core.settings import SKIP_AUTO_DATABASE_MIGRATION, DEFAULT_PLUGINS\n\n # We need to migrate the database before enabling plugins, because they\n # might depend on database readiness.\n if not SKIP_AUTO_DATABASE_MIGRATION:\n _migrate_databases()\n\n for plugin_module in DEFAULT_PLUGINS:\n try:\n plugin(plugin_module, enable=True)\n except PluginDoesNotExist:\n continue\n\n logger.info(\"Automatically enabling applications.\")\n\n # Finally collect static assets and run migrations again\n update()\n\n\ndef update():\n \"\"\"\n Called whenever a version change in kolibri is detected\n\n TODO: We should look at version numbers of external plugins, too!\n \"\"\"\n # Can be removed once we stop calling update() from start()\n # See: https://github.com/learningequality/kolibri/issues/1615\n if update.called:\n return\n update.called = True\n\n logger.info(\"Running update routines for new version...\")\n\n # Need to do this here, before we run any Django management commands that\n # import settings. Otherwise the updated configuration will not be used\n # during this runtime.\n\n call_command(\"collectstatic\", interactive=False)\n\n from kolibri.core.settings import SKIP_AUTO_DATABASE_MIGRATION\n\n if not SKIP_AUTO_DATABASE_MIGRATION:\n _migrate_databases()\n\n with open(version_file(), \"w\") as f:\n f.write(kolibri.__version__)\n\n from kolibri.core.content.utils.annotation import update_channel_metadata\n update_channel_metadata()\n\n from django.core.cache import caches\n cache = caches['built_files']\n cache.clear()\n\n\nupdate.called = False\n\n\ndef start(port=None, daemon=True):\n \"\"\"\n Start the server on given port.\n\n :param: port: Port number (default: 8080)\n :param: daemon: Fork to background process (default: True)\n \"\"\"\n run_cherrypy = conf.OPTIONS[\"Server\"][\"CHERRYPY_START\"]\n\n # This is temporarily put in place because of\n # https://github.com/learningequality/kolibri/issues/1615\n update()\n\n # In case some tests run start() function only\n if not isinstance(port, int):\n port = _get_port(port)\n\n if not daemon:\n logger.info(\"Running 'kolibri start' in foreground...\")\n\n else:\n logger.info(\"Running 'kolibri start' as daemon (system service)\")\n\n if run_cherrypy:\n __, urls = server.get_urls(listen_port=port)\n if not urls:\n logger.error(\n \"Could not detect an IP address that Kolibri binds to, but try \"\n \"opening up the following addresses:\\n\")\n urls = [\n \"http://{}:{}\".format(ip, port) for ip in (\"localhost\", \"127.0.0.1\")\n ]\n else:\n logger.info(\"Kolibri running on:\\n\")\n for addr in urls:\n sys.stderr.write(\"\\t{}\\n\".format(addr))\n sys.stderr.write(\"\\n\")\n else:\n logger.info(\"Starting Kolibri background services\")\n\n # Daemonize at this point, no more user output is needed\n if 
daemon:\n\n kwargs = {}\n # Truncate the file\n if os.path.isfile(server.DAEMON_LOG):\n open(server.DAEMON_LOG, \"w\").truncate()\n logger.info(\n \"Going to daemon mode, logging to {0}\".format(server.DAEMON_LOG)\n )\n kwargs['out_log'] = server.DAEMON_LOG\n kwargs['err_log'] = server.DAEMON_LOG\n\n # close all connections before forking, to avoid SQLite corruption:\n # https://www.sqlite.org/howtocorrupt.html#_carrying_an_open_database_connection_across_a_fork_\n connections.close_all()\n\n become_daemon(**kwargs)\n\n server.start(port=port, run_cherrypy=run_cherrypy)\n\n\ndef stop():\n \"\"\"\n Stops the server unless it isn't running\n \"\"\"\n try:\n pid, __, __ = server.get_status()\n server.stop(pid=pid)\n stopped = True\n if conf.OPTIONS[\"Server\"][\"CHERRYPY_START\"]:\n logger.info(\"Kolibri server has successfully been stopped.\")\n else:\n logger.info(\"Kolibri background services have successfully been stopped.\")\n except server.NotRunning as e:\n verbose_status = \"{msg:s} ({code:d})\".format(\n code=e.status_code,\n msg=status.codes[e.status_code]\n )\n if e.status_code == server.STATUS_STOPPED:\n logger.info(\"Already stopped: {}\".format(verbose_status))\n stopped = True\n elif e.status_code == server.STATUS_STARTING_UP:\n logger.error(\n \"Not stopped: {}\".format(verbose_status)\n )\n sys.exit(e.status_code)\n else:\n logger.error(\n \"During graceful shutdown, server says: {}\".format(\n verbose_status\n )\n )\n logger.error(\n \"Not responding, killing with force\"\n )\n server.stop(force=True)\n stopped = True\n\n if stopped:\n sys.exit(0)\n\n\ndef status():\n \"\"\"\n Check the server's status. For possible statuses, see the status dictionary\n status.codes\n\n Status *always* outputs the current status in the first line of stderr.\n The following lines contain optional information such as the addresses where\n the server is listening.\n\n TODO: We can't guarantee the above behavior because of the django stack\n being loaded regardless\n\n :returns: status_code, key has description in status.codes\n \"\"\"\n status_code, urls = server.get_urls()\n\n if status_code == server.STATUS_RUNNING:\n sys.stderr.write(\"{msg:s} (0)\\n\".format(msg=status.codes[0]))\n if urls:\n sys.stderr.write(\"Kolibri running on:\\n\\n\")\n for addr in urls:\n sys.stderr.write(\"\\t{}\\n\".format(addr))\n return server.STATUS_RUNNING\n else:\n verbose_status = status.codes[status_code]\n sys.stderr.write(\"{msg:s} ({code:d})\\n\".format(\n code=status_code, msg=verbose_status))\n return status_code\n\n\nstatus.codes = {\n server.STATUS_RUNNING: 'OK, running',\n server.STATUS_STOPPED: 'Stopped',\n server.STATUS_STARTING_UP: 'Starting up',\n server.STATUS_NOT_RESPONDING: 'Not responding',\n server.STATUS_FAILED_TO_START:\n 'Failed to start (check log file: {0})'.format(server.DAEMON_LOG),\n server.STATUS_UNCLEAN_SHUTDOWN: 'Unclean shutdown',\n server.STATUS_UNKNOWN_INSTANCE: 'Unknown Kolibri running on port',\n server.STATUS_SERVER_CONFIGURATION_ERROR: 'Kolibri server configuration error',\n server.STATUS_PID_FILE_READ_ERROR: 'Could not read PID file',\n server.STATUS_PID_FILE_INVALID: 'Invalid PID file',\n server.STATUS_UNKNOWN: 'Could not determine status',\n}\n\n\ndef setup_logging(debug=False):\n \"\"\"\n Configures logging in cases where a Django environment is not supposed\n to be configured.\n\n TODO: This is really confusing, importing django settings is allowed to\n fail when debug=False, but if it's true it can fail?\n \"\"\"\n try:\n from django.conf.settings import LOGGING\n 
except ImportError:\n from kolibri.deployment.default.settings.base import LOGGING\n if debug:\n from django.conf import settings\n settings.DEBUG = True\n LOGGING['handlers']['console']['level'] = 'DEBUG'\n LOGGING['loggers']['kolibri']['level'] = 'DEBUG'\n logger.debug(\"Debug mode is on!\")\n logging.config.dictConfig(LOGGING)\n\n\ndef manage(cmd, args=[]):\n \"\"\"\n Invokes a django command\n\n :param: cmd: The command to invoke, for instance \"runserver\"\n :param: args: arguments for the command\n \"\"\"\n # Set sys.argv to correctly reflect the way we invoke kolibri as a Python\n # module\n sys.argv = [\"-m\", \"kolibri\"] + sys.argv[1:]\n from django.core.management import execute_from_command_line\n argv = ['kolibri manage', cmd] + args\n execute_from_command_line(argv=argv)\n\n\ndef _is_plugin(obj):\n from kolibri.plugins.base import KolibriPluginBase # NOQA\n\n return (\n isinstance(obj, type) and obj is not KolibriPluginBase\n and issubclass(obj, KolibriPluginBase)\n )\n\n\ndef get_kolibri_plugin(plugin_name):\n \"\"\"\n Try to load kolibri_plugin from given plugin module identifier\n\n :returns: A list of classes inheriting from KolibriPluginBase\n \"\"\"\n\n plugin_classes = []\n\n try:\n plugin_module = importlib.import_module(\n plugin_name + \".kolibri_plugin\"\n )\n for obj in plugin_module.__dict__.values():\n if _is_plugin(obj):\n plugin_classes.append(obj)\n except ImportError as e:\n # Python 2: message, Python 3: msg\n exc_message = getattr(e, 'message', getattr(e, 'msg', None))\n if exc_message.startswith(\"No module named\"):\n msg = (\n \"Plugin '{}' does not seem to exist. Is it on the PYTHONPATH?\"\n ).format(plugin_name)\n raise PluginDoesNotExist(msg)\n else:\n raise\n except AppRegistryNotReady:\n msg = (\n \"Plugin '{}' loads the Django app registry, which it isn't \"\n \"allowed to do while enabling or disabling itself.\"\n ).format(plugin_name)\n raise PluginBaseLoadsApp(msg)\n\n if not plugin_classes:\n # There's no clear use case for a plugin without a KolibriPluginBase\n # inheritor, for now just throw a warning\n logger.warning(\n \"Plugin '{}' has no KolibriPluginBase defined\".format(plugin_name)\n )\n\n return plugin_classes\n\n\ndef plugin(plugin_name, **kwargs):\n \"\"\"\n Receives a plugin identifier and tries to load its main class. Calls class\n functions.\n \"\"\"\n from kolibri.utils import conf\n\n if kwargs.get('enable', False):\n plugin_classes = get_kolibri_plugin(plugin_name)\n for klass in plugin_classes:\n klass.enable()\n\n if kwargs.get('disable', False):\n try:\n plugin_classes = get_kolibri_plugin(plugin_name)\n for klass in plugin_classes:\n klass.disable()\n except PluginDoesNotExist as e:\n logger.error(str(e))\n logger.warning(\n \"Removing '{}' from configuration in a naive way.\".format(\n plugin_name\n )\n )\n if plugin_name in conf.config['INSTALLED_APPS']:\n conf.config['INSTALLED_APPS'].remove(plugin_name)\n logger.info(\n \"Removed '{}' from INSTALLED_APPS\".format(plugin_name)\n )\n else:\n logger.warning(\n (\n \"Could not find any matches for {} in INSTALLED_APPS\"\n .format(plugin_name)\n )\n )\n\n conf.save()\n\n\ndef set_default_language(lang):\n \"\"\"\n Set the default language for this installation of Kolibri. 
Any running\n instance of Kolibri needs to be restarted in order for this change to work.\n \"\"\"\n\n from kolibri.utils import conf\n from django.conf import settings\n\n valid_languages = [l[0] for l in settings.LANGUAGES]\n\n if lang in valid_languages:\n conf.config['LANGUAGE_CODE'] = lang\n conf.save()\n else:\n msg = \"Invalid language code {langcode}. Must be one of: {validlangs}\".format(\n langcode=lang, validlangs=valid_languages\n )\n\n logging.warning(msg)\n\n\ndef parse_args(args=None):\n \"\"\"\n Parses arguments by invoking docopt. Arguments for django management\n commands are split out before returning.\n\n :returns: (parsed_arguments, raw_django_ars)\n \"\"\"\n\n if not args:\n args = sys.argv[1:]\n\n # Split out the parts of the argument list that we pass on to Django\n # and don't feed to docopt.\n if '--' in args:\n # At the moment, we keep this for backwards-compatibility and in case there\n # is a real case of having to force the parsing of DJANGO_OPTIONS to a\n # specific location. Example:\n # kolibri manage commandname --non-django-arg -- --django-arg\n pivot = args.index('--')\n args, django_args = args[:pivot], args[pivot + 1:]\n elif 'manage' in args:\n # Include \"manage COMMAND\" for docopt parsing, but split out the rest\n pivot = args.index('manage') + 2\n args, django_args = args[:pivot], args[pivot:]\n else:\n django_args = []\n\n docopt_kwargs = dict(\n version=str(kolibri.__version__),\n options_first=False,\n )\n\n if args:\n docopt_kwargs['argv'] = args\n\n return docopt(USAGE, **docopt_kwargs), django_args\n\n\ndef _get_port(port):\n return int(port) if port else OPTIONS[\"Deployment\"]['HTTP_PORT']\n\n\ndef main(args=None): # noqa: max-complexity=13\n \"\"\"\n Kolibri's main function. Parses arguments and calls utility functions.\n Utility functions should be callable for unit testing purposes, but remember\n to use main() for integration tests in order to test the argument API.\n \"\"\"\n\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n\n arguments, django_args = parse_args(args)\n\n debug = arguments['--debug']\n\n if arguments['start']:\n port = _get_port(arguments['--port'])\n if OPTIONS[\"Server\"][\"CHERRYPY_START\"]:\n check_other_kolibri_running(port)\n\n try:\n initialize(debug=debug)\n except (DatabaseError, SQLite3DatabaseError) as e:\n if \"malformed\" in str(e):\n logger.error(\n \"Your database appears to be corrupted. If you encounter this,\"\n \"please immediately back up all files in the .kolibri folder that\"\n \"end in .sqlite3, .sqlite3-shm, .sqlite3-wal, or .log and then\"\n \"contact Learning Equality. 
Thank you!\"\n )\n raise\n\n # Alias\n if arguments['shell']:\n arguments['manage'] = True\n arguments['COMMAND'] = 'shell'\n\n if arguments['manage']:\n command = arguments['COMMAND']\n manage(command, args=django_args)\n return\n\n if arguments['plugin']:\n plugin_name = arguments['PLUGIN']\n plugin(plugin_name, **arguments)\n return\n\n if arguments['start']:\n try:\n server._write_pid_file(server.STARTUP_LOCK, port)\n except (IOError, OSError):\n logger.warn('Impossible to create file lock to communicate starting process')\n # Check if the content directory exists when Kolibri runs after the first time.\n check_content_directory_exists_and_writable()\n\n # Defragment the db\n call_command(\"vacuumsqlite\")\n\n # Clear old sessions up\n call_command(\"clearsessions\")\n\n daemon = not arguments['--foreground']\n start(port, daemon=daemon)\n return\n\n if arguments['stop']:\n stop()\n return\n\n if arguments['status']:\n status_code = status()\n sys.exit(status_code)\n return\n\n if arguments['language'] and arguments['setdefault']:\n set_default_language(arguments['<langcode>'])\n",
"path": "kolibri/utils/cli.py"
}
] | [
{
"content": "from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport importlib\nimport logging\nimport os\nimport signal\nimport sys\nfrom sqlite3 import DatabaseError as SQLite3DatabaseError\n\nimport django\nfrom django.core.exceptions import AppRegistryNotReady\nfrom django.core.management import call_command\nfrom django.db import connections\nfrom django.db.utils import DatabaseError\nfrom docopt import docopt\n\nimport kolibri\nfrom .debian_check import check_debian_user\n# Check if the current user is the kolibri user when running kolibri from .deb.\n# Putting it here because importing server module creates KOLIBRI_HOME directory.\ncheck_debian_user()\n\nfrom . import server # noqa\nfrom .conf import OPTIONS # noqa\nfrom .sanity_checks import check_content_directory_exists_and_writable # noqa\nfrom .sanity_checks import check_other_kolibri_running # noqa\nfrom .system import become_daemon # noqa\nfrom kolibri.core.deviceadmin.utils import IncompatibleDatabase # noqa\nfrom kolibri.utils import conf # noqa\n\n\nUSAGE = \"\"\"\nKolibri\n\nSupported by Foundation for Learning Equality\nwww.learningequality.org\n\nUsage:\n kolibri start [--foreground] [--port=<port>] [options]\n kolibri stop [options]\n kolibri restart [options]\n kolibri status [options]\n kolibri shell [options]\n kolibri manage COMMAND [DJANGO_OPTIONS ...]\n kolibri manage COMMAND [options] [-- DJANGO_OPTIONS ...]\n kolibri diagnose [options]\n kolibri plugin [options] PLUGIN (enable | disable)\n kolibri language setdefault <langcode>\n kolibri plugin --list\n kolibri -h | --help\n kolibri --version\n\nOptions:\n -h --help Show this screen.\n --version Show version.\n --debug Output debug messages (for development)\n COMMAND The name of any available django manage command. For\n help, type `kolibri manage help`\n DJANGO_OPTIONS Command options are passed on to the django manage\n command. Notice that all django options must appear\n *last* and should not be mixed with other options.\n\nExamples:\n kolibri start Start Kolibri\n kolibri stop Stop Kolibri\n kolibri status How is Kolibri doing?\n kolibri url Tell me the address of Kolibri\n kolibri shell Display a Django shell\n kolibri manage help Show the Django management usage dialogue\n kolibri manage runserver Runs Django's development server\n kolibri diagnose Show system information for debugging\n\n\nEnvironment:\n\n DJANGO_SETTINGS_MODULE\n - The Django settings module to load. 
Useful if you are deploying Kolibri\n in a specific setup such as your own web server.\n - Default: \"kolibri.deployment.default.settings.base\"\n\n KOLIBRI_HOME\n - Where Kolibri will store its data and configuration files.\n\n KOLIBRI_HTTP_PORT\n - Default: 8080\n\n\"\"\"\n\n__doc__ = \"\"\"\nKolibri Command Line Interface (CLI)\n====================================\n\nAuto-generated usage instructions from ``kolibri -h``::\n\n{usage:s}\n\n\"\"\".format(usage=\"\\n\".join(map(lambda x: \" \" + x, USAGE.split(\"\\n\"))))\n\nlogger = logging.getLogger(__name__)\n\n\nclass PluginDoesNotExist(Exception):\n \"\"\"\n This exception is local to the CLI environment in case actions are performed\n on a plugin that cannot be loaded.\n \"\"\"\n\n\nclass PluginBaseLoadsApp(Exception):\n \"\"\"\n An exception raised in case a kolibri_plugin.py results in loading of the\n Django app stack.\n \"\"\"\n pass\n\n\ndef version_file():\n \"\"\"\n During test runtime, this path may differ because KOLIBRI_HOME is\n regenerated\n \"\"\"\n from .conf import KOLIBRI_HOME\n return os.path.join(KOLIBRI_HOME, '.data_version')\n\n\ndef should_back_up(kolibri_version, version_file_contents):\n change_version = kolibri_version != version_file_contents\n return change_version and 'dev' not in version_file_contents and 'dev' not in kolibri_version\n\n\ndef initialize(debug=False):\n \"\"\"\n Currently, always called before running commands. This may change in case\n commands that conflict with this behavior show up.\n\n :param: debug: Tells initialization to setup logging etc.\n \"\"\"\n if not os.path.isfile(version_file()):\n django.setup()\n\n setup_logging(debug=debug)\n\n _first_run()\n else:\n # Do this here so that we can fix any issues with our configuration file before\n # we attempt to set up django.\n from .conf import autoremove_unavailable_plugins, enable_default_plugins\n autoremove_unavailable_plugins()\n\n version = open(version_file(), \"r\").read()\n version = version.strip() if version else \"\"\n\n if should_back_up(kolibri.__version__, version):\n # dbbackup will load settings.INSTALLED_APPS.\n # we need to ensure plugins are correct in conf.config before\n enable_default_plugins()\n # Version changed, make a backup no matter what.\n from kolibri.core.deviceadmin.utils import dbbackup\n try:\n backup = dbbackup(version)\n logger.info(\n \"Backed up database to: {path}\".format(path=backup))\n except IncompatibleDatabase:\n logger.warning(\n \"Skipped automatic database backup, not compatible with \"\n \"this DB engine.\")\n\n django.setup()\n\n setup_logging(debug=debug)\n\n if kolibri.__version__ != version:\n logger.info(\n \"Version was {old}, new version: {new}\".format(\n old=version,\n new=kolibri.__version__\n )\n )\n update()\n\n\ndef _migrate_databases():\n \"\"\"\n Try to migrate all active databases. 
This should not be called unless Django has\n been initialized.\n \"\"\"\n from django.conf import settings\n for database in settings.DATABASES:\n call_command(\"migrate\", interactive=False, database=database)\n\n # load morango fixtures needed for certificate related operations\n call_command(\"loaddata\", \"scopedefinitions\")\n\n\ndef _first_run():\n \"\"\"\n Called once at least.\n \"\"\"\n if os.path.exists(version_file()):\n logger.error(\n \"_first_run() called, but Kolibri is already initialized.\"\n )\n return\n logger.info(\"Kolibri running for the first time.\")\n logger.info(\n \"We don't yet use pre-migrated database seeds, so you're going to have \"\n \"to wait a bit while we create a blank database...\\n\\n\"\n )\n\n from kolibri.core.settings import SKIP_AUTO_DATABASE_MIGRATION, DEFAULT_PLUGINS\n\n # We need to migrate the database before enabling plugins, because they\n # might depend on database readiness.\n if not SKIP_AUTO_DATABASE_MIGRATION:\n _migrate_databases()\n\n for plugin_module in DEFAULT_PLUGINS:\n try:\n plugin(plugin_module, enable=True)\n except PluginDoesNotExist:\n continue\n\n logger.info(\"Automatically enabling applications.\")\n\n # Finally collect static assets and run migrations again\n update()\n\n\ndef update():\n \"\"\"\n Called whenever a version change in kolibri is detected\n\n TODO: We should look at version numbers of external plugins, too!\n \"\"\"\n # Can be removed once we stop calling update() from start()\n # See: https://github.com/learningequality/kolibri/issues/1615\n if update.called:\n return\n update.called = True\n\n logger.info(\"Running update routines for new version...\")\n\n # Need to do this here, before we run any Django management commands that\n # import settings. Otherwise the updated configuration will not be used\n # during this runtime.\n\n call_command(\"collectstatic\", interactive=False)\n\n from kolibri.core.settings import SKIP_AUTO_DATABASE_MIGRATION\n\n if not SKIP_AUTO_DATABASE_MIGRATION:\n _migrate_databases()\n\n with open(version_file(), \"w\") as f:\n f.write(kolibri.__version__)\n\n from kolibri.core.content.utils.annotation import update_channel_metadata\n update_channel_metadata()\n\n from django.core.cache import caches\n cache = caches['built_files']\n cache.clear()\n\n\nupdate.called = False\n\n\ndef start(port=None, daemon=True):\n \"\"\"\n Start the server on given port.\n\n :param: port: Port number (default: 8080)\n :param: daemon: Fork to background process (default: True)\n \"\"\"\n run_cherrypy = conf.OPTIONS[\"Server\"][\"CHERRYPY_START\"]\n\n # This is temporarily put in place because of\n # https://github.com/learningequality/kolibri/issues/1615\n update()\n\n # In case some tests run start() function only\n if not isinstance(port, int):\n port = _get_port(port)\n\n if not daemon:\n logger.info(\"Running 'kolibri start' in foreground...\")\n\n else:\n logger.info(\"Running 'kolibri start' as daemon (system service)\")\n\n if run_cherrypy:\n __, urls = server.get_urls(listen_port=port)\n if not urls:\n logger.error(\n \"Could not detect an IP address that Kolibri binds to, but try \"\n \"opening up the following addresses:\\n\")\n urls = [\n \"http://{}:{}\".format(ip, port) for ip in (\"localhost\", \"127.0.0.1\")\n ]\n else:\n logger.info(\"Kolibri running on:\\n\")\n for addr in urls:\n sys.stderr.write(\"\\t{}\\n\".format(addr))\n sys.stderr.write(\"\\n\")\n else:\n logger.info(\"Starting Kolibri background services\")\n\n # Daemonize at this point, no more user output is needed\n if 
daemon:\n\n kwargs = {}\n # Truncate the file\n if os.path.isfile(server.DAEMON_LOG):\n open(server.DAEMON_LOG, \"w\").truncate()\n logger.info(\n \"Going to daemon mode, logging to {0}\".format(server.DAEMON_LOG)\n )\n kwargs['out_log'] = server.DAEMON_LOG\n kwargs['err_log'] = server.DAEMON_LOG\n\n # close all connections before forking, to avoid SQLite corruption:\n # https://www.sqlite.org/howtocorrupt.html#_carrying_an_open_database_connection_across_a_fork_\n connections.close_all()\n\n become_daemon(**kwargs)\n\n server.start(port=port, run_cherrypy=run_cherrypy)\n\n\ndef stop():\n \"\"\"\n Stops the server unless it isn't running\n \"\"\"\n try:\n pid, __, __ = server.get_status()\n server.stop(pid=pid)\n stopped = True\n if conf.OPTIONS[\"Server\"][\"CHERRYPY_START\"]:\n logger.info(\"Kolibri server has successfully been stopped.\")\n else:\n logger.info(\"Kolibri background services have successfully been stopped.\")\n except server.NotRunning as e:\n verbose_status = \"{msg:s} ({code:d})\".format(\n code=e.status_code,\n msg=status.codes[e.status_code]\n )\n if e.status_code == server.STATUS_STOPPED:\n logger.info(\"Already stopped: {}\".format(verbose_status))\n stopped = True\n elif e.status_code == server.STATUS_STARTING_UP:\n logger.error(\n \"Not stopped: {}\".format(verbose_status)\n )\n sys.exit(e.status_code)\n else:\n logger.error(\n \"During graceful shutdown, server says: {}\".format(\n verbose_status\n )\n )\n logger.error(\n \"Not responding, killing with force\"\n )\n server.stop(force=True)\n stopped = True\n\n if stopped:\n sys.exit(0)\n\n\ndef status():\n \"\"\"\n Check the server's status. For possible statuses, see the status dictionary\n status.codes\n\n Status *always* outputs the current status in the first line of stderr.\n The following lines contain optional information such as the addresses where\n the server is listening.\n\n TODO: We can't guarantee the above behavior because of the django stack\n being loaded regardless\n\n :returns: status_code, key has description in status.codes\n \"\"\"\n status_code, urls = server.get_urls()\n\n if status_code == server.STATUS_RUNNING:\n sys.stderr.write(\"{msg:s} (0)\\n\".format(msg=status.codes[0]))\n if urls:\n sys.stderr.write(\"Kolibri running on:\\n\\n\")\n for addr in urls:\n sys.stderr.write(\"\\t{}\\n\".format(addr))\n return server.STATUS_RUNNING\n else:\n verbose_status = status.codes[status_code]\n sys.stderr.write(\"{msg:s} ({code:d})\\n\".format(\n code=status_code, msg=verbose_status))\n return status_code\n\n\nstatus.codes = {\n server.STATUS_RUNNING: 'OK, running',\n server.STATUS_STOPPED: 'Stopped',\n server.STATUS_STARTING_UP: 'Starting up',\n server.STATUS_NOT_RESPONDING: 'Not responding',\n server.STATUS_FAILED_TO_START:\n 'Failed to start (check log file: {0})'.format(server.DAEMON_LOG),\n server.STATUS_UNCLEAN_SHUTDOWN: 'Unclean shutdown',\n server.STATUS_UNKNOWN_INSTANCE: 'Unknown Kolibri running on port',\n server.STATUS_SERVER_CONFIGURATION_ERROR: 'Kolibri server configuration error',\n server.STATUS_PID_FILE_READ_ERROR: 'Could not read PID file',\n server.STATUS_PID_FILE_INVALID: 'Invalid PID file',\n server.STATUS_UNKNOWN: 'Could not determine status',\n}\n\n\ndef setup_logging(debug=False):\n \"\"\"\n Configures logging in cases where a Django environment is not supposed\n to be configured.\n\n TODO: This is really confusing, importing django settings is allowed to\n fail when debug=False, but if it's true it can fail?\n \"\"\"\n try:\n from django.conf.settings import LOGGING\n 
except ImportError:\n from kolibri.deployment.default.settings.base import LOGGING\n if debug:\n from django.conf import settings\n settings.DEBUG = True\n LOGGING['handlers']['console']['level'] = 'DEBUG'\n LOGGING['loggers']['kolibri']['level'] = 'DEBUG'\n logger.debug(\"Debug mode is on!\")\n logging.config.dictConfig(LOGGING)\n\n\ndef manage(cmd, args=[]):\n \"\"\"\n Invokes a django command\n\n :param: cmd: The command to invoke, for instance \"runserver\"\n :param: args: arguments for the command\n \"\"\"\n # Set sys.argv to correctly reflect the way we invoke kolibri as a Python\n # module\n sys.argv = [\"-m\", \"kolibri\"] + sys.argv[1:]\n from django.core.management import execute_from_command_line\n argv = ['kolibri manage', cmd] + args\n execute_from_command_line(argv=argv)\n\n\ndef _is_plugin(obj):\n from kolibri.plugins.base import KolibriPluginBase # NOQA\n\n return (\n isinstance(obj, type) and obj is not KolibriPluginBase\n and issubclass(obj, KolibriPluginBase)\n )\n\n\ndef get_kolibri_plugin(plugin_name):\n \"\"\"\n Try to load kolibri_plugin from given plugin module identifier\n\n :returns: A list of classes inheriting from KolibriPluginBase\n \"\"\"\n\n plugin_classes = []\n\n try:\n plugin_module = importlib.import_module(\n plugin_name + \".kolibri_plugin\"\n )\n for obj in plugin_module.__dict__.values():\n if _is_plugin(obj):\n plugin_classes.append(obj)\n except ImportError as e:\n # Python 2: message, Python 3: msg\n exc_message = getattr(e, 'message', getattr(e, 'msg', None))\n if exc_message.startswith(\"No module named\"):\n msg = (\n \"Plugin '{}' does not seem to exist. Is it on the PYTHONPATH?\"\n ).format(plugin_name)\n raise PluginDoesNotExist(msg)\n else:\n raise\n except AppRegistryNotReady:\n msg = (\n \"Plugin '{}' loads the Django app registry, which it isn't \"\n \"allowed to do while enabling or disabling itself.\"\n ).format(plugin_name)\n raise PluginBaseLoadsApp(msg)\n\n if not plugin_classes:\n # There's no clear use case for a plugin without a KolibriPluginBase\n # inheritor, for now just throw a warning\n logger.warning(\n \"Plugin '{}' has no KolibriPluginBase defined\".format(plugin_name)\n )\n\n return plugin_classes\n\n\ndef plugin(plugin_name, **kwargs):\n \"\"\"\n Receives a plugin identifier and tries to load its main class. Calls class\n functions.\n \"\"\"\n from kolibri.utils import conf\n\n if kwargs.get('enable', False):\n plugin_classes = get_kolibri_plugin(plugin_name)\n for klass in plugin_classes:\n klass.enable()\n\n if kwargs.get('disable', False):\n try:\n plugin_classes = get_kolibri_plugin(plugin_name)\n for klass in plugin_classes:\n klass.disable()\n except PluginDoesNotExist as e:\n logger.error(str(e))\n logger.warning(\n \"Removing '{}' from configuration in a naive way.\".format(\n plugin_name\n )\n )\n if plugin_name in conf.config['INSTALLED_APPS']:\n conf.config['INSTALLED_APPS'].remove(plugin_name)\n logger.info(\n \"Removed '{}' from INSTALLED_APPS\".format(plugin_name)\n )\n else:\n logger.warning(\n (\n \"Could not find any matches for {} in INSTALLED_APPS\"\n .format(plugin_name)\n )\n )\n\n conf.save()\n\n\ndef set_default_language(lang):\n \"\"\"\n Set the default language for this installation of Kolibri. 
Any running\n instance of Kolibri needs to be restarted in order for this change to work.\n \"\"\"\n\n from kolibri.utils import conf\n from django.conf import settings\n\n valid_languages = [l[0] for l in settings.LANGUAGES]\n\n if lang in valid_languages:\n conf.config['LANGUAGE_CODE'] = lang\n conf.save()\n else:\n msg = \"Invalid language code {langcode}. Must be one of: {validlangs}\".format(\n langcode=lang, validlangs=valid_languages\n )\n\n logging.warning(msg)\n\n\ndef parse_args(args=None):\n \"\"\"\n Parses arguments by invoking docopt. Arguments for django management\n commands are split out before returning.\n\n :returns: (parsed_arguments, raw_django_ars)\n \"\"\"\n\n if not args:\n args = sys.argv[1:]\n\n # Split out the parts of the argument list that we pass on to Django\n # and don't feed to docopt.\n if '--' in args:\n # At the moment, we keep this for backwards-compatibility and in case there\n # is a real case of having to force the parsing of DJANGO_OPTIONS to a\n # specific location. Example:\n # kolibri manage commandname --non-django-arg -- --django-arg\n pivot = args.index('--')\n args, django_args = args[:pivot], args[pivot + 1:]\n elif 'manage' in args:\n # Include \"manage COMMAND\" for docopt parsing, but split out the rest\n pivot = args.index('manage') + 2\n args, django_args = args[:pivot], args[pivot:]\n else:\n django_args = []\n\n docopt_kwargs = dict(\n version=str(kolibri.__version__),\n options_first=False,\n )\n\n if args:\n docopt_kwargs['argv'] = args\n\n return docopt(USAGE, **docopt_kwargs), django_args\n\n\ndef _get_port(port):\n return int(port) if port else OPTIONS[\"Deployment\"]['HTTP_PORT']\n\n\ndef main(args=None): # noqa: max-complexity=13\n \"\"\"\n Kolibri's main function. Parses arguments and calls utility functions.\n Utility functions should be callable for unit testing purposes, but remember\n to use main() for integration tests in order to test the argument API.\n \"\"\"\n\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n\n arguments, django_args = parse_args(args)\n\n debug = arguments['--debug']\n\n if arguments['start']:\n port = _get_port(arguments['--port'])\n if OPTIONS[\"Server\"][\"CHERRYPY_START\"]:\n check_other_kolibri_running(port)\n\n try:\n initialize(debug=debug)\n except (DatabaseError, SQLite3DatabaseError) as e:\n if \"malformed\" in str(e):\n logger.error(\n \"Your database appears to be corrupted. If you encounter this,\"\n \"please immediately back up all files in the .kolibri folder that\"\n \"end in .sqlite3, .sqlite3-shm, .sqlite3-wal, or .log and then\"\n \"contact Learning Equality. 
Thank you!\"\n )\n raise\n\n # Alias\n if arguments['shell']:\n arguments['manage'] = True\n arguments['COMMAND'] = 'shell'\n\n if arguments['manage']:\n command = arguments['COMMAND']\n manage(command, args=django_args)\n return\n\n if arguments['plugin']:\n plugin_name = arguments['PLUGIN']\n plugin(plugin_name, **arguments)\n return\n\n if arguments['start']:\n try:\n server._write_pid_file(server.STARTUP_LOCK, port)\n except (IOError, OSError):\n logger.warn('Impossible to create file lock to communicate starting process')\n # Check if the content directory exists when Kolibri runs after the first time.\n check_content_directory_exists_and_writable()\n\n # Defragment the db\n call_command(\"vacuumsqlite\")\n\n # Clear old sessions up\n call_command(\"clearsessions\")\n\n daemon = not arguments['--foreground']\n if sys.platform == 'darwin':\n daemon = False\n start(port, daemon=daemon)\n return\n\n if arguments['stop']:\n stop()\n return\n\n if arguments['status']:\n status_code = status()\n sys.exit(status_code)\n return\n\n if arguments['language'] and arguments['setdefault']:\n set_default_language(arguments['<langcode>'])\n",
"path": "kolibri/utils/cli.py"
}
] | diff --git a/kolibri/utils/cli.py b/kolibri/utils/cli.py
index 998417f5ebf..bb6d2c3264b 100644
--- a/kolibri/utils/cli.py
+++ b/kolibri/utils/cli.py
@@ -668,6 +668,8 @@ def main(args=None): # noqa: max-complexity=13
call_command("clearsessions")
daemon = not arguments['--foreground']
+ if sys.platform == 'darwin':
+ daemon = False
start(port, daemon=daemon)
return
|
python-gitlab__python-gitlab-2088 | sphinx warnings `reference target not found`
When building my packages I'm using the `sphinx-build` command with the `-n` switch, which shows warnings about missing references. These are not critical issues.
Here is the output with warnings:
```console
+ /usr/bin/sphinx-build -n -T -b man docs build/sphinx/man
Running Sphinx v4.5.0
making output directory... done
myst v0.17.2: MdParserConfig(commonmark_only=False, gfm_only=False, enable_extensions=[], linkify_fuzzy_links=True, dmath_allow_labels=True, dmath_allow_space=True, dmath_allow_digits=True, dmath_double_inline=False, update_mathjax=True, mathjax_classes='tex2jax_process|mathjax_process|math|output_area', disable_syntax=[], all_links_external=False, url_schemes=('http', 'https', 'mailto', 'ftp'), ref_domains=None, highlight_code_blocks=True, number_code_blocks=[], title_to_header=False, heading_anchors=None, heading_slug_func=None, footnote_transition=True, sub_delimiters=('{', '}'), words_per_minute=200)
[autosummary] generating autosummary for: api-objects.rst, api-usage.rst, api/gitlab.rst, api/gitlab.v4.rst, changelog.md, cli-examples.rst, cli-objects.rst, cli-usage.rst, faq.rst, gl_objects/access_requests.rst, ..., gl_objects/snippets.rst, gl_objects/system_hooks.rst, gl_objects/templates.rst, gl_objects/todos.rst, gl_objects/topics.rst, gl_objects/users.rst, gl_objects/variables.rst, gl_objects/wikis.rst, index.rst, release-notes.rst
building [mo]: targets for 0 po files that are out of date
building [man]: all manpages
updating environment: [new config] 65 added, 0 changed, 0 removed
reading sources... [100%] release-notes
looking for now-outdated files... none found
pickling environment... done
checking consistency... done
writing... python-gitlab.3 { cli-usage api-usage cli-examples api-objects gl_objects/access_requests gl_objects/appearance gl_objects/applications gl_objects/emojis gl_objects/badges gl_objects/branches gl_objects/clusters gl_objects/messages gl_objects/commits gl_objects/deploy_keys gl_objects/deploy_tokens gl_objects/deployments gl_objects/discussions gl_objects/environments gl_objects/events gl_objects/epics gl_objects/features gl_objects/geo_nodes gl_objects/groups gl_objects/group_access_tokens gl_objects/issues gl_objects/keys gl_objects/boards gl_objects/labels gl_objects/notifications gl_objects/merge_trains gl_objects/merge_requests gl_objects/merge_request_approvals gl_objects/milestones gl_objects/namespaces gl_objects/notes gl_objects/packages gl_objects/pagesdomains gl_objects/personal_access_tokens gl_objects/pipelines_and_jobs gl_objects/projects gl_objects/project_access_tokens gl_objects/protected_branches gl_objects/releases gl_objects/runners gl_objects/remote_mirrors gl_objects/repositories gl_objects/repository_tags gl_objects/search gl_objects/settings gl_objects/snippets gl_objects/system_hooks gl_objects/templates gl_objects/todos gl_objects/topics gl_objects/users gl_objects/variables gl_objects/sidekiq gl_objects/wikis api/gitlab api/gitlab.v4 cli-objects changelog release-notes faq } /home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/gl_objects/applications.rst:10: WARNING: py:class reference target not found: gitlab.v4.objects.Applications
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/gl_objects/epics.rst:15: WARNING: py:attr reference target not found: gitlab.Gitlab.Group.epics
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/gl_objects/epics.rst:54: WARNING: py:attr reference target not found: gitlab.Gitlab.GroupEpic.issues
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/gl_objects/group_access_tokens.rst:14: WARNING: py:attr reference target not found: gitlab.Gitlab.group_access_tokens
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/gl_objects/issues.rst:239: WARNING: py:attr reference target not found: gitlab.issues_statistics
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/gl_objects/notes.rst:19: WARNING: py:attr reference target not found: gitlab.v4.objects.GroupEpic.notes
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/gl_objects/personal_access_tokens.rst:11: WARNING: py:class reference target not found: gitlab.v4.objects.PersonalAcessTokenManager
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/gl_objects/personal_access_tokens.rst:14: WARNING: py:class reference target not found: gitlab.v4.objects.UserPersonalAcessTokenManager
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/gl_objects/personal_access_tokens.rst:15: WARNING: py:attr reference target not found: gitlab.Gitlab.User.personal_access_tokens
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/gl_objects/project_access_tokens.rst:14: WARNING: py:attr reference target not found: gitlab.Gitlab.project_access_tokens
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/gl_objects/repository_tags.rst:12: WARNING: py:attr reference target not found: gitlab.v4.objects.Repository.tags
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/gl_objects/snippets.rst:11: WARNING: py:class reference target not found: gitlab.v4.objects.SnipptManager
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/gl_objects/todos.rst:10: WARNING: py:class reference target not found: gitlab.objects.Todo
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/gl_objects/todos.rst:11: WARNING: py:class reference target not found: gitlab.objects.TodoManager
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/gl_objects/users.rst:219: WARNING: py:attr reference target not found: gitlab.Gitlab.user
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/client.py:docstring of gitlab.client.Gitlab:: WARNING: py:class reference target not found: requests.sessions.Session
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/client.py:docstring of gitlab.client.Gitlab:: WARNING: py:class reference target not found: requests.sessions.Session
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/client.py:docstring of gitlab.client.Gitlab.from_config:: WARNING: py:class reference target not found: config_files
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/client.py:docstring of gitlab.client.Gitlab.http_delete:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/client.py:docstring of gitlab.client.Gitlab.http_delete:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/client.py:docstring of gitlab.client.Gitlab.http_get:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/client.py:docstring of gitlab.client.Gitlab.http_get:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/client.py:docstring of gitlab.client.Gitlab.http_post:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/client.py:docstring of gitlab.client.Gitlab.http_post:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/client.py:docstring of gitlab.client.Gitlab.http_put:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/client.py:docstring of gitlab.client.Gitlab.http_put:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/client.py:docstring of gitlab.client.Gitlab.http_request:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/client.py:docstring of gitlab.client.Gitlab.http_request:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/client.py:docstring of gitlab.client.Gitlab.set_license:: WARNING: py:exc reference target not found: GitlabPostError
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/packages.py:docstring of gitlab.v4.objects.packages.GenericPackageManager.upload:: WARNING: py:class reference target not found: pathlib.Path
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/packages.py:docstring of gitlab.v4.objects.packages.GenericPackageManager.upload:: WARNING: py:class reference target not found: pathlib.Path
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/groups.py:docstring of gitlab.v4.objects.groups.GroupManager.import_group:: WARNING: py:obj reference target not found: typing.BinaryIO
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/groups.py:docstring of gitlab.v4.objects.groups.GroupManager.import_group:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/groups.py:docstring of gitlab.v4.objects.groups.GroupManager.import_group:: WARNING: py:obj reference target not found: typing.BinaryIO
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/groups.py:docstring of gitlab.v4.objects.groups.GroupManager.import_group:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/projects.py:docstring of gitlab.v4.objects.Project.groups:: WARNING: py:class reference target not found: gitlab.v4.objects.projects.ProjectGroupManager
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/projects.py:docstring of gitlab.v4.objects.projects.Project.languages:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/projects.py:docstring of gitlab.v4.objects.projects.Project.languages:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/projects.py:docstring of gitlab.v4.objects.projects.Project.transfer:: WARNING: py:class reference target not found: project
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/artifacts.py:docstring of gitlab.v4.objects.artifacts.ProjectArtifactManager.download:: WARNING: py:class reference target not found: are not
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/commits.py:docstring of gitlab.v4.objects.commits.ProjectCommit.revert:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/commits.py:docstring of gitlab.v4.objects.commits.ProjectCommit.revert:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/commits.py:docstring of gitlab.v4.objects.commits.ProjectCommit.signature:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/commits.py:docstring of gitlab.v4.objects.commits.ProjectCommit.signature:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/environments.py:docstring of gitlab.v4.objects.environments.ProjectEnvironment.stop:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/environments.py:docstring of gitlab.v4.objects.environments.ProjectEnvironment.stop:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/issues.py:docstring of gitlab.v4.objects.issues.ProjectIssue.closed_by:: WARNING: py:exc reference target not found: GitlabGetErrot
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/issues.py:docstring of gitlab.v4.objects.issues.ProjectIssue.related_merge_requests:: WARNING: py:exc reference target not found: GitlabGetErrot
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/deploy_keys.py:docstring of gitlab.v4.objects.deploy_keys.ProjectKeyManager.enable:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/deploy_keys.py:docstring of gitlab.v4.objects.deploy_keys.ProjectKeyManager.enable:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/labels.py:docstring of gitlab.v4.objects.labels.ProjectLabel:1: WARNING: py:class reference target not found: gitlab.mixins.PromoteMixin
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/projects.py:docstring of gitlab.v4.objects.projects.ProjectManager.import_bitbucket_server:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/projects.py:docstring of gitlab.v4.objects.projects.ProjectManager.import_bitbucket_server:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/projects.py:docstring of gitlab.v4.objects.projects.ProjectManager.import_github:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/projects.py:docstring of gitlab.v4.objects.projects.ProjectManager.import_github:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/projects.py:docstring of gitlab.v4.objects.projects.ProjectManager.import_project:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/projects.py:docstring of gitlab.v4.objects.projects.ProjectManager.import_project:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/merge_requests.py:docstring of gitlab.v4.objects.merge_requests.ProjectMergeRequest.changes:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/merge_requests.py:docstring of gitlab.v4.objects.merge_requests.ProjectMergeRequest.changes:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/merge_requests.py:docstring of gitlab.v4.objects.merge_requests.ProjectMergeRequest.merge_ref:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/merge_requests.py:docstring of gitlab.v4.objects.merge_requests.ProjectMergeRequest.merge_ref:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/merge_requests.py:docstring of gitlab.v4.objects.merge_requests.ProjectMergeRequest.rebase:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/merge_requests.py:docstring of gitlab.v4.objects.merge_requests.ProjectMergeRequest.rebase:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/milestones.py:docstring of gitlab.v4.objects.milestones.ProjectMilestone:1: WARNING: py:class reference target not found: gitlab.mixins.PromoteMixin
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/pipelines.py:docstring of gitlab.v4.objects.pipelines.ProjectPipeline.cancel:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/pipelines.py:docstring of gitlab.v4.objects.pipelines.ProjectPipeline.cancel:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/pipelines.py:docstring of gitlab.v4.objects.pipelines.ProjectPipeline.retry:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/pipelines.py:docstring of gitlab.v4.objects.pipelines.ProjectPipeline.retry:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/repositories.py:docstring of gitlab.v4.objects.repositories.RepositoryMixin.repository_blob:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/repositories.py:docstring of gitlab.v4.objects.repositories.RepositoryMixin.repository_blob:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/repositories.py:docstring of gitlab.v4.objects.repositories.RepositoryMixin.repository_compare:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/repositories.py:docstring of gitlab.v4.objects.repositories.RepositoryMixin.repository_compare:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/repositories.py:docstring of gitlab.v4.objects.repositories.RepositoryMixin.update_submodule:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/repositories.py:docstring of gitlab.v4.objects.repositories.RepositoryMixin.update_submodule:: WARNING: py:exc reference target not found: GitlabPutError
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/repositories.py:docstring of gitlab.v4.objects.repositories.RepositoryMixin.update_submodule:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/sidekiq.py:docstring of gitlab.v4.objects.sidekiq.SidekiqManager.compound_metrics:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/sidekiq.py:docstring of gitlab.v4.objects.sidekiq.SidekiqManager.compound_metrics:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/sidekiq.py:docstring of gitlab.v4.objects.sidekiq.SidekiqManager.job_stats:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/sidekiq.py:docstring of gitlab.v4.objects.sidekiq.SidekiqManager.job_stats:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/sidekiq.py:docstring of gitlab.v4.objects.sidekiq.SidekiqManager.process_metrics:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/sidekiq.py:docstring of gitlab.v4.objects.sidekiq.SidekiqManager.process_metrics:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/sidekiq.py:docstring of gitlab.v4.objects.sidekiq.SidekiqManager.queue_metrics:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/sidekiq.py:docstring of gitlab.v4.objects.sidekiq.SidekiqManager.queue_metrics:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/users.py:docstring of gitlab.v4.objects.users.User.activate:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/users.py:docstring of gitlab.v4.objects.users.User.activate:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/users.py:docstring of gitlab.v4.objects.users.User.block:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/users.py:docstring of gitlab.v4.objects.users.User.block:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/users.py:docstring of gitlab.v4.objects.users.User.deactivate:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/users.py:docstring of gitlab.v4.objects.users.User.deactivate:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/users.py:docstring of gitlab.v4.objects.users.User.follow:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/users.py:docstring of gitlab.v4.objects.users.User.follow:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/users.py:docstring of gitlab.v4.objects.User.followers_users:: WARNING: py:class reference target not found: UserFollowersManager
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/users.py:docstring of gitlab.v4.objects.User.following_users:: WARNING: py:class reference target not found: UserFollowingManager
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/users.py:docstring of gitlab.v4.objects.users.User.unblock:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/users.py:docstring of gitlab.v4.objects.users.User.unblock:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/users.py:docstring of gitlab.v4.objects.users.User.unfollow:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/v4/objects/users.py:docstring of gitlab.v4.objects.users.User.unfollow:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/cli.py:docstring of gitlab.cli.docs:: WARNING: py:class reference target not found: argparse.ArgumentParser
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/cli.py:docstring of gitlab.cli.docs:: WARNING: py:class reference target not found: argparse.ArgumentParser
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/cli.py:docstring of gitlab.cli.register_custom_action:: WARNING: py:class reference target not found: gitlab.cli.__F
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/cli.py:docstring of gitlab.cli.register_custom_action:: WARNING: py:class reference target not found: gitlab.cli.__F
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/cli.py:docstring of gitlab.cli.register_custom_action:: WARNING: py:class reference target not found: gitlab.cli.__F
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/cli.py:docstring of gitlab.cli.register_custom_action:: WARNING: py:class reference target not found: gitlab.cli.__F
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/cli.py:docstring of gitlab.cli.what_to_cls:: WARNING: py:class reference target not found: module
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/cli.py:docstring of gitlab.cli.what_to_cls:: WARNING: py:class reference target not found: module
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/exceptions.py:docstring of gitlab.exceptions.on_http_error:: WARNING: py:class reference target not found: gitlab.exceptions.__F
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/exceptions.py:docstring of gitlab.exceptions.on_http_error:: WARNING: py:class reference target not found: gitlab.exceptions.__F
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/exceptions.py:docstring of gitlab.exceptions.on_http_error:: WARNING: py:class reference target not found: The exception type to raise -- must inherit from
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/exceptions.py:docstring of gitlab.exceptions.on_http_error:: WARNING: py:class reference target not found: gitlab.exceptions.__F
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/exceptions.py:docstring of gitlab.exceptions.on_http_error:: WARNING: py:class reference target not found: gitlab.exceptions.__F
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/utils.py:docstring of gitlab.utils.response_content:: WARNING: py:class reference target not found: requests.models.Response
/home/tkloczko/rpmbuild/BUILD/python-gitlab-3.4.0/docs/../gitlab/utils.py:docstring of gitlab.utils.response_content:: WARNING: py:class reference target not found: requests.models.Response
done
build succeeded, 112 warnings.
```
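All of these are unresolved `py:*` cross-reference warnings. One way to keep a strict (`-n`/`-W`) docs build green without chasing every target individually is a blanket ignore in `docs/conf.py` — a minimal sketch, assuming Sphinx ≥ 4.1 where `nitpick_ignore_regex` is available:

```python
# docs/conf.py -- sketch only: ignore unresolved cross-references in the
# Python domain instead of fixing each target one by one.
nitpick_ignore_regex = [
    (r"py:.*", r".*"),  # (reference-type regex, target regex)
]
```

The trade-off is that genuinely broken `py:` references are silenced as well, so this only makes sense if the warnings above are considered noise.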
| [
{
"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# python-gitlab documentation build configuration file, created by\n# sphinx-quickstart on Mon Dec 8 15:17:39 2014.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nfrom __future__ import unicode_literals\n\nimport os\nimport sys\nfrom datetime import datetime\n\nfrom sphinx.domains.python import PythonDomain\n\nsys.path.append(\"../\")\nsys.path.append(os.path.dirname(__file__))\nimport gitlab # noqa: E402. Needed purely for readthedocs' build\n\n\n# Sphinx will warn when attributes are exported in multiple places. See workaround:\n# https://github.com/sphinx-doc/sphinx/issues/3866#issuecomment-768167824\n# This patch can be removed when this issue is resolved:\n# https://github.com/sphinx-doc/sphinx/issues/4961\nclass PatchedPythonDomain(PythonDomain):\n def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):\n if \"refspecific\" in node:\n del node[\"refspecific\"]\n return super(PatchedPythonDomain, self).resolve_xref(\n env, fromdocname, builder, typ, target, node, contnode\n )\n\n\ndef setup(sphinx):\n sphinx.add_domain(PatchedPythonDomain, override=True)\n\n\non_rtd = os.environ.get(\"READTHEDOCS\", None) == \"True\"\nyear = datetime.now().year\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath(\"..\"))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"myst_parser\",\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosummary\",\n \"ext.docstrings\",\n \"sphinxcontrib.autoprogram\",\n]\n\nautodoc_typehints = \"both\"\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix of source filenames.\nsource_suffix = {\".rst\": \"restructuredtext\", \".md\": \"markdown\"}\n\n# The encoding of source files.\n# source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nroot_doc = \"index\"\n\n# General information about the project.\nproject = \"python-gitlab\"\ncopyright = (\n f\"2013-2018, Gauvain Pocentek, Mika Mäenpää.\\n2018-{year}, python-gitlab team\"\n)\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = gitlab.__version__\n# The full version, including alpha/beta/rc tags.\nrelease = version\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n# language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n# today = ''\n# Else, today_fmt is used as the format for a strftime call.\n# today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = [\"_build\"]\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n# default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n# add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n# add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n# show_authors = False\n\n# A list of ignored prefixes for module index sorting.\n# modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n# keep_warnings = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = \"furo\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n# html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n# html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\nhtml_title = f\"{project} <small>v{release}</small>\"\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n# html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n# html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n# html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\nhtml_js_files = [\n \"js/gitter.js\",\n (\n \"https://sidecar.gitter.im/dist/sidecar.v1.js\",\n {\"async\": \"async\", \"defer\": \"defer\"},\n ),\n]\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. 
These files are copied\n# directly to the root of the documentation.\n# html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n# html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n# html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n# html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n# html_additional_pages = {}\n\n# If false, no module index is generated.\n# html_domain_indices = True\n\n# If false, no index is generated.\n# html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n# html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n# html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n# html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n# html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n# html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n# html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"python-gitlabdoc\"\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n # 'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (\n \"index\",\n \"python-gitlab.tex\",\n \"python-gitlab Documentation\",\n \"Gauvain Pocentek, Mika Mäenpää\",\n \"manual\",\n )\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n# latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n# latex_use_parts = False\n\n# If true, show page references after internal links.\n# latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n# latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n# latex_appendices = []\n\n# If false, no module index is generated.\n# latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (\n \"index\",\n \"python-gitlab\",\n \"python-gitlab Documentation\",\n [\"Gauvain Pocentek, Mika Mäenpää\"],\n 1,\n )\n]\n\n# If true, show URL addresses after external links.\n# man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n \"index\",\n \"python-gitlab\",\n \"python-gitlab Documentation\",\n \"Gauvain Pocentek, Mika Mäenpää\",\n \"python-gitlab\",\n \"One line description of project.\",\n \"Miscellaneous\",\n )\n]\n\n# Documents to append as an appendix to all manuals.\n# texinfo_appendices = []\n\n# If false, no module index is generated.\n# texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n# texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n# texinfo_no_detailmenu = False\n",
"path": "docs/conf.py"
}
] | [
{
"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# python-gitlab documentation build configuration file, created by\n# sphinx-quickstart on Mon Dec 8 15:17:39 2014.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nfrom __future__ import unicode_literals\n\nimport os\nimport sys\nfrom datetime import datetime\n\nfrom sphinx.domains.python import PythonDomain\n\nsys.path.append(\"../\")\nsys.path.append(os.path.dirname(__file__))\nimport gitlab # noqa: E402. Needed purely for readthedocs' build\n\n\n# Sphinx will warn when attributes are exported in multiple places. See workaround:\n# https://github.com/sphinx-doc/sphinx/issues/3866#issuecomment-768167824\n# This patch can be removed when this issue is resolved:\n# https://github.com/sphinx-doc/sphinx/issues/4961\nclass PatchedPythonDomain(PythonDomain):\n def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):\n if \"refspecific\" in node:\n del node[\"refspecific\"]\n return super(PatchedPythonDomain, self).resolve_xref(\n env, fromdocname, builder, typ, target, node, contnode\n )\n\n\ndef setup(sphinx):\n sphinx.add_domain(PatchedPythonDomain, override=True)\n\n\non_rtd = os.environ.get(\"READTHEDOCS\", None) == \"True\"\nyear = datetime.now().year\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath(\"..\"))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"myst_parser\",\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosummary\",\n \"ext.docstrings\",\n \"sphinxcontrib.autoprogram\",\n]\n\nautodoc_typehints = \"both\"\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix of source filenames.\nsource_suffix = {\".rst\": \"restructuredtext\", \".md\": \"markdown\"}\n\n# The encoding of source files.\n# source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nroot_doc = \"index\"\n\n# General information about the project.\nproject = \"python-gitlab\"\ncopyright = (\n f\"2013-2018, Gauvain Pocentek, Mika Mäenpää.\\n2018-{year}, python-gitlab team\"\n)\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = gitlab.__version__\n# The full version, including alpha/beta/rc tags.\nrelease = version\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n# language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n# today = ''\n# Else, today_fmt is used as the format for a strftime call.\n# today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = [\"_build\"]\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n# default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n# add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n# add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n# show_authors = False\n\n# A list of ignored prefixes for module index sorting.\n# modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n# keep_warnings = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = \"furo\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n# html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n# html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\nhtml_title = f\"{project} <small>v{release}</small>\"\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n# html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n# html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n# html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\nhtml_js_files = [\n \"js/gitter.js\",\n (\n \"https://sidecar.gitter.im/dist/sidecar.v1.js\",\n {\"async\": \"async\", \"defer\": \"defer\"},\n ),\n]\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. 
These files are copied\n# directly to the root of the documentation.\n# html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n# html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n# html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n# html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n# html_additional_pages = {}\n\n# If false, no module index is generated.\n# html_domain_indices = True\n\n# If false, no index is generated.\n# html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n# html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n# html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n# html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n# html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n# html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n# html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"python-gitlabdoc\"\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n # 'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (\n \"index\",\n \"python-gitlab.tex\",\n \"python-gitlab Documentation\",\n \"Gauvain Pocentek, Mika Mäenpää\",\n \"manual\",\n )\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n# latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n# latex_use_parts = False\n\n# If true, show page references after internal links.\n# latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n# latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n# latex_appendices = []\n\n# If false, no module index is generated.\n# latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (\n \"index\",\n \"python-gitlab\",\n \"python-gitlab Documentation\",\n [\"Gauvain Pocentek, Mika Mäenpää\"],\n 1,\n )\n]\n\n# If true, show URL addresses after external links.\n# man_show_urls = False\n\nnitpick_ignore_regex = [(r\"py:.*\", r\".*\")]\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n \"index\",\n \"python-gitlab\",\n \"python-gitlab Documentation\",\n \"Gauvain Pocentek, Mika Mäenpää\",\n \"python-gitlab\",\n \"One line description of project.\",\n \"Miscellaneous\",\n )\n]\n\n# Documents to append as an appendix to all manuals.\n# texinfo_appendices = []\n\n# If false, no module index is generated.\n# texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n# texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n# texinfo_no_detailmenu = False\n",
"path": "docs/conf.py"
}
] | diff --git a/docs/conf.py b/docs/conf.py
index 11b920e7b..13de175d0 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -287,6 +287,7 @@ def setup(sphinx):
# If true, show URL addresses after external links.
# man_show_urls = False
+nitpick_ignore_regex = [(r"py:.*", r".*")]
# -- Options for Texinfo output -------------------------------------------
diff --git a/setup.cfg b/setup.cfg
deleted file mode 100644
index 0e198a6f9..000000000
--- a/setup.cfg
+++ /dev/null
@@ -1,3 +0,0 @@
-[build_sphinx]
-warning-is-error = 1
-keep-going = 1
diff --git a/tox.ini b/tox.ini
index 144c52164..38171f2f6 100644
--- a/tox.ini
+++ b/tox.ini
@@ -83,7 +83,7 @@ per-file-ignores =
[testenv:docs]
deps = -r{toxinidir}/requirements-docs.txt
-commands = python setup.py build_sphinx
+commands = sphinx-build -n -W --keep-going -b html docs build/sphinx/html
[testenv:cover]
commands =
|
cisagov__manage.get.gov-1583 | Redirect logout to {beta.}get.gov info site
Now that we have the `cloud.gov Pages` site set up at get.gov, we should redirect logout actions to that site.
As a logged-in user of the registrar
I want to be redirected to the new get.gov informational site when I log out
So that I stay in the .gov experience rather than login.gov
AC:
- [ ] **Given** a logged-in user on the .gov registrar, **when** I log out and also choose "Yes, sign out of login.gov," **then** I am redirected to get.gov as an unauthenticated user.
- [ ] Language on the login.gov screen reads "Do you want to sign out of Login.gov and return to **get.gov**?"

### Additional Context:
Currently, if we select "return to the .gov registrar," we go back to login.gov instead, and if we then navigate to the .gov registrar, we get a nasty 401 error because we aren't logged in anymore.
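Implementation-wise, a minimal sketch of the settings change (assuming the existing `GETGOV_PUBLIC_SITE_URL` environment variable keeps pointing at the get.gov informational site) would send the post-logout hop there instead of the registrar's `home` view:

```python
# src/registrar/config/settings.py -- sketch only, not the final implementation.
# GETGOV_PUBLIC_SITE_URL already identifies the public informational site.
env_getgov_public_site_url = env.str("GETGOV_PUBLIC_SITE_URL", "")

# Where to send users after logging out: prefer the public get.gov site,
# falling back to the registrar home view if the URL is not configured.
LOGOUT_REDIRECT_URL = env_getgov_public_site_url or "home"
```

The `post_logout_redirect_uris` registered with Login.gov (and the wording on its sign-out confirmation screen) may also need to change, but that part is outside this sketch.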
### Links to related issues
🔄 #1509
| [
{
"content": "\"\"\"\nDjango settings for .gov registrar project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/4.0/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/4.0/ref/settings/\n\nIF you'd like to see all of these settings in the running app:\n\n```shell\n$ docker-compose exec app python manage.py shell\n>>> from django.conf import settings\n>>> dir(settings)\n```\n\n\"\"\"\nimport environs\nfrom base64 import b64decode\nfrom cfenv import AppEnv # type: ignore\nfrom pathlib import Path\nfrom typing import Final\n\nfrom botocore.config import Config\n\n# # # ###\n# Setup code goes here #\n# # # ###\n\nenv = environs.Env()\n\n# Get secrets from Cloud.gov user provided service, if exists\n# If not, get secrets from environment variables\nkey_service = AppEnv().get_service(name=\"getgov-credentials\")\n\n\n# Get secrets from Cloud.gov user provided s3 service, if it exists\ns3_key_service = AppEnv().get_service(name=\"getgov-s3\")\n\nif key_service and key_service.credentials:\n if s3_key_service and s3_key_service.credentials:\n # Concatenate the credentials from our S3 service into our secret service\n key_service.credentials.update(s3_key_service.credentials)\n secret = key_service.credentials.get\nelse:\n secret = env\n\n\n# # # ###\n# Values obtained externally #\n# # # ###\n\npath = Path(__file__)\n\nenv_db_url = env.dj_db_url(\"DATABASE_URL\")\nenv_debug = env.bool(\"DJANGO_DEBUG\", default=False)\nenv_is_production = env.bool(\"IS_PRODUCTION\", default=False)\nenv_log_level = env.str(\"DJANGO_LOG_LEVEL\", \"DEBUG\")\nenv_base_url = env.str(\"DJANGO_BASE_URL\")\nenv_getgov_public_site_url = env.str(\"GETGOV_PUBLIC_SITE_URL\", \"\")\nenv_oidc_active_provider = env.str(\"OIDC_ACTIVE_PROVIDER\", \"identity sandbox\")\n\nsecret_login_key = b64decode(secret(\"DJANGO_SECRET_LOGIN_KEY\", \"\"))\nsecret_key = secret(\"DJANGO_SECRET_KEY\")\n\nsecret_aws_ses_key_id = secret(\"AWS_ACCESS_KEY_ID\", None)\nsecret_aws_ses_key = secret(\"AWS_SECRET_ACCESS_KEY\", None)\n\n# These keys are present in a getgov-s3 instance, or they can be defined locally\naws_s3_region_name = secret(\"region\", None) or secret(\"AWS_S3_REGION\", None)\nsecret_aws_s3_key_id = secret(\"access_key_id\", None) or secret(\"AWS_S3_ACCESS_KEY_ID\", None)\nsecret_aws_s3_key = secret(\"secret_access_key\", None) or secret(\"AWS_S3_SECRET_ACCESS_KEY\", None)\nsecret_aws_s3_bucket_name = secret(\"bucket\", None) or secret(\"AWS_S3_BUCKET_NAME\", None)\n\nsecret_registry_cl_id = secret(\"REGISTRY_CL_ID\")\nsecret_registry_password = secret(\"REGISTRY_PASSWORD\")\nsecret_registry_cert = b64decode(secret(\"REGISTRY_CERT\", \"\"))\nsecret_registry_key = b64decode(secret(\"REGISTRY_KEY\", \"\"))\nsecret_registry_key_passphrase = secret(\"REGISTRY_KEY_PASSPHRASE\", \"\")\nsecret_registry_hostname = secret(\"REGISTRY_HOSTNAME\")\n\n# region: Basic Django Config-----------------------------------------------###\n\n# Build paths inside the project like this: BASE_DIR / \"subdir\".\n# (settings.py is in `src/registrar/config/`: BASE_DIR is `src/`)\nBASE_DIR = path.resolve().parent.parent.parent\n\n# SECURITY WARNING: don't run with debug turned on in production!\n# TODO - Investigate the behaviour of this flag. 
Does not appear\n# to function for the IS_PRODUCTION flag.\nDEBUG = env_debug\n\n# Controls production specific feature toggles\nIS_PRODUCTION = env_is_production\n\n# Applications are modular pieces of code.\n# They are provided by Django, by third-parties, or by yourself.\n# Installing them here makes them available for execution.\n# Do not access INSTALLED_APPS directly. Use `django.apps.apps` instead.\nINSTALLED_APPS = [\n # let's be sure to install our own application!\n # it needs to be listed before django.contrib.admin\n # otherwise Django would find the default template\n # provided by django.contrib.admin first and use\n # that instead of our custom templates.\n \"registrar\",\n # Django automatic admin interface reads metadata\n # from database models to provide a quick, model-centric\n # interface where trusted users can manage content\n \"django.contrib.admin\",\n # vv Required by django.contrib.admin vv\n # the \"user\" model! *\\o/*\n \"django.contrib.auth\",\n # audit logging of changes to models\n # it needs to be listed before django.contrib.contenttypes\n # for a ContentType query in fixtures.py\n \"auditlog\",\n # generic interface for Django models\n \"django.contrib.contenttypes\",\n # required for CSRF protection and many other things\n \"django.contrib.sessions\",\n # framework for displaying messages to the user\n \"django.contrib.messages\",\n # ^^ Required by django.contrib.admin ^^\n # collects static files from each of your applications\n # (and any other places you specify) into a single location\n # that can easily be served in production\n \"django.contrib.staticfiles\",\n # application used for integrating with Login.gov\n \"djangooidc\",\n # library to simplify form templating\n \"widget_tweaks\",\n # library for Finite State Machine statuses\n \"django_fsm\",\n # library for phone numbers\n \"phonenumber_field\",\n # Our internal API application\n \"api\",\n # Only for generating documentation, uncomment to run manage.py generate_puml\n # \"puml_generator\",\n # supports necessary headers for Django cross origin\n \"corsheaders\",\n]\n\n# Middleware are routines for processing web requests.\n# Adding them here turns them \"on\"; Django will perform the\n# specified routines on each incoming request and outgoing response.\nMIDDLEWARE = [\n # django-allow-cidr: enable use of CIDR IP ranges in ALLOWED_HOSTS\n \"allow_cidr.middleware.AllowCIDRMiddleware\",\n # django-cors-headers: listen to cors responses\n \"corsheaders.middleware.CorsMiddleware\",\n # custom middleware to stop caching from CloudFront\n \"registrar.no_cache_middleware.NoCacheMiddleware\",\n # serve static assets in production\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n # provide security enhancements to the request/response cycle\n \"django.middleware.security.SecurityMiddleware\",\n # store and retrieve arbitrary data on a per-site-visitor basis\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n # add a few conveniences for perfectionists, see documentation\n \"django.middleware.common.CommonMiddleware\",\n # add protection against Cross Site Request Forgeries by adding\n # hidden form fields to POST forms and checking requests for the correct value\n \"django.middleware.csrf.CsrfViewMiddleware\",\n # add `user` (the currently-logged-in user) to incoming HttpRequest objects\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n # Require login for every single request by default\n \"login_required.middleware.LoginRequiredMiddleware\",\n # provide framework for 
displaying messages to the user, see documentation\n \"django.contrib.messages.middleware.MessageMiddleware\",\n # provide clickjacking protection via the X-Frame-Options header\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n # django-csp: enable use of Content-Security-Policy header\n \"csp.middleware.CSPMiddleware\",\n # django-auditlog: obtain the request User for use in logging\n \"auditlog.middleware.AuditlogMiddleware\",\n]\n\n# application object used by Django’s built-in servers (e.g. `runserver`)\nWSGI_APPLICATION = \"registrar.config.wsgi.application\"\n\n# endregion\n# region: Assets and HTML and Caching---------------------------------------###\n\n# https://docs.djangoproject.com/en/4.0/howto/static-files/\n\n\n# Caching is disabled by default.\n# For a low to medium traffic site, caching causes more\n# problems than it solves. Should caching be desired,\n# a reasonable start might be:\n# CACHES = {\n# \"default\": {\n# \"BACKEND\": \"django.core.cache.backends.db.DatabaseCache\",\n# }\n# }\n\n# Absolute path to the directory where `collectstatic`\n# will place static files for deployment.\n# Do not use this directory for permanent storage -\n# it is for Django!\nSTATIC_ROOT = BASE_DIR / \"registrar\" / \"public\"\n\nSTATICFILES_DIRS = [\n BASE_DIR / \"registrar\" / \"assets\",\n]\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n # look for templates inside installed apps\n # required by django-debug-toolbar\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n # IMPORTANT security setting: escapes HTMLEntities,\n # helping to prevent XSS attacks\n \"autoescape\": True,\n # context processors are callables which return\n # dicts - Django merges them into the context\n # dictionary used to render the templates\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"registrar.context_processors.language_code\",\n \"registrar.context_processors.canonical_path\",\n \"registrar.context_processors.is_demo_site\",\n \"registrar.context_processors.is_production\",\n ],\n },\n },\n]\n\n# Stop using table-based default form renderer which is deprecated\nFORM_RENDERER = \"django.forms.renderers.DjangoDivFormRenderer\"\n\nMESSAGE_STORAGE = \"django.contrib.messages.storage.session.SessionStorage\"\n\n# IS_DEMO_SITE controls whether or not we show our big red \"TEST SITE\" banner\n# underneath the \"this is a real government website\" banner.\nIS_DEMO_SITE = True\n\n# endregion\n# region: Database----------------------------------------------------------###\n\n# Wrap each view in a transaction on the database\n# A decorator can be used for views which have no database activity:\n# from django.db import transaction\n# @transaction.non_atomic_requests\nenv_db_url[\"ATOMIC_REQUESTS\"] = True\n\nDATABASES = {\n # dj-database-url package takes the supplied Postgres connection string\n # and converts it into a dictionary with the correct USER, HOST, etc\n \"default\": env_db_url,\n}\n\n# Specify default field type to use for primary keys\nDEFAULT_AUTO_FIELD = \"django.db.models.BigAutoField\"\n\n# Use our user model instead of the default\nAUTH_USER_MODEL = \"registrar.User\"\n\n# endregion\n# region: Email-------------------------------------------------------------###\n\n# Configuration for accessing AWS SES\nAWS_ACCESS_KEY_ID = 
secret_aws_ses_key_id\nAWS_SECRET_ACCESS_KEY = secret_aws_ses_key\nAWS_REGION = \"us-gov-west-1\"\n\n# Configuration for accessing AWS S3\nAWS_S3_ACCESS_KEY_ID = secret_aws_s3_key_id\nAWS_S3_SECRET_ACCESS_KEY = secret_aws_s3_key\nAWS_S3_REGION = aws_s3_region_name\nAWS_S3_BUCKET_NAME = secret_aws_s3_bucket_name\n\n# https://boto3.amazonaws.com/v1/documentation/latest/guide/retries.html#standard-retry-mode\nAWS_RETRY_MODE: Final = \"standard\"\n# base 2 exponential backoff with max of 20 seconds:\nAWS_MAX_ATTEMPTS = 3\nBOTO_CONFIG = Config(retries={\"mode\": AWS_RETRY_MODE, \"max_attempts\": AWS_MAX_ATTEMPTS})\n\n# email address to use for various automated correspondence\nDEFAULT_FROM_EMAIL = \"[email protected] <[email protected]>\"\n\n# connect to an (external) SMTP server for sending email\nEMAIL_BACKEND = \"django.core.mail.backends.smtp.EmailBackend\"\n\n# TODO: configure these when the values are known\n# EMAIL_HOST = \"\"\n# EMAIL_HOST_PASSWORD = \"\"\n# EMAIL_HOST_USER = \"\"\n# EMAIL_PORT = 587\n\n# for mail sent with mail_admins or mail_managers\nEMAIL_SUBJECT_PREFIX = \"[Attn: .gov admin] \"\n\n# use a TLS (secure) connection when talking to the SMTP server\n# TLS generally uses port 587\nEMAIL_USE_TLS = True\n\n# mutually exclusive with EMAIL_USE_TLS = True\n# SSL generally uses port 465\nEMAIL_USE_SSL = False\n\n# timeout in seconds for blocking operations, like the connection attempt\nEMAIL_TIMEOUT = 30\n\n# email address to use for sending error reports\nSERVER_EMAIL = \"[email protected]\"\n\n# endregion\n# region: Headers-----------------------------------------------------------###\n\n# Content-Security-Policy configuration\n# this can be restrictive because we have few external scripts\nallowed_sources = (\"'self'\",)\nCSP_DEFAULT_SRC = allowed_sources\n# Most things fall back to default-src, but the following do not and should be\n# explicitly set\nCSP_FRAME_ANCESTORS = allowed_sources\nCSP_FORM_ACTION = allowed_sources\n\n# Google analytics requires that we relax our otherwise\n# strict CSP by allowing scripts to run from their domain\n# and inline with a nonce, as well as allowing connections back to their domain\nCSP_SCRIPT_SRC_ELEM = [\"'self'\", \"https://www.googletagmanager.com/\"]\nCSP_CONNECT_SRC = [\"'self'\", \"https://www.google-analytics.com/\"]\nCSP_INCLUDE_NONCE_IN = [\"script-src-elem\"]\n\n# Cross-Origin Resource Sharing (CORS) configuration\n# Sets clients that allow access control to manage.get.gov\n# TODO: remove :8080 to see if we can have all localhost access\nCORS_ALLOWED_ORIGINS = [\"http://localhost:8080\", \"https://beta.get.gov\"]\nCORS_ALLOWED_ORIGIN_REGEXES = [r\"https://[\\w-]+\\.sites\\.pages\\.cloud\\.gov\"]\n\n# Content-Length header is set by django.middleware.common.CommonMiddleware\n\n# X-Frame-Options header is set by\n# django.middleware.clickjacking.XFrameOptionsMiddleware\n# and configured in the Security and Privacy section of this file.\n# Strict-Transport-Security is set by django.middleware.security.SecurityMiddleware\n# and configured in the Security and Privacy section of this file.\n\n# prefer contents of X-Forwarded-Host header to Host header\n# as Host header may contain a proxy rather than the actual client\nUSE_X_FORWARDED_HOST = True\n\n# endregion\n# region: Internationalisation----------------------------------------------###\n\n# https://docs.djangoproject.com/en/4.0/topics/i18n/\n\n# Charset to use for HttpResponse objects; used in Content-Type header\nDEFAULT_CHARSET = \"utf-8\"\n\n# provide fallback language if 
translation file is missing or\n# user's locale is not supported - requires USE_I18N = True\nLANGUAGE_CODE = \"en-us\"\n\n# allows language cookie to be sent if the user\n# is coming to our site from an external page.\nLANGUAGE_COOKIE_SAMESITE = None\n\n# only send via HTTPS connection\nLANGUAGE_COOKIE_SECURE = True\n\n# to display datetimes in templates\n# and to interpret datetimes entered in forms\nTIME_ZONE = \"UTC\"\n\n# enable Django’s translation system\nUSE_I18N = True\n\n# enable localized formatting of numbers and dates\nUSE_L10N = True\n\n# make datetimes timezone-aware by default\nUSE_TZ = True\n\n# setting for phonenumber library\nPHONENUMBER_DEFAULT_REGION = \"US\"\n\n# endregion\n# region: Logging-----------------------------------------------------------###\n\n# A Python logging configuration consists of four parts:\n# Loggers\n# Handlers\n# Filters\n# Formatters\n# https://docs.djangoproject.com/en/4.1/topics/logging/\n\n# Log a message by doing this:\n#\n# import logging\n# logger = logging.getLogger(__name__)\n#\n# Then:\n#\n# logger.debug(\"We're about to execute function xyz. Wish us luck!\")\n# logger.info(\"Oh! Here's something you might want to know.\")\n# logger.warning(\"Something kinda bad happened.\")\n# logger.error(\"Can't do this important task. Something is very wrong.\")\n# logger.critical(\"Going to crash now.\")\n\nLOGGING = {\n \"version\": 1,\n # Don't import Django's existing loggers\n \"disable_existing_loggers\": True,\n # define how to convert log messages into text;\n # each handler has its choice of format\n \"formatters\": {\n \"verbose\": {\n \"format\": \"[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s\",\n \"datefmt\": \"%d/%b/%Y %H:%M:%S\",\n },\n \"simple\": {\n \"format\": \"%(levelname)s %(message)s\",\n },\n \"django.server\": {\n \"()\": \"django.utils.log.ServerFormatter\",\n \"format\": \"[{server_time}] {message}\",\n \"style\": \"{\",\n },\n },\n # define where log messages will be sent;\n # each logger can have one or more handlers\n \"handlers\": {\n \"console\": {\n \"level\": env_log_level,\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"verbose\",\n },\n \"django.server\": {\n \"level\": \"INFO\",\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"django.server\",\n },\n # No file logger is configured,\n # because containerized apps\n # do not log to the file system.\n },\n # define loggers: these are \"sinks\" into which\n # messages are sent for processing\n \"loggers\": {\n # Django's generic logger\n \"django\": {\n \"handlers\": [\"console\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n # Django's template processor\n \"django.template\": {\n \"handlers\": [\"console\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n # Django's runserver\n \"django.server\": {\n \"handlers\": [\"django.server\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n # Django's runserver requests\n \"django.request\": {\n \"handlers\": [\"django.server\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n # OpenID Connect logger\n \"oic\": {\n \"handlers\": [\"console\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n # Django wrapper for OpenID Connect\n \"djangooidc\": {\n \"handlers\": [\"console\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n # Our app!\n \"registrar\": {\n \"handlers\": [\"console\"],\n \"level\": \"DEBUG\",\n \"propagate\": False,\n },\n },\n # root logger catches anything, unless\n # defined by a more specific logger\n \"root\": {\n \"handlers\": 
[\"console\"],\n \"level\": \"INFO\",\n },\n}\n\n# endregion\n# region: Login-------------------------------------------------------------###\n\n# list of Python classes used when trying to authenticate a user\nAUTHENTICATION_BACKENDS = [\n \"django.contrib.auth.backends.ModelBackend\",\n \"djangooidc.backends.OpenIdConnectBackend\",\n]\n\n# this is where unauthenticated requests are redirected when using\n# the login_required() decorator, LoginRequiredMixin, or AccessMixin\nLOGIN_URL = \"/openid/login\"\n\n# We don't want the OIDC app to be login-required because then it can't handle\n# the initial login requests without erroring.\nLOGIN_REQUIRED_IGNORE_PATHS = [\n r\"/openid/(.+)$\",\n]\n\n# where to go after logging out\nLOGOUT_REDIRECT_URL = \"home\"\n\n# disable dynamic client registration,\n# only the OP inside OIDC_PROVIDERS will be available\nOIDC_ALLOW_DYNAMIC_OP = False\n\n# which provider to use if multiple are available\n# (code does not currently support user selection)\n# See above for the default value if the env variable is missing\nOIDC_ACTIVE_PROVIDER = env_oidc_active_provider\n\n\nOIDC_PROVIDERS = {\n \"identity sandbox\": {\n \"srv_discovery_url\": \"https://idp.int.identitysandbox.gov\",\n \"behaviour\": {\n # the 'code' workflow requires direct connectivity from us to Login.gov\n \"response_type\": \"code\",\n \"scope\": [\"email\", \"profile:name\", \"phone\"],\n \"user_info_request\": [\"email\", \"first_name\", \"last_name\", \"phone\"],\n \"acr_value\": \"http://idmanagement.gov/ns/assurance/ial/1\",\n \"step_up_acr_value\": \"http://idmanagement.gov/ns/assurance/ial/2\",\n },\n \"client_registration\": {\n \"client_id\": \"cisa_dotgov_registrar\",\n \"redirect_uris\": [f\"{env_base_url}/openid/callback/login/\"],\n \"post_logout_redirect_uris\": [f\"{env_base_url}/openid/callback/logout/\"],\n \"token_endpoint_auth_method\": [\"private_key_jwt\"],\n \"sp_private_key\": secret_login_key,\n },\n },\n \"login.gov production\": {\n \"srv_discovery_url\": \"https://secure.login.gov\",\n \"behaviour\": {\n # the 'code' workflow requires direct connectivity from us to Login.gov\n \"response_type\": \"code\",\n \"scope\": [\"email\", \"profile:name\", \"phone\"],\n \"user_info_request\": [\"email\", \"first_name\", \"last_name\", \"phone\"],\n \"acr_value\": \"http://idmanagement.gov/ns/assurance/ial/1\",\n \"step_up_acr_value\": \"http://idmanagement.gov/ns/assurance/ial/2\",\n },\n \"client_registration\": {\n \"client_id\": (\"urn:gov:cisa:openidconnect.profiles:sp:sso:cisa:dotgov_registrar\"),\n \"redirect_uris\": [f\"{env_base_url}/openid/callback/login/\"],\n \"post_logout_redirect_uris\": [f\"{env_base_url}/openid/callback/logout/\"],\n \"token_endpoint_auth_method\": [\"private_key_jwt\"],\n \"sp_private_key\": secret_login_key,\n },\n },\n}\n\n# endregion\n# region: Routing-----------------------------------------------------------###\n\n# ~ Set by django.middleware.common.CommonMiddleware\n# APPEND_SLASH = True\n# PREPEND_WWW = False\n\n# full Python import path to the root URLconf\nROOT_URLCONF = \"registrar.config.urls\"\n\n# URL to use when referring to static files located in STATIC_ROOT\n# Must be relative and end with \"/\"\nSTATIC_URL = \"public/\"\n\n# Base URL of our separate static public website. 
Used by the\n# {% public_site_url subdir/path %} template tag\nGETGOV_PUBLIC_SITE_URL = env_getgov_public_site_url\n\n# endregion\n# region: Registry----------------------------------------------------------###\n\n# SECURITY WARNING: keep all registry variables in production secret!\nSECRET_REGISTRY_CL_ID = secret_registry_cl_id\nSECRET_REGISTRY_PASSWORD = secret_registry_password\nSECRET_REGISTRY_CERT = secret_registry_cert\nSECRET_REGISTRY_KEY = secret_registry_key\nSECRET_REGISTRY_KEY_PASSPHRASE = secret_registry_key_passphrase\nSECRET_REGISTRY_HOSTNAME = secret_registry_hostname\n\n# Use this variable to set the size of our connection pool in client.py\n# WARNING: Setting this value too high could cause frequent app crashes!\n# Having too many connections open could cause the sandbox to timeout,\n# as the spinup time could exceed the timeout time.\nEPP_CONNECTION_POOL_SIZE = 1\n\n# Determines the interval in which we ping open connections in seconds\n# Calculated as POOL_KEEP_ALIVE / EPP_CONNECTION_POOL_SIZE\nPOOL_KEEP_ALIVE = 60\n\n# Determines how long we try to keep a pool alive for,\n# before restarting it.\nPOOL_TIMEOUT = 60\n\n# endregion\n# region: Security and Privacy----------------------------------------------###\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = secret_key\n\n# Use this variable for doing SECRET_KEY rotation, see documentation\nSECRET_KEY_FALLBACKS: \"list[str]\" = []\n\n# ~ Set by django.middleware.security.SecurityMiddleware\n# SECURE_CONTENT_TYPE_NOSNIFF = True\n# SECURE_CROSS_ORIGIN_OPENER_POLICY = \"same-origin\"\n# SECURE_REDIRECT_EXEMPT = []\n# SECURE_REFERRER_POLICY = \"same-origin\"\n# SECURE_SSL_HOST = None\n\n# ~ Overridden from django.middleware.security.SecurityMiddleware\n# adds the includeSubDomains directive to the HTTP Strict Transport Security header\nSECURE_HSTS_INCLUDE_SUBDOMAINS = True\n# adds the preload directive to the HTTP Strict Transport Security header\nSECURE_HSTS_PRELOAD = True\n# TODO: set this value to 31536000 (1 year) for production\nSECURE_HSTS_SECONDS = 300\n# redirect all non-HTTPS requests to HTTPS\nSECURE_SSL_REDIRECT = True\n\n# ~ Set by django.middleware.common.CommonMiddleware\n# DISALLOWED_USER_AGENTS = []\n\n# The host/domain names that Django can serve.\n# This is a security measure to prevent HTTP Host header attacks,\n# which are possible even under many seemingly-safe\n# web server configurations.\nALLOWED_HOSTS = [\n \"getgov-stable.app.cloud.gov\",\n \"getgov-staging.app.cloud.gov\",\n \"getgov-development.app.cloud.gov\",\n \"getgov-backup.app.cloud.gov\",\n \"getgov-ky.app.cloud.gov\",\n \"getgov-es.app.cloud.gov\",\n \"getgov-nl.app.cloud.gov\",\n \"getgov-rh.app.cloud.gov\",\n \"getgov-za.app.cloud.gov\",\n \"getgov-gd.app.cloud.gov\",\n \"getgov-rb.app.cloud.gov\",\n \"getgov-ko.app.cloud.gov\",\n \"getgov-ab.app.cloud.gov\",\n \"getgov-bl.app.cloud.gov\",\n \"getgov-rjm.app.cloud.gov\",\n \"getgov-dk.app.cloud.gov\",\n \"manage.get.gov\",\n]\n\n# Extend ALLOWED_HOSTS.\n# IP addresses can also be hosts, which are used by internal\n# load balancers for health checks, etc.\nALLOWED_CIDR_NETS = [\"10.0.0.0/8\"]\n\n# ~ Below are some protections from cross-site request forgery.\n# This is canonically done by including a nonce value\n# in pages sent to the user, which the user is expected\n# to send back. The specifics of implementation are\n# intricate and varied.\n\n# Store the token server-side, do not send it\n# to the user via a cookie. 
This means each page\n# which requires protection must place the token\n# in the HTML explicitly, otherwise the user will\n# get a 403 error when they submit.\nCSRF_USE_SESSIONS = True\n\n# Expiry of CSRF cookie, in seconds.\n# None means \"use session-based CSRF cookies\".\nCSRF_COOKIE_AGE = None\n\n# Prevent JavaScript from reading the CSRF cookie.\n# Has no effect with CSRF_USE_SESSIONS = True.\nCSRF_COOKIE_HTTPONLY = True\n\n# Only send the cookie via HTTPS connections.\n# Has no effect with CSRF_USE_SESSIONS = True.\nCSRF_COOKIE_SECURE = True\n\n# Protect from non-targeted attacks by obscuring\n# the CSRF cookie name from the default.\n# Has no effect with CSRF_USE_SESSIONS = True.\nCSRF_COOKIE_NAME = \"CrSiReFo\"\n\n# Prevents CSRF cookie from being sent if the user\n# is coming to our site from an external page.\n# Has no effect with CSRF_USE_SESSIONS = True.\nCSRF_COOKIE_SAMESITE = \"Strict\"\n\n# Change header name to match cookie name.\n# Has no effect with CSRF_USE_SESSIONS = True.\nCSRF_HEADER_NAME = \"HTTP_X_CRSIREFO\"\n\n# Max parameters that may be received via GET or POST\n# TODO: 1000 is the default, may need to tune upward for\n# large DNS zone files, if records are represented by\n# individual form fields.\nDATA_UPLOAD_MAX_NUMBER_FIELDS = 1000\n\n# age of session cookies, in seconds (28800 = 8 hours)\nSESSION_COOKIE_AGE = 28800\n\n# instruct the browser to forbid client-side JavaScript\n# from accessing the cookie\nSESSION_COOKIE_HTTPONLY = True\n\n# are we a spring boot application? who knows!\nSESSION_COOKIE_NAME = \"JSESSIONID\"\n\n# Allows session cookie to be sent if the user\n# is coming to our site from an external page\n# unless it is via \"risky\" paths, i.e. POST requests\nSESSION_COOKIE_SAMESITE = \"Lax\"\n\n# instruct browser to only send cookie via HTTPS\nSESSION_COOKIE_SECURE = True\n\n# session engine to cache session information\nSESSION_ENGINE = \"django.contrib.sessions.backends.cache\"\n\n# ~ Set by django.middleware.clickjacking.XFrameOptionsMiddleware\n# prevent clickjacking by instructing the browser not to load\n# our site within an iframe\n# X_FRAME_OPTIONS = \"Deny\"\n\n# endregion\n# region: Testing-----------------------------------------------------------###\n\n# Additional directories searched for fixture files.\n# The fixtures directory of each application is searched by default.\n# Must use unix style \"/\" path separators.\nFIXTURE_DIRS: \"list[str]\" = []\n\n# endregion\n\n\n# # # ###\n# Development settings #\n# # # ###\n\nif DEBUG:\n # used by debug() context processor\n INTERNAL_IPS = [\n \"127.0.0.1\",\n \"::1\",\n ]\n\n # allow dev laptop and docker-compose network to connect\n ALLOWED_HOSTS += (\"localhost\", \"app\")\n SECURE_SSL_REDIRECT = False\n SECURE_HSTS_PRELOAD = False\n\n # discover potentially inefficient database queries\n # TODO: use settings overrides to ensure this always is True during tests\n INSTALLED_APPS += (\"nplusone.ext.django\",)\n MIDDLEWARE += (\"nplusone.ext.django.NPlusOneMiddleware\",)\n # turned off for now, because django-auditlog has some issues\n NPLUSONE_RAISE = False\n NPLUSONE_WHITELIST = [\n {\"model\": \"admin.LogEntry\", \"field\": \"user\"},\n ]\n\n # insert the amazing django-debug-toolbar\n INSTALLED_APPS += (\"debug_toolbar\",)\n MIDDLEWARE.insert(0, \"debug_toolbar.middleware.DebugToolbarMiddleware\")\n\n DEBUG_TOOLBAR_CONFIG = {\n # due to Docker, bypass Debug Toolbar's check on INTERNAL_IPS\n \"SHOW_TOOLBAR_CALLBACK\": lambda _: True,\n }\n",
"path": "src/registrar/config/settings.py"
}
] | [
{
"content": "\"\"\"\nDjango settings for .gov registrar project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/4.0/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/4.0/ref/settings/\n\nIF you'd like to see all of these settings in the running app:\n\n```shell\n$ docker-compose exec app python manage.py shell\n>>> from django.conf import settings\n>>> dir(settings)\n```\n\n\"\"\"\nimport environs\nfrom base64 import b64decode\nfrom cfenv import AppEnv # type: ignore\nfrom pathlib import Path\nfrom typing import Final\n\nfrom botocore.config import Config\n\n# # # ###\n# Setup code goes here #\n# # # ###\n\nenv = environs.Env()\n\n# Get secrets from Cloud.gov user provided service, if exists\n# If not, get secrets from environment variables\nkey_service = AppEnv().get_service(name=\"getgov-credentials\")\n\n\n# Get secrets from Cloud.gov user provided s3 service, if it exists\ns3_key_service = AppEnv().get_service(name=\"getgov-s3\")\n\nif key_service and key_service.credentials:\n if s3_key_service and s3_key_service.credentials:\n # Concatenate the credentials from our S3 service into our secret service\n key_service.credentials.update(s3_key_service.credentials)\n secret = key_service.credentials.get\nelse:\n secret = env\n\n\n# # # ###\n# Values obtained externally #\n# # # ###\n\npath = Path(__file__)\n\nenv_db_url = env.dj_db_url(\"DATABASE_URL\")\nenv_debug = env.bool(\"DJANGO_DEBUG\", default=False)\nenv_is_production = env.bool(\"IS_PRODUCTION\", default=False)\nenv_log_level = env.str(\"DJANGO_LOG_LEVEL\", \"DEBUG\")\nenv_base_url = env.str(\"DJANGO_BASE_URL\")\nenv_getgov_public_site_url = env.str(\"GETGOV_PUBLIC_SITE_URL\", \"\")\nenv_oidc_active_provider = env.str(\"OIDC_ACTIVE_PROVIDER\", \"identity sandbox\")\n\nsecret_login_key = b64decode(secret(\"DJANGO_SECRET_LOGIN_KEY\", \"\"))\nsecret_key = secret(\"DJANGO_SECRET_KEY\")\n\nsecret_aws_ses_key_id = secret(\"AWS_ACCESS_KEY_ID\", None)\nsecret_aws_ses_key = secret(\"AWS_SECRET_ACCESS_KEY\", None)\n\n# These keys are present in a getgov-s3 instance, or they can be defined locally\naws_s3_region_name = secret(\"region\", None) or secret(\"AWS_S3_REGION\", None)\nsecret_aws_s3_key_id = secret(\"access_key_id\", None) or secret(\"AWS_S3_ACCESS_KEY_ID\", None)\nsecret_aws_s3_key = secret(\"secret_access_key\", None) or secret(\"AWS_S3_SECRET_ACCESS_KEY\", None)\nsecret_aws_s3_bucket_name = secret(\"bucket\", None) or secret(\"AWS_S3_BUCKET_NAME\", None)\n\nsecret_registry_cl_id = secret(\"REGISTRY_CL_ID\")\nsecret_registry_password = secret(\"REGISTRY_PASSWORD\")\nsecret_registry_cert = b64decode(secret(\"REGISTRY_CERT\", \"\"))\nsecret_registry_key = b64decode(secret(\"REGISTRY_KEY\", \"\"))\nsecret_registry_key_passphrase = secret(\"REGISTRY_KEY_PASSPHRASE\", \"\")\nsecret_registry_hostname = secret(\"REGISTRY_HOSTNAME\")\n\n# region: Basic Django Config-----------------------------------------------###\n\n# Build paths inside the project like this: BASE_DIR / \"subdir\".\n# (settings.py is in `src/registrar/config/`: BASE_DIR is `src/`)\nBASE_DIR = path.resolve().parent.parent.parent\n\n# SECURITY WARNING: don't run with debug turned on in production!\n# TODO - Investigate the behaviour of this flag. 
Does not appear\n# to function for the IS_PRODUCTION flag.\nDEBUG = env_debug\n\n# Controls production specific feature toggles\nIS_PRODUCTION = env_is_production\n\n# Applications are modular pieces of code.\n# They are provided by Django, by third-parties, or by yourself.\n# Installing them here makes them available for execution.\n# Do not access INSTALLED_APPS directly. Use `django.apps.apps` instead.\nINSTALLED_APPS = [\n # let's be sure to install our own application!\n # it needs to be listed before django.contrib.admin\n # otherwise Django would find the default template\n # provided by django.contrib.admin first and use\n # that instead of our custom templates.\n \"registrar\",\n # Django automatic admin interface reads metadata\n # from database models to provide a quick, model-centric\n # interface where trusted users can manage content\n \"django.contrib.admin\",\n # vv Required by django.contrib.admin vv\n # the \"user\" model! *\\o/*\n \"django.contrib.auth\",\n # audit logging of changes to models\n # it needs to be listed before django.contrib.contenttypes\n # for a ContentType query in fixtures.py\n \"auditlog\",\n # generic interface for Django models\n \"django.contrib.contenttypes\",\n # required for CSRF protection and many other things\n \"django.contrib.sessions\",\n # framework for displaying messages to the user\n \"django.contrib.messages\",\n # ^^ Required by django.contrib.admin ^^\n # collects static files from each of your applications\n # (and any other places you specify) into a single location\n # that can easily be served in production\n \"django.contrib.staticfiles\",\n # application used for integrating with Login.gov\n \"djangooidc\",\n # library to simplify form templating\n \"widget_tweaks\",\n # library for Finite State Machine statuses\n \"django_fsm\",\n # library for phone numbers\n \"phonenumber_field\",\n # Our internal API application\n \"api\",\n # Only for generating documentation, uncomment to run manage.py generate_puml\n # \"puml_generator\",\n # supports necessary headers for Django cross origin\n \"corsheaders\",\n]\n\n# Middleware are routines for processing web requests.\n# Adding them here turns them \"on\"; Django will perform the\n# specified routines on each incoming request and outgoing response.\nMIDDLEWARE = [\n # django-allow-cidr: enable use of CIDR IP ranges in ALLOWED_HOSTS\n \"allow_cidr.middleware.AllowCIDRMiddleware\",\n # django-cors-headers: listen to cors responses\n \"corsheaders.middleware.CorsMiddleware\",\n # custom middleware to stop caching from CloudFront\n \"registrar.no_cache_middleware.NoCacheMiddleware\",\n # serve static assets in production\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n # provide security enhancements to the request/response cycle\n \"django.middleware.security.SecurityMiddleware\",\n # store and retrieve arbitrary data on a per-site-visitor basis\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n # add a few conveniences for perfectionists, see documentation\n \"django.middleware.common.CommonMiddleware\",\n # add protection against Cross Site Request Forgeries by adding\n # hidden form fields to POST forms and checking requests for the correct value\n \"django.middleware.csrf.CsrfViewMiddleware\",\n # add `user` (the currently-logged-in user) to incoming HttpRequest objects\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n # Require login for every single request by default\n \"login_required.middleware.LoginRequiredMiddleware\",\n # provide framework for 
displaying messages to the user, see documentation\n \"django.contrib.messages.middleware.MessageMiddleware\",\n # provide clickjacking protection via the X-Frame-Options header\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n # django-csp: enable use of Content-Security-Policy header\n \"csp.middleware.CSPMiddleware\",\n # django-auditlog: obtain the request User for use in logging\n \"auditlog.middleware.AuditlogMiddleware\",\n]\n\n# application object used by Django’s built-in servers (e.g. `runserver`)\nWSGI_APPLICATION = \"registrar.config.wsgi.application\"\n\n# endregion\n# region: Assets and HTML and Caching---------------------------------------###\n\n# https://docs.djangoproject.com/en/4.0/howto/static-files/\n\n\n# Caching is disabled by default.\n# For a low to medium traffic site, caching causes more\n# problems than it solves. Should caching be desired,\n# a reasonable start might be:\n# CACHES = {\n# \"default\": {\n# \"BACKEND\": \"django.core.cache.backends.db.DatabaseCache\",\n# }\n# }\n\n# Absolute path to the directory where `collectstatic`\n# will place static files for deployment.\n# Do not use this directory for permanent storage -\n# it is for Django!\nSTATIC_ROOT = BASE_DIR / \"registrar\" / \"public\"\n\nSTATICFILES_DIRS = [\n BASE_DIR / \"registrar\" / \"assets\",\n]\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n # look for templates inside installed apps\n # required by django-debug-toolbar\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n # IMPORTANT security setting: escapes HTMLEntities,\n # helping to prevent XSS attacks\n \"autoescape\": True,\n # context processors are callables which return\n # dicts - Django merges them into the context\n # dictionary used to render the templates\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"registrar.context_processors.language_code\",\n \"registrar.context_processors.canonical_path\",\n \"registrar.context_processors.is_demo_site\",\n \"registrar.context_processors.is_production\",\n ],\n },\n },\n]\n\n# Stop using table-based default form renderer which is deprecated\nFORM_RENDERER = \"django.forms.renderers.DjangoDivFormRenderer\"\n\nMESSAGE_STORAGE = \"django.contrib.messages.storage.session.SessionStorage\"\n\n# IS_DEMO_SITE controls whether or not we show our big red \"TEST SITE\" banner\n# underneath the \"this is a real government website\" banner.\nIS_DEMO_SITE = True\n\n# endregion\n# region: Database----------------------------------------------------------###\n\n# Wrap each view in a transaction on the database\n# A decorator can be used for views which have no database activity:\n# from django.db import transaction\n# @transaction.non_atomic_requests\nenv_db_url[\"ATOMIC_REQUESTS\"] = True\n\nDATABASES = {\n # dj-database-url package takes the supplied Postgres connection string\n # and converts it into a dictionary with the correct USER, HOST, etc\n \"default\": env_db_url,\n}\n\n# Specify default field type to use for primary keys\nDEFAULT_AUTO_FIELD = \"django.db.models.BigAutoField\"\n\n# Use our user model instead of the default\nAUTH_USER_MODEL = \"registrar.User\"\n\n# endregion\n# region: Email-------------------------------------------------------------###\n\n# Configuration for accessing AWS SES\nAWS_ACCESS_KEY_ID = 
secret_aws_ses_key_id\nAWS_SECRET_ACCESS_KEY = secret_aws_ses_key\nAWS_REGION = \"us-gov-west-1\"\n\n# Configuration for accessing AWS S3\nAWS_S3_ACCESS_KEY_ID = secret_aws_s3_key_id\nAWS_S3_SECRET_ACCESS_KEY = secret_aws_s3_key\nAWS_S3_REGION = aws_s3_region_name\nAWS_S3_BUCKET_NAME = secret_aws_s3_bucket_name\n\n# https://boto3.amazonaws.com/v1/documentation/latest/guide/retries.html#standard-retry-mode\nAWS_RETRY_MODE: Final = \"standard\"\n# base 2 exponential backoff with max of 20 seconds:\nAWS_MAX_ATTEMPTS = 3\nBOTO_CONFIG = Config(retries={\"mode\": AWS_RETRY_MODE, \"max_attempts\": AWS_MAX_ATTEMPTS})\n\n# email address to use for various automated correspondence\nDEFAULT_FROM_EMAIL = \"[email protected] <[email protected]>\"\n\n# connect to an (external) SMTP server for sending email\nEMAIL_BACKEND = \"django.core.mail.backends.smtp.EmailBackend\"\n\n# TODO: configure these when the values are known\n# EMAIL_HOST = \"\"\n# EMAIL_HOST_PASSWORD = \"\"\n# EMAIL_HOST_USER = \"\"\n# EMAIL_PORT = 587\n\n# for mail sent with mail_admins or mail_managers\nEMAIL_SUBJECT_PREFIX = \"[Attn: .gov admin] \"\n\n# use a TLS (secure) connection when talking to the SMTP server\n# TLS generally uses port 587\nEMAIL_USE_TLS = True\n\n# mutually exclusive with EMAIL_USE_TLS = True\n# SSL generally uses port 465\nEMAIL_USE_SSL = False\n\n# timeout in seconds for blocking operations, like the connection attempt\nEMAIL_TIMEOUT = 30\n\n# email address to use for sending error reports\nSERVER_EMAIL = \"[email protected]\"\n\n# endregion\n# region: Headers-----------------------------------------------------------###\n\n# Content-Security-Policy configuration\n# this can be restrictive because we have few external scripts\nallowed_sources = (\"'self'\",)\nCSP_DEFAULT_SRC = allowed_sources\n# Most things fall back to default-src, but the following do not and should be\n# explicitly set\nCSP_FRAME_ANCESTORS = allowed_sources\nCSP_FORM_ACTION = allowed_sources\n\n# Google analytics requires that we relax our otherwise\n# strict CSP by allowing scripts to run from their domain\n# and inline with a nonce, as well as allowing connections back to their domain\nCSP_SCRIPT_SRC_ELEM = [\"'self'\", \"https://www.googletagmanager.com/\"]\nCSP_CONNECT_SRC = [\"'self'\", \"https://www.google-analytics.com/\"]\nCSP_INCLUDE_NONCE_IN = [\"script-src-elem\"]\n\n# Cross-Origin Resource Sharing (CORS) configuration\n# Sets clients that allow access control to manage.get.gov\n# TODO: remove :8080 to see if we can have all localhost access\nCORS_ALLOWED_ORIGINS = [\"http://localhost:8080\", \"https://beta.get.gov\"]\nCORS_ALLOWED_ORIGIN_REGEXES = [r\"https://[\\w-]+\\.sites\\.pages\\.cloud\\.gov\"]\n\n# Content-Length header is set by django.middleware.common.CommonMiddleware\n\n# X-Frame-Options header is set by\n# django.middleware.clickjacking.XFrameOptionsMiddleware\n# and configured in the Security and Privacy section of this file.\n# Strict-Transport-Security is set by django.middleware.security.SecurityMiddleware\n# and configured in the Security and Privacy section of this file.\n\n# prefer contents of X-Forwarded-Host header to Host header\n# as Host header may contain a proxy rather than the actual client\nUSE_X_FORWARDED_HOST = True\n\n# endregion\n# region: Internationalisation----------------------------------------------###\n\n# https://docs.djangoproject.com/en/4.0/topics/i18n/\n\n# Charset to use for HttpResponse objects; used in Content-Type header\nDEFAULT_CHARSET = \"utf-8\"\n\n# provide fallback language if 
translation file is missing or\n# user's locale is not supported - requires USE_I18N = True\nLANGUAGE_CODE = \"en-us\"\n\n# allows language cookie to be sent if the user\n# is coming to our site from an external page.\nLANGUAGE_COOKIE_SAMESITE = None\n\n# only send via HTTPS connection\nLANGUAGE_COOKIE_SECURE = True\n\n# to display datetimes in templates\n# and to interpret datetimes entered in forms\nTIME_ZONE = \"UTC\"\n\n# enable Django’s translation system\nUSE_I18N = True\n\n# enable localized formatting of numbers and dates\nUSE_L10N = True\n\n# make datetimes timezone-aware by default\nUSE_TZ = True\n\n# setting for phonenumber library\nPHONENUMBER_DEFAULT_REGION = \"US\"\n\n# endregion\n# region: Logging-----------------------------------------------------------###\n\n# A Python logging configuration consists of four parts:\n# Loggers\n# Handlers\n# Filters\n# Formatters\n# https://docs.djangoproject.com/en/4.1/topics/logging/\n\n# Log a message by doing this:\n#\n# import logging\n# logger = logging.getLogger(__name__)\n#\n# Then:\n#\n# logger.debug(\"We're about to execute function xyz. Wish us luck!\")\n# logger.info(\"Oh! Here's something you might want to know.\")\n# logger.warning(\"Something kinda bad happened.\")\n# logger.error(\"Can't do this important task. Something is very wrong.\")\n# logger.critical(\"Going to crash now.\")\n\nLOGGING = {\n \"version\": 1,\n # Don't import Django's existing loggers\n \"disable_existing_loggers\": True,\n # define how to convert log messages into text;\n # each handler has its choice of format\n \"formatters\": {\n \"verbose\": {\n \"format\": \"[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s\",\n \"datefmt\": \"%d/%b/%Y %H:%M:%S\",\n },\n \"simple\": {\n \"format\": \"%(levelname)s %(message)s\",\n },\n \"django.server\": {\n \"()\": \"django.utils.log.ServerFormatter\",\n \"format\": \"[{server_time}] {message}\",\n \"style\": \"{\",\n },\n },\n # define where log messages will be sent;\n # each logger can have one or more handlers\n \"handlers\": {\n \"console\": {\n \"level\": env_log_level,\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"verbose\",\n },\n \"django.server\": {\n \"level\": \"INFO\",\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"django.server\",\n },\n # No file logger is configured,\n # because containerized apps\n # do not log to the file system.\n },\n # define loggers: these are \"sinks\" into which\n # messages are sent for processing\n \"loggers\": {\n # Django's generic logger\n \"django\": {\n \"handlers\": [\"console\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n # Django's template processor\n \"django.template\": {\n \"handlers\": [\"console\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n # Django's runserver\n \"django.server\": {\n \"handlers\": [\"django.server\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n # Django's runserver requests\n \"django.request\": {\n \"handlers\": [\"django.server\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n # OpenID Connect logger\n \"oic\": {\n \"handlers\": [\"console\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n # Django wrapper for OpenID Connect\n \"djangooidc\": {\n \"handlers\": [\"console\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n # Our app!\n \"registrar\": {\n \"handlers\": [\"console\"],\n \"level\": \"DEBUG\",\n \"propagate\": False,\n },\n },\n # root logger catches anything, unless\n # defined by a more specific logger\n \"root\": {\n \"handlers\": 
[\"console\"],\n \"level\": \"INFO\",\n },\n}\n\n# endregion\n# region: Login-------------------------------------------------------------###\n\n# list of Python classes used when trying to authenticate a user\nAUTHENTICATION_BACKENDS = [\n \"django.contrib.auth.backends.ModelBackend\",\n \"djangooidc.backends.OpenIdConnectBackend\",\n]\n\n# this is where unauthenticated requests are redirected when using\n# the login_required() decorator, LoginRequiredMixin, or AccessMixin\nLOGIN_URL = \"/openid/login\"\n\n# We don't want the OIDC app to be login-required because then it can't handle\n# the initial login requests without erroring.\nLOGIN_REQUIRED_IGNORE_PATHS = [\n r\"/openid/(.+)$\",\n]\n\n# where to go after logging out\nLOGOUT_REDIRECT_URL = \"https://get.gov/\"\n\n# disable dynamic client registration,\n# only the OP inside OIDC_PROVIDERS will be available\nOIDC_ALLOW_DYNAMIC_OP = False\n\n# which provider to use if multiple are available\n# (code does not currently support user selection)\n# See above for the default value if the env variable is missing\nOIDC_ACTIVE_PROVIDER = env_oidc_active_provider\n\n\nOIDC_PROVIDERS = {\n \"identity sandbox\": {\n \"srv_discovery_url\": \"https://idp.int.identitysandbox.gov\",\n \"behaviour\": {\n # the 'code' workflow requires direct connectivity from us to Login.gov\n \"response_type\": \"code\",\n \"scope\": [\"email\", \"profile:name\", \"phone\"],\n \"user_info_request\": [\"email\", \"first_name\", \"last_name\", \"phone\"],\n \"acr_value\": \"http://idmanagement.gov/ns/assurance/ial/1\",\n \"step_up_acr_value\": \"http://idmanagement.gov/ns/assurance/ial/2\",\n },\n \"client_registration\": {\n \"client_id\": \"cisa_dotgov_registrar\",\n \"redirect_uris\": [f\"{env_base_url}/openid/callback/login/\"],\n \"post_logout_redirect_uris\": [f\"{env_base_url}/openid/callback/logout/\"],\n \"token_endpoint_auth_method\": [\"private_key_jwt\"],\n \"sp_private_key\": secret_login_key,\n },\n },\n \"login.gov production\": {\n \"srv_discovery_url\": \"https://secure.login.gov\",\n \"behaviour\": {\n # the 'code' workflow requires direct connectivity from us to Login.gov\n \"response_type\": \"code\",\n \"scope\": [\"email\", \"profile:name\", \"phone\"],\n \"user_info_request\": [\"email\", \"first_name\", \"last_name\", \"phone\"],\n \"acr_value\": \"http://idmanagement.gov/ns/assurance/ial/1\",\n \"step_up_acr_value\": \"http://idmanagement.gov/ns/assurance/ial/2\",\n },\n \"client_registration\": {\n \"client_id\": (\"urn:gov:cisa:openidconnect.profiles:sp:sso:cisa:dotgov_registrar\"),\n \"redirect_uris\": [f\"{env_base_url}/openid/callback/login/\"],\n \"post_logout_redirect_uris\": [f\"{env_base_url}/openid/callback/logout/\"],\n \"token_endpoint_auth_method\": [\"private_key_jwt\"],\n \"sp_private_key\": secret_login_key,\n },\n },\n}\n\n# endregion\n# region: Routing-----------------------------------------------------------###\n\n# ~ Set by django.middleware.common.CommonMiddleware\n# APPEND_SLASH = True\n# PREPEND_WWW = False\n\n# full Python import path to the root URLconf\nROOT_URLCONF = \"registrar.config.urls\"\n\n# URL to use when referring to static files located in STATIC_ROOT\n# Must be relative and end with \"/\"\nSTATIC_URL = \"public/\"\n\n# Base URL of our separate static public website. 
Used by the\n# {% public_site_url subdir/path %} template tag\nGETGOV_PUBLIC_SITE_URL = env_getgov_public_site_url\n\n# endregion\n# region: Registry----------------------------------------------------------###\n\n# SECURITY WARNING: keep all registry variables in production secret!\nSECRET_REGISTRY_CL_ID = secret_registry_cl_id\nSECRET_REGISTRY_PASSWORD = secret_registry_password\nSECRET_REGISTRY_CERT = secret_registry_cert\nSECRET_REGISTRY_KEY = secret_registry_key\nSECRET_REGISTRY_KEY_PASSPHRASE = secret_registry_key_passphrase\nSECRET_REGISTRY_HOSTNAME = secret_registry_hostname\n\n# Use this variable to set the size of our connection pool in client.py\n# WARNING: Setting this value too high could cause frequent app crashes!\n# Having too many connections open could cause the sandbox to timeout,\n# as the spinup time could exceed the timeout time.\nEPP_CONNECTION_POOL_SIZE = 1\n\n# Determines the interval in which we ping open connections in seconds\n# Calculated as POOL_KEEP_ALIVE / EPP_CONNECTION_POOL_SIZE\nPOOL_KEEP_ALIVE = 60\n\n# Determines how long we try to keep a pool alive for,\n# before restarting it.\nPOOL_TIMEOUT = 60\n\n# endregion\n# region: Security and Privacy----------------------------------------------###\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = secret_key\n\n# Use this variable for doing SECRET_KEY rotation, see documentation\nSECRET_KEY_FALLBACKS: \"list[str]\" = []\n\n# ~ Set by django.middleware.security.SecurityMiddleware\n# SECURE_CONTENT_TYPE_NOSNIFF = True\n# SECURE_CROSS_ORIGIN_OPENER_POLICY = \"same-origin\"\n# SECURE_REDIRECT_EXEMPT = []\n# SECURE_REFERRER_POLICY = \"same-origin\"\n# SECURE_SSL_HOST = None\n\n# ~ Overridden from django.middleware.security.SecurityMiddleware\n# adds the includeSubDomains directive to the HTTP Strict Transport Security header\nSECURE_HSTS_INCLUDE_SUBDOMAINS = True\n# adds the preload directive to the HTTP Strict Transport Security header\nSECURE_HSTS_PRELOAD = True\n# TODO: set this value to 31536000 (1 year) for production\nSECURE_HSTS_SECONDS = 300\n# redirect all non-HTTPS requests to HTTPS\nSECURE_SSL_REDIRECT = True\n\n# ~ Set by django.middleware.common.CommonMiddleware\n# DISALLOWED_USER_AGENTS = []\n\n# The host/domain names that Django can serve.\n# This is a security measure to prevent HTTP Host header attacks,\n# which are possible even under many seemingly-safe\n# web server configurations.\nALLOWED_HOSTS = [\n \"getgov-stable.app.cloud.gov\",\n \"getgov-staging.app.cloud.gov\",\n \"getgov-development.app.cloud.gov\",\n \"getgov-backup.app.cloud.gov\",\n \"getgov-ky.app.cloud.gov\",\n \"getgov-es.app.cloud.gov\",\n \"getgov-nl.app.cloud.gov\",\n \"getgov-rh.app.cloud.gov\",\n \"getgov-za.app.cloud.gov\",\n \"getgov-gd.app.cloud.gov\",\n \"getgov-rb.app.cloud.gov\",\n \"getgov-ko.app.cloud.gov\",\n \"getgov-ab.app.cloud.gov\",\n \"getgov-bl.app.cloud.gov\",\n \"getgov-rjm.app.cloud.gov\",\n \"getgov-dk.app.cloud.gov\",\n \"manage.get.gov\",\n]\n\n# Extend ALLOWED_HOSTS.\n# IP addresses can also be hosts, which are used by internal\n# load balancers for health checks, etc.\nALLOWED_CIDR_NETS = [\"10.0.0.0/8\"]\n\n# ~ Below are some protections from cross-site request forgery.\n# This is canonically done by including a nonce value\n# in pages sent to the user, which the user is expected\n# to send back. The specifics of implementation are\n# intricate and varied.\n\n# Store the token server-side, do not send it\n# to the user via a cookie. 
This means each page\n# which requires protection must place the token\n# in the HTML explicitly, otherwise the user will\n# get a 403 error when they submit.\nCSRF_USE_SESSIONS = True\n\n# Expiry of CSRF cookie, in seconds.\n# None means \"use session-based CSRF cookies\".\nCSRF_COOKIE_AGE = None\n\n# Prevent JavaScript from reading the CSRF cookie.\n# Has no effect with CSRF_USE_SESSIONS = True.\nCSRF_COOKIE_HTTPONLY = True\n\n# Only send the cookie via HTTPS connections.\n# Has no effect with CSRF_USE_SESSIONS = True.\nCSRF_COOKIE_SECURE = True\n\n# Protect from non-targeted attacks by obscuring\n# the CSRF cookie name from the default.\n# Has no effect with CSRF_USE_SESSIONS = True.\nCSRF_COOKIE_NAME = \"CrSiReFo\"\n\n# Prevents CSRF cookie from being sent if the user\n# is coming to our site from an external page.\n# Has no effect with CSRF_USE_SESSIONS = True.\nCSRF_COOKIE_SAMESITE = \"Strict\"\n\n# Change header name to match cookie name.\n# Has no effect with CSRF_USE_SESSIONS = True.\nCSRF_HEADER_NAME = \"HTTP_X_CRSIREFO\"\n\n# Max parameters that may be received via GET or POST\n# TODO: 1000 is the default, may need to tune upward for\n# large DNS zone files, if records are represented by\n# individual form fields.\nDATA_UPLOAD_MAX_NUMBER_FIELDS = 1000\n\n# age of session cookies, in seconds (28800 = 8 hours)\nSESSION_COOKIE_AGE = 28800\n\n# instruct the browser to forbid client-side JavaScript\n# from accessing the cookie\nSESSION_COOKIE_HTTPONLY = True\n\n# are we a spring boot application? who knows!\nSESSION_COOKIE_NAME = \"JSESSIONID\"\n\n# Allows session cookie to be sent if the user\n# is coming to our site from an external page\n# unless it is via \"risky\" paths, i.e. POST requests\nSESSION_COOKIE_SAMESITE = \"Lax\"\n\n# instruct browser to only send cookie via HTTPS\nSESSION_COOKIE_SECURE = True\n\n# session engine to cache session information\nSESSION_ENGINE = \"django.contrib.sessions.backends.cache\"\n\n# ~ Set by django.middleware.clickjacking.XFrameOptionsMiddleware\n# prevent clickjacking by instructing the browser not to load\n# our site within an iframe\n# X_FRAME_OPTIONS = \"Deny\"\n\n# endregion\n# region: Testing-----------------------------------------------------------###\n\n# Additional directories searched for fixture files.\n# The fixtures directory of each application is searched by default.\n# Must use unix style \"/\" path separators.\nFIXTURE_DIRS: \"list[str]\" = []\n\n# endregion\n\n\n# # # ###\n# Development settings #\n# # # ###\n\nif DEBUG:\n # used by debug() context processor\n INTERNAL_IPS = [\n \"127.0.0.1\",\n \"::1\",\n ]\n\n # allow dev laptop and docker-compose network to connect\n ALLOWED_HOSTS += (\"localhost\", \"app\")\n SECURE_SSL_REDIRECT = False\n SECURE_HSTS_PRELOAD = False\n\n # discover potentially inefficient database queries\n # TODO: use settings overrides to ensure this always is True during tests\n INSTALLED_APPS += (\"nplusone.ext.django\",)\n MIDDLEWARE += (\"nplusone.ext.django.NPlusOneMiddleware\",)\n # turned off for now, because django-auditlog has some issues\n NPLUSONE_RAISE = False\n NPLUSONE_WHITELIST = [\n {\"model\": \"admin.LogEntry\", \"field\": \"user\"},\n ]\n\n # insert the amazing django-debug-toolbar\n INSTALLED_APPS += (\"debug_toolbar\",)\n MIDDLEWARE.insert(0, \"debug_toolbar.middleware.DebugToolbarMiddleware\")\n\n DEBUG_TOOLBAR_CONFIG = {\n # due to Docker, bypass Debug Toolbar's check on INTERNAL_IPS\n \"SHOW_TOOLBAR_CALLBACK\": lambda _: True,\n }\n",
"path": "src/registrar/config/settings.py"
}
] | diff --git a/src/registrar/config/settings.py b/src/registrar/config/settings.py
index bc46c60ba..2de7e6eb2 100644
--- a/src/registrar/config/settings.py
+++ b/src/registrar/config/settings.py
@@ -519,7 +519,7 @@
]
# where to go after logging out
-LOGOUT_REDIRECT_URL = "home"
+LOGOUT_REDIRECT_URL = "https://get.gov/"
# disable dynamic client registration,
# only the OP inside OIDC_PROVIDERS will be available
diff --git a/src/zap.conf b/src/zap.conf
index e7dc980b0..7a1e5c96d 100644
--- a/src/zap.conf
+++ b/src/zap.conf
@@ -67,6 +67,7 @@
10038 OUTOFSCOPE http://app:8080/dns/nameservers
10038 OUTOFSCOPE http://app:8080/dns/dnssec
10038 OUTOFSCOPE http://app:8080/dns/dnssec/dsdata
+10038 OUTOFSCOPE http://app:8080/org-name-address
# This URL always returns 404, so include it as well.
10038 OUTOFSCOPE http://app:8080/todo
# OIDC isn't configured in the test environment and DEBUG=True so this gives a 500 without CSP headers
|
microsoft__botbuilder-python-1637 | botbuilder-testing is missing install requirements
## Version
botbuilder-testing 4.12.0
## Describe the bug
While installing botbuilder-testing for CI I got errors about missing dependencies.
## To Reproduce
1. `python3 -m venv .venv`
2. `. .venv/bin/activate`
3. `pip install -U pip wheel`
4. `pip install botbuilder-testing`
5. `python -c "from botbuilder.testing import DialogTestClient"`
First error is missing `pytest`:
```python
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/calum/sureswift/jell/jell-bot-teams-v2/.venv-test/lib/python3.8/site-packages/botbuilder/testing/__init__.py", line 6, in <module>
from .storage_base_tests import StorageBaseTests
File "/home/calum/sureswift/jell/jell-bot-teams-v2/.venv-test/lib/python3.8/site-packages/botbuilder/testing/storage_base_tests.py", line 26, in <module>
import pytest
ModuleNotFoundError: No module named 'pytest'
```
6. `pip install pytest`
7. `python -c 'from botbuilder.testing import DialogTestClient'`
Next error is missing `botbuilder-azure`:
```python
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/home/calum/sureswift/jell/jell-bot-teams-v2/.venv-test/lib/python3.8/site-packages/botbuilder/testing/__init__.py", line 6, in <module>
from .storage_base_tests import StorageBaseTests
File "/home/calum/sureswift/jell/jell-bot-teams-v2/.venv-test/lib/python3.8/site-packages/botbuilder/testing/storage_base_tests.py", line 27, in <module>
from botbuilder.azure import CosmosDbStorage
ModuleNotFoundError: No module named 'botbuilder.azure'
```
8. `pip install botbuilder-azure`
9. `python -c 'from botbuilder.testing import DialogTestClient'`
Command works!
## Expected behavior
No errors after installing botbuilder-testing and importing the module.
I do wonder whether the pytest requirement is really necessary; could it be refactored out, leaving the library test-suite agnostic?
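
The reporter's closing question, whether pytest could be kept out of the runtime requirements, points at an alternative fix. Below is a minimal sketch of that idea only for illustration; the class body is hypothetical, and as the `pr_diff` further down shows, the maintainers instead chose to declare `pytest` and `botbuilder-azure` as install requirements.

```python
# Hypothetical sketch only -- NOT the fix that was shipped (the diff below
# instead adds pytest and botbuilder-azure to install_requires).
# It illustrates how a module such as storage_base_tests could defer its
# pytest import so that importing botbuilder.testing does not require pytest.
try:
    import pytest  # optional: only needed when the shared test helpers run
except ImportError:  # pragma: no cover
    pytest = None


class StorageBaseTests:
    """Shared storage test helpers (class name taken from the tracebacks above)."""

    def _require_pytest(self):
        # Fail with a clear message only when the helpers are actually used.
        if pytest is None:
            raise RuntimeError(
                "pytest is required to run StorageBaseTests; "
                "install it with `pip install pytest`."
            )
```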
| [
{
"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nREQUIRES = [\n \"botbuilder-schema==4.13.0\",\n \"botbuilder-core==4.13.0\",\n \"botbuilder-dialogs==4.13.0\",\n]\n\nTESTS_REQUIRES = [\"aiounittest==1.3.0\"]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"botbuilder\", \"testing\", \"about.py\")) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=\"botbuilder-testing bots ai testing botframework botbuilder\",\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\"botbuilder.testing\"],\n install_requires=REQUIRES + TESTS_REQUIRES,\n tests_require=TESTS_REQUIRES,\n include_package_data=True,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n",
"path": "libraries/botbuilder-testing/setup.py"
}
] | [
{
"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nREQUIRES = [\n \"botbuilder-schema==4.13.0\",\n \"botbuilder-core==4.13.0\",\n \"botbuilder-dialogs==4.13.0\",\n \"botbuilder-azure==4.13.0\",\n \"pytest~=6.2.3\",\n]\n\nTESTS_REQUIRES = [\"aiounittest==1.3.0\"]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"botbuilder\", \"testing\", \"about.py\")) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=\"botbuilder-testing bots ai testing botframework botbuilder\",\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\"botbuilder.testing\"],\n install_requires=REQUIRES + TESTS_REQUIRES,\n tests_require=TESTS_REQUIRES,\n include_package_data=True,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n",
"path": "libraries/botbuilder-testing/setup.py"
}
] | diff --git a/libraries/botbuilder-testing/setup.py b/libraries/botbuilder-testing/setup.py
index af36832cd..bd6ed4856 100644
--- a/libraries/botbuilder-testing/setup.py
+++ b/libraries/botbuilder-testing/setup.py
@@ -8,6 +8,8 @@
"botbuilder-schema==4.13.0",
"botbuilder-core==4.13.0",
"botbuilder-dialogs==4.13.0",
+ "botbuilder-azure==4.13.0",
+ "pytest~=6.2.3",
]
TESTS_REQUIRES = ["aiounittest==1.3.0"]
|
python-poetry__poetry-3159 | Poetry fails with KeyError if the PATH environment variable is not present
- [x] I am on the [latest](https://github.com/python-poetry/poetry/releases/latest) Poetry version.
- [x] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate.
- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).
- **OS version and name**: Ubuntu 18.04
- **Poetry version**: 1.0.10
## Issue
When running in CI using a Docker container, the `PATH` environment variable is not set, which causes an issue with Poetry. Unfortunately I don't see any traceback. Here's a snippet showing the issue:
```
root@5d1e49d5433c:~/src# unset PATH
root@5d1e49d5433c:~/src# /usr/local/bin/poetry run -vvv pip install pip
[KeyError]
'PATH'
```
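
The crash suggests that somewhere in the environment-handling code `os.environ["PATH"]` is read with a plain indexing lookup, which raises `KeyError` when the variable is absent. Below is a minimal sketch of the defensive pattern; it is an assumption about the general shape of such a fix, not the exact change Poetry's maintainers made:

```python
import os

# Failing pattern: plain indexing raises KeyError when PATH is unset,
# e.g. in a minimal Docker image or a CI job that cleared the variable.
# path = os.environ["PATH"]

# Defensive pattern: fall back to an empty string (or os.defpath) instead.
path = os.environ.get("PATH", "")
search_dirs = path.split(os.pathsep) if path else []
print(search_dirs)
```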
| [
{
"content": "import base64\nimport hashlib\nimport json\nimport os\nimport platform\nimport re\nimport shutil\nimport sys\nimport sysconfig\nimport textwrap\n\nfrom contextlib import contextmanager\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import Tuple\nfrom typing import Union\n\nimport packaging.tags\nimport tomlkit\nimport virtualenv\n\nfrom clikit.api.io import IO\nfrom packaging.tags import Tag\nfrom packaging.tags import interpreter_name\nfrom packaging.tags import interpreter_version\nfrom packaging.tags import sys_tags\n\nfrom poetry.core.semver import parse_constraint\nfrom poetry.core.semver.version import Version\nfrom poetry.core.toml.file import TOMLFile\nfrom poetry.core.version.markers import BaseMarker\nfrom poetry.locations import CACHE_DIR\nfrom poetry.poetry import Poetry\nfrom poetry.utils._compat import CalledProcessError\nfrom poetry.utils._compat import Path\nfrom poetry.utils._compat import decode\nfrom poetry.utils._compat import encode\nfrom poetry.utils._compat import list_to_shell_command\nfrom poetry.utils._compat import subprocess\n\n\nGET_ENVIRONMENT_INFO = \"\"\"\\\nimport json\nimport os\nimport platform\nimport sys\nimport sysconfig\n\nINTERPRETER_SHORT_NAMES = {\n \"python\": \"py\",\n \"cpython\": \"cp\",\n \"pypy\": \"pp\",\n \"ironpython\": \"ip\",\n \"jython\": \"jy\",\n}\n\n\ndef interpreter_version():\n version = sysconfig.get_config_var(\"interpreter_version\")\n if version:\n version = str(version)\n else:\n version = _version_nodot(sys.version_info[:2])\n\n return version\n\n\ndef _version_nodot(version):\n # type: (PythonVersion) -> str\n if any(v >= 10 for v in version):\n sep = \"_\"\n else:\n sep = \"\"\n\n return sep.join(map(str, version))\n\n\nif hasattr(sys, \"implementation\"):\n info = sys.implementation.version\n iver = \"{0.major}.{0.minor}.{0.micro}\".format(info)\n kind = info.releaselevel\n if kind != \"final\":\n iver += kind[0] + str(info.serial)\n\n implementation_name = sys.implementation.name\nelse:\n iver = \"0\"\n implementation_name = platform.python_implementation().lower()\n\nenv = {\n \"implementation_name\": implementation_name,\n \"implementation_version\": iver,\n \"os_name\": os.name,\n \"platform_machine\": platform.machine(),\n \"platform_release\": platform.release(),\n \"platform_system\": platform.system(),\n \"platform_version\": platform.version(),\n \"python_full_version\": platform.python_version(),\n \"platform_python_implementation\": platform.python_implementation(),\n \"python_version\": platform.python_version()[:3],\n \"sys_platform\": sys.platform,\n \"version_info\": tuple(sys.version_info),\n # Extra information\n \"interpreter_name\": INTERPRETER_SHORT_NAMES.get(implementation_name, implementation_name),\n \"interpreter_version\": interpreter_version(),\n}\n\nprint(json.dumps(env))\n\"\"\"\n\n\nGET_BASE_PREFIX = \"\"\"\\\nimport sys\n\nif hasattr(sys, \"real_prefix\"):\n print(sys.real_prefix)\nelif hasattr(sys, \"base_prefix\"):\n print(sys.base_prefix)\nelse:\n print(sys.prefix)\n\"\"\"\n\nGET_PYTHON_VERSION = \"\"\"\\\nimport sys\n\nprint('.'.join([str(s) for s in sys.version_info[:3]]))\n\"\"\"\n\nGET_SYS_PATH = \"\"\"\\\nimport json\nimport sys\n\nprint(json.dumps(sys.path))\n\"\"\"\n\nGET_PATHS = \"\"\"\\\nimport json\nimport sysconfig\n\nprint(json.dumps(sysconfig.get_paths()))\n\"\"\"\n\n\nclass EnvError(Exception):\n\n pass\n\n\nclass EnvCommandError(EnvError):\n def __init__(self, e, input=None): # type: 
(CalledProcessError) -> None\n self.e = e\n\n message = \"Command {} errored with the following return code {}, and output: \\n{}\".format(\n e.cmd, e.returncode, decode(e.output)\n )\n if input:\n message += \"input was : {}\".format(input)\n super(EnvCommandError, self).__init__(message)\n\n\nclass NoCompatiblePythonVersionFound(EnvError):\n def __init__(self, expected, given=None):\n if given:\n message = (\n \"The specified Python version ({}) \"\n \"is not supported by the project ({}).\\n\"\n \"Please choose a compatible version \"\n \"or loosen the python constraint specified \"\n \"in the pyproject.toml file.\".format(given, expected)\n )\n else:\n message = (\n \"Poetry was unable to find a compatible version. \"\n \"If you have one, you can explicitly use it \"\n 'via the \"env use\" command.'\n )\n\n super(NoCompatiblePythonVersionFound, self).__init__(message)\n\n\nclass EnvManager(object):\n \"\"\"\n Environments manager\n \"\"\"\n\n _env = None\n\n ENVS_FILE = \"envs.toml\"\n\n def __init__(self, poetry): # type: (Poetry) -> None\n self._poetry = poetry\n\n def activate(self, python, io): # type: (str, IO) -> Env\n venv_path = self._poetry.config.get(\"virtualenvs.path\")\n if venv_path is None:\n venv_path = Path(CACHE_DIR) / \"virtualenvs\"\n else:\n venv_path = Path(venv_path)\n\n cwd = self._poetry.file.parent\n\n envs_file = TOMLFile(venv_path / self.ENVS_FILE)\n\n try:\n python_version = Version.parse(python)\n python = \"python{}\".format(python_version.major)\n if python_version.precision > 1:\n python += \".{}\".format(python_version.minor)\n except ValueError:\n # Executable in PATH or full executable path\n pass\n\n try:\n python_version = decode(\n subprocess.check_output(\n list_to_shell_command(\n [\n python,\n \"-c\",\n \"\\\"import sys; print('.'.join([str(s) for s in sys.version_info[:3]]))\\\"\",\n ]\n ),\n shell=True,\n )\n )\n except CalledProcessError as e:\n raise EnvCommandError(e)\n\n python_version = Version.parse(python_version.strip())\n minor = \"{}.{}\".format(python_version.major, python_version.minor)\n patch = python_version.text\n\n create = False\n is_root_venv = self._poetry.config.get(\"virtualenvs.in-project\")\n # If we are required to create the virtual environment in the root folder,\n # create or recreate it if needed\n if is_root_venv:\n create = False\n venv = self._poetry.file.parent / \".venv\"\n if venv.exists():\n # We need to check if the patch version is correct\n _venv = VirtualEnv(venv)\n current_patch = \".\".join(str(v) for v in _venv.version_info[:3])\n\n if patch != current_patch:\n create = True\n\n self.create_venv(io, executable=python, force=create)\n\n return self.get(reload=True)\n\n envs = tomlkit.document()\n base_env_name = self.generate_env_name(self._poetry.package.name, str(cwd))\n if envs_file.exists():\n envs = envs_file.read()\n current_env = envs.get(base_env_name)\n if current_env is not None:\n current_minor = current_env[\"minor\"]\n current_patch = current_env[\"patch\"]\n\n if current_minor == minor and current_patch != patch:\n # We need to recreate\n create = True\n\n name = \"{}-py{}\".format(base_env_name, minor)\n venv = venv_path / name\n\n # Create if needed\n if not venv.exists() or venv.exists() and create:\n in_venv = os.environ.get(\"VIRTUAL_ENV\") is not None\n if in_venv or not venv.exists():\n create = True\n\n if venv.exists():\n # We need to check if the patch version is correct\n _venv = VirtualEnv(venv)\n current_patch = \".\".join(str(v) for v in _venv.version_info[:3])\n\n if patch 
!= current_patch:\n create = True\n\n self.create_venv(io, executable=python, force=create)\n\n # Activate\n envs[base_env_name] = {\"minor\": minor, \"patch\": patch}\n envs_file.write(envs)\n\n return self.get(reload=True)\n\n def deactivate(self, io): # type: (IO) -> None\n venv_path = self._poetry.config.get(\"virtualenvs.path\")\n if venv_path is None:\n venv_path = Path(CACHE_DIR) / \"virtualenvs\"\n else:\n venv_path = Path(venv_path)\n\n name = self._poetry.package.name\n name = self.generate_env_name(name, str(self._poetry.file.parent))\n\n envs_file = TOMLFile(venv_path / self.ENVS_FILE)\n if envs_file.exists():\n envs = envs_file.read()\n env = envs.get(name)\n if env is not None:\n io.write_line(\n \"Deactivating virtualenv: <comment>{}</comment>\".format(\n venv_path / (name + \"-py{}\".format(env[\"minor\"]))\n )\n )\n del envs[name]\n\n envs_file.write(envs)\n\n def get(self, reload=False): # type: (bool) -> Env\n if self._env is not None and not reload:\n return self._env\n\n python_minor = \".\".join([str(v) for v in sys.version_info[:2]])\n\n venv_path = self._poetry.config.get(\"virtualenvs.path\")\n if venv_path is None:\n venv_path = Path(CACHE_DIR) / \"virtualenvs\"\n else:\n venv_path = Path(venv_path)\n\n cwd = self._poetry.file.parent\n envs_file = TOMLFile(venv_path / self.ENVS_FILE)\n env = None\n base_env_name = self.generate_env_name(self._poetry.package.name, str(cwd))\n if envs_file.exists():\n envs = envs_file.read()\n env = envs.get(base_env_name)\n if env:\n python_minor = env[\"minor\"]\n\n # Check if we are inside a virtualenv or not\n # Conda sets CONDA_PREFIX in its envs, see\n # https://github.com/conda/conda/issues/2764\n env_prefix = os.environ.get(\"VIRTUAL_ENV\", os.environ.get(\"CONDA_PREFIX\"))\n conda_env_name = os.environ.get(\"CONDA_DEFAULT_ENV\")\n # It's probably not a good idea to pollute Conda's global \"base\" env, since\n # most users have it activated all the time.\n in_venv = env_prefix is not None and conda_env_name != \"base\"\n\n if not in_venv or env is not None:\n # Checking if a local virtualenv exists\n if self._poetry.config.get(\"virtualenvs.in-project\") is not False:\n if (cwd / \".venv\").exists() and (cwd / \".venv\").is_dir():\n venv = cwd / \".venv\"\n\n return VirtualEnv(venv)\n\n create_venv = self._poetry.config.get(\"virtualenvs.create\", True)\n\n if not create_venv:\n return SystemEnv(Path(sys.prefix))\n\n venv_path = self._poetry.config.get(\"virtualenvs.path\")\n if venv_path is None:\n venv_path = Path(CACHE_DIR) / \"virtualenvs\"\n else:\n venv_path = Path(venv_path)\n\n name = \"{}-py{}\".format(base_env_name, python_minor.strip())\n\n venv = venv_path / name\n\n if not venv.exists():\n return SystemEnv(Path(sys.prefix))\n\n return VirtualEnv(venv)\n\n if env_prefix is not None:\n prefix = Path(env_prefix)\n base_prefix = None\n else:\n prefix = Path(sys.prefix)\n base_prefix = self.get_base_prefix()\n\n return VirtualEnv(prefix, base_prefix)\n\n def list(self, name=None): # type: (Optional[str]) -> List[VirtualEnv]\n if name is None:\n name = self._poetry.package.name\n\n venv_name = self.generate_env_name(name, str(self._poetry.file.parent))\n\n venv_path = self._poetry.config.get(\"virtualenvs.path\")\n if venv_path is None:\n venv_path = Path(CACHE_DIR) / \"virtualenvs\"\n else:\n venv_path = Path(venv_path)\n\n env_list = [\n VirtualEnv(Path(p))\n for p in sorted(venv_path.glob(\"{}-py*\".format(venv_name)))\n ]\n\n venv = self._poetry.file.parent / \".venv\"\n if (\n 
self._poetry.config.get(\"virtualenvs.in-project\")\n and venv.exists()\n and venv.is_dir()\n ):\n env_list.insert(0, VirtualEnv(venv))\n return env_list\n\n def remove(self, python): # type: (str) -> Env\n venv_path = self._poetry.config.get(\"virtualenvs.path\")\n if venv_path is None:\n venv_path = Path(CACHE_DIR) / \"virtualenvs\"\n else:\n venv_path = Path(venv_path)\n\n cwd = self._poetry.file.parent\n envs_file = TOMLFile(venv_path / self.ENVS_FILE)\n base_env_name = self.generate_env_name(self._poetry.package.name, str(cwd))\n\n if python.startswith(base_env_name):\n venvs = self.list()\n for venv in venvs:\n if venv.path.name == python:\n # Exact virtualenv name\n if not envs_file.exists():\n self.remove_venv(venv.path)\n\n return venv\n\n venv_minor = \".\".join(str(v) for v in venv.version_info[:2])\n base_env_name = self.generate_env_name(cwd.name, str(cwd))\n envs = envs_file.read()\n\n current_env = envs.get(base_env_name)\n if not current_env:\n self.remove_venv(venv.path)\n\n return venv\n\n if current_env[\"minor\"] == venv_minor:\n del envs[base_env_name]\n envs_file.write(envs)\n\n self.remove_venv(venv.path)\n\n return venv\n\n raise ValueError(\n '<warning>Environment \"{}\" does not exist.</warning>'.format(python)\n )\n\n try:\n python_version = Version.parse(python)\n python = \"python{}\".format(python_version.major)\n if python_version.precision > 1:\n python += \".{}\".format(python_version.minor)\n except ValueError:\n # Executable in PATH or full executable path\n pass\n\n try:\n python_version = decode(\n subprocess.check_output(\n list_to_shell_command(\n [\n python,\n \"-c\",\n \"\\\"import sys; print('.'.join([str(s) for s in sys.version_info[:3]]))\\\"\",\n ]\n ),\n shell=True,\n )\n )\n except CalledProcessError as e:\n raise EnvCommandError(e)\n\n python_version = Version.parse(python_version.strip())\n minor = \"{}.{}\".format(python_version.major, python_version.minor)\n\n name = \"{}-py{}\".format(base_env_name, minor)\n venv = venv_path / name\n\n if not venv.exists():\n raise ValueError(\n '<warning>Environment \"{}\" does not exist.</warning>'.format(name)\n )\n\n if envs_file.exists():\n envs = envs_file.read()\n current_env = envs.get(base_env_name)\n if current_env is not None:\n current_minor = current_env[\"minor\"]\n\n if current_minor == minor:\n del envs[base_env_name]\n envs_file.write(envs)\n\n self.remove_venv(venv)\n\n return VirtualEnv(venv)\n\n def create_venv(\n self, io, name=None, executable=None, force=False\n ): # type: (IO, Optional[str], Optional[str], bool) -> Env\n if self._env is not None and not force:\n return self._env\n\n cwd = self._poetry.file.parent\n env = self.get(reload=True)\n\n if not env.is_sane():\n force = True\n\n if env.is_venv() and not force:\n # Already inside a virtualenv.\n return env\n\n create_venv = self._poetry.config.get(\"virtualenvs.create\")\n root_venv = self._poetry.config.get(\"virtualenvs.in-project\")\n\n venv_path = self._poetry.config.get(\"virtualenvs.path\")\n if root_venv:\n venv_path = cwd / \".venv\"\n elif venv_path is None:\n venv_path = Path(CACHE_DIR) / \"virtualenvs\"\n else:\n venv_path = Path(venv_path)\n\n if not name:\n name = self._poetry.package.name\n\n python_patch = \".\".join([str(v) for v in sys.version_info[:3]])\n python_minor = \".\".join([str(v) for v in sys.version_info[:2]])\n if executable:\n python_patch = decode(\n subprocess.check_output(\n list_to_shell_command(\n [\n executable,\n \"-c\",\n \"\\\"import sys; print('.'.join([str(s) for s in 
sys.version_info[:3]]))\\\"\",\n ]\n ),\n shell=True,\n ).strip()\n )\n python_minor = \".\".join(python_patch.split(\".\")[:2])\n\n supported_python = self._poetry.package.python_constraint\n if not supported_python.allows(Version.parse(python_patch)):\n # The currently activated or chosen Python version\n # is not compatible with the Python constraint specified\n # for the project.\n # If an executable has been specified, we stop there\n # and notify the user of the incompatibility.\n # Otherwise, we try to find a compatible Python version.\n if executable:\n raise NoCompatiblePythonVersionFound(\n self._poetry.package.python_versions, python_patch\n )\n\n io.write_line(\n \"<warning>The currently activated Python version {} \"\n \"is not supported by the project ({}).\\n\"\n \"Trying to find and use a compatible version.</warning> \".format(\n python_patch, self._poetry.package.python_versions\n )\n )\n\n for python_to_try in reversed(\n sorted(\n self._poetry.package.AVAILABLE_PYTHONS,\n key=lambda v: (v.startswith(\"3\"), -len(v), v),\n )\n ):\n if len(python_to_try) == 1:\n if not parse_constraint(\"^{}.0\".format(python_to_try)).allows_any(\n supported_python\n ):\n continue\n elif not supported_python.allows_all(\n parse_constraint(python_to_try + \".*\")\n ):\n continue\n\n python = \"python\" + python_to_try\n\n if io.is_debug():\n io.write_line(\"<debug>Trying {}</debug>\".format(python))\n\n try:\n python_patch = decode(\n subprocess.check_output(\n list_to_shell_command(\n [\n python,\n \"-c\",\n \"\\\"import sys; print('.'.join([str(s) for s in sys.version_info[:3]]))\\\"\",\n ]\n ),\n stderr=subprocess.STDOUT,\n shell=True,\n ).strip()\n )\n except CalledProcessError:\n continue\n\n if not python_patch:\n continue\n\n if supported_python.allows(Version.parse(python_patch)):\n io.write_line(\"Using <c1>{}</c1> ({})\".format(python, python_patch))\n executable = python\n python_minor = \".\".join(python_patch.split(\".\")[:2])\n break\n\n if not executable:\n raise NoCompatiblePythonVersionFound(\n self._poetry.package.python_versions\n )\n\n if root_venv:\n venv = venv_path\n else:\n name = self.generate_env_name(name, str(cwd))\n name = \"{}-py{}\".format(name, python_minor.strip())\n venv = venv_path / name\n\n if not venv.exists():\n if create_venv is False:\n io.write_line(\n \"<fg=black;bg=yellow>\"\n \"Skipping virtualenv creation, \"\n \"as specified in config file.\"\n \"</>\"\n )\n\n return SystemEnv(Path(sys.prefix))\n\n io.write_line(\n \"Creating virtualenv <c1>{}</> in {}\".format(name, str(venv_path))\n )\n\n self.build_venv(venv, executable=executable)\n else:\n if force:\n if not env.is_sane():\n io.write_line(\n \"<warning>The virtual environment found in {} seems to be broken.</warning>\".format(\n env.path\n )\n )\n io.write_line(\n \"Recreating virtualenv <c1>{}</> in {}\".format(name, str(venv))\n )\n self.remove_venv(venv)\n self.build_venv(venv, executable=executable)\n elif io.is_very_verbose():\n io.write_line(\"Virtualenv <c1>{}</> already exists.\".format(name))\n\n # venv detection:\n # stdlib venv may symlink sys.executable, so we can't use realpath.\n # but others can symlink *to* the venv Python,\n # so we can't just use sys.executable.\n # So we just check every item in the symlink tree (generally <= 3)\n p = os.path.normcase(sys.executable)\n paths = [p]\n while os.path.islink(p):\n p = os.path.normcase(os.path.join(os.path.dirname(p), os.readlink(p)))\n paths.append(p)\n\n p_venv = os.path.normcase(str(venv))\n if any(p.startswith(p_venv) for 
p in paths):\n # Running properly in the virtualenv, don't need to do anything\n return SystemEnv(Path(sys.prefix), self.get_base_prefix())\n\n return VirtualEnv(venv)\n\n @classmethod\n def build_venv(\n cls, path, executable=None\n ): # type: (Union[Path,str], Optional[Union[str, Path]]) -> virtualenv.run.session.Session\n if isinstance(executable, Path):\n executable = executable.resolve().as_posix()\n return virtualenv.cli_run(\n [\n \"--no-download\",\n \"--no-periodic-update\",\n \"--python\",\n executable or sys.executable,\n str(path),\n ]\n )\n\n @classmethod\n def remove_venv(cls, path): # type: (Union[Path,str]) -> None\n if isinstance(path, str):\n path = Path(path)\n assert path.is_dir()\n try:\n shutil.rmtree(str(path))\n return\n except OSError as e:\n # Continue only if e.errno == 16\n if e.errno != 16: # ERRNO 16: Device or resource busy\n raise e\n\n # Delete all files and folders but the toplevel one. This is because sometimes\n # the venv folder is mounted by the OS, such as in a docker volume. In such\n # cases, an attempt to delete the folder itself will result in an `OSError`.\n # See https://github.com/python-poetry/poetry/pull/2064\n for file_path in path.iterdir():\n if file_path.is_file() or file_path.is_symlink():\n file_path.unlink()\n elif file_path.is_dir():\n shutil.rmtree(str(file_path))\n\n def get_base_prefix(self): # type: () -> Path\n if hasattr(sys, \"real_prefix\"):\n return sys.real_prefix\n\n if hasattr(sys, \"base_prefix\"):\n return sys.base_prefix\n\n return sys.prefix\n\n @classmethod\n def generate_env_name(cls, name, cwd): # type: (str, str) -> str\n name = name.lower()\n sanitized_name = re.sub(r'[ $`!*@\"\\\\\\r\\n\\t]', \"_\", name)[:42]\n h = hashlib.sha256(encode(cwd)).digest()\n h = base64.urlsafe_b64encode(h).decode()[:8]\n\n return \"{}-{}\".format(sanitized_name, h)\n\n\nclass Env(object):\n \"\"\"\n An abstract Python environment.\n \"\"\"\n\n def __init__(self, path, base=None): # type: (Path, Optional[Path]) -> None\n self._is_windows = sys.platform == \"win32\"\n\n self._path = path\n bin_dir = \"bin\" if not self._is_windows else \"Scripts\"\n self._bin_dir = self._path / bin_dir\n\n self._base = base or path\n\n self._marker_env = None\n self._pip_version = None\n self._site_packages = None\n self._paths = None\n self._supported_tags = None\n self._purelib = None\n self._platlib = None\n\n @property\n def path(self): # type: () -> Path\n return self._path\n\n @property\n def base(self): # type: () -> Path\n return self._base\n\n @property\n def version_info(self): # type: () -> Tuple[int]\n return tuple(self.marker_env[\"version_info\"])\n\n @property\n def python_implementation(self): # type: () -> str\n return self.marker_env[\"platform_python_implementation\"]\n\n @property\n def python(self): # type: () -> str\n \"\"\"\n Path to current python executable\n \"\"\"\n return self._bin(\"python\")\n\n @property\n def marker_env(self):\n if self._marker_env is None:\n self._marker_env = self.get_marker_env()\n\n return self._marker_env\n\n @property\n def pip(self): # type: () -> str\n \"\"\"\n Path to current pip executable\n \"\"\"\n return self._bin(\"pip\")\n\n @property\n def platform(self): # type: () -> str\n return sys.platform\n\n @property\n def os(self): # type: () -> str\n return os.name\n\n @property\n def pip_version(self):\n if self._pip_version is None:\n self._pip_version = self.get_pip_version()\n\n return self._pip_version\n\n @property\n def site_packages(self): # type: () -> Path\n if self._site_packages is 
None:\n self._site_packages = self.purelib\n return self._site_packages\n\n @property\n def usersite(self): # type: () -> Optional[Path]\n if \"usersite\" in self.paths:\n return Path(self.paths[\"usersite\"])\n\n @property\n def purelib(self): # type: () -> Path\n if self._purelib is None:\n self._purelib = Path(self.paths[\"purelib\"])\n\n return self._purelib\n\n @property\n def platlib(self): # type: () -> Path\n if self._platlib is None:\n if \"platlib\" in self.paths:\n self._platlib = Path(self.paths[\"platlib\"])\n else:\n self._platlib = self.purelib\n\n return self._platlib\n\n def is_path_relative_to_lib(self, path): # type: (Path) -> bool\n for lib_path in [self.purelib, self.platlib]:\n try:\n path.relative_to(lib_path)\n return True\n except ValueError:\n pass\n\n return False\n\n @property\n def sys_path(self): # type: () -> List[str]\n raise NotImplementedError()\n\n @property\n def paths(self): # type: () -> Dict[str, str]\n if self._paths is None:\n self._paths = self.get_paths()\n\n return self._paths\n\n @property\n def supported_tags(self): # type: () -> List[Tag]\n if self._supported_tags is None:\n self._supported_tags = self.get_supported_tags()\n\n return self._supported_tags\n\n @classmethod\n def get_base_prefix(cls): # type: () -> Path\n if hasattr(sys, \"real_prefix\"):\n return sys.real_prefix\n\n if hasattr(sys, \"base_prefix\"):\n return sys.base_prefix\n\n return sys.prefix\n\n def get_version_info(self): # type: () -> Tuple[int]\n raise NotImplementedError()\n\n def get_python_implementation(self): # type: () -> str\n raise NotImplementedError()\n\n def get_marker_env(self): # type: () -> Dict[str, Any]\n raise NotImplementedError()\n\n def get_pip_command(self): # type: () -> List[str]\n raise NotImplementedError()\n\n def get_supported_tags(self): # type: () -> List[Tag]\n raise NotImplementedError()\n\n def get_pip_version(self): # type: () -> Version\n raise NotImplementedError()\n\n def get_paths(self): # type: () -> Dict[str, str]\n raise NotImplementedError()\n\n def is_valid_for_marker(self, marker): # type: (BaseMarker) -> bool\n return marker.validate(self.marker_env)\n\n def is_sane(self): # type: () -> bool\n \"\"\"\n Checks whether the current environment is sane or not.\n \"\"\"\n return True\n\n def run(self, bin, *args, **kwargs):\n bin = self._bin(bin)\n cmd = [bin] + list(args)\n return self._run(cmd, **kwargs)\n\n def run_pip(self, *args, **kwargs):\n pip = self.get_pip_command()\n cmd = pip + list(args)\n return self._run(cmd, **kwargs)\n\n def _run(self, cmd, **kwargs):\n \"\"\"\n Run a command inside the Python environment.\n \"\"\"\n call = kwargs.pop(\"call\", False)\n input_ = kwargs.pop(\"input_\", None)\n\n try:\n if self._is_windows:\n kwargs[\"shell\"] = True\n\n if kwargs.get(\"shell\", False):\n cmd = list_to_shell_command(cmd)\n\n if input_:\n output = subprocess.run(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n input=encode(input_),\n check=True,\n **kwargs\n ).stdout\n elif call:\n return subprocess.call(cmd, stderr=subprocess.STDOUT, **kwargs)\n else:\n output = subprocess.check_output(\n cmd, stderr=subprocess.STDOUT, **kwargs\n )\n except CalledProcessError as e:\n raise EnvCommandError(e, input=input_)\n\n return decode(output)\n\n def execute(self, bin, *args, **kwargs):\n bin = self._bin(bin)\n\n if not self._is_windows:\n args = [bin] + list(args)\n if \"env\" in kwargs:\n return os.execvpe(bin, args, kwargs[\"env\"])\n else:\n return os.execvp(bin, args)\n else:\n exe = subprocess.Popen([bin] + 
list(args), **kwargs)\n exe.communicate()\n return exe.returncode\n\n def is_venv(self): # type: () -> bool\n raise NotImplementedError()\n\n def _bin(self, bin): # type: (str) -> str\n \"\"\"\n Return path to the given executable.\n \"\"\"\n bin_path = (self._bin_dir / bin).with_suffix(\".exe\" if self._is_windows else \"\")\n if not bin_path.exists():\n # On Windows, some executables can be in the base path\n # This is especially true when installing Python with\n # the official installer, where python.exe will be at\n # the root of the env path.\n # This is an edge case and should not be encountered\n # in normal uses but this happens in the sonnet script\n # that creates a fake virtual environment pointing to\n # a base Python install.\n if self._is_windows:\n bin_path = (self._path / bin).with_suffix(\".exe\")\n if bin_path.exists():\n return str(bin_path)\n\n return bin\n\n return str(bin_path)\n\n def __eq__(self, other): # type: (Env) -> bool\n return other.__class__ == self.__class__ and other.path == self.path\n\n def __repr__(self):\n return '{}(\"{}\")'.format(self.__class__.__name__, self._path)\n\n\nclass SystemEnv(Env):\n \"\"\"\n A system (i.e. not a virtualenv) Python environment.\n \"\"\"\n\n @property\n def sys_path(self): # type: () -> List[str]\n return sys.path\n\n def get_version_info(self): # type: () -> Tuple[int]\n return sys.version_info\n\n def get_python_implementation(self): # type: () -> str\n return platform.python_implementation()\n\n def get_pip_command(self): # type: () -> List[str]\n # If we're not in a venv, assume the interpreter we're running on\n # has a pip and use that\n return [sys.executable, \"-m\", \"pip\"]\n\n def get_paths(self): # type: () -> Dict[str, str]\n # We can't use sysconfig.get_paths() because\n # on some distributions it does not return the proper paths\n # (those used by pip for instance). 
We go through distutils\n # to get the proper ones.\n import site\n\n from distutils.command.install import SCHEME_KEYS # noqa\n from distutils.core import Distribution\n\n d = Distribution()\n d.parse_config_files()\n obj = d.get_command_obj(\"install\", create=True)\n obj.finalize_options()\n\n paths = sysconfig.get_paths().copy()\n for key in SCHEME_KEYS:\n if key == \"headers\":\n # headers is not a path returned by sysconfig.get_paths()\n continue\n\n paths[key] = getattr(obj, \"install_{}\".format(key))\n\n if site.check_enableusersite() and hasattr(obj, \"install_usersite\"):\n paths[\"usersite\"] = getattr(obj, \"install_usersite\")\n\n return paths\n\n def get_supported_tags(self): # type: () -> List[Tag]\n return list(sys_tags())\n\n def get_marker_env(self): # type: () -> Dict[str, Any]\n if hasattr(sys, \"implementation\"):\n info = sys.implementation.version\n iver = \"{0.major}.{0.minor}.{0.micro}\".format(info)\n kind = info.releaselevel\n if kind != \"final\":\n iver += kind[0] + str(info.serial)\n\n implementation_name = sys.implementation.name\n else:\n iver = \"0\"\n implementation_name = \"\"\n\n return {\n \"implementation_name\": implementation_name,\n \"implementation_version\": iver,\n \"os_name\": os.name,\n \"platform_machine\": platform.machine(),\n \"platform_release\": platform.release(),\n \"platform_system\": platform.system(),\n \"platform_version\": platform.version(),\n \"python_full_version\": platform.python_version(),\n \"platform_python_implementation\": platform.python_implementation(),\n \"python_version\": \".\".join(\n v for v in platform.python_version().split(\".\")[:2]\n ),\n \"sys_platform\": sys.platform,\n \"version_info\": sys.version_info,\n # Extra information\n \"interpreter_name\": interpreter_name(),\n \"interpreter_version\": interpreter_version(),\n }\n\n def get_pip_version(self): # type: () -> Version\n from pip import __version__\n\n return Version.parse(__version__)\n\n def is_venv(self): # type: () -> bool\n return self._path != self._base\n\n\nclass VirtualEnv(Env):\n \"\"\"\n A virtual Python environment.\n \"\"\"\n\n def __init__(self, path, base=None): # type: (Path, Optional[Path]) -> None\n super(VirtualEnv, self).__init__(path, base)\n\n # If base is None, it probably means this is\n # a virtualenv created from VIRTUAL_ENV.\n # In this case we need to get sys.base_prefix\n # from inside the virtualenv.\n if base is None:\n self._base = Path(self.run(\"python\", \"-\", input_=GET_BASE_PREFIX).strip())\n\n @property\n def sys_path(self): # type: () -> List[str]\n output = self.run(\"python\", \"-\", input_=GET_SYS_PATH)\n\n return json.loads(output)\n\n def get_version_info(self): # type: () -> Tuple[int]\n output = self.run(\"python\", \"-\", input_=GET_PYTHON_VERSION)\n\n return tuple([int(s) for s in output.strip().split(\".\")])\n\n def get_python_implementation(self): # type: () -> str\n return self.marker_env[\"platform_python_implementation\"]\n\n def get_pip_command(self): # type: () -> List[str]\n # We're in a virtualenv that is known to be sane,\n # so assume that we have a functional pip\n return [self._bin(\"pip\")]\n\n def get_supported_tags(self): # type: () -> List[Tag]\n file_path = Path(packaging.tags.__file__)\n if file_path.suffix == \".pyc\":\n # Python 2\n file_path = file_path.with_suffix(\".py\")\n\n with file_path.open(encoding=\"utf-8\") as f:\n script = decode(f.read())\n\n script = script.replace(\n \"from ._typing import TYPE_CHECKING, cast\",\n \"TYPE_CHECKING = False\\ncast = lambda type_, 
value: value\",\n )\n script = script.replace(\n \"from ._typing import MYPY_CHECK_RUNNING, cast\",\n \"MYPY_CHECK_RUNNING = False\\ncast = lambda type_, value: value\",\n )\n\n script += textwrap.dedent(\n \"\"\"\n import json\n\n print(json.dumps([(t.interpreter, t.abi, t.platform) for t in sys_tags()]))\n \"\"\"\n )\n\n output = self.run(\"python\", \"-\", input_=script)\n\n return [Tag(*t) for t in json.loads(output)]\n\n def get_marker_env(self): # type: () -> Dict[str, Any]\n output = self.run(\"python\", \"-\", input_=GET_ENVIRONMENT_INFO)\n\n return json.loads(output)\n\n def get_pip_version(self): # type: () -> Version\n output = self.run_pip(\"--version\").strip()\n m = re.match(\"pip (.+?)(?: from .+)?$\", output)\n if not m:\n return Version.parse(\"0.0\")\n\n return Version.parse(m.group(1))\n\n def get_paths(self): # type: () -> Dict[str, str]\n output = self.run(\"python\", \"-\", input_=GET_PATHS)\n\n return json.loads(output)\n\n def is_venv(self): # type: () -> bool\n return True\n\n def is_sane(self):\n # A virtualenv is considered sane if both \"python\" and \"pip\" exist.\n return os.path.exists(self._bin(\"python\")) and os.path.exists(self._bin(\"pip\"))\n\n def _run(self, cmd, **kwargs):\n with self.temp_environ():\n os.environ[\"PATH\"] = self._updated_path()\n os.environ[\"VIRTUAL_ENV\"] = str(self._path)\n\n self.unset_env(\"PYTHONHOME\")\n self.unset_env(\"__PYVENV_LAUNCHER__\")\n\n return super(VirtualEnv, self)._run(cmd, **kwargs)\n\n def execute(self, bin, *args, **kwargs):\n with self.temp_environ():\n os.environ[\"PATH\"] = self._updated_path()\n os.environ[\"VIRTUAL_ENV\"] = str(self._path)\n\n self.unset_env(\"PYTHONHOME\")\n self.unset_env(\"__PYVENV_LAUNCHER__\")\n\n return super(VirtualEnv, self).execute(bin, *args, **kwargs)\n\n @contextmanager\n def temp_environ(self):\n environ = dict(os.environ)\n try:\n yield\n finally:\n os.environ.clear()\n os.environ.update(environ)\n\n def unset_env(self, key):\n if key in os.environ:\n del os.environ[key]\n\n def _updated_path(self):\n return os.pathsep.join([str(self._bin_dir), os.environ[\"PATH\"]])\n\n\nclass NullEnv(SystemEnv):\n def __init__(self, path=None, base=None, execute=False):\n if path is None:\n path = Path(sys.prefix)\n\n super(NullEnv, self).__init__(path, base=base)\n\n self._execute = execute\n self.executed = []\n\n def get_pip_command(self): # type: () -> List[str]\n return [self._bin(\"python\"), \"-m\", \"pip\"]\n\n def _run(self, cmd, **kwargs):\n self.executed.append(cmd)\n\n if self._execute:\n return super(NullEnv, self)._run(cmd, **kwargs)\n\n def execute(self, bin, *args, **kwargs):\n self.executed.append([bin] + list(args))\n\n if self._execute:\n return super(NullEnv, self).execute(bin, *args, **kwargs)\n\n def _bin(self, bin):\n return bin\n\n\nclass MockEnv(NullEnv):\n def __init__(\n self,\n version_info=(3, 7, 0),\n python_implementation=\"CPython\",\n platform=\"darwin\",\n os_name=\"posix\",\n is_venv=False,\n pip_version=\"19.1\",\n sys_path=None,\n marker_env=None,\n supported_tags=None,\n **kwargs\n ):\n super(MockEnv, self).__init__(**kwargs)\n\n self._version_info = version_info\n self._python_implementation = python_implementation\n self._platform = platform\n self._os_name = os_name\n self._is_venv = is_venv\n self._pip_version = Version.parse(pip_version)\n self._sys_path = sys_path\n self._mock_marker_env = marker_env\n self._supported_tags = supported_tags\n\n @property\n def platform(self): # type: () -> str\n return self._platform\n\n @property\n def os(self): 
# type: () -> str\n return self._os_name\n\n @property\n def pip_version(self):\n return self._pip_version\n\n @property\n def sys_path(self):\n if self._sys_path is None:\n return super(MockEnv, self).sys_path\n\n return self._sys_path\n\n def get_marker_env(self): # type: () -> Dict[str, Any]\n if self._mock_marker_env is not None:\n return self._mock_marker_env\n\n marker_env = super(MockEnv, self).get_marker_env()\n marker_env[\"python_implementation\"] = self._python_implementation\n marker_env[\"version_info\"] = self._version_info\n marker_env[\"python_version\"] = \".\".join(str(v) for v in self._version_info[:2])\n marker_env[\"python_full_version\"] = \".\".join(str(v) for v in self._version_info)\n marker_env[\"sys_platform\"] = self._platform\n marker_env[\"interpreter_name\"] = self._python_implementation.lower()\n marker_env[\"interpreter_version\"] = \"cp\" + \"\".join(\n str(v) for v in self._version_info[:2]\n )\n\n return marker_env\n\n def is_venv(self): # type: () -> bool\n return self._is_venv\n",
"path": "poetry/utils/env.py"
}
] | [
{
"content": "import base64\nimport hashlib\nimport json\nimport os\nimport platform\nimport re\nimport shutil\nimport sys\nimport sysconfig\nimport textwrap\n\nfrom contextlib import contextmanager\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import Tuple\nfrom typing import Union\n\nimport packaging.tags\nimport tomlkit\nimport virtualenv\n\nfrom clikit.api.io import IO\nfrom packaging.tags import Tag\nfrom packaging.tags import interpreter_name\nfrom packaging.tags import interpreter_version\nfrom packaging.tags import sys_tags\n\nfrom poetry.core.semver import parse_constraint\nfrom poetry.core.semver.version import Version\nfrom poetry.core.toml.file import TOMLFile\nfrom poetry.core.version.markers import BaseMarker\nfrom poetry.locations import CACHE_DIR\nfrom poetry.poetry import Poetry\nfrom poetry.utils._compat import CalledProcessError\nfrom poetry.utils._compat import Path\nfrom poetry.utils._compat import decode\nfrom poetry.utils._compat import encode\nfrom poetry.utils._compat import list_to_shell_command\nfrom poetry.utils._compat import subprocess\n\n\nGET_ENVIRONMENT_INFO = \"\"\"\\\nimport json\nimport os\nimport platform\nimport sys\nimport sysconfig\n\nINTERPRETER_SHORT_NAMES = {\n \"python\": \"py\",\n \"cpython\": \"cp\",\n \"pypy\": \"pp\",\n \"ironpython\": \"ip\",\n \"jython\": \"jy\",\n}\n\n\ndef interpreter_version():\n version = sysconfig.get_config_var(\"interpreter_version\")\n if version:\n version = str(version)\n else:\n version = _version_nodot(sys.version_info[:2])\n\n return version\n\n\ndef _version_nodot(version):\n # type: (PythonVersion) -> str\n if any(v >= 10 for v in version):\n sep = \"_\"\n else:\n sep = \"\"\n\n return sep.join(map(str, version))\n\n\nif hasattr(sys, \"implementation\"):\n info = sys.implementation.version\n iver = \"{0.major}.{0.minor}.{0.micro}\".format(info)\n kind = info.releaselevel\n if kind != \"final\":\n iver += kind[0] + str(info.serial)\n\n implementation_name = sys.implementation.name\nelse:\n iver = \"0\"\n implementation_name = platform.python_implementation().lower()\n\nenv = {\n \"implementation_name\": implementation_name,\n \"implementation_version\": iver,\n \"os_name\": os.name,\n \"platform_machine\": platform.machine(),\n \"platform_release\": platform.release(),\n \"platform_system\": platform.system(),\n \"platform_version\": platform.version(),\n \"python_full_version\": platform.python_version(),\n \"platform_python_implementation\": platform.python_implementation(),\n \"python_version\": platform.python_version()[:3],\n \"sys_platform\": sys.platform,\n \"version_info\": tuple(sys.version_info),\n # Extra information\n \"interpreter_name\": INTERPRETER_SHORT_NAMES.get(implementation_name, implementation_name),\n \"interpreter_version\": interpreter_version(),\n}\n\nprint(json.dumps(env))\n\"\"\"\n\n\nGET_BASE_PREFIX = \"\"\"\\\nimport sys\n\nif hasattr(sys, \"real_prefix\"):\n print(sys.real_prefix)\nelif hasattr(sys, \"base_prefix\"):\n print(sys.base_prefix)\nelse:\n print(sys.prefix)\n\"\"\"\n\nGET_PYTHON_VERSION = \"\"\"\\\nimport sys\n\nprint('.'.join([str(s) for s in sys.version_info[:3]]))\n\"\"\"\n\nGET_SYS_PATH = \"\"\"\\\nimport json\nimport sys\n\nprint(json.dumps(sys.path))\n\"\"\"\n\nGET_PATHS = \"\"\"\\\nimport json\nimport sysconfig\n\nprint(json.dumps(sysconfig.get_paths()))\n\"\"\"\n\n\nclass EnvError(Exception):\n\n pass\n\n\nclass EnvCommandError(EnvError):\n def __init__(self, e, input=None): # type: 
(CalledProcessError) -> None\n self.e = e\n\n message = \"Command {} errored with the following return code {}, and output: \\n{}\".format(\n e.cmd, e.returncode, decode(e.output)\n )\n if input:\n message += \"input was : {}\".format(input)\n super(EnvCommandError, self).__init__(message)\n\n\nclass NoCompatiblePythonVersionFound(EnvError):\n def __init__(self, expected, given=None):\n if given:\n message = (\n \"The specified Python version ({}) \"\n \"is not supported by the project ({}).\\n\"\n \"Please choose a compatible version \"\n \"or loosen the python constraint specified \"\n \"in the pyproject.toml file.\".format(given, expected)\n )\n else:\n message = (\n \"Poetry was unable to find a compatible version. \"\n \"If you have one, you can explicitly use it \"\n 'via the \"env use\" command.'\n )\n\n super(NoCompatiblePythonVersionFound, self).__init__(message)\n\n\nclass EnvManager(object):\n \"\"\"\n Environments manager\n \"\"\"\n\n _env = None\n\n ENVS_FILE = \"envs.toml\"\n\n def __init__(self, poetry): # type: (Poetry) -> None\n self._poetry = poetry\n\n def activate(self, python, io): # type: (str, IO) -> Env\n venv_path = self._poetry.config.get(\"virtualenvs.path\")\n if venv_path is None:\n venv_path = Path(CACHE_DIR) / \"virtualenvs\"\n else:\n venv_path = Path(venv_path)\n\n cwd = self._poetry.file.parent\n\n envs_file = TOMLFile(venv_path / self.ENVS_FILE)\n\n try:\n python_version = Version.parse(python)\n python = \"python{}\".format(python_version.major)\n if python_version.precision > 1:\n python += \".{}\".format(python_version.minor)\n except ValueError:\n # Executable in PATH or full executable path\n pass\n\n try:\n python_version = decode(\n subprocess.check_output(\n list_to_shell_command(\n [\n python,\n \"-c\",\n \"\\\"import sys; print('.'.join([str(s) for s in sys.version_info[:3]]))\\\"\",\n ]\n ),\n shell=True,\n )\n )\n except CalledProcessError as e:\n raise EnvCommandError(e)\n\n python_version = Version.parse(python_version.strip())\n minor = \"{}.{}\".format(python_version.major, python_version.minor)\n patch = python_version.text\n\n create = False\n is_root_venv = self._poetry.config.get(\"virtualenvs.in-project\")\n # If we are required to create the virtual environment in the root folder,\n # create or recreate it if needed\n if is_root_venv:\n create = False\n venv = self._poetry.file.parent / \".venv\"\n if venv.exists():\n # We need to check if the patch version is correct\n _venv = VirtualEnv(venv)\n current_patch = \".\".join(str(v) for v in _venv.version_info[:3])\n\n if patch != current_patch:\n create = True\n\n self.create_venv(io, executable=python, force=create)\n\n return self.get(reload=True)\n\n envs = tomlkit.document()\n base_env_name = self.generate_env_name(self._poetry.package.name, str(cwd))\n if envs_file.exists():\n envs = envs_file.read()\n current_env = envs.get(base_env_name)\n if current_env is not None:\n current_minor = current_env[\"minor\"]\n current_patch = current_env[\"patch\"]\n\n if current_minor == minor and current_patch != patch:\n # We need to recreate\n create = True\n\n name = \"{}-py{}\".format(base_env_name, minor)\n venv = venv_path / name\n\n # Create if needed\n if not venv.exists() or venv.exists() and create:\n in_venv = os.environ.get(\"VIRTUAL_ENV\") is not None\n if in_venv or not venv.exists():\n create = True\n\n if venv.exists():\n # We need to check if the patch version is correct\n _venv = VirtualEnv(venv)\n current_patch = \".\".join(str(v) for v in _venv.version_info[:3])\n\n if patch 
!= current_patch:\n create = True\n\n self.create_venv(io, executable=python, force=create)\n\n # Activate\n envs[base_env_name] = {\"minor\": minor, \"patch\": patch}\n envs_file.write(envs)\n\n return self.get(reload=True)\n\n def deactivate(self, io): # type: (IO) -> None\n venv_path = self._poetry.config.get(\"virtualenvs.path\")\n if venv_path is None:\n venv_path = Path(CACHE_DIR) / \"virtualenvs\"\n else:\n venv_path = Path(venv_path)\n\n name = self._poetry.package.name\n name = self.generate_env_name(name, str(self._poetry.file.parent))\n\n envs_file = TOMLFile(venv_path / self.ENVS_FILE)\n if envs_file.exists():\n envs = envs_file.read()\n env = envs.get(name)\n if env is not None:\n io.write_line(\n \"Deactivating virtualenv: <comment>{}</comment>\".format(\n venv_path / (name + \"-py{}\".format(env[\"minor\"]))\n )\n )\n del envs[name]\n\n envs_file.write(envs)\n\n def get(self, reload=False): # type: (bool) -> Env\n if self._env is not None and not reload:\n return self._env\n\n python_minor = \".\".join([str(v) for v in sys.version_info[:2]])\n\n venv_path = self._poetry.config.get(\"virtualenvs.path\")\n if venv_path is None:\n venv_path = Path(CACHE_DIR) / \"virtualenvs\"\n else:\n venv_path = Path(venv_path)\n\n cwd = self._poetry.file.parent\n envs_file = TOMLFile(venv_path / self.ENVS_FILE)\n env = None\n base_env_name = self.generate_env_name(self._poetry.package.name, str(cwd))\n if envs_file.exists():\n envs = envs_file.read()\n env = envs.get(base_env_name)\n if env:\n python_minor = env[\"minor\"]\n\n # Check if we are inside a virtualenv or not\n # Conda sets CONDA_PREFIX in its envs, see\n # https://github.com/conda/conda/issues/2764\n env_prefix = os.environ.get(\"VIRTUAL_ENV\", os.environ.get(\"CONDA_PREFIX\"))\n conda_env_name = os.environ.get(\"CONDA_DEFAULT_ENV\")\n # It's probably not a good idea to pollute Conda's global \"base\" env, since\n # most users have it activated all the time.\n in_venv = env_prefix is not None and conda_env_name != \"base\"\n\n if not in_venv or env is not None:\n # Checking if a local virtualenv exists\n if self._poetry.config.get(\"virtualenvs.in-project\") is not False:\n if (cwd / \".venv\").exists() and (cwd / \".venv\").is_dir():\n venv = cwd / \".venv\"\n\n return VirtualEnv(venv)\n\n create_venv = self._poetry.config.get(\"virtualenvs.create\", True)\n\n if not create_venv:\n return SystemEnv(Path(sys.prefix))\n\n venv_path = self._poetry.config.get(\"virtualenvs.path\")\n if venv_path is None:\n venv_path = Path(CACHE_DIR) / \"virtualenvs\"\n else:\n venv_path = Path(venv_path)\n\n name = \"{}-py{}\".format(base_env_name, python_minor.strip())\n\n venv = venv_path / name\n\n if not venv.exists():\n return SystemEnv(Path(sys.prefix))\n\n return VirtualEnv(venv)\n\n if env_prefix is not None:\n prefix = Path(env_prefix)\n base_prefix = None\n else:\n prefix = Path(sys.prefix)\n base_prefix = self.get_base_prefix()\n\n return VirtualEnv(prefix, base_prefix)\n\n def list(self, name=None): # type: (Optional[str]) -> List[VirtualEnv]\n if name is None:\n name = self._poetry.package.name\n\n venv_name = self.generate_env_name(name, str(self._poetry.file.parent))\n\n venv_path = self._poetry.config.get(\"virtualenvs.path\")\n if venv_path is None:\n venv_path = Path(CACHE_DIR) / \"virtualenvs\"\n else:\n venv_path = Path(venv_path)\n\n env_list = [\n VirtualEnv(Path(p))\n for p in sorted(venv_path.glob(\"{}-py*\".format(venv_name)))\n ]\n\n venv = self._poetry.file.parent / \".venv\"\n if (\n 
self._poetry.config.get(\"virtualenvs.in-project\")\n and venv.exists()\n and venv.is_dir()\n ):\n env_list.insert(0, VirtualEnv(venv))\n return env_list\n\n def remove(self, python): # type: (str) -> Env\n venv_path = self._poetry.config.get(\"virtualenvs.path\")\n if venv_path is None:\n venv_path = Path(CACHE_DIR) / \"virtualenvs\"\n else:\n venv_path = Path(venv_path)\n\n cwd = self._poetry.file.parent\n envs_file = TOMLFile(venv_path / self.ENVS_FILE)\n base_env_name = self.generate_env_name(self._poetry.package.name, str(cwd))\n\n if python.startswith(base_env_name):\n venvs = self.list()\n for venv in venvs:\n if venv.path.name == python:\n # Exact virtualenv name\n if not envs_file.exists():\n self.remove_venv(venv.path)\n\n return venv\n\n venv_minor = \".\".join(str(v) for v in venv.version_info[:2])\n base_env_name = self.generate_env_name(cwd.name, str(cwd))\n envs = envs_file.read()\n\n current_env = envs.get(base_env_name)\n if not current_env:\n self.remove_venv(venv.path)\n\n return venv\n\n if current_env[\"minor\"] == venv_minor:\n del envs[base_env_name]\n envs_file.write(envs)\n\n self.remove_venv(venv.path)\n\n return venv\n\n raise ValueError(\n '<warning>Environment \"{}\" does not exist.</warning>'.format(python)\n )\n\n try:\n python_version = Version.parse(python)\n python = \"python{}\".format(python_version.major)\n if python_version.precision > 1:\n python += \".{}\".format(python_version.minor)\n except ValueError:\n # Executable in PATH or full executable path\n pass\n\n try:\n python_version = decode(\n subprocess.check_output(\n list_to_shell_command(\n [\n python,\n \"-c\",\n \"\\\"import sys; print('.'.join([str(s) for s in sys.version_info[:3]]))\\\"\",\n ]\n ),\n shell=True,\n )\n )\n except CalledProcessError as e:\n raise EnvCommandError(e)\n\n python_version = Version.parse(python_version.strip())\n minor = \"{}.{}\".format(python_version.major, python_version.minor)\n\n name = \"{}-py{}\".format(base_env_name, minor)\n venv = venv_path / name\n\n if not venv.exists():\n raise ValueError(\n '<warning>Environment \"{}\" does not exist.</warning>'.format(name)\n )\n\n if envs_file.exists():\n envs = envs_file.read()\n current_env = envs.get(base_env_name)\n if current_env is not None:\n current_minor = current_env[\"minor\"]\n\n if current_minor == minor:\n del envs[base_env_name]\n envs_file.write(envs)\n\n self.remove_venv(venv)\n\n return VirtualEnv(venv)\n\n def create_venv(\n self, io, name=None, executable=None, force=False\n ): # type: (IO, Optional[str], Optional[str], bool) -> Env\n if self._env is not None and not force:\n return self._env\n\n cwd = self._poetry.file.parent\n env = self.get(reload=True)\n\n if not env.is_sane():\n force = True\n\n if env.is_venv() and not force:\n # Already inside a virtualenv.\n return env\n\n create_venv = self._poetry.config.get(\"virtualenvs.create\")\n root_venv = self._poetry.config.get(\"virtualenvs.in-project\")\n\n venv_path = self._poetry.config.get(\"virtualenvs.path\")\n if root_venv:\n venv_path = cwd / \".venv\"\n elif venv_path is None:\n venv_path = Path(CACHE_DIR) / \"virtualenvs\"\n else:\n venv_path = Path(venv_path)\n\n if not name:\n name = self._poetry.package.name\n\n python_patch = \".\".join([str(v) for v in sys.version_info[:3]])\n python_minor = \".\".join([str(v) for v in sys.version_info[:2]])\n if executable:\n python_patch = decode(\n subprocess.check_output(\n list_to_shell_command(\n [\n executable,\n \"-c\",\n \"\\\"import sys; print('.'.join([str(s) for s in 
sys.version_info[:3]]))\\\"\",\n ]\n ),\n shell=True,\n ).strip()\n )\n python_minor = \".\".join(python_patch.split(\".\")[:2])\n\n supported_python = self._poetry.package.python_constraint\n if not supported_python.allows(Version.parse(python_patch)):\n # The currently activated or chosen Python version\n # is not compatible with the Python constraint specified\n # for the project.\n # If an executable has been specified, we stop there\n # and notify the user of the incompatibility.\n # Otherwise, we try to find a compatible Python version.\n if executable:\n raise NoCompatiblePythonVersionFound(\n self._poetry.package.python_versions, python_patch\n )\n\n io.write_line(\n \"<warning>The currently activated Python version {} \"\n \"is not supported by the project ({}).\\n\"\n \"Trying to find and use a compatible version.</warning> \".format(\n python_patch, self._poetry.package.python_versions\n )\n )\n\n for python_to_try in reversed(\n sorted(\n self._poetry.package.AVAILABLE_PYTHONS,\n key=lambda v: (v.startswith(\"3\"), -len(v), v),\n )\n ):\n if len(python_to_try) == 1:\n if not parse_constraint(\"^{}.0\".format(python_to_try)).allows_any(\n supported_python\n ):\n continue\n elif not supported_python.allows_all(\n parse_constraint(python_to_try + \".*\")\n ):\n continue\n\n python = \"python\" + python_to_try\n\n if io.is_debug():\n io.write_line(\"<debug>Trying {}</debug>\".format(python))\n\n try:\n python_patch = decode(\n subprocess.check_output(\n list_to_shell_command(\n [\n python,\n \"-c\",\n \"\\\"import sys; print('.'.join([str(s) for s in sys.version_info[:3]]))\\\"\",\n ]\n ),\n stderr=subprocess.STDOUT,\n shell=True,\n ).strip()\n )\n except CalledProcessError:\n continue\n\n if not python_patch:\n continue\n\n if supported_python.allows(Version.parse(python_patch)):\n io.write_line(\"Using <c1>{}</c1> ({})\".format(python, python_patch))\n executable = python\n python_minor = \".\".join(python_patch.split(\".\")[:2])\n break\n\n if not executable:\n raise NoCompatiblePythonVersionFound(\n self._poetry.package.python_versions\n )\n\n if root_venv:\n venv = venv_path\n else:\n name = self.generate_env_name(name, str(cwd))\n name = \"{}-py{}\".format(name, python_minor.strip())\n venv = venv_path / name\n\n if not venv.exists():\n if create_venv is False:\n io.write_line(\n \"<fg=black;bg=yellow>\"\n \"Skipping virtualenv creation, \"\n \"as specified in config file.\"\n \"</>\"\n )\n\n return SystemEnv(Path(sys.prefix))\n\n io.write_line(\n \"Creating virtualenv <c1>{}</> in {}\".format(name, str(venv_path))\n )\n\n self.build_venv(venv, executable=executable)\n else:\n if force:\n if not env.is_sane():\n io.write_line(\n \"<warning>The virtual environment found in {} seems to be broken.</warning>\".format(\n env.path\n )\n )\n io.write_line(\n \"Recreating virtualenv <c1>{}</> in {}\".format(name, str(venv))\n )\n self.remove_venv(venv)\n self.build_venv(venv, executable=executable)\n elif io.is_very_verbose():\n io.write_line(\"Virtualenv <c1>{}</> already exists.\".format(name))\n\n # venv detection:\n # stdlib venv may symlink sys.executable, so we can't use realpath.\n # but others can symlink *to* the venv Python,\n # so we can't just use sys.executable.\n # So we just check every item in the symlink tree (generally <= 3)\n p = os.path.normcase(sys.executable)\n paths = [p]\n while os.path.islink(p):\n p = os.path.normcase(os.path.join(os.path.dirname(p), os.readlink(p)))\n paths.append(p)\n\n p_venv = os.path.normcase(str(venv))\n if any(p.startswith(p_venv) for 
p in paths):\n # Running properly in the virtualenv, don't need to do anything\n return SystemEnv(Path(sys.prefix), self.get_base_prefix())\n\n return VirtualEnv(venv)\n\n @classmethod\n def build_venv(\n cls, path, executable=None\n ): # type: (Union[Path,str], Optional[Union[str, Path]]) -> virtualenv.run.session.Session\n if isinstance(executable, Path):\n executable = executable.resolve().as_posix()\n return virtualenv.cli_run(\n [\n \"--no-download\",\n \"--no-periodic-update\",\n \"--python\",\n executable or sys.executable,\n str(path),\n ]\n )\n\n @classmethod\n def remove_venv(cls, path): # type: (Union[Path,str]) -> None\n if isinstance(path, str):\n path = Path(path)\n assert path.is_dir()\n try:\n shutil.rmtree(str(path))\n return\n except OSError as e:\n # Continue only if e.errno == 16\n if e.errno != 16: # ERRNO 16: Device or resource busy\n raise e\n\n # Delete all files and folders but the toplevel one. This is because sometimes\n # the venv folder is mounted by the OS, such as in a docker volume. In such\n # cases, an attempt to delete the folder itself will result in an `OSError`.\n # See https://github.com/python-poetry/poetry/pull/2064\n for file_path in path.iterdir():\n if file_path.is_file() or file_path.is_symlink():\n file_path.unlink()\n elif file_path.is_dir():\n shutil.rmtree(str(file_path))\n\n def get_base_prefix(self): # type: () -> Path\n if hasattr(sys, \"real_prefix\"):\n return sys.real_prefix\n\n if hasattr(sys, \"base_prefix\"):\n return sys.base_prefix\n\n return sys.prefix\n\n @classmethod\n def generate_env_name(cls, name, cwd): # type: (str, str) -> str\n name = name.lower()\n sanitized_name = re.sub(r'[ $`!*@\"\\\\\\r\\n\\t]', \"_\", name)[:42]\n h = hashlib.sha256(encode(cwd)).digest()\n h = base64.urlsafe_b64encode(h).decode()[:8]\n\n return \"{}-{}\".format(sanitized_name, h)\n\n\nclass Env(object):\n \"\"\"\n An abstract Python environment.\n \"\"\"\n\n def __init__(self, path, base=None): # type: (Path, Optional[Path]) -> None\n self._is_windows = sys.platform == \"win32\"\n\n self._path = path\n bin_dir = \"bin\" if not self._is_windows else \"Scripts\"\n self._bin_dir = self._path / bin_dir\n\n self._base = base or path\n\n self._marker_env = None\n self._pip_version = None\n self._site_packages = None\n self._paths = None\n self._supported_tags = None\n self._purelib = None\n self._platlib = None\n\n @property\n def path(self): # type: () -> Path\n return self._path\n\n @property\n def base(self): # type: () -> Path\n return self._base\n\n @property\n def version_info(self): # type: () -> Tuple[int]\n return tuple(self.marker_env[\"version_info\"])\n\n @property\n def python_implementation(self): # type: () -> str\n return self.marker_env[\"platform_python_implementation\"]\n\n @property\n def python(self): # type: () -> str\n \"\"\"\n Path to current python executable\n \"\"\"\n return self._bin(\"python\")\n\n @property\n def marker_env(self):\n if self._marker_env is None:\n self._marker_env = self.get_marker_env()\n\n return self._marker_env\n\n @property\n def pip(self): # type: () -> str\n \"\"\"\n Path to current pip executable\n \"\"\"\n return self._bin(\"pip\")\n\n @property\n def platform(self): # type: () -> str\n return sys.platform\n\n @property\n def os(self): # type: () -> str\n return os.name\n\n @property\n def pip_version(self):\n if self._pip_version is None:\n self._pip_version = self.get_pip_version()\n\n return self._pip_version\n\n @property\n def site_packages(self): # type: () -> Path\n if self._site_packages is 
None:\n self._site_packages = self.purelib\n return self._site_packages\n\n @property\n def usersite(self): # type: () -> Optional[Path]\n if \"usersite\" in self.paths:\n return Path(self.paths[\"usersite\"])\n\n @property\n def purelib(self): # type: () -> Path\n if self._purelib is None:\n self._purelib = Path(self.paths[\"purelib\"])\n\n return self._purelib\n\n @property\n def platlib(self): # type: () -> Path\n if self._platlib is None:\n if \"platlib\" in self.paths:\n self._platlib = Path(self.paths[\"platlib\"])\n else:\n self._platlib = self.purelib\n\n return self._platlib\n\n def is_path_relative_to_lib(self, path): # type: (Path) -> bool\n for lib_path in [self.purelib, self.platlib]:\n try:\n path.relative_to(lib_path)\n return True\n except ValueError:\n pass\n\n return False\n\n @property\n def sys_path(self): # type: () -> List[str]\n raise NotImplementedError()\n\n @property\n def paths(self): # type: () -> Dict[str, str]\n if self._paths is None:\n self._paths = self.get_paths()\n\n return self._paths\n\n @property\n def supported_tags(self): # type: () -> List[Tag]\n if self._supported_tags is None:\n self._supported_tags = self.get_supported_tags()\n\n return self._supported_tags\n\n @classmethod\n def get_base_prefix(cls): # type: () -> Path\n if hasattr(sys, \"real_prefix\"):\n return sys.real_prefix\n\n if hasattr(sys, \"base_prefix\"):\n return sys.base_prefix\n\n return sys.prefix\n\n def get_version_info(self): # type: () -> Tuple[int]\n raise NotImplementedError()\n\n def get_python_implementation(self): # type: () -> str\n raise NotImplementedError()\n\n def get_marker_env(self): # type: () -> Dict[str, Any]\n raise NotImplementedError()\n\n def get_pip_command(self): # type: () -> List[str]\n raise NotImplementedError()\n\n def get_supported_tags(self): # type: () -> List[Tag]\n raise NotImplementedError()\n\n def get_pip_version(self): # type: () -> Version\n raise NotImplementedError()\n\n def get_paths(self): # type: () -> Dict[str, str]\n raise NotImplementedError()\n\n def is_valid_for_marker(self, marker): # type: (BaseMarker) -> bool\n return marker.validate(self.marker_env)\n\n def is_sane(self): # type: () -> bool\n \"\"\"\n Checks whether the current environment is sane or not.\n \"\"\"\n return True\n\n def run(self, bin, *args, **kwargs):\n bin = self._bin(bin)\n cmd = [bin] + list(args)\n return self._run(cmd, **kwargs)\n\n def run_pip(self, *args, **kwargs):\n pip = self.get_pip_command()\n cmd = pip + list(args)\n return self._run(cmd, **kwargs)\n\n def _run(self, cmd, **kwargs):\n \"\"\"\n Run a command inside the Python environment.\n \"\"\"\n call = kwargs.pop(\"call\", False)\n input_ = kwargs.pop(\"input_\", None)\n\n try:\n if self._is_windows:\n kwargs[\"shell\"] = True\n\n if kwargs.get(\"shell\", False):\n cmd = list_to_shell_command(cmd)\n\n if input_:\n output = subprocess.run(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n input=encode(input_),\n check=True,\n **kwargs\n ).stdout\n elif call:\n return subprocess.call(cmd, stderr=subprocess.STDOUT, **kwargs)\n else:\n output = subprocess.check_output(\n cmd, stderr=subprocess.STDOUT, **kwargs\n )\n except CalledProcessError as e:\n raise EnvCommandError(e, input=input_)\n\n return decode(output)\n\n def execute(self, bin, *args, **kwargs):\n bin = self._bin(bin)\n\n if not self._is_windows:\n args = [bin] + list(args)\n if \"env\" in kwargs:\n return os.execvpe(bin, args, kwargs[\"env\"])\n else:\n return os.execvp(bin, args)\n else:\n exe = subprocess.Popen([bin] + 
list(args), **kwargs)\n exe.communicate()\n return exe.returncode\n\n def is_venv(self): # type: () -> bool\n raise NotImplementedError()\n\n def _bin(self, bin): # type: (str) -> str\n \"\"\"\n Return path to the given executable.\n \"\"\"\n bin_path = (self._bin_dir / bin).with_suffix(\".exe\" if self._is_windows else \"\")\n if not bin_path.exists():\n # On Windows, some executables can be in the base path\n # This is especially true when installing Python with\n # the official installer, where python.exe will be at\n # the root of the env path.\n # This is an edge case and should not be encountered\n # in normal uses but this happens in the sonnet script\n # that creates a fake virtual environment pointing to\n # a base Python install.\n if self._is_windows:\n bin_path = (self._path / bin).with_suffix(\".exe\")\n if bin_path.exists():\n return str(bin_path)\n\n return bin\n\n return str(bin_path)\n\n def __eq__(self, other): # type: (Env) -> bool\n return other.__class__ == self.__class__ and other.path == self.path\n\n def __repr__(self):\n return '{}(\"{}\")'.format(self.__class__.__name__, self._path)\n\n\nclass SystemEnv(Env):\n \"\"\"\n A system (i.e. not a virtualenv) Python environment.\n \"\"\"\n\n @property\n def sys_path(self): # type: () -> List[str]\n return sys.path\n\n def get_version_info(self): # type: () -> Tuple[int]\n return sys.version_info\n\n def get_python_implementation(self): # type: () -> str\n return platform.python_implementation()\n\n def get_pip_command(self): # type: () -> List[str]\n # If we're not in a venv, assume the interpreter we're running on\n # has a pip and use that\n return [sys.executable, \"-m\", \"pip\"]\n\n def get_paths(self): # type: () -> Dict[str, str]\n # We can't use sysconfig.get_paths() because\n # on some distributions it does not return the proper paths\n # (those used by pip for instance). 
We go through distutils\n # to get the proper ones.\n import site\n\n from distutils.command.install import SCHEME_KEYS # noqa\n from distutils.core import Distribution\n\n d = Distribution()\n d.parse_config_files()\n obj = d.get_command_obj(\"install\", create=True)\n obj.finalize_options()\n\n paths = sysconfig.get_paths().copy()\n for key in SCHEME_KEYS:\n if key == \"headers\":\n # headers is not a path returned by sysconfig.get_paths()\n continue\n\n paths[key] = getattr(obj, \"install_{}\".format(key))\n\n if site.check_enableusersite() and hasattr(obj, \"install_usersite\"):\n paths[\"usersite\"] = getattr(obj, \"install_usersite\")\n\n return paths\n\n def get_supported_tags(self): # type: () -> List[Tag]\n return list(sys_tags())\n\n def get_marker_env(self): # type: () -> Dict[str, Any]\n if hasattr(sys, \"implementation\"):\n info = sys.implementation.version\n iver = \"{0.major}.{0.minor}.{0.micro}\".format(info)\n kind = info.releaselevel\n if kind != \"final\":\n iver += kind[0] + str(info.serial)\n\n implementation_name = sys.implementation.name\n else:\n iver = \"0\"\n implementation_name = \"\"\n\n return {\n \"implementation_name\": implementation_name,\n \"implementation_version\": iver,\n \"os_name\": os.name,\n \"platform_machine\": platform.machine(),\n \"platform_release\": platform.release(),\n \"platform_system\": platform.system(),\n \"platform_version\": platform.version(),\n \"python_full_version\": platform.python_version(),\n \"platform_python_implementation\": platform.python_implementation(),\n \"python_version\": \".\".join(\n v for v in platform.python_version().split(\".\")[:2]\n ),\n \"sys_platform\": sys.platform,\n \"version_info\": sys.version_info,\n # Extra information\n \"interpreter_name\": interpreter_name(),\n \"interpreter_version\": interpreter_version(),\n }\n\n def get_pip_version(self): # type: () -> Version\n from pip import __version__\n\n return Version.parse(__version__)\n\n def is_venv(self): # type: () -> bool\n return self._path != self._base\n\n\nclass VirtualEnv(Env):\n \"\"\"\n A virtual Python environment.\n \"\"\"\n\n def __init__(self, path, base=None): # type: (Path, Optional[Path]) -> None\n super(VirtualEnv, self).__init__(path, base)\n\n # If base is None, it probably means this is\n # a virtualenv created from VIRTUAL_ENV.\n # In this case we need to get sys.base_prefix\n # from inside the virtualenv.\n if base is None:\n self._base = Path(self.run(\"python\", \"-\", input_=GET_BASE_PREFIX).strip())\n\n @property\n def sys_path(self): # type: () -> List[str]\n output = self.run(\"python\", \"-\", input_=GET_SYS_PATH)\n\n return json.loads(output)\n\n def get_version_info(self): # type: () -> Tuple[int]\n output = self.run(\"python\", \"-\", input_=GET_PYTHON_VERSION)\n\n return tuple([int(s) for s in output.strip().split(\".\")])\n\n def get_python_implementation(self): # type: () -> str\n return self.marker_env[\"platform_python_implementation\"]\n\n def get_pip_command(self): # type: () -> List[str]\n # We're in a virtualenv that is known to be sane,\n # so assume that we have a functional pip\n return [self._bin(\"pip\")]\n\n def get_supported_tags(self): # type: () -> List[Tag]\n file_path = Path(packaging.tags.__file__)\n if file_path.suffix == \".pyc\":\n # Python 2\n file_path = file_path.with_suffix(\".py\")\n\n with file_path.open(encoding=\"utf-8\") as f:\n script = decode(f.read())\n\n script = script.replace(\n \"from ._typing import TYPE_CHECKING, cast\",\n \"TYPE_CHECKING = False\\ncast = lambda type_, 
value: value\",\n )\n script = script.replace(\n \"from ._typing import MYPY_CHECK_RUNNING, cast\",\n \"MYPY_CHECK_RUNNING = False\\ncast = lambda type_, value: value\",\n )\n\n script += textwrap.dedent(\n \"\"\"\n import json\n\n print(json.dumps([(t.interpreter, t.abi, t.platform) for t in sys_tags()]))\n \"\"\"\n )\n\n output = self.run(\"python\", \"-\", input_=script)\n\n return [Tag(*t) for t in json.loads(output)]\n\n def get_marker_env(self): # type: () -> Dict[str, Any]\n output = self.run(\"python\", \"-\", input_=GET_ENVIRONMENT_INFO)\n\n return json.loads(output)\n\n def get_pip_version(self): # type: () -> Version\n output = self.run_pip(\"--version\").strip()\n m = re.match(\"pip (.+?)(?: from .+)?$\", output)\n if not m:\n return Version.parse(\"0.0\")\n\n return Version.parse(m.group(1))\n\n def get_paths(self): # type: () -> Dict[str, str]\n output = self.run(\"python\", \"-\", input_=GET_PATHS)\n\n return json.loads(output)\n\n def is_venv(self): # type: () -> bool\n return True\n\n def is_sane(self):\n # A virtualenv is considered sane if both \"python\" and \"pip\" exist.\n return os.path.exists(self._bin(\"python\")) and os.path.exists(self._bin(\"pip\"))\n\n def _run(self, cmd, **kwargs):\n with self.temp_environ():\n os.environ[\"PATH\"] = self._updated_path()\n os.environ[\"VIRTUAL_ENV\"] = str(self._path)\n\n self.unset_env(\"PYTHONHOME\")\n self.unset_env(\"__PYVENV_LAUNCHER__\")\n\n return super(VirtualEnv, self)._run(cmd, **kwargs)\n\n def execute(self, bin, *args, **kwargs):\n with self.temp_environ():\n os.environ[\"PATH\"] = self._updated_path()\n os.environ[\"VIRTUAL_ENV\"] = str(self._path)\n\n self.unset_env(\"PYTHONHOME\")\n self.unset_env(\"__PYVENV_LAUNCHER__\")\n\n return super(VirtualEnv, self).execute(bin, *args, **kwargs)\n\n @contextmanager\n def temp_environ(self):\n environ = dict(os.environ)\n try:\n yield\n finally:\n os.environ.clear()\n os.environ.update(environ)\n\n def unset_env(self, key):\n if key in os.environ:\n del os.environ[key]\n\n def _updated_path(self):\n return os.pathsep.join([str(self._bin_dir), os.environ.get(\"PATH\", \"\")])\n\n\nclass NullEnv(SystemEnv):\n def __init__(self, path=None, base=None, execute=False):\n if path is None:\n path = Path(sys.prefix)\n\n super(NullEnv, self).__init__(path, base=base)\n\n self._execute = execute\n self.executed = []\n\n def get_pip_command(self): # type: () -> List[str]\n return [self._bin(\"python\"), \"-m\", \"pip\"]\n\n def _run(self, cmd, **kwargs):\n self.executed.append(cmd)\n\n if self._execute:\n return super(NullEnv, self)._run(cmd, **kwargs)\n\n def execute(self, bin, *args, **kwargs):\n self.executed.append([bin] + list(args))\n\n if self._execute:\n return super(NullEnv, self).execute(bin, *args, **kwargs)\n\n def _bin(self, bin):\n return bin\n\n\nclass MockEnv(NullEnv):\n def __init__(\n self,\n version_info=(3, 7, 0),\n python_implementation=\"CPython\",\n platform=\"darwin\",\n os_name=\"posix\",\n is_venv=False,\n pip_version=\"19.1\",\n sys_path=None,\n marker_env=None,\n supported_tags=None,\n **kwargs\n ):\n super(MockEnv, self).__init__(**kwargs)\n\n self._version_info = version_info\n self._python_implementation = python_implementation\n self._platform = platform\n self._os_name = os_name\n self._is_venv = is_venv\n self._pip_version = Version.parse(pip_version)\n self._sys_path = sys_path\n self._mock_marker_env = marker_env\n self._supported_tags = supported_tags\n\n @property\n def platform(self): # type: () -> str\n return self._platform\n\n @property\n def 
os(self): # type: () -> str\n return self._os_name\n\n @property\n def pip_version(self):\n return self._pip_version\n\n @property\n def sys_path(self):\n if self._sys_path is None:\n return super(MockEnv, self).sys_path\n\n return self._sys_path\n\n def get_marker_env(self): # type: () -> Dict[str, Any]\n if self._mock_marker_env is not None:\n return self._mock_marker_env\n\n marker_env = super(MockEnv, self).get_marker_env()\n marker_env[\"python_implementation\"] = self._python_implementation\n marker_env[\"version_info\"] = self._version_info\n marker_env[\"python_version\"] = \".\".join(str(v) for v in self._version_info[:2])\n marker_env[\"python_full_version\"] = \".\".join(str(v) for v in self._version_info)\n marker_env[\"sys_platform\"] = self._platform\n marker_env[\"interpreter_name\"] = self._python_implementation.lower()\n marker_env[\"interpreter_version\"] = \"cp\" + \"\".join(\n str(v) for v in self._version_info[:2]\n )\n\n return marker_env\n\n def is_venv(self): # type: () -> bool\n return self._is_venv\n",
"path": "poetry/utils/env.py"
}
] | diff --git a/poetry/utils/env.py b/poetry/utils/env.py
index 0a027bd668a..ccd855b5c60 100644
--- a/poetry/utils/env.py
+++ b/poetry/utils/env.py
@@ -1212,7 +1212,7 @@ def unset_env(self, key):
del os.environ[key]
def _updated_path(self):
- return os.pathsep.join([str(self._bin_dir), os.environ["PATH"]])
+ return os.pathsep.join([str(self._bin_dir), os.environ.get("PATH", "")])
class NullEnv(SystemEnv):
|
python-poetry__poetry-3146 | Poetry fails with KeyError if the PATH environment variable is not present
- [x] I am on the [latest](https://github.com/python-poetry/poetry/releases/latest) Poetry version.
- [x] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate.
- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).
- **OS version and name**: Ubuntu 18.04
- **Poetry version**: 1.0.10
## Issue
When running in CI using a Docker container, the `PATH` environment variable is not set, which causes Poetry to fail. Unfortunately I don't see any traceback. Here's a snippet showing the issue:
```
root@5d1e49d5433c:~/src# unset PATH
root@5d1e49d5433c:~/src# /usr/local/bin/poetry run -vvv pip install pip
[KeyError]
'PATH'
```
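
For context, the failing lookup is Poetry's virtualenv `_updated_path` helper, which prepends the environment's `bin` directory by indexing `os.environ["PATH"]` directly, so a missing variable raises `KeyError`. Below is a minimal, self-contained sketch of the failure and of the defensive alternative (`os.environ.get` with an empty default, the same guard applied in the patch); the `bin_dir` value is only an illustrative placeholder, not a real Poetry path:

```python
import os

# Simulate a stripped-down CI container where PATH is not exported.
os.environ.pop("PATH", None)

# Hypothetical virtualenv bin directory, purely for illustration.
bin_dir = "/root/.cache/pypoetry/virtualenvs/demo-py3.8/bin"

# Direct indexing mirrors the failing lookup and raises KeyError: 'PATH'.
try:
    os.pathsep.join([bin_dir, os.environ["PATH"]])
except KeyError as exc:
    print("KeyError:", exc)

# Falling back to an empty default degrades gracefully instead.
print(os.pathsep.join([bin_dir, os.environ.get("PATH", "")]))
```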
| [
{
"content": "import base64\nimport hashlib\nimport json\nimport os\nimport platform\nimport re\nimport shutil\nimport sys\nimport sysconfig\nimport textwrap\n\nfrom contextlib import contextmanager\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import Tuple\nfrom typing import Union\n\nimport packaging.tags\nimport tomlkit\nimport virtualenv\n\nfrom clikit.api.io import IO\nfrom packaging.tags import Tag\nfrom packaging.tags import interpreter_name\nfrom packaging.tags import interpreter_version\nfrom packaging.tags import sys_tags\n\nfrom poetry.core.semver import parse_constraint\nfrom poetry.core.semver.version import Version\nfrom poetry.core.toml.file import TOMLFile\nfrom poetry.core.version.markers import BaseMarker\nfrom poetry.locations import CACHE_DIR\nfrom poetry.poetry import Poetry\nfrom poetry.utils._compat import CalledProcessError\nfrom poetry.utils._compat import Path\nfrom poetry.utils._compat import decode\nfrom poetry.utils._compat import encode\nfrom poetry.utils._compat import list_to_shell_command\nfrom poetry.utils._compat import subprocess\n\n\nGET_ENVIRONMENT_INFO = \"\"\"\\\nimport json\nimport os\nimport platform\nimport sys\nimport sysconfig\n\nINTERPRETER_SHORT_NAMES = {\n \"python\": \"py\",\n \"cpython\": \"cp\",\n \"pypy\": \"pp\",\n \"ironpython\": \"ip\",\n \"jython\": \"jy\",\n}\n\n\ndef interpreter_version():\n version = sysconfig.get_config_var(\"interpreter_version\")\n if version:\n version = str(version)\n else:\n version = _version_nodot(sys.version_info[:2])\n\n return version\n\n\ndef _version_nodot(version):\n # type: (PythonVersion) -> str\n if any(v >= 10 for v in version):\n sep = \"_\"\n else:\n sep = \"\"\n\n return sep.join(map(str, version))\n\n\nif hasattr(sys, \"implementation\"):\n info = sys.implementation.version\n iver = \"{0.major}.{0.minor}.{0.micro}\".format(info)\n kind = info.releaselevel\n if kind != \"final\":\n iver += kind[0] + str(info.serial)\n\n implementation_name = sys.implementation.name\nelse:\n iver = \"0\"\n implementation_name = platform.python_implementation().lower()\n\nenv = {\n \"implementation_name\": implementation_name,\n \"implementation_version\": iver,\n \"os_name\": os.name,\n \"platform_machine\": platform.machine(),\n \"platform_release\": platform.release(),\n \"platform_system\": platform.system(),\n \"platform_version\": platform.version(),\n \"python_full_version\": platform.python_version(),\n \"platform_python_implementation\": platform.python_implementation(),\n \"python_version\": platform.python_version()[:3],\n \"sys_platform\": sys.platform,\n \"version_info\": tuple(sys.version_info),\n # Extra information\n \"interpreter_name\": INTERPRETER_SHORT_NAMES.get(implementation_name, implementation_name),\n \"interpreter_version\": interpreter_version(),\n}\n\nprint(json.dumps(env))\n\"\"\"\n\n\nGET_BASE_PREFIX = \"\"\"\\\nimport sys\n\nif hasattr(sys, \"real_prefix\"):\n print(sys.real_prefix)\nelif hasattr(sys, \"base_prefix\"):\n print(sys.base_prefix)\nelse:\n print(sys.prefix)\n\"\"\"\n\nGET_PYTHON_VERSION = \"\"\"\\\nimport sys\n\nprint('.'.join([str(s) for s in sys.version_info[:3]]))\n\"\"\"\n\nGET_SYS_PATH = \"\"\"\\\nimport json\nimport sys\n\nprint(json.dumps(sys.path))\n\"\"\"\n\nGET_PATHS = \"\"\"\\\nimport json\nimport sysconfig\n\nprint(json.dumps(sysconfig.get_paths()))\n\"\"\"\n\n\nclass EnvError(Exception):\n\n pass\n\n\nclass EnvCommandError(EnvError):\n def __init__(self, e, input=None): # type: 
(CalledProcessError) -> None\n self.e = e\n\n message = \"Command {} errored with the following return code {}, and output: \\n{}\".format(\n e.cmd, e.returncode, decode(e.output)\n )\n if input:\n message += \"input was : {}\".format(input)\n super(EnvCommandError, self).__init__(message)\n\n\nclass NoCompatiblePythonVersionFound(EnvError):\n def __init__(self, expected, given=None):\n if given:\n message = (\n \"The specified Python version ({}) \"\n \"is not supported by the project ({}).\\n\"\n \"Please choose a compatible version \"\n \"or loosen the python constraint specified \"\n \"in the pyproject.toml file.\".format(given, expected)\n )\n else:\n message = (\n \"Poetry was unable to find a compatible version. \"\n \"If you have one, you can explicitly use it \"\n 'via the \"env use\" command.'\n )\n\n super(NoCompatiblePythonVersionFound, self).__init__(message)\n\n\nclass EnvManager(object):\n \"\"\"\n Environments manager\n \"\"\"\n\n _env = None\n\n ENVS_FILE = \"envs.toml\"\n\n def __init__(self, poetry): # type: (Poetry) -> None\n self._poetry = poetry\n\n def activate(self, python, io): # type: (str, IO) -> Env\n venv_path = self._poetry.config.get(\"virtualenvs.path\")\n if venv_path is None:\n venv_path = Path(CACHE_DIR) / \"virtualenvs\"\n else:\n venv_path = Path(venv_path)\n\n cwd = self._poetry.file.parent\n\n envs_file = TOMLFile(venv_path / self.ENVS_FILE)\n\n try:\n python_version = Version.parse(python)\n python = \"python{}\".format(python_version.major)\n if python_version.precision > 1:\n python += \".{}\".format(python_version.minor)\n except ValueError:\n # Executable in PATH or full executable path\n pass\n\n try:\n python_version = decode(\n subprocess.check_output(\n list_to_shell_command(\n [\n python,\n \"-c\",\n \"\\\"import sys; print('.'.join([str(s) for s in sys.version_info[:3]]))\\\"\",\n ]\n ),\n shell=True,\n )\n )\n except CalledProcessError as e:\n raise EnvCommandError(e)\n\n python_version = Version.parse(python_version.strip())\n minor = \"{}.{}\".format(python_version.major, python_version.minor)\n patch = python_version.text\n\n create = False\n is_root_venv = self._poetry.config.get(\"virtualenvs.in-project\")\n # If we are required to create the virtual environment in the root folder,\n # create or recreate it if needed\n if is_root_venv:\n create = False\n venv = self._poetry.file.parent / \".venv\"\n if venv.exists():\n # We need to check if the patch version is correct\n _venv = VirtualEnv(venv)\n current_patch = \".\".join(str(v) for v in _venv.version_info[:3])\n\n if patch != current_patch:\n create = True\n\n self.create_venv(io, executable=python, force=create)\n\n return self.get(reload=True)\n\n envs = tomlkit.document()\n base_env_name = self.generate_env_name(self._poetry.package.name, str(cwd))\n if envs_file.exists():\n envs = envs_file.read()\n current_env = envs.get(base_env_name)\n if current_env is not None:\n current_minor = current_env[\"minor\"]\n current_patch = current_env[\"patch\"]\n\n if current_minor == minor and current_patch != patch:\n # We need to recreate\n create = True\n\n name = \"{}-py{}\".format(base_env_name, minor)\n venv = venv_path / name\n\n # Create if needed\n if not venv.exists() or venv.exists() and create:\n in_venv = os.environ.get(\"VIRTUAL_ENV\") is not None\n if in_venv or not venv.exists():\n create = True\n\n if venv.exists():\n # We need to check if the patch version is correct\n _venv = VirtualEnv(venv)\n current_patch = \".\".join(str(v) for v in _venv.version_info[:3])\n\n if patch 
!= current_patch:\n create = True\n\n self.create_venv(io, executable=python, force=create)\n\n # Activate\n envs[base_env_name] = {\"minor\": minor, \"patch\": patch}\n envs_file.write(envs)\n\n return self.get(reload=True)\n\n def deactivate(self, io): # type: (IO) -> None\n venv_path = self._poetry.config.get(\"virtualenvs.path\")\n if venv_path is None:\n venv_path = Path(CACHE_DIR) / \"virtualenvs\"\n else:\n venv_path = Path(venv_path)\n\n name = self._poetry.package.name\n name = self.generate_env_name(name, str(self._poetry.file.parent))\n\n envs_file = TOMLFile(venv_path / self.ENVS_FILE)\n if envs_file.exists():\n envs = envs_file.read()\n env = envs.get(name)\n if env is not None:\n io.write_line(\n \"Deactivating virtualenv: <comment>{}</comment>\".format(\n venv_path / (name + \"-py{}\".format(env[\"minor\"]))\n )\n )\n del envs[name]\n\n envs_file.write(envs)\n\n def get(self, reload=False): # type: (bool) -> Env\n if self._env is not None and not reload:\n return self._env\n\n python_minor = \".\".join([str(v) for v in sys.version_info[:2]])\n\n venv_path = self._poetry.config.get(\"virtualenvs.path\")\n if venv_path is None:\n venv_path = Path(CACHE_DIR) / \"virtualenvs\"\n else:\n venv_path = Path(venv_path)\n\n cwd = self._poetry.file.parent\n envs_file = TOMLFile(venv_path / self.ENVS_FILE)\n env = None\n base_env_name = self.generate_env_name(self._poetry.package.name, str(cwd))\n if envs_file.exists():\n envs = envs_file.read()\n env = envs.get(base_env_name)\n if env:\n python_minor = env[\"minor\"]\n\n # Check if we are inside a virtualenv or not\n # Conda sets CONDA_PREFIX in its envs, see\n # https://github.com/conda/conda/issues/2764\n env_prefix = os.environ.get(\"VIRTUAL_ENV\", os.environ.get(\"CONDA_PREFIX\"))\n conda_env_name = os.environ.get(\"CONDA_DEFAULT_ENV\")\n # It's probably not a good idea to pollute Conda's global \"base\" env, since\n # most users have it activated all the time.\n in_venv = env_prefix is not None and conda_env_name != \"base\"\n\n if not in_venv or env is not None:\n # Checking if a local virtualenv exists\n if self._poetry.config.get(\"virtualenvs.in-project\") is not False:\n if (cwd / \".venv\").exists() and (cwd / \".venv\").is_dir():\n venv = cwd / \".venv\"\n\n return VirtualEnv(venv)\n\n create_venv = self._poetry.config.get(\"virtualenvs.create\", True)\n\n if not create_venv:\n return SystemEnv(Path(sys.prefix))\n\n venv_path = self._poetry.config.get(\"virtualenvs.path\")\n if venv_path is None:\n venv_path = Path(CACHE_DIR) / \"virtualenvs\"\n else:\n venv_path = Path(venv_path)\n\n name = \"{}-py{}\".format(base_env_name, python_minor.strip())\n\n venv = venv_path / name\n\n if not venv.exists():\n return SystemEnv(Path(sys.prefix))\n\n return VirtualEnv(venv)\n\n if env_prefix is not None:\n prefix = Path(env_prefix)\n base_prefix = None\n else:\n prefix = Path(sys.prefix)\n base_prefix = self.get_base_prefix()\n\n return VirtualEnv(prefix, base_prefix)\n\n def list(self, name=None): # type: (Optional[str]) -> List[VirtualEnv]\n if name is None:\n name = self._poetry.package.name\n\n venv_name = self.generate_env_name(name, str(self._poetry.file.parent))\n\n venv_path = self._poetry.config.get(\"virtualenvs.path\")\n if venv_path is None:\n venv_path = Path(CACHE_DIR) / \"virtualenvs\"\n else:\n venv_path = Path(venv_path)\n\n env_list = [\n VirtualEnv(Path(p))\n for p in sorted(venv_path.glob(\"{}-py*\".format(venv_name)))\n ]\n\n venv = self._poetry.file.parent / \".venv\"\n if (\n 
self._poetry.config.get(\"virtualenvs.in-project\")\n and venv.exists()\n and venv.is_dir()\n ):\n env_list.insert(0, VirtualEnv(venv))\n return env_list\n\n def remove(self, python): # type: (str) -> Env\n venv_path = self._poetry.config.get(\"virtualenvs.path\")\n if venv_path is None:\n venv_path = Path(CACHE_DIR) / \"virtualenvs\"\n else:\n venv_path = Path(venv_path)\n\n cwd = self._poetry.file.parent\n envs_file = TOMLFile(venv_path / self.ENVS_FILE)\n base_env_name = self.generate_env_name(self._poetry.package.name, str(cwd))\n\n if python.startswith(base_env_name):\n venvs = self.list()\n for venv in venvs:\n if venv.path.name == python:\n # Exact virtualenv name\n if not envs_file.exists():\n self.remove_venv(venv.path)\n\n return venv\n\n venv_minor = \".\".join(str(v) for v in venv.version_info[:2])\n base_env_name = self.generate_env_name(cwd.name, str(cwd))\n envs = envs_file.read()\n\n current_env = envs.get(base_env_name)\n if not current_env:\n self.remove_venv(venv.path)\n\n return venv\n\n if current_env[\"minor\"] == venv_minor:\n del envs[base_env_name]\n envs_file.write(envs)\n\n self.remove_venv(venv.path)\n\n return venv\n\n raise ValueError(\n '<warning>Environment \"{}\" does not exist.</warning>'.format(python)\n )\n\n try:\n python_version = Version.parse(python)\n python = \"python{}\".format(python_version.major)\n if python_version.precision > 1:\n python += \".{}\".format(python_version.minor)\n except ValueError:\n # Executable in PATH or full executable path\n pass\n\n try:\n python_version = decode(\n subprocess.check_output(\n list_to_shell_command(\n [\n python,\n \"-c\",\n \"\\\"import sys; print('.'.join([str(s) for s in sys.version_info[:3]]))\\\"\",\n ]\n ),\n shell=True,\n )\n )\n except CalledProcessError as e:\n raise EnvCommandError(e)\n\n python_version = Version.parse(python_version.strip())\n minor = \"{}.{}\".format(python_version.major, python_version.minor)\n\n name = \"{}-py{}\".format(base_env_name, minor)\n venv = venv_path / name\n\n if not venv.exists():\n raise ValueError(\n '<warning>Environment \"{}\" does not exist.</warning>'.format(name)\n )\n\n if envs_file.exists():\n envs = envs_file.read()\n current_env = envs.get(base_env_name)\n if current_env is not None:\n current_minor = current_env[\"minor\"]\n\n if current_minor == minor:\n del envs[base_env_name]\n envs_file.write(envs)\n\n self.remove_venv(venv)\n\n return VirtualEnv(venv)\n\n def create_venv(\n self, io, name=None, executable=None, force=False\n ): # type: (IO, Optional[str], Optional[str], bool) -> Env\n if self._env is not None and not force:\n return self._env\n\n cwd = self._poetry.file.parent\n env = self.get(reload=True)\n\n if not env.is_sane():\n force = True\n\n if env.is_venv() and not force:\n # Already inside a virtualenv.\n return env\n\n create_venv = self._poetry.config.get(\"virtualenvs.create\")\n root_venv = self._poetry.config.get(\"virtualenvs.in-project\")\n\n venv_path = self._poetry.config.get(\"virtualenvs.path\")\n if root_venv:\n venv_path = cwd / \".venv\"\n elif venv_path is None:\n venv_path = Path(CACHE_DIR) / \"virtualenvs\"\n else:\n venv_path = Path(venv_path)\n\n if not name:\n name = self._poetry.package.name\n\n python_patch = \".\".join([str(v) for v in sys.version_info[:3]])\n python_minor = \".\".join([str(v) for v in sys.version_info[:2]])\n if executable:\n python_patch = decode(\n subprocess.check_output(\n list_to_shell_command(\n [\n executable,\n \"-c\",\n \"\\\"import sys; print('.'.join([str(s) for s in 
sys.version_info[:3]]))\\\"\",\n ]\n ),\n shell=True,\n ).strip()\n )\n python_minor = \".\".join(python_patch.split(\".\")[:2])\n\n supported_python = self._poetry.package.python_constraint\n if not supported_python.allows(Version.parse(python_patch)):\n # The currently activated or chosen Python version\n # is not compatible with the Python constraint specified\n # for the project.\n # If an executable has been specified, we stop there\n # and notify the user of the incompatibility.\n # Otherwise, we try to find a compatible Python version.\n if executable:\n raise NoCompatiblePythonVersionFound(\n self._poetry.package.python_versions, python_patch\n )\n\n io.write_line(\n \"<warning>The currently activated Python version {} \"\n \"is not supported by the project ({}).\\n\"\n \"Trying to find and use a compatible version.</warning> \".format(\n python_patch, self._poetry.package.python_versions\n )\n )\n\n for python_to_try in reversed(\n sorted(\n self._poetry.package.AVAILABLE_PYTHONS,\n key=lambda v: (v.startswith(\"3\"), -len(v), v),\n )\n ):\n if len(python_to_try) == 1:\n if not parse_constraint(\"^{}.0\".format(python_to_try)).allows_any(\n supported_python\n ):\n continue\n elif not supported_python.allows_all(\n parse_constraint(python_to_try + \".*\")\n ):\n continue\n\n python = \"python\" + python_to_try\n\n if io.is_debug():\n io.write_line(\"<debug>Trying {}</debug>\".format(python))\n\n try:\n python_patch = decode(\n subprocess.check_output(\n list_to_shell_command(\n [\n python,\n \"-c\",\n \"\\\"import sys; print('.'.join([str(s) for s in sys.version_info[:3]]))\\\"\",\n ]\n ),\n stderr=subprocess.STDOUT,\n shell=True,\n ).strip()\n )\n except CalledProcessError:\n continue\n\n if not python_patch:\n continue\n\n if supported_python.allows(Version.parse(python_patch)):\n io.write_line(\"Using <c1>{}</c1> ({})\".format(python, python_patch))\n executable = python\n python_minor = \".\".join(python_patch.split(\".\")[:2])\n break\n\n if not executable:\n raise NoCompatiblePythonVersionFound(\n self._poetry.package.python_versions\n )\n\n if root_venv:\n venv = venv_path\n else:\n name = self.generate_env_name(name, str(cwd))\n name = \"{}-py{}\".format(name, python_minor.strip())\n venv = venv_path / name\n\n if not venv.exists():\n if create_venv is False:\n io.write_line(\n \"<fg=black;bg=yellow>\"\n \"Skipping virtualenv creation, \"\n \"as specified in config file.\"\n \"</>\"\n )\n\n return SystemEnv(Path(sys.prefix))\n\n io.write_line(\n \"Creating virtualenv <c1>{}</> in {}\".format(name, str(venv_path))\n )\n\n self.build_venv(venv, executable=executable)\n else:\n if force:\n if not env.is_sane():\n io.write_line(\n \"<warning>The virtual environment found in {} seems to be broken.</warning>\".format(\n env.path\n )\n )\n io.write_line(\n \"Recreating virtualenv <c1>{}</> in {}\".format(name, str(venv))\n )\n self.remove_venv(venv)\n self.build_venv(venv, executable=executable)\n elif io.is_very_verbose():\n io.write_line(\"Virtualenv <c1>{}</> already exists.\".format(name))\n\n # venv detection:\n # stdlib venv may symlink sys.executable, so we can't use realpath.\n # but others can symlink *to* the venv Python,\n # so we can't just use sys.executable.\n # So we just check every item in the symlink tree (generally <= 3)\n p = os.path.normcase(sys.executable)\n paths = [p]\n while os.path.islink(p):\n p = os.path.normcase(os.path.join(os.path.dirname(p), os.readlink(p)))\n paths.append(p)\n\n p_venv = os.path.normcase(str(venv))\n if any(p.startswith(p_venv) for 
p in paths):\n # Running properly in the virtualenv, don't need to do anything\n return SystemEnv(Path(sys.prefix), self.get_base_prefix())\n\n return VirtualEnv(venv)\n\n @classmethod\n def build_venv(\n cls, path, executable=None\n ): # type: (Union[Path,str], Optional[Union[str, Path]]) -> virtualenv.run.session.Session\n if isinstance(executable, Path):\n executable = executable.resolve().as_posix()\n return virtualenv.cli_run(\n [\n \"--no-download\",\n \"--no-periodic-update\",\n \"--python\",\n executable or sys.executable,\n str(path),\n ]\n )\n\n @classmethod\n def remove_venv(cls, path): # type: (Union[Path,str]) -> None\n if isinstance(path, str):\n path = Path(path)\n assert path.is_dir()\n try:\n shutil.rmtree(str(path))\n return\n except OSError as e:\n # Continue only if e.errno == 16\n if e.errno != 16: # ERRNO 16: Device or resource busy\n raise e\n\n # Delete all files and folders but the toplevel one. This is because sometimes\n # the venv folder is mounted by the OS, such as in a docker volume. In such\n # cases, an attempt to delete the folder itself will result in an `OSError`.\n # See https://github.com/python-poetry/poetry/pull/2064\n for file_path in path.iterdir():\n if file_path.is_file() or file_path.is_symlink():\n file_path.unlink()\n elif file_path.is_dir():\n shutil.rmtree(str(file_path))\n\n def get_base_prefix(self): # type: () -> Path\n if hasattr(sys, \"real_prefix\"):\n return sys.real_prefix\n\n if hasattr(sys, \"base_prefix\"):\n return sys.base_prefix\n\n return sys.prefix\n\n @classmethod\n def generate_env_name(cls, name, cwd): # type: (str, str) -> str\n name = name.lower()\n sanitized_name = re.sub(r'[ $`!*@\"\\\\\\r\\n\\t]', \"_\", name)[:42]\n h = hashlib.sha256(encode(cwd)).digest()\n h = base64.urlsafe_b64encode(h).decode()[:8]\n\n return \"{}-{}\".format(sanitized_name, h)\n\n\nclass Env(object):\n \"\"\"\n An abstract Python environment.\n \"\"\"\n\n def __init__(self, path, base=None): # type: (Path, Optional[Path]) -> None\n self._is_windows = sys.platform == \"win32\"\n\n self._path = path\n bin_dir = \"bin\" if not self._is_windows else \"Scripts\"\n self._bin_dir = self._path / bin_dir\n\n self._base = base or path\n\n self._marker_env = None\n self._pip_version = None\n self._site_packages = None\n self._paths = None\n self._supported_tags = None\n self._purelib = None\n self._platlib = None\n\n @property\n def path(self): # type: () -> Path\n return self._path\n\n @property\n def base(self): # type: () -> Path\n return self._base\n\n @property\n def version_info(self): # type: () -> Tuple[int]\n return tuple(self.marker_env[\"version_info\"])\n\n @property\n def python_implementation(self): # type: () -> str\n return self.marker_env[\"platform_python_implementation\"]\n\n @property\n def python(self): # type: () -> str\n \"\"\"\n Path to current python executable\n \"\"\"\n return self._bin(\"python\")\n\n @property\n def marker_env(self):\n if self._marker_env is None:\n self._marker_env = self.get_marker_env()\n\n return self._marker_env\n\n @property\n def pip(self): # type: () -> str\n \"\"\"\n Path to current pip executable\n \"\"\"\n return self._bin(\"pip\")\n\n @property\n def platform(self): # type: () -> str\n return sys.platform\n\n @property\n def os(self): # type: () -> str\n return os.name\n\n @property\n def pip_version(self):\n if self._pip_version is None:\n self._pip_version = self.get_pip_version()\n\n return self._pip_version\n\n @property\n def site_packages(self): # type: () -> Path\n if self._site_packages is 
None:\n self._site_packages = self.purelib\n return self._site_packages\n\n @property\n def usersite(self): # type: () -> Optional[Path]\n if \"usersite\" in self.paths:\n return Path(self.paths[\"usersite\"])\n\n @property\n def purelib(self): # type: () -> Path\n if self._purelib is None:\n self._purelib = Path(self.paths[\"purelib\"])\n\n return self._purelib\n\n @property\n def platlib(self): # type: () -> Path\n if self._platlib is None:\n if \"platlib\" in self.paths:\n self._platlib = Path(self.paths[\"platlib\"])\n else:\n self._platlib = self.purelib\n\n return self._platlib\n\n def is_path_relative_to_lib(self, path): # type: (Path) -> bool\n for lib_path in [self.purelib, self.platlib]:\n try:\n path.relative_to(lib_path)\n return True\n except ValueError:\n pass\n\n return False\n\n @property\n def sys_path(self): # type: () -> List[str]\n raise NotImplementedError()\n\n @property\n def paths(self): # type: () -> Dict[str, str]\n if self._paths is None:\n self._paths = self.get_paths()\n\n return self._paths\n\n @property\n def supported_tags(self): # type: () -> List[Tag]\n if self._supported_tags is None:\n self._supported_tags = self.get_supported_tags()\n\n return self._supported_tags\n\n @classmethod\n def get_base_prefix(cls): # type: () -> Path\n if hasattr(sys, \"real_prefix\"):\n return sys.real_prefix\n\n if hasattr(sys, \"base_prefix\"):\n return sys.base_prefix\n\n return sys.prefix\n\n def get_version_info(self): # type: () -> Tuple[int]\n raise NotImplementedError()\n\n def get_python_implementation(self): # type: () -> str\n raise NotImplementedError()\n\n def get_marker_env(self): # type: () -> Dict[str, Any]\n raise NotImplementedError()\n\n def get_pip_command(self): # type: () -> List[str]\n raise NotImplementedError()\n\n def get_supported_tags(self): # type: () -> List[Tag]\n raise NotImplementedError()\n\n def get_pip_version(self): # type: () -> Version\n raise NotImplementedError()\n\n def get_paths(self): # type: () -> Dict[str, str]\n raise NotImplementedError()\n\n def is_valid_for_marker(self, marker): # type: (BaseMarker) -> bool\n return marker.validate(self.marker_env)\n\n def is_sane(self): # type: () -> bool\n \"\"\"\n Checks whether the current environment is sane or not.\n \"\"\"\n return True\n\n def run(self, bin, *args, **kwargs):\n bin = self._bin(bin)\n cmd = [bin] + list(args)\n return self._run(cmd, **kwargs)\n\n def run_pip(self, *args, **kwargs):\n pip = self.get_pip_command()\n cmd = pip + list(args)\n return self._run(cmd, **kwargs)\n\n def _run(self, cmd, **kwargs):\n \"\"\"\n Run a command inside the Python environment.\n \"\"\"\n call = kwargs.pop(\"call\", False)\n input_ = kwargs.pop(\"input_\", None)\n\n try:\n if self._is_windows:\n kwargs[\"shell\"] = True\n\n if kwargs.get(\"shell\", False):\n cmd = list_to_shell_command(cmd)\n\n if input_:\n output = subprocess.run(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n input=encode(input_),\n check=True,\n **kwargs\n ).stdout\n elif call:\n return subprocess.call(cmd, stderr=subprocess.STDOUT, **kwargs)\n else:\n output = subprocess.check_output(\n cmd, stderr=subprocess.STDOUT, **kwargs\n )\n except CalledProcessError as e:\n raise EnvCommandError(e, input=input_)\n\n return decode(output)\n\n def execute(self, bin, *args, **kwargs):\n bin = self._bin(bin)\n\n if not self._is_windows:\n args = [bin] + list(args)\n if \"env\" in kwargs:\n return os.execvpe(bin, args, kwargs[\"env\"])\n else:\n return os.execvp(bin, args)\n else:\n exe = subprocess.Popen([bin] + 
list(args), **kwargs)\n exe.communicate()\n return exe.returncode\n\n def is_venv(self): # type: () -> bool\n raise NotImplementedError()\n\n def _bin(self, bin): # type: (str) -> str\n \"\"\"\n Return path to the given executable.\n \"\"\"\n bin_path = (self._bin_dir / bin).with_suffix(\".exe\" if self._is_windows else \"\")\n if not bin_path.exists():\n # On Windows, some executables can be in the base path\n # This is especially true when installing Python with\n # the official installer, where python.exe will be at\n # the root of the env path.\n # This is an edge case and should not be encountered\n # in normal uses but this happens in the sonnet script\n # that creates a fake virtual environment pointing to\n # a base Python install.\n if self._is_windows:\n bin_path = (self._path / bin).with_suffix(\".exe\")\n if bin_path.exists():\n return str(bin_path)\n\n return bin\n\n return str(bin_path)\n\n def __eq__(self, other): # type: (Env) -> bool\n return other.__class__ == self.__class__ and other.path == self.path\n\n def __repr__(self):\n return '{}(\"{}\")'.format(self.__class__.__name__, self._path)\n\n\nclass SystemEnv(Env):\n \"\"\"\n A system (i.e. not a virtualenv) Python environment.\n \"\"\"\n\n @property\n def sys_path(self): # type: () -> List[str]\n return sys.path\n\n def get_version_info(self): # type: () -> Tuple[int]\n return sys.version_info\n\n def get_python_implementation(self): # type: () -> str\n return platform.python_implementation()\n\n def get_pip_command(self): # type: () -> List[str]\n # If we're not in a venv, assume the interpreter we're running on\n # has a pip and use that\n return [sys.executable, \"-m\", \"pip\"]\n\n def get_paths(self): # type: () -> Dict[str, str]\n # We can't use sysconfig.get_paths() because\n # on some distributions it does not return the proper paths\n # (those used by pip for instance). 
We go through distutils\n # to get the proper ones.\n import site\n\n from distutils.command.install import SCHEME_KEYS # noqa\n from distutils.core import Distribution\n\n d = Distribution()\n d.parse_config_files()\n obj = d.get_command_obj(\"install\", create=True)\n obj.finalize_options()\n\n paths = sysconfig.get_paths().copy()\n for key in SCHEME_KEYS:\n if key == \"headers\":\n # headers is not a path returned by sysconfig.get_paths()\n continue\n\n paths[key] = getattr(obj, \"install_{}\".format(key))\n\n if site.check_enableusersite() and hasattr(obj, \"install_usersite\"):\n paths[\"usersite\"] = getattr(obj, \"install_usersite\")\n\n return paths\n\n def get_supported_tags(self): # type: () -> List[Tag]\n return list(sys_tags())\n\n def get_marker_env(self): # type: () -> Dict[str, Any]\n if hasattr(sys, \"implementation\"):\n info = sys.implementation.version\n iver = \"{0.major}.{0.minor}.{0.micro}\".format(info)\n kind = info.releaselevel\n if kind != \"final\":\n iver += kind[0] + str(info.serial)\n\n implementation_name = sys.implementation.name\n else:\n iver = \"0\"\n implementation_name = \"\"\n\n return {\n \"implementation_name\": implementation_name,\n \"implementation_version\": iver,\n \"os_name\": os.name,\n \"platform_machine\": platform.machine(),\n \"platform_release\": platform.release(),\n \"platform_system\": platform.system(),\n \"platform_version\": platform.version(),\n \"python_full_version\": platform.python_version(),\n \"platform_python_implementation\": platform.python_implementation(),\n \"python_version\": \".\".join(\n v for v in platform.python_version().split(\".\")[:2]\n ),\n \"sys_platform\": sys.platform,\n \"version_info\": sys.version_info,\n # Extra information\n \"interpreter_name\": interpreter_name(),\n \"interpreter_version\": interpreter_version(),\n }\n\n def get_pip_version(self): # type: () -> Version\n from pip import __version__\n\n return Version.parse(__version__)\n\n def is_venv(self): # type: () -> bool\n return self._path != self._base\n\n\nclass VirtualEnv(Env):\n \"\"\"\n A virtual Python environment.\n \"\"\"\n\n def __init__(self, path, base=None): # type: (Path, Optional[Path]) -> None\n super(VirtualEnv, self).__init__(path, base)\n\n # If base is None, it probably means this is\n # a virtualenv created from VIRTUAL_ENV.\n # In this case we need to get sys.base_prefix\n # from inside the virtualenv.\n if base is None:\n self._base = Path(self.run(\"python\", \"-\", input_=GET_BASE_PREFIX).strip())\n\n @property\n def sys_path(self): # type: () -> List[str]\n output = self.run(\"python\", \"-\", input_=GET_SYS_PATH)\n\n return json.loads(output)\n\n def get_version_info(self): # type: () -> Tuple[int]\n output = self.run(\"python\", \"-\", input_=GET_PYTHON_VERSION)\n\n return tuple([int(s) for s in output.strip().split(\".\")])\n\n def get_python_implementation(self): # type: () -> str\n return self.marker_env[\"platform_python_implementation\"]\n\n def get_pip_command(self): # type: () -> List[str]\n # We're in a virtualenv that is known to be sane,\n # so assume that we have a functional pip\n return [self._bin(\"pip\")]\n\n def get_supported_tags(self): # type: () -> List[Tag]\n file_path = Path(packaging.tags.__file__)\n if file_path.suffix == \".pyc\":\n # Python 2\n file_path = file_path.with_suffix(\".py\")\n\n with file_path.open(encoding=\"utf-8\") as f:\n script = decode(f.read())\n\n script = script.replace(\n \"from ._typing import TYPE_CHECKING, cast\",\n \"TYPE_CHECKING = False\\ncast = lambda type_, 
value: value\",\n )\n script = script.replace(\n \"from ._typing import MYPY_CHECK_RUNNING, cast\",\n \"MYPY_CHECK_RUNNING = False\\ncast = lambda type_, value: value\",\n )\n\n script += textwrap.dedent(\n \"\"\"\n import json\n\n print(json.dumps([(t.interpreter, t.abi, t.platform) for t in sys_tags()]))\n \"\"\"\n )\n\n output = self.run(\"python\", \"-\", input_=script)\n\n return [Tag(*t) for t in json.loads(output)]\n\n def get_marker_env(self): # type: () -> Dict[str, Any]\n output = self.run(\"python\", \"-\", input_=GET_ENVIRONMENT_INFO)\n\n return json.loads(output)\n\n def get_pip_version(self): # type: () -> Version\n output = self.run_pip(\"--version\").strip()\n m = re.match(\"pip (.+?)(?: from .+)?$\", output)\n if not m:\n return Version.parse(\"0.0\")\n\n return Version.parse(m.group(1))\n\n def get_paths(self): # type: () -> Dict[str, str]\n output = self.run(\"python\", \"-\", input_=GET_PATHS)\n\n return json.loads(output)\n\n def is_venv(self): # type: () -> bool\n return True\n\n def is_sane(self):\n # A virtualenv is considered sane if both \"python\" and \"pip\" exist.\n return os.path.exists(self._bin(\"python\")) and os.path.exists(self._bin(\"pip\"))\n\n def _run(self, cmd, **kwargs):\n with self.temp_environ():\n os.environ[\"PATH\"] = self._updated_path()\n os.environ[\"VIRTUAL_ENV\"] = str(self._path)\n\n self.unset_env(\"PYTHONHOME\")\n self.unset_env(\"__PYVENV_LAUNCHER__\")\n\n return super(VirtualEnv, self)._run(cmd, **kwargs)\n\n def execute(self, bin, *args, **kwargs):\n with self.temp_environ():\n os.environ[\"PATH\"] = self._updated_path()\n os.environ[\"VIRTUAL_ENV\"] = str(self._path)\n\n self.unset_env(\"PYTHONHOME\")\n self.unset_env(\"__PYVENV_LAUNCHER__\")\n\n return super(VirtualEnv, self).execute(bin, *args, **kwargs)\n\n @contextmanager\n def temp_environ(self):\n environ = dict(os.environ)\n try:\n yield\n finally:\n os.environ.clear()\n os.environ.update(environ)\n\n def unset_env(self, key):\n if key in os.environ:\n del os.environ[key]\n\n def _updated_path(self):\n return os.pathsep.join([str(self._bin_dir), os.environ[\"PATH\"]])\n\n\nclass NullEnv(SystemEnv):\n def __init__(self, path=None, base=None, execute=False):\n if path is None:\n path = Path(sys.prefix)\n\n super(NullEnv, self).__init__(path, base=base)\n\n self._execute = execute\n self.executed = []\n\n def get_pip_command(self): # type: () -> List[str]\n return [self._bin(\"python\"), \"-m\", \"pip\"]\n\n def _run(self, cmd, **kwargs):\n self.executed.append(cmd)\n\n if self._execute:\n return super(NullEnv, self)._run(cmd, **kwargs)\n\n def execute(self, bin, *args, **kwargs):\n self.executed.append([bin] + list(args))\n\n if self._execute:\n return super(NullEnv, self).execute(bin, *args, **kwargs)\n\n def _bin(self, bin):\n return bin\n\n\nclass MockEnv(NullEnv):\n def __init__(\n self,\n version_info=(3, 7, 0),\n python_implementation=\"CPython\",\n platform=\"darwin\",\n os_name=\"posix\",\n is_venv=False,\n pip_version=\"19.1\",\n sys_path=None,\n marker_env=None,\n supported_tags=None,\n **kwargs\n ):\n super(MockEnv, self).__init__(**kwargs)\n\n self._version_info = version_info\n self._python_implementation = python_implementation\n self._platform = platform\n self._os_name = os_name\n self._is_venv = is_venv\n self._pip_version = Version.parse(pip_version)\n self._sys_path = sys_path\n self._mock_marker_env = marker_env\n self._supported_tags = supported_tags\n\n @property\n def platform(self): # type: () -> str\n return self._platform\n\n @property\n def os(self): 
# type: () -> str\n return self._os_name\n\n @property\n def pip_version(self):\n return self._pip_version\n\n @property\n def sys_path(self):\n if self._sys_path is None:\n return super(MockEnv, self).sys_path\n\n return self._sys_path\n\n def get_marker_env(self): # type: () -> Dict[str, Any]\n if self._mock_marker_env is not None:\n return self._mock_marker_env\n\n marker_env = super(MockEnv, self).get_marker_env()\n marker_env[\"python_implementation\"] = self._python_implementation\n marker_env[\"version_info\"] = self._version_info\n marker_env[\"python_version\"] = \".\".join(str(v) for v in self._version_info[:2])\n marker_env[\"python_full_version\"] = \".\".join(str(v) for v in self._version_info)\n marker_env[\"sys_platform\"] = self._platform\n marker_env[\"interpreter_name\"] = self._python_implementation.lower()\n marker_env[\"interpreter_version\"] = \"cp\" + \"\".join(\n str(v) for v in self._version_info[:2]\n )\n\n return marker_env\n\n def is_venv(self): # type: () -> bool\n return self._is_venv\n",
"path": "poetry/utils/env.py"
}
] | [
{
"content": "import base64\nimport hashlib\nimport json\nimport os\nimport platform\nimport re\nimport shutil\nimport sys\nimport sysconfig\nimport textwrap\n\nfrom contextlib import contextmanager\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import Tuple\nfrom typing import Union\n\nimport packaging.tags\nimport tomlkit\nimport virtualenv\n\nfrom clikit.api.io import IO\nfrom packaging.tags import Tag\nfrom packaging.tags import interpreter_name\nfrom packaging.tags import interpreter_version\nfrom packaging.tags import sys_tags\n\nfrom poetry.core.semver import parse_constraint\nfrom poetry.core.semver.version import Version\nfrom poetry.core.toml.file import TOMLFile\nfrom poetry.core.version.markers import BaseMarker\nfrom poetry.locations import CACHE_DIR\nfrom poetry.poetry import Poetry\nfrom poetry.utils._compat import CalledProcessError\nfrom poetry.utils._compat import Path\nfrom poetry.utils._compat import decode\nfrom poetry.utils._compat import encode\nfrom poetry.utils._compat import list_to_shell_command\nfrom poetry.utils._compat import subprocess\n\n\nGET_ENVIRONMENT_INFO = \"\"\"\\\nimport json\nimport os\nimport platform\nimport sys\nimport sysconfig\n\nINTERPRETER_SHORT_NAMES = {\n \"python\": \"py\",\n \"cpython\": \"cp\",\n \"pypy\": \"pp\",\n \"ironpython\": \"ip\",\n \"jython\": \"jy\",\n}\n\n\ndef interpreter_version():\n version = sysconfig.get_config_var(\"interpreter_version\")\n if version:\n version = str(version)\n else:\n version = _version_nodot(sys.version_info[:2])\n\n return version\n\n\ndef _version_nodot(version):\n # type: (PythonVersion) -> str\n if any(v >= 10 for v in version):\n sep = \"_\"\n else:\n sep = \"\"\n\n return sep.join(map(str, version))\n\n\nif hasattr(sys, \"implementation\"):\n info = sys.implementation.version\n iver = \"{0.major}.{0.minor}.{0.micro}\".format(info)\n kind = info.releaselevel\n if kind != \"final\":\n iver += kind[0] + str(info.serial)\n\n implementation_name = sys.implementation.name\nelse:\n iver = \"0\"\n implementation_name = platform.python_implementation().lower()\n\nenv = {\n \"implementation_name\": implementation_name,\n \"implementation_version\": iver,\n \"os_name\": os.name,\n \"platform_machine\": platform.machine(),\n \"platform_release\": platform.release(),\n \"platform_system\": platform.system(),\n \"platform_version\": platform.version(),\n \"python_full_version\": platform.python_version(),\n \"platform_python_implementation\": platform.python_implementation(),\n \"python_version\": platform.python_version()[:3],\n \"sys_platform\": sys.platform,\n \"version_info\": tuple(sys.version_info),\n # Extra information\n \"interpreter_name\": INTERPRETER_SHORT_NAMES.get(implementation_name, implementation_name),\n \"interpreter_version\": interpreter_version(),\n}\n\nprint(json.dumps(env))\n\"\"\"\n\n\nGET_BASE_PREFIX = \"\"\"\\\nimport sys\n\nif hasattr(sys, \"real_prefix\"):\n print(sys.real_prefix)\nelif hasattr(sys, \"base_prefix\"):\n print(sys.base_prefix)\nelse:\n print(sys.prefix)\n\"\"\"\n\nGET_PYTHON_VERSION = \"\"\"\\\nimport sys\n\nprint('.'.join([str(s) for s in sys.version_info[:3]]))\n\"\"\"\n\nGET_SYS_PATH = \"\"\"\\\nimport json\nimport sys\n\nprint(json.dumps(sys.path))\n\"\"\"\n\nGET_PATHS = \"\"\"\\\nimport json\nimport sysconfig\n\nprint(json.dumps(sysconfig.get_paths()))\n\"\"\"\n\n\nclass EnvError(Exception):\n\n pass\n\n\nclass EnvCommandError(EnvError):\n def __init__(self, e, input=None): # type: 
(CalledProcessError) -> None\n self.e = e\n\n message = \"Command {} errored with the following return code {}, and output: \\n{}\".format(\n e.cmd, e.returncode, decode(e.output)\n )\n if input:\n message += \"input was : {}\".format(input)\n super(EnvCommandError, self).__init__(message)\n\n\nclass NoCompatiblePythonVersionFound(EnvError):\n def __init__(self, expected, given=None):\n if given:\n message = (\n \"The specified Python version ({}) \"\n \"is not supported by the project ({}).\\n\"\n \"Please choose a compatible version \"\n \"or loosen the python constraint specified \"\n \"in the pyproject.toml file.\".format(given, expected)\n )\n else:\n message = (\n \"Poetry was unable to find a compatible version. \"\n \"If you have one, you can explicitly use it \"\n 'via the \"env use\" command.'\n )\n\n super(NoCompatiblePythonVersionFound, self).__init__(message)\n\n\nclass EnvManager(object):\n \"\"\"\n Environments manager\n \"\"\"\n\n _env = None\n\n ENVS_FILE = \"envs.toml\"\n\n def __init__(self, poetry): # type: (Poetry) -> None\n self._poetry = poetry\n\n def activate(self, python, io): # type: (str, IO) -> Env\n venv_path = self._poetry.config.get(\"virtualenvs.path\")\n if venv_path is None:\n venv_path = Path(CACHE_DIR) / \"virtualenvs\"\n else:\n venv_path = Path(venv_path)\n\n cwd = self._poetry.file.parent\n\n envs_file = TOMLFile(venv_path / self.ENVS_FILE)\n\n try:\n python_version = Version.parse(python)\n python = \"python{}\".format(python_version.major)\n if python_version.precision > 1:\n python += \".{}\".format(python_version.minor)\n except ValueError:\n # Executable in PATH or full executable path\n pass\n\n try:\n python_version = decode(\n subprocess.check_output(\n list_to_shell_command(\n [\n python,\n \"-c\",\n \"\\\"import sys; print('.'.join([str(s) for s in sys.version_info[:3]]))\\\"\",\n ]\n ),\n shell=True,\n )\n )\n except CalledProcessError as e:\n raise EnvCommandError(e)\n\n python_version = Version.parse(python_version.strip())\n minor = \"{}.{}\".format(python_version.major, python_version.minor)\n patch = python_version.text\n\n create = False\n is_root_venv = self._poetry.config.get(\"virtualenvs.in-project\")\n # If we are required to create the virtual environment in the root folder,\n # create or recreate it if needed\n if is_root_venv:\n create = False\n venv = self._poetry.file.parent / \".venv\"\n if venv.exists():\n # We need to check if the patch version is correct\n _venv = VirtualEnv(venv)\n current_patch = \".\".join(str(v) for v in _venv.version_info[:3])\n\n if patch != current_patch:\n create = True\n\n self.create_venv(io, executable=python, force=create)\n\n return self.get(reload=True)\n\n envs = tomlkit.document()\n base_env_name = self.generate_env_name(self._poetry.package.name, str(cwd))\n if envs_file.exists():\n envs = envs_file.read()\n current_env = envs.get(base_env_name)\n if current_env is not None:\n current_minor = current_env[\"minor\"]\n current_patch = current_env[\"patch\"]\n\n if current_minor == minor and current_patch != patch:\n # We need to recreate\n create = True\n\n name = \"{}-py{}\".format(base_env_name, minor)\n venv = venv_path / name\n\n # Create if needed\n if not venv.exists() or venv.exists() and create:\n in_venv = os.environ.get(\"VIRTUAL_ENV\") is not None\n if in_venv or not venv.exists():\n create = True\n\n if venv.exists():\n # We need to check if the patch version is correct\n _venv = VirtualEnv(venv)\n current_patch = \".\".join(str(v) for v in _venv.version_info[:3])\n\n if patch 
!= current_patch:\n create = True\n\n self.create_venv(io, executable=python, force=create)\n\n # Activate\n envs[base_env_name] = {\"minor\": minor, \"patch\": patch}\n envs_file.write(envs)\n\n return self.get(reload=True)\n\n def deactivate(self, io): # type: (IO) -> None\n venv_path = self._poetry.config.get(\"virtualenvs.path\")\n if venv_path is None:\n venv_path = Path(CACHE_DIR) / \"virtualenvs\"\n else:\n venv_path = Path(venv_path)\n\n name = self._poetry.package.name\n name = self.generate_env_name(name, str(self._poetry.file.parent))\n\n envs_file = TOMLFile(venv_path / self.ENVS_FILE)\n if envs_file.exists():\n envs = envs_file.read()\n env = envs.get(name)\n if env is not None:\n io.write_line(\n \"Deactivating virtualenv: <comment>{}</comment>\".format(\n venv_path / (name + \"-py{}\".format(env[\"minor\"]))\n )\n )\n del envs[name]\n\n envs_file.write(envs)\n\n def get(self, reload=False): # type: (bool) -> Env\n if self._env is not None and not reload:\n return self._env\n\n python_minor = \".\".join([str(v) for v in sys.version_info[:2]])\n\n venv_path = self._poetry.config.get(\"virtualenvs.path\")\n if venv_path is None:\n venv_path = Path(CACHE_DIR) / \"virtualenvs\"\n else:\n venv_path = Path(venv_path)\n\n cwd = self._poetry.file.parent\n envs_file = TOMLFile(venv_path / self.ENVS_FILE)\n env = None\n base_env_name = self.generate_env_name(self._poetry.package.name, str(cwd))\n if envs_file.exists():\n envs = envs_file.read()\n env = envs.get(base_env_name)\n if env:\n python_minor = env[\"minor\"]\n\n # Check if we are inside a virtualenv or not\n # Conda sets CONDA_PREFIX in its envs, see\n # https://github.com/conda/conda/issues/2764\n env_prefix = os.environ.get(\"VIRTUAL_ENV\", os.environ.get(\"CONDA_PREFIX\"))\n conda_env_name = os.environ.get(\"CONDA_DEFAULT_ENV\")\n # It's probably not a good idea to pollute Conda's global \"base\" env, since\n # most users have it activated all the time.\n in_venv = env_prefix is not None and conda_env_name != \"base\"\n\n if not in_venv or env is not None:\n # Checking if a local virtualenv exists\n if self._poetry.config.get(\"virtualenvs.in-project\") is not False:\n if (cwd / \".venv\").exists() and (cwd / \".venv\").is_dir():\n venv = cwd / \".venv\"\n\n return VirtualEnv(venv)\n\n create_venv = self._poetry.config.get(\"virtualenvs.create\", True)\n\n if not create_venv:\n return SystemEnv(Path(sys.prefix))\n\n venv_path = self._poetry.config.get(\"virtualenvs.path\")\n if venv_path is None:\n venv_path = Path(CACHE_DIR) / \"virtualenvs\"\n else:\n venv_path = Path(venv_path)\n\n name = \"{}-py{}\".format(base_env_name, python_minor.strip())\n\n venv = venv_path / name\n\n if not venv.exists():\n return SystemEnv(Path(sys.prefix))\n\n return VirtualEnv(venv)\n\n if env_prefix is not None:\n prefix = Path(env_prefix)\n base_prefix = None\n else:\n prefix = Path(sys.prefix)\n base_prefix = self.get_base_prefix()\n\n return VirtualEnv(prefix, base_prefix)\n\n def list(self, name=None): # type: (Optional[str]) -> List[VirtualEnv]\n if name is None:\n name = self._poetry.package.name\n\n venv_name = self.generate_env_name(name, str(self._poetry.file.parent))\n\n venv_path = self._poetry.config.get(\"virtualenvs.path\")\n if venv_path is None:\n venv_path = Path(CACHE_DIR) / \"virtualenvs\"\n else:\n venv_path = Path(venv_path)\n\n env_list = [\n VirtualEnv(Path(p))\n for p in sorted(venv_path.glob(\"{}-py*\".format(venv_name)))\n ]\n\n venv = self._poetry.file.parent / \".venv\"\n if (\n 
self._poetry.config.get(\"virtualenvs.in-project\")\n and venv.exists()\n and venv.is_dir()\n ):\n env_list.insert(0, VirtualEnv(venv))\n return env_list\n\n def remove(self, python): # type: (str) -> Env\n venv_path = self._poetry.config.get(\"virtualenvs.path\")\n if venv_path is None:\n venv_path = Path(CACHE_DIR) / \"virtualenvs\"\n else:\n venv_path = Path(venv_path)\n\n cwd = self._poetry.file.parent\n envs_file = TOMLFile(venv_path / self.ENVS_FILE)\n base_env_name = self.generate_env_name(self._poetry.package.name, str(cwd))\n\n if python.startswith(base_env_name):\n venvs = self.list()\n for venv in venvs:\n if venv.path.name == python:\n # Exact virtualenv name\n if not envs_file.exists():\n self.remove_venv(venv.path)\n\n return venv\n\n venv_minor = \".\".join(str(v) for v in venv.version_info[:2])\n base_env_name = self.generate_env_name(cwd.name, str(cwd))\n envs = envs_file.read()\n\n current_env = envs.get(base_env_name)\n if not current_env:\n self.remove_venv(venv.path)\n\n return venv\n\n if current_env[\"minor\"] == venv_minor:\n del envs[base_env_name]\n envs_file.write(envs)\n\n self.remove_venv(venv.path)\n\n return venv\n\n raise ValueError(\n '<warning>Environment \"{}\" does not exist.</warning>'.format(python)\n )\n\n try:\n python_version = Version.parse(python)\n python = \"python{}\".format(python_version.major)\n if python_version.precision > 1:\n python += \".{}\".format(python_version.minor)\n except ValueError:\n # Executable in PATH or full executable path\n pass\n\n try:\n python_version = decode(\n subprocess.check_output(\n list_to_shell_command(\n [\n python,\n \"-c\",\n \"\\\"import sys; print('.'.join([str(s) for s in sys.version_info[:3]]))\\\"\",\n ]\n ),\n shell=True,\n )\n )\n except CalledProcessError as e:\n raise EnvCommandError(e)\n\n python_version = Version.parse(python_version.strip())\n minor = \"{}.{}\".format(python_version.major, python_version.minor)\n\n name = \"{}-py{}\".format(base_env_name, minor)\n venv = venv_path / name\n\n if not venv.exists():\n raise ValueError(\n '<warning>Environment \"{}\" does not exist.</warning>'.format(name)\n )\n\n if envs_file.exists():\n envs = envs_file.read()\n current_env = envs.get(base_env_name)\n if current_env is not None:\n current_minor = current_env[\"minor\"]\n\n if current_minor == minor:\n del envs[base_env_name]\n envs_file.write(envs)\n\n self.remove_venv(venv)\n\n return VirtualEnv(venv)\n\n def create_venv(\n self, io, name=None, executable=None, force=False\n ): # type: (IO, Optional[str], Optional[str], bool) -> Env\n if self._env is not None and not force:\n return self._env\n\n cwd = self._poetry.file.parent\n env = self.get(reload=True)\n\n if not env.is_sane():\n force = True\n\n if env.is_venv() and not force:\n # Already inside a virtualenv.\n return env\n\n create_venv = self._poetry.config.get(\"virtualenvs.create\")\n root_venv = self._poetry.config.get(\"virtualenvs.in-project\")\n\n venv_path = self._poetry.config.get(\"virtualenvs.path\")\n if root_venv:\n venv_path = cwd / \".venv\"\n elif venv_path is None:\n venv_path = Path(CACHE_DIR) / \"virtualenvs\"\n else:\n venv_path = Path(venv_path)\n\n if not name:\n name = self._poetry.package.name\n\n python_patch = \".\".join([str(v) for v in sys.version_info[:3]])\n python_minor = \".\".join([str(v) for v in sys.version_info[:2]])\n if executable:\n python_patch = decode(\n subprocess.check_output(\n list_to_shell_command(\n [\n executable,\n \"-c\",\n \"\\\"import sys; print('.'.join([str(s) for s in 
sys.version_info[:3]]))\\\"\",\n ]\n ),\n shell=True,\n ).strip()\n )\n python_minor = \".\".join(python_patch.split(\".\")[:2])\n\n supported_python = self._poetry.package.python_constraint\n if not supported_python.allows(Version.parse(python_patch)):\n # The currently activated or chosen Python version\n # is not compatible with the Python constraint specified\n # for the project.\n # If an executable has been specified, we stop there\n # and notify the user of the incompatibility.\n # Otherwise, we try to find a compatible Python version.\n if executable:\n raise NoCompatiblePythonVersionFound(\n self._poetry.package.python_versions, python_patch\n )\n\n io.write_line(\n \"<warning>The currently activated Python version {} \"\n \"is not supported by the project ({}).\\n\"\n \"Trying to find and use a compatible version.</warning> \".format(\n python_patch, self._poetry.package.python_versions\n )\n )\n\n for python_to_try in reversed(\n sorted(\n self._poetry.package.AVAILABLE_PYTHONS,\n key=lambda v: (v.startswith(\"3\"), -len(v), v),\n )\n ):\n if len(python_to_try) == 1:\n if not parse_constraint(\"^{}.0\".format(python_to_try)).allows_any(\n supported_python\n ):\n continue\n elif not supported_python.allows_all(\n parse_constraint(python_to_try + \".*\")\n ):\n continue\n\n python = \"python\" + python_to_try\n\n if io.is_debug():\n io.write_line(\"<debug>Trying {}</debug>\".format(python))\n\n try:\n python_patch = decode(\n subprocess.check_output(\n list_to_shell_command(\n [\n python,\n \"-c\",\n \"\\\"import sys; print('.'.join([str(s) for s in sys.version_info[:3]]))\\\"\",\n ]\n ),\n stderr=subprocess.STDOUT,\n shell=True,\n ).strip()\n )\n except CalledProcessError:\n continue\n\n if not python_patch:\n continue\n\n if supported_python.allows(Version.parse(python_patch)):\n io.write_line(\"Using <c1>{}</c1> ({})\".format(python, python_patch))\n executable = python\n python_minor = \".\".join(python_patch.split(\".\")[:2])\n break\n\n if not executable:\n raise NoCompatiblePythonVersionFound(\n self._poetry.package.python_versions\n )\n\n if root_venv:\n venv = venv_path\n else:\n name = self.generate_env_name(name, str(cwd))\n name = \"{}-py{}\".format(name, python_minor.strip())\n venv = venv_path / name\n\n if not venv.exists():\n if create_venv is False:\n io.write_line(\n \"<fg=black;bg=yellow>\"\n \"Skipping virtualenv creation, \"\n \"as specified in config file.\"\n \"</>\"\n )\n\n return SystemEnv(Path(sys.prefix))\n\n io.write_line(\n \"Creating virtualenv <c1>{}</> in {}\".format(name, str(venv_path))\n )\n\n self.build_venv(venv, executable=executable)\n else:\n if force:\n if not env.is_sane():\n io.write_line(\n \"<warning>The virtual environment found in {} seems to be broken.</warning>\".format(\n env.path\n )\n )\n io.write_line(\n \"Recreating virtualenv <c1>{}</> in {}\".format(name, str(venv))\n )\n self.remove_venv(venv)\n self.build_venv(venv, executable=executable)\n elif io.is_very_verbose():\n io.write_line(\"Virtualenv <c1>{}</> already exists.\".format(name))\n\n # venv detection:\n # stdlib venv may symlink sys.executable, so we can't use realpath.\n # but others can symlink *to* the venv Python,\n # so we can't just use sys.executable.\n # So we just check every item in the symlink tree (generally <= 3)\n p = os.path.normcase(sys.executable)\n paths = [p]\n while os.path.islink(p):\n p = os.path.normcase(os.path.join(os.path.dirname(p), os.readlink(p)))\n paths.append(p)\n\n p_venv = os.path.normcase(str(venv))\n if any(p.startswith(p_venv) for 
p in paths):\n # Running properly in the virtualenv, don't need to do anything\n return SystemEnv(Path(sys.prefix), self.get_base_prefix())\n\n return VirtualEnv(venv)\n\n @classmethod\n def build_venv(\n cls, path, executable=None\n ): # type: (Union[Path,str], Optional[Union[str, Path]]) -> virtualenv.run.session.Session\n if isinstance(executable, Path):\n executable = executable.resolve().as_posix()\n return virtualenv.cli_run(\n [\n \"--no-download\",\n \"--no-periodic-update\",\n \"--python\",\n executable or sys.executable,\n str(path),\n ]\n )\n\n @classmethod\n def remove_venv(cls, path): # type: (Union[Path,str]) -> None\n if isinstance(path, str):\n path = Path(path)\n assert path.is_dir()\n try:\n shutil.rmtree(str(path))\n return\n except OSError as e:\n # Continue only if e.errno == 16\n if e.errno != 16: # ERRNO 16: Device or resource busy\n raise e\n\n # Delete all files and folders but the toplevel one. This is because sometimes\n # the venv folder is mounted by the OS, such as in a docker volume. In such\n # cases, an attempt to delete the folder itself will result in an `OSError`.\n # See https://github.com/python-poetry/poetry/pull/2064\n for file_path in path.iterdir():\n if file_path.is_file() or file_path.is_symlink():\n file_path.unlink()\n elif file_path.is_dir():\n shutil.rmtree(str(file_path))\n\n def get_base_prefix(self): # type: () -> Path\n if hasattr(sys, \"real_prefix\"):\n return sys.real_prefix\n\n if hasattr(sys, \"base_prefix\"):\n return sys.base_prefix\n\n return sys.prefix\n\n @classmethod\n def generate_env_name(cls, name, cwd): # type: (str, str) -> str\n name = name.lower()\n sanitized_name = re.sub(r'[ $`!*@\"\\\\\\r\\n\\t]', \"_\", name)[:42]\n h = hashlib.sha256(encode(cwd)).digest()\n h = base64.urlsafe_b64encode(h).decode()[:8]\n\n return \"{}-{}\".format(sanitized_name, h)\n\n\nclass Env(object):\n \"\"\"\n An abstract Python environment.\n \"\"\"\n\n def __init__(self, path, base=None): # type: (Path, Optional[Path]) -> None\n self._is_windows = sys.platform == \"win32\"\n\n self._path = path\n bin_dir = \"bin\" if not self._is_windows else \"Scripts\"\n self._bin_dir = self._path / bin_dir\n\n self._base = base or path\n\n self._marker_env = None\n self._pip_version = None\n self._site_packages = None\n self._paths = None\n self._supported_tags = None\n self._purelib = None\n self._platlib = None\n\n @property\n def path(self): # type: () -> Path\n return self._path\n\n @property\n def base(self): # type: () -> Path\n return self._base\n\n @property\n def version_info(self): # type: () -> Tuple[int]\n return tuple(self.marker_env[\"version_info\"])\n\n @property\n def python_implementation(self): # type: () -> str\n return self.marker_env[\"platform_python_implementation\"]\n\n @property\n def python(self): # type: () -> str\n \"\"\"\n Path to current python executable\n \"\"\"\n return self._bin(\"python\")\n\n @property\n def marker_env(self):\n if self._marker_env is None:\n self._marker_env = self.get_marker_env()\n\n return self._marker_env\n\n @property\n def pip(self): # type: () -> str\n \"\"\"\n Path to current pip executable\n \"\"\"\n return self._bin(\"pip\")\n\n @property\n def platform(self): # type: () -> str\n return sys.platform\n\n @property\n def os(self): # type: () -> str\n return os.name\n\n @property\n def pip_version(self):\n if self._pip_version is None:\n self._pip_version = self.get_pip_version()\n\n return self._pip_version\n\n @property\n def site_packages(self): # type: () -> Path\n if self._site_packages is 
None:\n self._site_packages = self.purelib\n return self._site_packages\n\n @property\n def usersite(self): # type: () -> Optional[Path]\n if \"usersite\" in self.paths:\n return Path(self.paths[\"usersite\"])\n\n @property\n def purelib(self): # type: () -> Path\n if self._purelib is None:\n self._purelib = Path(self.paths[\"purelib\"])\n\n return self._purelib\n\n @property\n def platlib(self): # type: () -> Path\n if self._platlib is None:\n if \"platlib\" in self.paths:\n self._platlib = Path(self.paths[\"platlib\"])\n else:\n self._platlib = self.purelib\n\n return self._platlib\n\n def is_path_relative_to_lib(self, path): # type: (Path) -> bool\n for lib_path in [self.purelib, self.platlib]:\n try:\n path.relative_to(lib_path)\n return True\n except ValueError:\n pass\n\n return False\n\n @property\n def sys_path(self): # type: () -> List[str]\n raise NotImplementedError()\n\n @property\n def paths(self): # type: () -> Dict[str, str]\n if self._paths is None:\n self._paths = self.get_paths()\n\n return self._paths\n\n @property\n def supported_tags(self): # type: () -> List[Tag]\n if self._supported_tags is None:\n self._supported_tags = self.get_supported_tags()\n\n return self._supported_tags\n\n @classmethod\n def get_base_prefix(cls): # type: () -> Path\n if hasattr(sys, \"real_prefix\"):\n return sys.real_prefix\n\n if hasattr(sys, \"base_prefix\"):\n return sys.base_prefix\n\n return sys.prefix\n\n def get_version_info(self): # type: () -> Tuple[int]\n raise NotImplementedError()\n\n def get_python_implementation(self): # type: () -> str\n raise NotImplementedError()\n\n def get_marker_env(self): # type: () -> Dict[str, Any]\n raise NotImplementedError()\n\n def get_pip_command(self): # type: () -> List[str]\n raise NotImplementedError()\n\n def get_supported_tags(self): # type: () -> List[Tag]\n raise NotImplementedError()\n\n def get_pip_version(self): # type: () -> Version\n raise NotImplementedError()\n\n def get_paths(self): # type: () -> Dict[str, str]\n raise NotImplementedError()\n\n def is_valid_for_marker(self, marker): # type: (BaseMarker) -> bool\n return marker.validate(self.marker_env)\n\n def is_sane(self): # type: () -> bool\n \"\"\"\n Checks whether the current environment is sane or not.\n \"\"\"\n return True\n\n def run(self, bin, *args, **kwargs):\n bin = self._bin(bin)\n cmd = [bin] + list(args)\n return self._run(cmd, **kwargs)\n\n def run_pip(self, *args, **kwargs):\n pip = self.get_pip_command()\n cmd = pip + list(args)\n return self._run(cmd, **kwargs)\n\n def _run(self, cmd, **kwargs):\n \"\"\"\n Run a command inside the Python environment.\n \"\"\"\n call = kwargs.pop(\"call\", False)\n input_ = kwargs.pop(\"input_\", None)\n\n try:\n if self._is_windows:\n kwargs[\"shell\"] = True\n\n if kwargs.get(\"shell\", False):\n cmd = list_to_shell_command(cmd)\n\n if input_:\n output = subprocess.run(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n input=encode(input_),\n check=True,\n **kwargs\n ).stdout\n elif call:\n return subprocess.call(cmd, stderr=subprocess.STDOUT, **kwargs)\n else:\n output = subprocess.check_output(\n cmd, stderr=subprocess.STDOUT, **kwargs\n )\n except CalledProcessError as e:\n raise EnvCommandError(e, input=input_)\n\n return decode(output)\n\n def execute(self, bin, *args, **kwargs):\n bin = self._bin(bin)\n\n if not self._is_windows:\n args = [bin] + list(args)\n if \"env\" in kwargs:\n return os.execvpe(bin, args, kwargs[\"env\"])\n else:\n return os.execvp(bin, args)\n else:\n exe = subprocess.Popen([bin] + 
list(args), **kwargs)\n exe.communicate()\n return exe.returncode\n\n def is_venv(self): # type: () -> bool\n raise NotImplementedError()\n\n def _bin(self, bin): # type: (str) -> str\n \"\"\"\n Return path to the given executable.\n \"\"\"\n bin_path = (self._bin_dir / bin).with_suffix(\".exe\" if self._is_windows else \"\")\n if not bin_path.exists():\n # On Windows, some executables can be in the base path\n # This is especially true when installing Python with\n # the official installer, where python.exe will be at\n # the root of the env path.\n # This is an edge case and should not be encountered\n # in normal uses but this happens in the sonnet script\n # that creates a fake virtual environment pointing to\n # a base Python install.\n if self._is_windows:\n bin_path = (self._path / bin).with_suffix(\".exe\")\n if bin_path.exists():\n return str(bin_path)\n\n return bin\n\n return str(bin_path)\n\n def __eq__(self, other): # type: (Env) -> bool\n return other.__class__ == self.__class__ and other.path == self.path\n\n def __repr__(self):\n return '{}(\"{}\")'.format(self.__class__.__name__, self._path)\n\n\nclass SystemEnv(Env):\n \"\"\"\n A system (i.e. not a virtualenv) Python environment.\n \"\"\"\n\n @property\n def sys_path(self): # type: () -> List[str]\n return sys.path\n\n def get_version_info(self): # type: () -> Tuple[int]\n return sys.version_info\n\n def get_python_implementation(self): # type: () -> str\n return platform.python_implementation()\n\n def get_pip_command(self): # type: () -> List[str]\n # If we're not in a venv, assume the interpreter we're running on\n # has a pip and use that\n return [sys.executable, \"-m\", \"pip\"]\n\n def get_paths(self): # type: () -> Dict[str, str]\n # We can't use sysconfig.get_paths() because\n # on some distributions it does not return the proper paths\n # (those used by pip for instance). 
We go through distutils\n # to get the proper ones.\n import site\n\n from distutils.command.install import SCHEME_KEYS # noqa\n from distutils.core import Distribution\n\n d = Distribution()\n d.parse_config_files()\n obj = d.get_command_obj(\"install\", create=True)\n obj.finalize_options()\n\n paths = sysconfig.get_paths().copy()\n for key in SCHEME_KEYS:\n if key == \"headers\":\n # headers is not a path returned by sysconfig.get_paths()\n continue\n\n paths[key] = getattr(obj, \"install_{}\".format(key))\n\n if site.check_enableusersite() and hasattr(obj, \"install_usersite\"):\n paths[\"usersite\"] = getattr(obj, \"install_usersite\")\n\n return paths\n\n def get_supported_tags(self): # type: () -> List[Tag]\n return list(sys_tags())\n\n def get_marker_env(self): # type: () -> Dict[str, Any]\n if hasattr(sys, \"implementation\"):\n info = sys.implementation.version\n iver = \"{0.major}.{0.minor}.{0.micro}\".format(info)\n kind = info.releaselevel\n if kind != \"final\":\n iver += kind[0] + str(info.serial)\n\n implementation_name = sys.implementation.name\n else:\n iver = \"0\"\n implementation_name = \"\"\n\n return {\n \"implementation_name\": implementation_name,\n \"implementation_version\": iver,\n \"os_name\": os.name,\n \"platform_machine\": platform.machine(),\n \"platform_release\": platform.release(),\n \"platform_system\": platform.system(),\n \"platform_version\": platform.version(),\n \"python_full_version\": platform.python_version(),\n \"platform_python_implementation\": platform.python_implementation(),\n \"python_version\": \".\".join(\n v for v in platform.python_version().split(\".\")[:2]\n ),\n \"sys_platform\": sys.platform,\n \"version_info\": sys.version_info,\n # Extra information\n \"interpreter_name\": interpreter_name(),\n \"interpreter_version\": interpreter_version(),\n }\n\n def get_pip_version(self): # type: () -> Version\n from pip import __version__\n\n return Version.parse(__version__)\n\n def is_venv(self): # type: () -> bool\n return self._path != self._base\n\n\nclass VirtualEnv(Env):\n \"\"\"\n A virtual Python environment.\n \"\"\"\n\n def __init__(self, path, base=None): # type: (Path, Optional[Path]) -> None\n super(VirtualEnv, self).__init__(path, base)\n\n # If base is None, it probably means this is\n # a virtualenv created from VIRTUAL_ENV.\n # In this case we need to get sys.base_prefix\n # from inside the virtualenv.\n if base is None:\n self._base = Path(self.run(\"python\", \"-\", input_=GET_BASE_PREFIX).strip())\n\n @property\n def sys_path(self): # type: () -> List[str]\n output = self.run(\"python\", \"-\", input_=GET_SYS_PATH)\n\n return json.loads(output)\n\n def get_version_info(self): # type: () -> Tuple[int]\n output = self.run(\"python\", \"-\", input_=GET_PYTHON_VERSION)\n\n return tuple([int(s) for s in output.strip().split(\".\")])\n\n def get_python_implementation(self): # type: () -> str\n return self.marker_env[\"platform_python_implementation\"]\n\n def get_pip_command(self): # type: () -> List[str]\n # We're in a virtualenv that is known to be sane,\n # so assume that we have a functional pip\n return [self._bin(\"pip\")]\n\n def get_supported_tags(self): # type: () -> List[Tag]\n file_path = Path(packaging.tags.__file__)\n if file_path.suffix == \".pyc\":\n # Python 2\n file_path = file_path.with_suffix(\".py\")\n\n with file_path.open(encoding=\"utf-8\") as f:\n script = decode(f.read())\n\n script = script.replace(\n \"from ._typing import TYPE_CHECKING, cast\",\n \"TYPE_CHECKING = False\\ncast = lambda type_, 
value: value\",\n )\n script = script.replace(\n \"from ._typing import MYPY_CHECK_RUNNING, cast\",\n \"MYPY_CHECK_RUNNING = False\\ncast = lambda type_, value: value\",\n )\n\n script += textwrap.dedent(\n \"\"\"\n import json\n\n print(json.dumps([(t.interpreter, t.abi, t.platform) for t in sys_tags()]))\n \"\"\"\n )\n\n output = self.run(\"python\", \"-\", input_=script)\n\n return [Tag(*t) for t in json.loads(output)]\n\n def get_marker_env(self): # type: () -> Dict[str, Any]\n output = self.run(\"python\", \"-\", input_=GET_ENVIRONMENT_INFO)\n\n return json.loads(output)\n\n def get_pip_version(self): # type: () -> Version\n output = self.run_pip(\"--version\").strip()\n m = re.match(\"pip (.+?)(?: from .+)?$\", output)\n if not m:\n return Version.parse(\"0.0\")\n\n return Version.parse(m.group(1))\n\n def get_paths(self): # type: () -> Dict[str, str]\n output = self.run(\"python\", \"-\", input_=GET_PATHS)\n\n return json.loads(output)\n\n def is_venv(self): # type: () -> bool\n return True\n\n def is_sane(self):\n # A virtualenv is considered sane if both \"python\" and \"pip\" exist.\n return os.path.exists(self._bin(\"python\")) and os.path.exists(self._bin(\"pip\"))\n\n def _run(self, cmd, **kwargs):\n with self.temp_environ():\n os.environ[\"PATH\"] = self._updated_path()\n os.environ[\"VIRTUAL_ENV\"] = str(self._path)\n\n self.unset_env(\"PYTHONHOME\")\n self.unset_env(\"__PYVENV_LAUNCHER__\")\n\n return super(VirtualEnv, self)._run(cmd, **kwargs)\n\n def execute(self, bin, *args, **kwargs):\n with self.temp_environ():\n os.environ[\"PATH\"] = self._updated_path()\n os.environ[\"VIRTUAL_ENV\"] = str(self._path)\n\n self.unset_env(\"PYTHONHOME\")\n self.unset_env(\"__PYVENV_LAUNCHER__\")\n\n return super(VirtualEnv, self).execute(bin, *args, **kwargs)\n\n @contextmanager\n def temp_environ(self):\n environ = dict(os.environ)\n try:\n yield\n finally:\n os.environ.clear()\n os.environ.update(environ)\n\n def unset_env(self, key):\n if key in os.environ:\n del os.environ[key]\n\n def _updated_path(self):\n return os.pathsep.join([str(self._bin_dir), os.environ.get(\"PATH\", \"\")])\n\n\nclass NullEnv(SystemEnv):\n def __init__(self, path=None, base=None, execute=False):\n if path is None:\n path = Path(sys.prefix)\n\n super(NullEnv, self).__init__(path, base=base)\n\n self._execute = execute\n self.executed = []\n\n def get_pip_command(self): # type: () -> List[str]\n return [self._bin(\"python\"), \"-m\", \"pip\"]\n\n def _run(self, cmd, **kwargs):\n self.executed.append(cmd)\n\n if self._execute:\n return super(NullEnv, self)._run(cmd, **kwargs)\n\n def execute(self, bin, *args, **kwargs):\n self.executed.append([bin] + list(args))\n\n if self._execute:\n return super(NullEnv, self).execute(bin, *args, **kwargs)\n\n def _bin(self, bin):\n return bin\n\n\nclass MockEnv(NullEnv):\n def __init__(\n self,\n version_info=(3, 7, 0),\n python_implementation=\"CPython\",\n platform=\"darwin\",\n os_name=\"posix\",\n is_venv=False,\n pip_version=\"19.1\",\n sys_path=None,\n marker_env=None,\n supported_tags=None,\n **kwargs\n ):\n super(MockEnv, self).__init__(**kwargs)\n\n self._version_info = version_info\n self._python_implementation = python_implementation\n self._platform = platform\n self._os_name = os_name\n self._is_venv = is_venv\n self._pip_version = Version.parse(pip_version)\n self._sys_path = sys_path\n self._mock_marker_env = marker_env\n self._supported_tags = supported_tags\n\n @property\n def platform(self): # type: () -> str\n return self._platform\n\n @property\n def 
os(self): # type: () -> str\n return self._os_name\n\n @property\n def pip_version(self):\n return self._pip_version\n\n @property\n def sys_path(self):\n if self._sys_path is None:\n return super(MockEnv, self).sys_path\n\n return self._sys_path\n\n def get_marker_env(self): # type: () -> Dict[str, Any]\n if self._mock_marker_env is not None:\n return self._mock_marker_env\n\n marker_env = super(MockEnv, self).get_marker_env()\n marker_env[\"python_implementation\"] = self._python_implementation\n marker_env[\"version_info\"] = self._version_info\n marker_env[\"python_version\"] = \".\".join(str(v) for v in self._version_info[:2])\n marker_env[\"python_full_version\"] = \".\".join(str(v) for v in self._version_info)\n marker_env[\"sys_platform\"] = self._platform\n marker_env[\"interpreter_name\"] = self._python_implementation.lower()\n marker_env[\"interpreter_version\"] = \"cp\" + \"\".join(\n str(v) for v in self._version_info[:2]\n )\n\n return marker_env\n\n def is_venv(self): # type: () -> bool\n return self._is_venv\n",
"path": "poetry/utils/env.py"
}
] | diff --git a/poetry/utils/env.py b/poetry/utils/env.py
index 0a027bd668a..ccd855b5c60 100644
--- a/poetry/utils/env.py
+++ b/poetry/utils/env.py
@@ -1212,7 +1212,7 @@ def unset_env(self, key):
del os.environ[key]
def _updated_path(self):
- return os.pathsep.join([str(self._bin_dir), os.environ["PATH"]])
+ return os.pathsep.join([str(self._bin_dir), os.environ.get("PATH", "")])
class NullEnv(SystemEnv):
|
cleanlab__cleanlab-990 | Add underperforming_group issue type among the Datalab defaults
Test issue manager with different datasets (Image, tabular etc.) to make sure that the underperforming group in the dataset is extracted successfully. List any failure cases that might need to be addressed before adding this issue type to the defaults.
| [
{
"content": "# Copyright (C) 2017-2023 Cleanlab Inc.\n# This file is part of cleanlab.\n#\n# cleanlab is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published\n# by the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# cleanlab is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with cleanlab. If not, see <https://www.gnu.org/licenses/>.\n\"\"\"The factory module provides a factory class for constructing concrete issue managers\nand a decorator for registering new issue managers.\n\nThis module provides the :py:meth:`register` decorator for users to register new subclasses of\n:py:class:`IssueManager <cleanlab.datalab.internal.issue_manager.issue_manager.IssueManager>`\nin the registry. Each IssueManager detects some particular type of issue in a dataset.\n\n\nNote\n----\n\nThe :class:`REGISTRY` variable is used by the factory class to keep track\nof registered issue managers.\nThe factory class is used as an implementation detail by\n:py:class:`Datalab <cleanlab.datalab.datalab.Datalab>`,\nwhich provides a simplified API for constructing concrete issue managers.\n:py:class:`Datalab <cleanlab.datalab.datalab.Datalab>` is intended to be used by users\nand provides detailed documentation on how to use the API.\n\nWarning\n-------\nNeither the :class:`REGISTRY` variable nor the factory class should be used directly by users.\n\"\"\"\nfrom __future__ import annotations\n\nfrom typing import Dict, List, Type\n\nfrom cleanlab.datalab.internal.issue_manager import (\n ClassImbalanceIssueManager,\n DataValuationIssueManager,\n IssueManager,\n LabelIssueManager,\n NearDuplicateIssueManager,\n NonIIDIssueManager,\n ClassImbalanceIssueManager,\n UnderperformingGroupIssueManager,\n DataValuationIssueManager,\n OutlierIssueManager,\n NullIssueManager,\n)\nfrom cleanlab.datalab.internal.issue_manager.regression import RegressionLabelIssueManager\nfrom cleanlab.datalab.internal.issue_manager.multilabel.label import MultilabelIssueManager\nfrom cleanlab.datalab.internal.task import Task\n\n\nREGISTRY: Dict[Task, Dict[str, Type[IssueManager]]] = {\n Task.CLASSIFICATION: {\n \"outlier\": OutlierIssueManager,\n \"label\": LabelIssueManager,\n \"near_duplicate\": NearDuplicateIssueManager,\n \"non_iid\": NonIIDIssueManager,\n \"class_imbalance\": ClassImbalanceIssueManager,\n \"underperforming_group\": UnderperformingGroupIssueManager,\n \"data_valuation\": DataValuationIssueManager,\n \"null\": NullIssueManager,\n },\n Task.REGRESSION: {\n \"label\": RegressionLabelIssueManager,\n \"outlier\": OutlierIssueManager,\n \"near_duplicate\": NearDuplicateIssueManager,\n \"non_iid\": NonIIDIssueManager,\n \"null\": NullIssueManager,\n },\n Task.MULTILABEL: {\n \"label\": MultilabelIssueManager,\n \"outlier\": OutlierIssueManager,\n \"near_duplicate\": NearDuplicateIssueManager,\n \"non_iid\": NonIIDIssueManager,\n \"null\": NullIssueManager,\n },\n}\n\"\"\"Registry of issue managers that can be constructed from a task and issue type\nand used in the Datalab class.\n\n:meta hide-value:\n\nCurrently, the following issue managers are registered by default for a given task:\n\n- Classification:\n\n - ``\"outlier\"``: 
:py:class:`OutlierIssueManager <cleanlab.datalab.internal.issue_manager.outlier.OutlierIssueManager>`\n - ``\"label\"``: :py:class:`LabelIssueManager <cleanlab.datalab.internal.issue_manager.label.LabelIssueManager>`\n - ``\"near_duplicate\"``: :py:class:`NearDuplicateIssueManager <cleanlab.datalab.internal.issue_manager.duplicate.NearDuplicateIssueManager>`\n - ``\"non_iid\"``: :py:class:`NonIIDIssueManager <cleanlab.datalab.internal.issue_manager.noniid.NonIIDIssueManager>`\n - ``\"class_imbalance\"``: :py:class:`ClassImbalanceIssueManager <cleanlab.datalab.internal.issue_manager.class_imbalance.ClassImbalanceIssueManager>`\n - ``\"underperforming_group\"``: :py:class:`UnderperformingGroupIssueManager <cleanlab.datalab.internal.issue_manager.underperforming_group.UnderperformingGroupIssueManager>`\n - ``\"data_valuation\"``: :py:class:`DataValuationIssueManager <cleanlab.datalab.internal.issue_manager.data_valuation.DataValuationIssueManager>`\n - ``\"null\"``: :py:class:`NullIssueManager <cleanlab.datalab.internal.issue_manager.null.NullIssueManager>`\n \n- Regression:\n\n - ``\"label\"``: :py:class:`RegressionLabelIssueManager <cleanlab.datalab.internal.issue_manager.regression.label.RegressionLabelIssueManager>`\n\n- Multilabel:\n\n - ``\"label\"``: :py:class:`MultilabelIssueManager <cleanlab.datalab.internal.issue_manager.multilabel.label.MultilabelIssueManager>`\n\nWarning\n-------\nThis variable should not be used directly by users.\n\"\"\"\n\n\n# Construct concrete issue manager with a from_str method\nclass _IssueManagerFactory:\n \"\"\"Factory class for constructing concrete issue managers.\"\"\"\n\n @classmethod\n def from_str(cls, issue_type: str, task: Task) -> Type[IssueManager]:\n \"\"\"Constructs a concrete issue manager class from a string.\"\"\"\n if isinstance(issue_type, list):\n raise ValueError(\n \"issue_type must be a string, not a list. Try using from_list instead.\"\n )\n\n if task not in REGISTRY:\n raise ValueError(f\"Invalid task type: {task}, must be in {list(REGISTRY.keys())}\")\n if issue_type not in REGISTRY[task]:\n raise ValueError(f\"Invalid issue type: {issue_type} for task {task}\")\n\n return REGISTRY[task][issue_type]\n\n @classmethod\n def from_list(cls, issue_types: List[str], task: Task) -> List[Type[IssueManager]]:\n \"\"\"Constructs a list of concrete issue manager classes from a list of strings.\"\"\"\n return [cls.from_str(issue_type, task) for issue_type in issue_types]\n\n\ndef register(cls: Type[IssueManager], task: str = str(Task.CLASSIFICATION)) -> Type[IssueManager]:\n \"\"\"Registers the issue manager factory.\n\n Parameters\n ----------\n cls :\n A subclass of\n :py:class:`IssueManager <cleanlab.datalab.internal.issue_manager.issue_manager.IssueManager>`.\n\n task :\n Specific machine learning task like classification or regression.\n See :py:meth:`Task.from_str <cleanlab.datalab.internal.task.Task.from_str>`` for more details,\n to see which task type corresponds to which string.\n\n Returns\n -------\n cls :\n The same class that was passed in.\n\n Example\n -------\n\n When defining a new subclass of\n :py:class:`IssueManager <cleanlab.datalab.internal.issue_manager.issue_manager.IssueManager>`,\n you can register it like so:\n\n .. 
code-block:: python\n\n from cleanlab import IssueManager\n from cleanlab.datalab.internal.issue_manager_factory import register\n\n @register\n class MyIssueManager(IssueManager):\n issue_name: str = \"my_issue\"\n def find_issues(self, **kwargs):\n # Some logic to find issues\n pass\n\n or in a function call:\n\n .. code-block:: python\n\n from cleanlab import IssueManager\n from cleanlab.datalab.internal.issue_manager_factory import register\n\n class MyIssueManager(IssueManager):\n issue_name: str = \"my_issue\"\n def find_issues(self, **kwargs):\n # Some logic to find issues\n pass\n\n register(MyIssueManager, task=\"classification\")\n \"\"\"\n\n if not issubclass(cls, IssueManager):\n raise ValueError(f\"Class {cls} must be a subclass of IssueManager\")\n\n name: str = str(cls.issue_name)\n\n try:\n _task = Task.from_str(task)\n if _task not in REGISTRY:\n raise ValueError(f\"Invalid task type: {_task}, must be in {list(REGISTRY.keys())}\")\n except KeyError:\n raise ValueError(f\"Invalid task type: {task}, must be in {list(REGISTRY.keys())}\")\n\n if name in REGISTRY[_task]:\n print(\n f\"Warning: Overwriting existing issue manager {name} with {cls} for task {_task}.\"\n \"This may cause unexpected behavior.\"\n )\n\n REGISTRY[_task][name] = cls\n return cls\n\n\ndef list_possible_issue_types(task: Task) -> List[str]:\n \"\"\"Returns a list of all registered issue types.\n\n Any issue type that is not in this list cannot be used in the :py:meth:`find_issues` method.\n\n See Also\n --------\n :py:class:`REGISTRY <cleanlab.datalab.internal.issue_manager_factory.REGISTRY>` : All available issue types and their corresponding issue managers can be found here.\n \"\"\"\n return list(REGISTRY.get(task, []))\n\n\ndef list_default_issue_types(task: Task) -> List[str]:\n \"\"\"Returns a list of the issue types that are run by default\n when :py:meth:`find_issues` is called without specifying `issue_types`.\n\n task :\n Specific machine learning task supported by Datalab.\n\n See Also\n --------\n :py:class:`REGISTRY <cleanlab.datalab.internal.issue_manager_factory.REGISTRY>` : All available issue types and their corresponding issue managers can be found here.\n \"\"\"\n default_issue_types_dict = {\n Task.CLASSIFICATION: [\n \"null\",\n \"label\",\n \"outlier\",\n \"near_duplicate\",\n \"non_iid\",\n \"class_imbalance\",\n ],\n Task.REGRESSION: [\n \"null\",\n \"label\",\n \"outlier\",\n \"near_duplicate\",\n \"non_iid\",\n ],\n Task.MULTILABEL: [\n \"null\",\n \"label\",\n \"outlier\",\n \"near_duplicate\",\n \"non_iid\",\n ],\n }\n if task not in default_issue_types_dict:\n task = Task.CLASSIFICATION\n default_issue_types = default_issue_types_dict[task]\n return default_issue_types\n",
"path": "cleanlab/datalab/internal/issue_manager_factory.py"
}
] | [
{
"content": "# Copyright (C) 2017-2023 Cleanlab Inc.\n# This file is part of cleanlab.\n#\n# cleanlab is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published\n# by the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# cleanlab is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with cleanlab. If not, see <https://www.gnu.org/licenses/>.\n\"\"\"The factory module provides a factory class for constructing concrete issue managers\nand a decorator for registering new issue managers.\n\nThis module provides the :py:meth:`register` decorator for users to register new subclasses of\n:py:class:`IssueManager <cleanlab.datalab.internal.issue_manager.issue_manager.IssueManager>`\nin the registry. Each IssueManager detects some particular type of issue in a dataset.\n\n\nNote\n----\n\nThe :class:`REGISTRY` variable is used by the factory class to keep track\nof registered issue managers.\nThe factory class is used as an implementation detail by\n:py:class:`Datalab <cleanlab.datalab.datalab.Datalab>`,\nwhich provides a simplified API for constructing concrete issue managers.\n:py:class:`Datalab <cleanlab.datalab.datalab.Datalab>` is intended to be used by users\nand provides detailed documentation on how to use the API.\n\nWarning\n-------\nNeither the :class:`REGISTRY` variable nor the factory class should be used directly by users.\n\"\"\"\nfrom __future__ import annotations\n\nfrom typing import Dict, List, Type\n\nfrom cleanlab.datalab.internal.issue_manager import (\n ClassImbalanceIssueManager,\n DataValuationIssueManager,\n IssueManager,\n LabelIssueManager,\n NearDuplicateIssueManager,\n NonIIDIssueManager,\n ClassImbalanceIssueManager,\n UnderperformingGroupIssueManager,\n DataValuationIssueManager,\n OutlierIssueManager,\n NullIssueManager,\n)\nfrom cleanlab.datalab.internal.issue_manager.regression import RegressionLabelIssueManager\nfrom cleanlab.datalab.internal.issue_manager.multilabel.label import MultilabelIssueManager\n\n\nREGISTRY: Dict[str, Dict[str, Type[IssueManager]]] = {\n \"classification\": {\n \"outlier\": OutlierIssueManager,\n \"label\": LabelIssueManager,\n \"near_duplicate\": NearDuplicateIssueManager,\n \"non_iid\": NonIIDIssueManager,\n \"class_imbalance\": ClassImbalanceIssueManager,\n \"underperforming_group\": UnderperformingGroupIssueManager,\n \"data_valuation\": DataValuationIssueManager,\n \"null\": NullIssueManager,\n },\n \"regression\": {\n \"label\": RegressionLabelIssueManager,\n \"outlier\": OutlierIssueManager,\n \"near_duplicate\": NearDuplicateIssueManager,\n \"non_iid\": NonIIDIssueManager,\n \"null\": NullIssueManager,\n },\n \"multilabel\": {\n \"label\": MultilabelIssueManager,\n \"outlier\": OutlierIssueManager,\n \"near_duplicate\": NearDuplicateIssueManager,\n \"non_iid\": NonIIDIssueManager,\n \"null\": NullIssueManager,\n },\n}\n\"\"\"Registry of issue managers that can be constructed from a string\nand used in the Datalab class.\n\n:meta hide-value:\n\nCurrently, the following issue managers are registered by default:\n\n- ``\"outlier\"``: :py:class:`OutlierIssueManager <cleanlab.datalab.internal.issue_manager.outlier.OutlierIssueManager>`\n- 
``\"near_duplicate\"``: :py:class:`NearDuplicateIssueManager <cleanlab.datalab.internal.issue_manager.duplicate.NearDuplicateIssueManager>`\n- ``\"non_iid\"``: :py:class:`NonIIDIssueManager <cleanlab.datalab.internal.issue_manager.noniid.NonIIDIssueManager>`\n\nWarning\n-------\nThis variable should not be used directly by users.\n\"\"\"\n\n\n# Construct concrete issue manager with a from_str method\nclass _IssueManagerFactory:\n \"\"\"Factory class for constructing concrete issue managers.\"\"\"\n\n @classmethod\n def from_str(cls, issue_type: str, task: str) -> Type[IssueManager]:\n \"\"\"Constructs a concrete issue manager class from a string.\"\"\"\n if isinstance(issue_type, list):\n raise ValueError(\n \"issue_type must be a string, not a list. Try using from_list instead.\"\n )\n\n if task not in REGISTRY:\n raise ValueError(f\"Invalid task type: {task}, must be in {list(REGISTRY.keys())}\")\n if issue_type not in REGISTRY[task]:\n raise ValueError(f\"Invalid issue type: {issue_type} for task {task}\")\n\n return REGISTRY[task][issue_type]\n\n @classmethod\n def from_list(cls, issue_types: List[str], task: str) -> List[Type[IssueManager]]:\n \"\"\"Constructs a list of concrete issue manager classes from a list of strings.\"\"\"\n return [cls.from_str(issue_type, task) for issue_type in issue_types]\n\n\ndef register(cls: Type[IssueManager], task: str = \"classification\") -> Type[IssueManager]:\n \"\"\"Registers the issue manager factory.\n\n Parameters\n ----------\n cls :\n A subclass of\n :py:class:`IssueManager <cleanlab.datalab.internal.issue_manager.issue_manager.IssueManager>`.\n\n task :\n Specific machine learning task like classification or regression.\n\n Returns\n -------\n cls :\n The same class that was passed in.\n\n Example\n -------\n\n When defining a new subclass of\n :py:class:`IssueManager <cleanlab.datalab.internal.issue_manager.issue_manager.IssueManager>`,\n you can register it like so:\n\n .. code-block:: python\n\n from cleanlab import IssueManager\n from cleanlab.datalab.internal.issue_manager_factory import register\n\n @register\n class MyIssueManager(IssueManager):\n issue_name: str = \"my_issue\"\n def find_issues(self, **kwargs):\n # Some logic to find issues\n pass\n\n or in a function call:\n\n .. 
code-block:: python\n\n from cleanlab import IssueManager\n from cleanlab.datalab.internal.issue_manager_factory import register\n\n class MyIssueManager(IssueManager):\n issue_name: str = \"my_issue\"\n def find_issues(self, **kwargs):\n # Some logic to find issues\n pass\n\n register(MyIssueManager, task=\"classification\")\n \"\"\"\n\n if not issubclass(cls, IssueManager):\n raise ValueError(f\"Class {cls} must be a subclass of IssueManager\")\n\n name: str = str(cls.issue_name)\n\n if task not in REGISTRY:\n raise ValueError(f\"Invalid task type: {task}, must be in {list(REGISTRY.keys())}\")\n\n if name in REGISTRY[task]:\n print(\n f\"Warning: Overwriting existing issue manager {name} with {cls} for task {task}.\"\n \"This may cause unexpected behavior.\"\n )\n\n REGISTRY[task][name] = cls\n return cls\n\n\ndef list_possible_issue_types(task: str) -> List[str]:\n \"\"\"Returns a list of all registered issue types.\n\n Any issue type that is not in this list cannot be used in the :py:meth:`find_issues` method.\n\n See Also\n --------\n :py:class:`REGISTRY <cleanlab.datalab.internal.issue_manager_factory.REGISTRY>` : All available issue types and their corresponding issue managers can be found here.\n \"\"\"\n return list(REGISTRY.get(task, []))\n\n\ndef list_default_issue_types(task: str) -> List[str]:\n \"\"\"Returns a list of the issue types that are run by default\n when :py:meth:`find_issues` is called without specifying `issue_types`.\n\n See Also\n --------\n :py:class:`REGISTRY <cleanlab.datalab.internal.issue_manager_factory.REGISTRY>` : All available issue types and their corresponding issue managers can be found here.\n \"\"\"\n default_issue_types_dict = {\n \"classification\": [\n \"null\",\n \"label\",\n \"outlier\",\n \"near_duplicate\",\n \"non_iid\",\n \"class_imbalance\",\n \"underperforming_group\",\n ],\n \"regression\": [\n \"null\",\n \"label\",\n \"outlier\",\n \"near_duplicate\",\n \"non_iid\",\n ],\n \"multilabel\": [\n \"null\",\n \"label\",\n \"outlier\",\n \"near_duplicate\",\n \"non_iid\",\n ],\n }\n if task not in default_issue_types_dict:\n task = \"classification\"\n default_issue_types = default_issue_types_dict[task]\n return default_issue_types\n",
"path": "cleanlab/datalab/internal/issue_manager_factory.py"
}
] | diff --git a/cleanlab/datalab/internal/issue_manager_factory.py b/cleanlab/datalab/internal/issue_manager_factory.py
index 28cabb58a6..d85fc62e4c 100644
--- a/cleanlab/datalab/internal/issue_manager_factory.py
+++ b/cleanlab/datalab/internal/issue_manager_factory.py
@@ -223,6 +223,7 @@ def list_default_issue_types(task: str) -> List[str]:
"near_duplicate",
"non_iid",
"class_imbalance",
+ "underperforming_group",
],
"regression": [
"null",
diff --git a/tests/datalab/datalab/test_datalab.py b/tests/datalab/datalab/test_datalab.py
index 6d06d5de04..172801a1bf 100644
--- a/tests/datalab/datalab/test_datalab.py
+++ b/tests/datalab/datalab/test_datalab.py
@@ -89,6 +89,7 @@ def test_list_default_issue_types(self):
"near_duplicate",
"non_iid",
"class_imbalance",
+ "underperforming_group",
]
def tmp_path(self):
diff --git a/tests/datalab/test_cleanvision_integration.py b/tests/datalab/test_cleanvision_integration.py
index 972791bd9a..1d463473e6 100644
--- a/tests/datalab/test_cleanvision_integration.py
+++ b/tests/datalab/test_cleanvision_integration.py
@@ -32,7 +32,7 @@ def num_imagelab_issues(self):
@pytest.fixture
def num_datalab_issues(self):
- return 5
+ return 6
@pytest.fixture
def pred_probs(self, image_dataset):
@@ -69,6 +69,7 @@ def test_imagelab_issues_checked(
"near_duplicate",
"class_imbalance",
"null",
+ "underperforming_group",
# "non_iid",
]
@@ -94,12 +95,14 @@ def test_imagelab_issues_checked(
"near_duplicate",
"class_imbalance",
"null",
+ "underperforming_group",
],
- "num_issues": [1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0],
+ "num_issues": [1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
}
)
expected_count = df.sort_values(by="issue_type")["num_issues"].tolist()
count = datalab.issue_summary.sort_values(by="issue_type")["num_issues"].tolist()
+ assert set(datalab.issue_summary["issue_type"].tolist()) == set(df["issue_type"].tolist())
assert count == expected_count
assert datalab.issue_summary["num_issues"].sum() == df["num_issues"].sum()
@@ -147,7 +150,15 @@ def test_imagelab_issues_not_checked(
assert len(datalab.issues.columns) == num_datalab_issues * 2
assert len(datalab.issue_summary) == num_datalab_issues
- all_keys = ["statistics", "label", "outlier", "near_duplicate", "class_imbalance", "null"]
+ all_keys = [
+ "statistics",
+ "label",
+ "outlier",
+ "near_duplicate",
+ "class_imbalance",
+ "null",
+ "underperforming_group",
+ ]
assert set(all_keys) == set(datalab.info.keys())
datalab.report()
|
liqd__adhocracy4-1243 | Poll cannot change order of questions
Poll: after moving a question in the dashboard and saving, the question moves back to it's original position.
NOTE: flip-move lib still working in documents, may require a poll refactor
https://github.com/liqd/adhocracy-plus/issues/1964
https://github.com/liqd/a4-meinberlin/issues/4370
| [
{
"content": "from django.contrib.contenttypes.fields import GenericRelation\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.utils.translation import gettext_lazy as _\n\nfrom adhocracy4.comments import models as comment_models\nfrom adhocracy4.models.base import UserGeneratedContentModel\nfrom adhocracy4.modules import models as module_models\nfrom adhocracy4.polls import validators\n\n\nclass QuestionQuerySet(models.QuerySet):\n def annotate_vote_count(self):\n return self.annotate(\n vote_count=models.Count(\n 'choices__votes__creator_id',\n distinct=True),\n vote_count_multi=models.Count(\n 'choices__votes',\n distinct=True),\n answer_count=models.Count(\n 'answers__creator_id',\n distinct=True),\n )\n\n\nclass ChoiceQuerySet(models.QuerySet):\n def annotate_vote_count(self):\n return self.annotate(\n vote_count=models.Count(\n 'votes'\n )\n )\n\n\nclass Poll(module_models.Item):\n comments = GenericRelation(comment_models.Comment,\n related_query_name='poll',\n object_id_field='object_pk')\n\n def get_absolute_url(self):\n return self.module.get_detail_url\n\n def annotated_questions(self):\n return self.questions.annotate_vote_count()\n\n\nclass Question(models.Model):\n label = models.CharField(max_length=255)\n help_text = models.CharField(\n max_length=250,\n blank=True,\n verbose_name=_('Explanation')\n )\n\n weight = models.SmallIntegerField()\n\n multiple_choice = models.BooleanField(default=False)\n is_open = models.BooleanField(default=False)\n\n poll = models.ForeignKey(\n 'Poll',\n on_delete=models.CASCADE,\n related_name='questions'\n )\n\n objects = QuestionQuerySet.as_manager()\n\n @property\n def has_other_option(self):\n return self.choices.filter(is_other_choice=True).exists()\n\n def get_other_option(self):\n if self.has_other_option:\n return self.choices.filter(is_other_choice=True).first()\n return None\n\n def clean(self, *args, **kwargs):\n if self.is_open:\n if self.multiple_choice:\n raise ValidationError({\n 'is_open': _('Questions with open answers cannot '\n 'have multiple choices.')\n })\n elif self.choices.count() > 0:\n raise ValidationError({\n 'is_open': _('Question with choices cannot become '\n 'open question. 
Delete choices or add new '\n 'open question.')\n })\n\n super().clean(*args, **kwargs)\n\n def save(self, *args, **kwargs):\n self.full_clean()\n return super().save(*args, **kwargs)\n\n def user_choices_list(self, user):\n if not user.is_authenticated:\n return []\n\n return self.choices\\\n .filter(votes__creator=user)\\\n .values_list('id', flat=True)\n\n def user_answer(self, user):\n if not user.is_authenticated:\n return ''\n\n answers = self.answers.filter(creator=user)\n if answers.exists():\n # there can only be one answer bc of unique constraint\n return answers.first().id\n else:\n return ''\n\n def other_choice_answers(self):\n if self.has_other_option:\n other_choice = self.choices.filter(is_other_choice=True).first()\n other_answers = OtherVote.objects.filter(vote__choice=other_choice)\n return other_answers\n else:\n return OtherVote.objects.none()\n\n def other_choice_user_answer(self, user):\n if not user.is_authenticated:\n return ''\n\n elif self.has_other_option:\n other_choice = self.choices.filter(is_other_choice=True).first()\n other_choice_user_answer = OtherVote.objects.filter(\n vote__creator=user,\n vote__choice=other_choice)\n if other_choice_user_answer.exists():\n # there can only be one other vote as 1:1 relation\n return other_choice_user_answer.first().vote.id\n return ''\n\n def get_absolute_url(self):\n return self.poll.get_absolute_url()\n\n def __str__(self):\n return self.label\n\n class Meta:\n ordering = ['weight']\n\n\nclass Answer(UserGeneratedContentModel):\n answer = models.CharField(\n max_length=750,\n verbose_name=_('Answer')\n )\n\n question = models.ForeignKey(\n 'Question',\n on_delete=models.CASCADE,\n related_name='answers',\n )\n\n def clean(self, *args, **kwargs):\n if not self.question.is_open:\n raise ValidationError({\n 'question': _('Only open questions can have answers.')\n })\n super().clean(*args, **kwargs)\n\n def save(self, *args, **kwargs):\n self.full_clean()\n return super().save(*args, **kwargs)\n\n def get_absolute_url(self):\n return self.question.poll.get_absolute_url()\n\n def __str__(self):\n return '%s: %s' % (self.creator, self.answer[:20])\n\n class Meta:\n ordering = ['id']\n unique_together = ('question', 'creator')\n\n\nclass Choice(models.Model):\n label = models.CharField(max_length=255)\n\n question = models.ForeignKey(\n 'Question',\n on_delete=models.CASCADE,\n related_name='choices',\n )\n\n is_other_choice = models.BooleanField(default=False)\n\n weight = models.SmallIntegerField()\n\n objects = ChoiceQuerySet.as_manager()\n\n def clean(self, *args, **kwargs):\n if self.question.is_open:\n raise ValidationError({\n 'label': _('Open questions cannot have choices.')\n })\n elif self.is_other_choice:\n if self.question.choices.count() == 0:\n raise ValidationError({\n 'is_other_choice': _('\"Other\" cannot be the only choice. 
'\n 'Use open question or add more '\n 'choices.')\n })\n if self.question.has_other_option and \\\n self.id != self.question.get_other_option().id:\n raise ValidationError({\n 'is_other_choice': _('Question already has \"other\" '\n 'choice.')\n })\n super().clean(*args, **kwargs)\n\n def save(self, *args, **kwargs):\n self.full_clean()\n return super().save(*args, **kwargs)\n\n def get_absolute_url(self):\n return self.question.poll.get_absolute_url()\n\n def __str__(self):\n return '%s @%s' % (self.label, self.question)\n\n class Meta:\n ordering = ['weight', 'id']\n\n\nclass Vote(UserGeneratedContentModel):\n choice = models.ForeignKey(\n 'Choice',\n on_delete=models.CASCADE,\n related_name='votes'\n )\n\n def save(self, *args, **kwargs):\n self.validate_unique()\n return super().save(*args, **kwargs)\n\n def validate_unique(self, exclude=None):\n super(Vote, self).validate_unique(exclude)\n validators.single_vote_per_user(self.creator,\n self.choice,\n self.pk)\n\n @property\n def is_other_vote(self):\n return hasattr(self, 'other_vote')\n\n # Make Vote instances behave like items for rule checking\n @property\n def module(self):\n return self.choice.question.poll.module\n\n @property\n def project(self):\n return self.module.project\n\n def get_absolute_url(self):\n return self.choice.question.poll.get_absolute_url()\n\n def __str__(self):\n return '%s: %s' % (self.creator, self.choice)\n\n\nclass OtherVote(models.Model):\n vote = models.OneToOneField(\n Vote,\n on_delete=models.CASCADE,\n primary_key=True,\n related_name='other_vote'\n )\n\n answer = models.CharField(\n max_length=250,\n verbose_name=_('Answer')\n )\n\n def clean(self, *args, **kwargs):\n if not self.vote.choice.is_other_choice:\n raise ValidationError({\n 'vote': _('Other vote can only be created for vote on '\n '\"other\" choice.')\n })\n super().clean(*args, **kwargs)\n\n def save(self, *args, **kwargs):\n self.full_clean()\n return super().save(*args, **kwargs)\n\n @property\n def module(self):\n return self.vote.choice.question.poll.module\n\n @property\n def project(self):\n return self.module.project\n\n def get_absolute_url(self):\n return self.vote.choice.question.poll.get_absolute_url()\n\n def __str__(self):\n return '%s: %s' % (self.vote.creator, _('other'))\n",
"path": "adhocracy4/polls/models.py"
}
] | [
{
"content": "from django.contrib.contenttypes.fields import GenericRelation\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.utils.translation import gettext_lazy as _\n\nfrom adhocracy4.comments import models as comment_models\nfrom adhocracy4.models.base import UserGeneratedContentModel\nfrom adhocracy4.modules import models as module_models\nfrom adhocracy4.polls import validators\n\n\nclass QuestionQuerySet(models.QuerySet):\n def annotate_vote_count(self):\n return self.annotate(\n vote_count=models.Count(\n 'choices__votes__creator_id',\n distinct=True),\n vote_count_multi=models.Count(\n 'choices__votes',\n distinct=True),\n answer_count=models.Count(\n 'answers__creator_id',\n distinct=True),\n ).order_by('weight')\n\n\nclass ChoiceQuerySet(models.QuerySet):\n def annotate_vote_count(self):\n return self.annotate(\n vote_count=models.Count(\n 'votes'\n )\n )\n\n\nclass Poll(module_models.Item):\n comments = GenericRelation(comment_models.Comment,\n related_query_name='poll',\n object_id_field='object_pk')\n\n def get_absolute_url(self):\n return self.module.get_detail_url\n\n def annotated_questions(self):\n return self.questions.annotate_vote_count()\n\n\nclass Question(models.Model):\n label = models.CharField(max_length=255)\n help_text = models.CharField(\n max_length=250,\n blank=True,\n verbose_name=_('Explanation')\n )\n\n weight = models.SmallIntegerField()\n\n multiple_choice = models.BooleanField(default=False)\n is_open = models.BooleanField(default=False)\n\n poll = models.ForeignKey(\n 'Poll',\n on_delete=models.CASCADE,\n related_name='questions'\n )\n\n objects = QuestionQuerySet.as_manager()\n\n @property\n def has_other_option(self):\n return self.choices.filter(is_other_choice=True).exists()\n\n def get_other_option(self):\n if self.has_other_option:\n return self.choices.filter(is_other_choice=True).first()\n return None\n\n def clean(self, *args, **kwargs):\n if self.is_open:\n if self.multiple_choice:\n raise ValidationError({\n 'is_open': _('Questions with open answers cannot '\n 'have multiple choices.')\n })\n elif self.choices.count() > 0:\n raise ValidationError({\n 'is_open': _('Question with choices cannot become '\n 'open question. 
Delete choices or add new '\n 'open question.')\n })\n\n super().clean(*args, **kwargs)\n\n def save(self, *args, **kwargs):\n self.full_clean()\n return super().save(*args, **kwargs)\n\n def user_choices_list(self, user):\n if not user.is_authenticated:\n return []\n\n return self.choices\\\n .filter(votes__creator=user)\\\n .values_list('id', flat=True)\n\n def user_answer(self, user):\n if not user.is_authenticated:\n return ''\n\n answers = self.answers.filter(creator=user)\n if answers.exists():\n # there can only be one answer bc of unique constraint\n return answers.first().id\n else:\n return ''\n\n def other_choice_answers(self):\n if self.has_other_option:\n other_choice = self.choices.filter(is_other_choice=True).first()\n other_answers = OtherVote.objects.filter(vote__choice=other_choice)\n return other_answers\n else:\n return OtherVote.objects.none()\n\n def other_choice_user_answer(self, user):\n if not user.is_authenticated:\n return ''\n\n elif self.has_other_option:\n other_choice = self.choices.filter(is_other_choice=True).first()\n other_choice_user_answer = OtherVote.objects.filter(\n vote__creator=user,\n vote__choice=other_choice)\n if other_choice_user_answer.exists():\n # there can only be one other vote as 1:1 relation\n return other_choice_user_answer.first().vote.id\n return ''\n\n def get_absolute_url(self):\n return self.poll.get_absolute_url()\n\n def __str__(self):\n return self.label\n\n class Meta:\n ordering = ['weight']\n\n\nclass Answer(UserGeneratedContentModel):\n answer = models.CharField(\n max_length=750,\n verbose_name=_('Answer')\n )\n\n question = models.ForeignKey(\n 'Question',\n on_delete=models.CASCADE,\n related_name='answers',\n )\n\n def clean(self, *args, **kwargs):\n if not self.question.is_open:\n raise ValidationError({\n 'question': _('Only open questions can have answers.')\n })\n super().clean(*args, **kwargs)\n\n def save(self, *args, **kwargs):\n self.full_clean()\n return super().save(*args, **kwargs)\n\n def get_absolute_url(self):\n return self.question.poll.get_absolute_url()\n\n def __str__(self):\n return '%s: %s' % (self.creator, self.answer[:20])\n\n class Meta:\n ordering = ['id']\n unique_together = ('question', 'creator')\n\n\nclass Choice(models.Model):\n label = models.CharField(max_length=255)\n\n question = models.ForeignKey(\n 'Question',\n on_delete=models.CASCADE,\n related_name='choices',\n )\n\n is_other_choice = models.BooleanField(default=False)\n\n weight = models.SmallIntegerField()\n\n objects = ChoiceQuerySet.as_manager()\n\n def clean(self, *args, **kwargs):\n if self.question.is_open:\n raise ValidationError({\n 'label': _('Open questions cannot have choices.')\n })\n elif self.is_other_choice:\n if self.question.choices.count() == 0:\n raise ValidationError({\n 'is_other_choice': _('\"Other\" cannot be the only choice. 
'\n 'Use open question or add more '\n 'choices.')\n })\n if self.question.has_other_option and \\\n self.id != self.question.get_other_option().id:\n raise ValidationError({\n 'is_other_choice': _('Question already has \"other\" '\n 'choice.')\n })\n super().clean(*args, **kwargs)\n\n def save(self, *args, **kwargs):\n self.full_clean()\n return super().save(*args, **kwargs)\n\n def get_absolute_url(self):\n return self.question.poll.get_absolute_url()\n\n def __str__(self):\n return '%s @%s' % (self.label, self.question)\n\n class Meta:\n ordering = ['weight', 'id']\n\n\nclass Vote(UserGeneratedContentModel):\n choice = models.ForeignKey(\n 'Choice',\n on_delete=models.CASCADE,\n related_name='votes'\n )\n\n def save(self, *args, **kwargs):\n self.validate_unique()\n return super().save(*args, **kwargs)\n\n def validate_unique(self, exclude=None):\n super(Vote, self).validate_unique(exclude)\n validators.single_vote_per_user(self.creator,\n self.choice,\n self.pk)\n\n @property\n def is_other_vote(self):\n return hasattr(self, 'other_vote')\n\n # Make Vote instances behave like items for rule checking\n @property\n def module(self):\n return self.choice.question.poll.module\n\n @property\n def project(self):\n return self.module.project\n\n def get_absolute_url(self):\n return self.choice.question.poll.get_absolute_url()\n\n def __str__(self):\n return '%s: %s' % (self.creator, self.choice)\n\n\nclass OtherVote(models.Model):\n vote = models.OneToOneField(\n Vote,\n on_delete=models.CASCADE,\n primary_key=True,\n related_name='other_vote'\n )\n\n answer = models.CharField(\n max_length=250,\n verbose_name=_('Answer')\n )\n\n def clean(self, *args, **kwargs):\n if not self.vote.choice.is_other_choice:\n raise ValidationError({\n 'vote': _('Other vote can only be created for vote on '\n '\"other\" choice.')\n })\n super().clean(*args, **kwargs)\n\n def save(self, *args, **kwargs):\n self.full_clean()\n return super().save(*args, **kwargs)\n\n @property\n def module(self):\n return self.vote.choice.question.poll.module\n\n @property\n def project(self):\n return self.module.project\n\n def get_absolute_url(self):\n return self.vote.choice.question.poll.get_absolute_url()\n\n def __str__(self):\n return '%s: %s' % (self.vote.creator, _('other'))\n",
"path": "adhocracy4/polls/models.py"
}
] | diff --git a/adhocracy4/comments_async/static/comments_async/comment_edit_form.jsx b/adhocracy4/comments_async/static/comments_async/comment_edit_form.jsx
index 958b0108a..85ba95a8a 100644
--- a/adhocracy4/comments_async/static/comments_async/comment_edit_form.jsx
+++ b/adhocracy4/comments_async/static/comments_async/comment_edit_form.jsx
@@ -89,7 +89,7 @@ export default class CommentEditForm extends React.Component {
</button>
<button
- type="submit" value={translated.cancel} className="cancel-button"
+ type="submit" value={translated.cancel} className="btn btn--light cancel-button"
onClick={this.props.handleCancel}
>
{translated.cancel}
diff --git a/adhocracy4/polls/assets/EditPollQuestions.jsx b/adhocracy4/polls/assets/EditPollQuestions.jsx
deleted file mode 100644
index d874d2119..000000000
--- a/adhocracy4/polls/assets/EditPollQuestions.jsx
+++ /dev/null
@@ -1,286 +0,0 @@
-import React, { useState, useRef, useEffect } from 'react'
-import django from 'django'
-import dashboard from '../../../adhocracy4/dashboard/assets/dashboard'
-import update from 'immutability-helper'
-import { EditPollQuestion } from './EditPollQuestion'
-import { EditPollOpenQuestion } from './EditPollOpenQuestion'
-import Alert from '../../static/Alert'
-import PopperMenu from './PopperMenu'
-
-const api = require('adhocracy4').api
-const FlipMove = require('react-flip-move').default
-
-/*
-|--------------------------------------------------------------------------
-| Helper method for local scoped key/identifier
-|--------------------------------------------------------------------------
-*/
-
-let maxLocalKey = 0
-const getNextLocalKey = () => {
- /** Get an artificial key for non-committed items.
- *
- * The key is prefixed to prevent collisions with real database keys.
- */
- return 'local_' + maxLocalKey++
-}
-
-export const EditPollQuestions = (props) => {
- /*
- |--------------------------------------------------------------------------
- | Question state related handlers
- |--------------------------------------------------------------------------
- */
-
- const getNewQuestion = (label = '', helptext = '') => {
- return {
- label,
- help_text: helptext,
- multiple_choice: false,
- key: getNextLocalKey(),
- is_open: false,
- choices: [
- getNewChoice(),
- getNewChoice()
- ],
- answers: []
- }
- }
-
- const getNewOpenQuestion = (label = '') => {
- const newQuestion = getNewQuestion(label)
- newQuestion.is_open = true
- newQuestion.choices = []
- return newQuestion
- }
-
- const updatePopper = () => {
- popper &&
- popper.current &&
- popper.current.instance.update &&
- popper.current.instance.update()
- }
-
- const handleQuestion = (action, params) => {
- let diff = {}
- if (action === 'label') {
- const { index, label } = params
- diff[index] = { $merge: { label } }
- updatePopper()
- } else if (action === 'helptext') {
- const { index, helptext } = params
- diff[index] = { $merge: { help_text: helptext } }
- updatePopper()
- } else if (action === 'multiple-choice') {
- const { index, multipleChoice } = params
- diff[index] = { $merge: { multiple_choice: multipleChoice } }
- } else if (action === 'move') {
- const { index, direction } = params
- const position = direction === 'up' ? (index - 1) : (index + 1)
- diff = { $splice: [[index, 1], [position, 0, questions[index]]] }
- } else if (action === 'append') {
- const newQuestion = params && params.isOpen
- ? getNewOpenQuestion()
- : getNewQuestion()
- diff = { $push: [newQuestion] }
- updatePopper()
- } else if (action === 'delete') {
- const { index } = params
- diff = { $splice: [[index, 1]] }
- updatePopper()
- } else {
- return null
- }
- action && setQuestions(update(questions, diff))
- }
-
- /*
- |--------------------------------------------------------------------------
- | Choice state related handlers
- |--------------------------------------------------------------------------
- */
-
- const getNewChoice = (label = '', isOther = false) => {
- return {
- label,
- key: isOther ? 'other-choice' : getNextLocalKey(),
- is_other_choice: isOther
- }
- }
-
- const handleChoice = (action, params) => {
- const diff = {}
- if (action === 'label') {
- const { index, choiceIndex, label } = params
- diff[index] = { choices: {} }
- diff[index].choices[choiceIndex] = { $merge: { label } }
- } else if (action === 'append') {
- const { index, hasOtherOption } = params
- const position = questions[index].choices.length - 1
- const newChoice = getNewChoice()
- diff[index] = hasOtherOption
- ? { choices: { $splice: [[position, 0, newChoice]] } }
- : { choices: { $push: [newChoice] } }
- } else if (action === 'is-other-choice') {
- const { index, isOtherChoice } = params
- if (isOtherChoice) {
- const otherChoice = getNewChoice('other', true)
- diff[index] = { choices: { $push: [otherChoice] } }
- } else {
- const choiceIndex = questions[index].choices.findIndex(c => c.key === 'other-choice')
- diff[index] = { choices: { $splice: [[choiceIndex, 1]] } }
- }
- } else if (action === 'delete') {
- const { index, choiceIndex } = params
- diff[index] = { choices: { $splice: [[choiceIndex, 1]] } }
- }
- updatePopper()
- action && setQuestions(update(questions, diff))
- }
-
- /*
- |--------------------------------------------------------------------------
- | Poll form and submit logic
- |--------------------------------------------------------------------------
- */
-
- const removeAlert = () => {
- setAlert(null)
- }
-
- const handleSubmit = (e) => {
- e.preventDefault()
-
- const data = {
- questions
- }
-
- api.poll.change(data, props.pollId)
- .done((data) => {
- setQuestions(data.questions)
- setAlert({
- type: 'success',
- message: django.gettext('The poll has been updated.')
- })
- setErrors([])
- if (props.reloadOnSuccess) {
- dashboard.updateDashboard()
- }
- })
- .fail((xhr, status, err) => {
- if (xhr.responseJSON && 'questions' in xhr.responseJSON) {
- setErrors(xhr.responseJSON.questions)
- }
-
- setAlert({
- type: 'danger',
- message: django.gettext('The poll could not be updated.')
- })
- })
- }
-
- /*
- |--------------------------------------------------------------------------
- | Runtime logic and JSX render
- |--------------------------------------------------------------------------
- */
-
- const [questions, setQuestions] = useState([])
- const [errors, setErrors] = useState([])
- const [alert, setAlert] = useState(null)
- const popper = useRef()
-
- const popperMenuContent = {
- popperButton: {
- styleClass: 'btn poll__btn--light',
- buttonText: django.gettext('New question'),
- icon: 'fa fa-plus'
- },
- popperMenuItems: [
- {
- styleClass: 'btn poll__btn--light submenu-item',
- text: django.gettext('Multiple choice question'),
- handleClick: () => handleQuestion('append')
- },
- {
- styleClass: 'btn poll__btn--light submenu-item',
- text: django.gettext('Open question'),
- handleClick: () => handleQuestion('append', { isOpen: true })
- }
- ]
- }
-
- useEffect(() => {
- api.poll.get(props.pollId).done(({ questions }) => {
- questions.length > 0
- ? setQuestions(questions)
- : setQuestions([getNewQuestion()])
- })
- // eslint-disable-next-line react-hooks/exhaustive-deps
- }, [])
-
- return (
- <form
- onSubmit={(e) => handleSubmit(e)} onChange={() => removeAlert()}
- className="editpoll__questions"
- >
- <FlipMove easing="cubic-bezier(0.25, 0.5, 0.75, 1)">
- {
- questions.map((question, index, arr) => {
- const key = question.id || question.key
- return question.is_open
- ? (
- <div key={key}>
- <EditPollOpenQuestion
- id={key}
- question={question}
- onLabelChange={(label) => handleQuestion('label', { index, label })}
- onHelptextChange={(helptext) => handleQuestion('helptext', { index, helptext })}
- onMoveUp={index !== 0 ? () => handleQuestion('move', { index, direction: 'up' }) : null}
- onMoveDown={index < arr.length - 1 ? () => handleQuestion('move', { index, direction: 'down' }) : null}
- onDelete={() => handleQuestion('delete', { index })}
- errors={errors && errors[index] ? errors[index] : {}}
- />
- </div>
- )
- : (
- <div key={key}>
- <EditPollQuestion
- id={key}
- question={question}
- onLabelChange={(label) => handleQuestion('label', { index, label })}
- onHelptextChange={(helptext) => handleQuestion('helptext', { index, helptext })}
- onMultipleChoiceChange={(multipleChoice) => handleQuestion('multiple-choice', { index, multipleChoice })}
- onHasOtherChoiceChange={(isOtherChoice) => handleChoice('is-other-choice', { index, isOtherChoice })}
- onMoveUp={index !== 0 ? () => handleQuestion('move', { index, direction: 'up' }) : null}
- onMoveDown={index < arr.length - 1 ? () => handleQuestion('move', { index, direction: 'down' }) : null}
- onDelete={() => handleQuestion('delete', { index })}
- errors={errors && errors[index] ? errors[index] : {}}
- onChoiceLabelChange={(choiceIndex, label) => handleChoice('label', { index, choiceIndex, label })}
- onDeleteChoice={(choiceIndex) => handleChoice('delete', { index, choiceIndex })}
- onAppendChoice={(hasOtherOption) => handleChoice('append', { index, hasOtherOption })}
- />
- </div>
- )
- })
- }
- </FlipMove>
- <Alert onClick={() => removeAlert()} {...alert} />
- <div className="editpoll__actions-container">
- <div className="editpoll__menu-container">
- <PopperMenu
- ref={popper}
- containerStyleClass="editpoll__menu-container--override"
- >
- {popperMenuContent}
- </PopperMenu>
- </div>
- <div className="editpoll__menu-container">
- <button type="submit" className="btn poll__btn--dark">
- {django.gettext('Save')}
- </button>
- </div>
- </div>
- </form>
- )
-}
diff --git a/adhocracy4/polls/assets/EditPollChoice.jsx b/adhocracy4/polls/assets/PollDashboard/EditPollChoice.jsx
similarity index 96%
rename from adhocracy4/polls/assets/EditPollChoice.jsx
rename to adhocracy4/polls/assets/PollDashboard/EditPollChoice.jsx
index 2ef0dc556..46a57b1d8 100644
--- a/adhocracy4/polls/assets/EditPollChoice.jsx
+++ b/adhocracy4/polls/assets/PollDashboard/EditPollChoice.jsx
@@ -1,6 +1,6 @@
import React from 'react'
import django from 'django'
-import ErrorList from '../../static/ErrorList'
+import ErrorList from '../../../static/ErrorList'
export const EditPollChoice = (props) => {
return (
diff --git a/adhocracy4/polls/assets/PollDashboard/EditPollDropdown.jsx b/adhocracy4/polls/assets/PollDashboard/EditPollDropdown.jsx
new file mode 100644
index 000000000..da462a0e0
--- /dev/null
+++ b/adhocracy4/polls/assets/PollDashboard/EditPollDropdown.jsx
@@ -0,0 +1,45 @@
+import React from 'react'
+import django from 'django'
+
+const translated = {
+ new: django.gettext(' New Question'),
+ multi: django.gettext('Multiple Choice question'),
+ open: django.gettext('Open question')
+}
+
+const EditPollDropdown = (props) => {
+ return (
+ <div className="dropdown editpoll__dropdown">
+ <button
+ type="button"
+ className="dropdown-toggle btn btn--light"
+ aria-haspopup="true"
+ aria-expanded="false"
+ data-bs-toggle="dropdown"
+ >
+ <i className="fa fa-plus" />
+ {translated.new}
+ </button>
+ <div className="dropdown-menu">
+ <button
+ key="1"
+ className="dropdown-item"
+ type="button"
+ onClick={props.handleToggleMulti}
+ >
+ {translated.multi}
+ </button>
+ <button
+ key="2"
+ className="dropdown-item"
+ type="button"
+ onClick={props.handleToggleOpen}
+ >
+ {translated.open}
+ </button>
+ </div>
+ </div>
+ )
+}
+
+export default EditPollDropdown
diff --git a/adhocracy4/polls/assets/PollDashboard/EditPollManagement.jsx b/adhocracy4/polls/assets/PollDashboard/EditPollManagement.jsx
new file mode 100644
index 000000000..ee2e64961
--- /dev/null
+++ b/adhocracy4/polls/assets/PollDashboard/EditPollManagement.jsx
@@ -0,0 +1,260 @@
+import React, { useState, useEffect } from 'react'
+import django from 'django'
+import FlipMove from 'react-flip-move'
+import update from 'immutability-helper'
+
+import { EditPollQuestion } from './EditPollQuestion'
+import { EditPollOpenQuestion } from './EditPollOpenQuestion'
+import EditPollDropdown from './EditPollDropdown'
+
+import dashboard from '../../../../adhocracy4/dashboard/assets/dashboard'
+import api from '../../../static/api'
+import Alert from '../../../static/Alert'
+
+// | Helper method for local scoped key/identifier
+
+let maxLocalKey = 0
+const getNextLocalKey = () => {
+ // Get an artificial key for non-committed items.
+ // The key is prefixed to prevent collisions with real database keys.
+ return 'local_' + maxLocalKey++
+}
+
+export const EditPollManagement = (props) => {
+ const [questions, setQuestions] = useState([])
+ const [errors, setErrors] = useState([])
+ const [alert, setAlert] = useState(null)
+
+ useEffect(() => {
+ api.poll.get(props.pollId).done(({ questions }) => {
+ questions.length > 0
+ ? setQuestions(questions)
+ : setQuestions([getNewQuestion()])
+ })
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [])
+
+ const getNewQuestion = (label = '', helptext = '') => {
+ return {
+ label,
+ help_text: helptext,
+ multiple_choice: false,
+ key: getNextLocalKey(),
+ is_open: false,
+ choices: [
+ getNewChoice(),
+ getNewChoice()
+ ],
+ answers: []
+ }
+ }
+
+ // | Question state related handlers
+
+ const getNewOpenQuestion = (label = '') => {
+ const newQuestion = getNewQuestion(label)
+ newQuestion.is_open = true
+ newQuestion.choices = []
+ return newQuestion
+ }
+
+ const handleQuestionLabel = (index, label) => {
+ const diff = {}
+ diff[index] = { $merge: { label } }
+ setQuestions(update(questions, diff))
+ }
+
+ const handleQuestionHelpText = (index, helptext) => {
+ const diff = {}
+ diff[index] = { $merge: { help_text: helptext } }
+ setQuestions(update(questions, diff))
+ }
+
+ const handleQuestionMultiChoice = (index, multipleChoice) => {
+ const diff = {}
+ diff[index] = { $merge: { multiple_choice: multipleChoice } }
+ setQuestions(update(questions, diff))
+ }
+
+ const handleQuestionAppend = (params, index) => {
+ let diff = {}
+ const newQuestion = params && params.isOpen
+ ? getNewOpenQuestion()
+ : getNewQuestion()
+ diff = { $push: [newQuestion] }
+ setQuestions(update(questions, diff))
+ }
+
+ const handleQuestionDelete = (index) => {
+ let diff = {}
+ diff = { $splice: [[index, 1]] }
+ setQuestions(update(questions, diff))
+ }
+
+ const handleQuestionMoveUp = (index) => {
+ let diff = {}
+ const position = index - 1
+ diff = {
+ $splice: [
+ [index, 1], // remove from current index
+ [position, 0, questions[index]] // insert to new index
+ ]
+ }
+ setQuestions(update(questions, diff))
+ }
+
+ const handleQuestionMoveDown = (index) => {
+ let diff = {}
+ const position = index + 1
+ diff = { $splice: [[index, 1], [position, 0, questions[index]]] }
+ setQuestions(update(questions, diff))
+ }
+
+ // | Choice state related handlers
+
+ const getNewChoice = (label = '', isOther = false) => {
+ return {
+ label,
+ key: isOther ? 'other-choice' : getNextLocalKey(),
+ is_other_choice: isOther
+ }
+ }
+
+ const handleChoiceLabel = (index, choiceIndex, label) => {
+ const diff = {}
+ diff[index] = { choices: {} }
+ diff[index].choices[choiceIndex] = { $merge: { label } }
+ setQuestions(update(questions, diff))
+ }
+
+ const handleChoiceAppend = (index, hasOtherOption) => {
+ const position = questions[index].choices.length - 1
+ const newChoice = getNewChoice()
+ const diff = {}
+ diff[index] = hasOtherOption
+ ? { choices: { $splice: [[position, 0, newChoice]] } }
+ : { choices: { $push: [newChoice] } }
+ setQuestions(update(questions, diff))
+ }
+
+ const handleChoiceIsOtherChoice = (index, isOtherChoice) => {
+ const diff = {}
+ if (isOtherChoice) {
+ const otherChoice = getNewChoice('other', true)
+ diff[index] = { choices: { $push: [otherChoice] } }
+ } else {
+ const choiceIndex = questions[index].choices.findIndex(c => c.key === 'other-choice')
+ diff[index] = { choices: { $splice: [[choiceIndex, 1]] } }
+ }
+ setQuestions(update(questions, diff))
+ }
+
+ const handleChoiceDelete = (index, choiceIndex) => {
+ const diff = {}
+ diff[index] = { choices: { $splice: [[choiceIndex, 1]] } }
+ setQuestions(update(questions, diff))
+ }
+
+ // | Poll form and submit logic
+
+ const removeAlert = () => {
+ setAlert(null)
+ }
+
+ const handleSubmit = (e) => {
+ e.preventDefault()
+
+ const data = {
+ questions
+ }
+
+ api.poll.change(data, props.pollId)
+ .done((data) => {
+ setQuestions(data.questions)
+ setAlert({
+ type: 'success',
+ message: django.gettext('The poll has been updated.')
+ })
+ setErrors([])
+ if (props.reloadOnSuccess) {
+ dashboard.updateDashboard()
+ }
+ })
+ .fail((xhr, status, err) => {
+ if (xhr.responseJSON && 'questions' in xhr.responseJSON) {
+ setErrors(xhr.responseJSON.questions)
+ }
+
+ setAlert({
+ type: 'danger',
+ message: django.gettext('The poll could not be updated.')
+ })
+ })
+ }
+
+ // | JSX render
+
+ return (
+ <form
+ onSubmit={(e) => handleSubmit(e)} onChange={() => removeAlert()}
+ className="editpoll__questions"
+ >
+ <FlipMove easing="cubic-bezier(0.25, 0.5, 0.75, 1)">
+ {
+ questions.map((question, index, arr) => {
+ const key = question.id || question.key
+ return question.is_open
+ ? (
+ <div key={key}>
+ <EditPollOpenQuestion
+ id={key}
+ question={question}
+ onLabelChange={(label) => handleQuestionLabel(index, label)}
+ onHelptextChange={(helptext) => handleQuestionHelpText(index, helptext)}
+ onMoveUp={index !== 0 ? () => handleQuestionMoveUp(index) : null}
+ onMoveDown={index < arr.length - 1 ? () => handleQuestionMoveDown(index) : null}
+ onDelete={() => handleQuestionDelete(index)}
+ errors={errors && errors[index] ? errors[index] : {}}
+ />
+ </div>
+ )
+ : (
+ <div key={key}>
+ <EditPollQuestion
+ id={key}
+ question={question}
+ onLabelChange={(label) => handleQuestionLabel(index, label)}
+ onHelptextChange={(helptext) => handleQuestionHelpText(index, helptext)}
+ onMultipleChoiceChange={(multipleChoice) => handleQuestionMultiChoice(index, multipleChoice)}
+ onMoveUp={index !== 0 ? () => handleQuestionMoveUp(index) : null}
+ onMoveDown={index < arr.length - 1 ? () => handleQuestionMoveDown(index) : null}
+ onDelete={() => handleQuestionDelete(index)}
+ errors={errors && errors[index] ? errors[index] : {}}
+ onHasOtherChoiceChange={(isOtherChoice) => handleChoiceIsOtherChoice(index, isOtherChoice)}
+ onChoiceLabelChange={(choiceIndex, label) => handleChoiceLabel(index, choiceIndex, label)}
+ onDeleteChoice={(choiceIndex) => handleChoiceDelete(index, choiceIndex)}
+ onAppendChoice={(hasOtherOption) => handleChoiceAppend(index, hasOtherOption)}
+ />
+ </div>
+ )
+ })
+ }
+ </FlipMove>
+ <Alert onClick={() => removeAlert()} {...alert} />
+ <div className="editpoll__actions-container">
+ <div className="editpoll__menu-container">
+ <EditPollDropdown
+ handleToggleMulti={() => handleQuestionAppend()}
+ handleToggleOpen={() => handleQuestionAppend({ isOpen: true })}
+ />
+ </div>
+
+ <div className="editpoll__menu-container">
+ <button type="submit" className="btn poll__btn--dark">
+ {django.gettext('Save')}
+ </button>
+ </div>
+ </div>
+ </form>
+ )
+}
diff --git a/adhocracy4/polls/assets/EditPollOpenQuestion.jsx b/adhocracy4/polls/assets/PollDashboard/EditPollOpenQuestion.jsx
similarity index 98%
rename from adhocracy4/polls/assets/EditPollOpenQuestion.jsx
rename to adhocracy4/polls/assets/PollDashboard/EditPollOpenQuestion.jsx
index 23e63b7f9..e5ecbf016 100644
--- a/adhocracy4/polls/assets/EditPollOpenQuestion.jsx
+++ b/adhocracy4/polls/assets/PollDashboard/EditPollOpenQuestion.jsx
@@ -1,6 +1,6 @@
import React, { useState } from 'react'
import django from 'django'
-import ErrorList from '../../static/ErrorList'
+import ErrorList from '../../../static/ErrorList'
import { HelptextForm } from './HelptextForm'
export const EditPollOpenQuestion = (props) => {
diff --git a/adhocracy4/polls/assets/EditPollQuestion.jsx b/adhocracy4/polls/assets/PollDashboard/EditPollQuestion.jsx
similarity index 99%
rename from adhocracy4/polls/assets/EditPollQuestion.jsx
rename to adhocracy4/polls/assets/PollDashboard/EditPollQuestion.jsx
index 6a7fa71b9..94de1958c 100644
--- a/adhocracy4/polls/assets/EditPollQuestion.jsx
+++ b/adhocracy4/polls/assets/PollDashboard/EditPollQuestion.jsx
@@ -1,7 +1,7 @@
import React, { useState } from 'react'
import { EditPollChoice } from './EditPollChoice'
import django from 'django'
-import ErrorList from '../../static/ErrorList'
+import ErrorList from '../../../static/ErrorList'
import { HelptextForm } from './HelptextForm'
const FlipMove = require('react-flip-move').default
diff --git a/adhocracy4/polls/assets/HelptextForm.jsx b/adhocracy4/polls/assets/PollDashboard/HelptextForm.jsx
similarity index 92%
rename from adhocracy4/polls/assets/HelptextForm.jsx
rename to adhocracy4/polls/assets/PollDashboard/HelptextForm.jsx
index 39cf1bd89..9abedc1a3 100644
--- a/adhocracy4/polls/assets/HelptextForm.jsx
+++ b/adhocracy4/polls/assets/PollDashboard/HelptextForm.jsx
@@ -1,6 +1,6 @@
import React from 'react'
import django from 'django'
-import ErrorList from '../../static/ErrorList'
+import ErrorList from '../../../static/ErrorList'
export const HelptextForm = (props) => {
return (
diff --git a/adhocracy4/polls/assets/CharCounter.jsx b/adhocracy4/polls/assets/PollDetail/CharCounter.jsx
similarity index 100%
rename from adhocracy4/polls/assets/CharCounter.jsx
rename to adhocracy4/polls/assets/PollDetail/CharCounter.jsx
diff --git a/adhocracy4/polls/assets/PollOpenQuestion.jsx b/adhocracy4/polls/assets/PollDetail/PollOpenQuestion.jsx
similarity index 97%
rename from adhocracy4/polls/assets/PollOpenQuestion.jsx
rename to adhocracy4/polls/assets/PollDetail/PollOpenQuestion.jsx
index c5496e273..aee0e7598 100644
--- a/adhocracy4/polls/assets/PollOpenQuestion.jsx
+++ b/adhocracy4/polls/assets/PollDetail/PollOpenQuestion.jsx
@@ -2,8 +2,7 @@ import React, { useState } from 'react'
import { CharCounter } from './CharCounter'
export const PollOpenQuestion = (props) => {
- const questionHelpText = props.question.help_text ? <div className="poll__help-text">{props.question.help_text}</div> : null
- const maxlength = 750
+ // | Function to define state
const getUserOpenAnswer = () => {
const userAnswerId = props.question.userAnswer
@@ -17,6 +16,8 @@ export const PollOpenQuestion = (props) => {
}
const [userAnswer, setUserAnswer] = useState(getUserOpenAnswer())
+ const questionHelpText = props.question.help_text ? <div className="poll__help-text">{props.question.help_text}</div> : null
+ const maxlength = 750
const handleOpenChange = (event) => {
setUserAnswer(event.target.value)
diff --git a/adhocracy4/polls/assets/PollQuestion.jsx b/adhocracy4/polls/assets/PollDetail/PollQuestion.jsx
similarity index 94%
rename from adhocracy4/polls/assets/PollQuestion.jsx
rename to adhocracy4/polls/assets/PollDetail/PollQuestion.jsx
index c9f72431a..134cd2887 100644
--- a/adhocracy4/polls/assets/PollQuestion.jsx
+++ b/adhocracy4/polls/assets/PollDetail/PollQuestion.jsx
@@ -1,9 +1,16 @@
import React, { useEffect, useState } from 'react'
import django from 'django'
import { CharCounter } from './CharCounter'
-import ErrorList from '../../static/ErrorList'
+import ErrorList from '../../../static/ErrorList'
+
+const translated = {
+ multiple: django.gettext('Multiple answers are possible.'),
+ other: django.gettext('other')
+}
export const PollQuestion = (props) => {
+ // | Function to define state
+
const getUserAnswer = () => {
const userAnswerId = props.question.other_choice_user_answer
const userAnswer = props.question.other_choice_answers.find(oc => oc.vote_id === userAnswerId)
@@ -15,11 +22,11 @@ export const PollQuestion = (props) => {
)
}
- const multiHelpText = props.question.multiple_choice ? <div className="poll__help-text">{django.gettext('Multiple answers are possible.')}</div> : null
- const questionHelpText = props.question.help_text ? <div className="poll__help-text">{props.question.help_text}</div> : null
const [userChoices, setUserChoices] = useState(props.question.userChoices)
const [otherChoiceAnswer, setOtherChoiceAnswer] = useState(getUserAnswer())
const [errors, setErrors] = useState()
+ const multiHelpText = props.question.multiple_choice ? <div className="poll__help-text">{translated.multiple}</div> : null
+ const questionHelpText = props.question.help_text ? <div className="poll__help-text">{props.question.help_text}</div> : null
const maxlength = 250
useEffect(() => {
@@ -81,7 +88,7 @@ export const PollQuestion = (props) => {
onChange={(event) => { handleSingleChange(event, choice.is_other_choice) }}
disabled={!props.question.authenticated || props.question.isReadOnly}
/>
- <span className="radio__text">{choice.is_other_choice ? django.gettext('other') : choice.label}</span>
+ <span className="radio__text">{choice.is_other_choice ? translated.other : choice.label}</span>
{choice.is_other_choice &&
<>
<input
@@ -120,7 +127,7 @@ export const PollQuestion = (props) => {
onChange={(event) => { handleMultiChange(event, choice.is_other_choice) }}
disabled={!props.question.authenticated || props.question.isReadOnly}
/>
- <span className="radio__text radio__text--checkbox">{choice.is_other_choice ? django.gettext('other') : choice.label}</span>
+ <span className="radio__text radio__text--checkbox">{choice.is_other_choice ? translated.other : choice.label}</span>
{choice.is_other_choice &&
<>
<input
diff --git a/adhocracy4/polls/assets/PollQuestions.jsx b/adhocracy4/polls/assets/PollDetail/PollQuestions.jsx
similarity index 98%
rename from adhocracy4/polls/assets/PollQuestions.jsx
rename to adhocracy4/polls/assets/PollDetail/PollQuestions.jsx
index 40a2aa32f..7918b4358 100644
--- a/adhocracy4/polls/assets/PollQuestions.jsx
+++ b/adhocracy4/polls/assets/PollDetail/PollQuestions.jsx
@@ -1,13 +1,14 @@
import React from 'react'
+import django from 'django'
+
import { PollQuestion } from './PollQuestion'
import { PollOpenQuestion } from './PollOpenQuestion'
-import Alert from '../../static/Alert'
-import django from 'django'
import PollResults from './PollResults'
-import { TermsOfUseCheckbox } from '../../static/TermsOfUseCheckbox'
-const api = require('adhocracy4').api
-const config = require('adhocracy4').config
+import Alert from '../../../static/Alert'
+import api from '../../../static/api'
+import config from '../../../static/config'
+import { TermsOfUseCheckbox } from '../../../static/TermsOfUseCheckbox'
const ALERT_SUCCESS = {
type: 'success',
diff --git a/adhocracy4/polls/assets/PollResults.jsx b/adhocracy4/polls/assets/PollDetail/PollResults.jsx
similarity index 100%
rename from adhocracy4/polls/assets/PollResults.jsx
rename to adhocracy4/polls/assets/PollDetail/PollResults.jsx
diff --git a/adhocracy4/polls/assets/PopperMenu.jsx b/adhocracy4/polls/assets/PopperMenu.jsx
deleted file mode 100644
index d2054b5d8..000000000
--- a/adhocracy4/polls/assets/PopperMenu.jsx
+++ /dev/null
@@ -1,91 +0,0 @@
-import React, { useState, useEffect, useRef, useImperativeHandle, forwardRef } from 'react'
-import { usePopper } from 'react-popper'
-
-const PopperMenu = (props, ref) => {
- const { children: { popperButton, popperMenuItems, popperConfig } } = props
- const referenceRef = useRef(null)
- const popperRef = useRef(null)
- const [visible, setVisible] = useState(false)
-
- let config = {
- placement: 'bottom-start'
- }
-
- popperConfig &&
- (config = { ...config, ...popperConfig })
-
- const popper = usePopper(
- referenceRef.current,
- popperRef.current,
- {
- ...config
- }
- )
-
- const { styles, attributes } = popper
- const containerStyleClass = props.containerStyleClass
- ? `popper-content--container ${props.containerStyleClass}`
- : 'popper-content--container'
-
- useEffect(() => {
- // listen for clicks and close dropdown on body
- document.addEventListener('mousedown', handleDocumentClick)
- return () => {
- document.removeEventListener('mousedown', handleDocumentClick)
- }
- }, [])
-
- const handleDocumentClick = (event) => {
- (referenceRef.current.contains(event.target) ||
- popperRef.current.contains(event.target)) ||
- setVisible(false)
- }
- const handleDropdownClick = (event) => {
- setVisible(!visible)
- }
-
- const handleClickAction = (menuItem) => {
- setVisible(false)
- menuItem.handleClick()
- popper.update()
- }
-
- useImperativeHandle(ref, () => ({
- instance: popper
- }))
-
- return (
- <>
- <button
- className={popperButton.styleClass ? popperButton.styleClass : ''}
- ref={referenceRef} onClick={handleDropdownClick}
- type="button"
- >
- {popperButton.icon && <i className={popperButton.icon} />} {popperButton.buttonText}
- </button>
- <div ref={popperRef} style={styles.popper} {...attributes.popper}>
- <div
- style={styles.offset}
- className={containerStyleClass}
- data-visible={visible}
- >
- <ul className="popper-container">
- {popperMenuItems.map((menuItem, idx) => (
- <li key={idx}>
- <button
- className={`${menuItem.styleClass ? menuItem.styleClass : ''} popper-menuitem__button`}
- type="button"
- onClick={() => handleClickAction(menuItem)}
- >
- {menuItem.text}
- </button>
- </li>
- ))}
- </ul>
- </div>
- </div>
- </>
- )
-}
-
-export default forwardRef(PopperMenu)
diff --git a/adhocracy4/polls/assets/__tests__/CharCounter.jest.jsx b/adhocracy4/polls/assets/__tests__/CharCounter.jest.jsx
index de2686881..9ecc2f453 100644
--- a/adhocracy4/polls/assets/__tests__/CharCounter.jest.jsx
+++ b/adhocracy4/polls/assets/__tests__/CharCounter.jest.jsx
@@ -3,7 +3,7 @@ import React from 'react'
import { render } from '@testing-library/react'
// component and related data to be tested
-import { CharCounter } from '../CharCounter'
+import { CharCounter } from '../PollDetail/CharCounter'
test('<CharCounter> component renders correctly', () => {
const tree = render(<CharCounter value="random" max={25} />)
diff --git a/adhocracy4/polls/assets/__tests__/EditPollChoice.jest.jsx b/adhocracy4/polls/assets/__tests__/EditPollChoice.jest.jsx
index 8d33cb2a2..35421d3dc 100644
--- a/adhocracy4/polls/assets/__tests__/EditPollChoice.jest.jsx
+++ b/adhocracy4/polls/assets/__tests__/EditPollChoice.jest.jsx
@@ -3,7 +3,7 @@ import React from 'react'
import { render, fireEvent } from '@testing-library/react'
// component and related data to be tested
-import { EditPollChoice } from '../EditPollChoice.jsx'
+import { EditPollChoice } from '../PollDashboard/EditPollChoice.jsx'
const CHOICE_OBJECT = {
id: 1,
diff --git a/adhocracy4/polls/assets/__tests__/EditPollOpenQuestion.jest.jsx b/adhocracy4/polls/assets/__tests__/EditPollOpenQuestion.jest.jsx
index 6e42e664a..819375941 100644
--- a/adhocracy4/polls/assets/__tests__/EditPollOpenQuestion.jest.jsx
+++ b/adhocracy4/polls/assets/__tests__/EditPollOpenQuestion.jest.jsx
@@ -3,7 +3,7 @@ import React from 'react'
import { render, fireEvent } from '@testing-library/react'
// component and related data to be tested
-import { EditPollOpenQuestion } from '../EditPollOpenQuestion.jsx'
+import { EditPollOpenQuestion } from '../PollDashboard/EditPollOpenQuestion.jsx'
import { QUESTION_OBJECT } from './__testdata__/QUESTION_OBJECT'
describe('<EditPollOpenQuestion> with...', () => {
diff --git a/adhocracy4/polls/assets/__tests__/EditPollQuestion.jest.jsx b/adhocracy4/polls/assets/__tests__/EditPollQuestion.jest.jsx
index f031ef938..df565e738 100644
--- a/adhocracy4/polls/assets/__tests__/EditPollQuestion.jest.jsx
+++ b/adhocracy4/polls/assets/__tests__/EditPollQuestion.jest.jsx
@@ -3,7 +3,7 @@ import React from 'react'
import { render, fireEvent } from '@testing-library/react'
// component and related data to be tested
-import { EditPollQuestion } from '../EditPollQuestion.jsx'
+import { EditPollQuestion } from '../PollDashboard/EditPollQuestion.jsx'
import { QUESTION_OBJECT } from './__testdata__/QUESTION_OBJECT'
describe('<EditPollQuestion> with...', () => {
diff --git a/adhocracy4/polls/assets/__tests__/PollQuestion.jest.jsx b/adhocracy4/polls/assets/__tests__/PollQuestion.jest.jsx
index 540fa7538..95bf3abf1 100644
--- a/adhocracy4/polls/assets/__tests__/PollQuestion.jest.jsx
+++ b/adhocracy4/polls/assets/__tests__/PollQuestion.jest.jsx
@@ -3,7 +3,7 @@ import React from 'react'
import { render, fireEvent } from '@testing-library/react'
// component and related data to be tested
-import { PollQuestion } from '../PollQuestion.jsx'
+import { PollQuestion } from '../PollDetail/PollQuestion.jsx'
import { QUESTION_OBJECT } from './__testdata__/QUESTION_OBJECT'
describe('render <PollQuestion> with...', () => {
diff --git a/adhocracy4/polls/assets/react_polls.jsx b/adhocracy4/polls/assets/react_polls.jsx
index 1876b493d..6af4a8fe9 100644
--- a/adhocracy4/polls/assets/react_polls.jsx
+++ b/adhocracy4/polls/assets/react_polls.jsx
@@ -1,8 +1,8 @@
import React from 'react'
import { createRoot } from 'react-dom/client'
-import { EditPollQuestions } from './EditPollQuestions'
-import PollQuestions from './PollQuestions'
+import { EditPollManagement } from './PollDashboard/EditPollManagement'
+import PollQuestions from './PollDetail/PollQuestions'
module.exports.renderPolls = function (element) {
const pollId = element.getAttribute('data-poll-id')
@@ -17,5 +17,5 @@ module.exports.renderPollManagement = function (element) {
const reloadOnSuccess = JSON.parse(element.getAttribute('data-reloadOnSuccess'))
const root = createRoot(element)
- root.render(<EditPollQuestions pollId={pollId} reloadOnSuccess={reloadOnSuccess} />)
+ root.render(<EditPollManagement pollId={pollId} reloadOnSuccess={reloadOnSuccess} />)
}
diff --git a/adhocracy4/polls/models.py b/adhocracy4/polls/models.py
index e88f93d81..fda89f75e 100644
--- a/adhocracy4/polls/models.py
+++ b/adhocracy4/polls/models.py
@@ -21,7 +21,7 @@ def annotate_vote_count(self):
answer_count=models.Count(
'answers__creator_id',
distinct=True),
- )
+ ).order_by('weight')
class ChoiceQuerySet(models.QuerySet):
diff --git a/adhocracy4/static/ErrorList.jsx b/adhocracy4/static/ErrorList.jsx
index 3ecc9e9e8..86614a9e8 100644
--- a/adhocracy4/static/ErrorList.jsx
+++ b/adhocracy4/static/ErrorList.jsx
@@ -3,7 +3,7 @@ import React from 'react'
const ErrorList = ({ errors, field }) => {
if (errors && errors[field]) {
return (
- <ul className="errorlist">
+ <ul className="errorlist" role="alert">
{errors[field].map(function (msg, index) {
return <li key={msg}>{msg}</li>
})}
diff --git a/package.json b/package.json
index 735f3606f..c02375de6 100644
--- a/package.json
+++ b/package.json
@@ -48,8 +48,8 @@
"lint-staged": "13.0.3",
"react": "18.2.0",
"react-dom": "18.2.0",
+ "react-flip-move": "3.0.4",
"react-markdown": "8.0.3",
- "react-popper": "2.3.0",
"react-slick": "0.29.0",
"shpjs": "4.0.4",
"slick-carousel": "git+https://github.com/liqd/slick.git#pm-2019-07-overwrites"
@@ -62,8 +62,8 @@
"leaflet.markercluster": "git+https://github.com/liqd/Leaflet.markercluster#5ed89b26922c51083fc9632a2c01425b9261a0f5",
"react": "18.2.0",
"react-dom": "18.2.0",
+ "react-flip-move": "3.0.4",
"react-markdown": "8.0.3",
- "react-popper": "2.3.0",
"react-slick": "0.29",
"shpjs": "4.0.4",
"slick-carousel": "git+https://github.com/liqd/slick.git#pm-2019-07-overwrites"
|
DataDog__dd-trace-py-906 | Scrolling for the left-side menu on the API docs is broken
Chromium, Ubuntu
Go to http://pypi.datadoghq.com/trace/docs/advanced_usage.html
Scroll up and down
If your browser window is short enough, you'll notice the left-side menu doesn't scroll with the page, leaving some parts inaccessible.
Video: [vokoscreen-2019-04-25_08-21-40.zip](https://github.com/DataDog/dd-trace-py/files/3117626/vokoscreen-2019-04-25_08-21-40.zip)
Since the API docs are generated from this repo, I figured I'd report the issue here.
| [
{
"content": "# -*- coding: utf-8 -*-\n#\n# ddtrace documentation build configuration file, created by\n# sphinx-quickstart on Thu Jul 7 17:25:05 2016.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n\nimport os\nimport sys\nfrom datetime import datetime\n\n\n# append the ddtrace path to syspath\nsys.path.insert(0, os.path.abspath('..'))\n\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.extlinks',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#\n# source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nyear = datetime.now().year\nproject = u'ddtrace'\ncopyright = u'2016-{}, Datadog, Inc.'.format(year)\nauthor = u'Datadog, Inc.'\n\n# document in order of source\nautodoc_member_order = 'bysource'\n\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = u'0.2'\n# The full version, including alpha/beta/rc tags.\nrelease = u'0.2'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#\n# today = ''\n#\n# Else, today_fmt is used as the format for a strftime call.\n#\n# today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = [\n '_build',\n 'Thumbs.db',\n '.DS_Store'\n]\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#\n# default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#\n# add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#\n# add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. 
They are ignored by default.\n#\n# show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n# modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n# keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'alabaster'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\n 'description': 'Datadog\\'s Python tracing client',\n 'fixed_sidebar': True,\n}\n\n# Add any paths that contain custom themes here, relative to this directory.\n# html_theme_path = []\n\n# The name for this set of Sphinx documents.\n# \"<project> v<release> documentation\" by default.\n#\n# html_title = u'ddtrace v0.2'\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#\n# html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#\n# html_logo = None\n\n# The name of an image file (relative to this directory) to use as a favicon of\n# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#\n# html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n# html_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n#\n# html_extra_path = []\n\n# If not None, a 'Last updated on:' timestamp is inserted at every page\n# bottom, using the given strftime format.\n# The empty string is equivalent to '%b %d, %Y'.\n#\n# html_last_updated_fmt = None\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#\n# html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#\nhtml_sidebars = {\n '**': [\n 'about.html',\n 'nav.html',\n 'relations.html',\n 'searchbox.html',\n ]\n}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#\n# html_additional_pages = {}\n\n# If false, no module index is generated.\n#\n# html_domain_indices = True\n\n# If false, no index is generated.\n#\n# html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#\n# html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#\n# html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#\n# html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#\n# html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. 
The value of this option must be the\n# base URL from which the finished HTML is served.\n#\n# html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n# html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'\n#\n# html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# 'ja' uses this config value.\n# 'zh' user can custom change `jieba` dictionary path.\n#\n# html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. If empty, the default will be used.\n#\n# html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'ddtracedoc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'ddtrace.tex', u'ddtrace Documentation',\n u'Datadog, Inc', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#\n# latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#\n# latex_use_parts = False\n\n# If true, show page references after internal links.\n#\n# latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#\n# latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#\n# latex_appendices = []\n\n# If false, no module index is generated.\n#\n# latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'ddtrace', u'ddtrace Documentation',\n [author], 1)\n]\n\n# If true, show URL addresses after external links.\n#\n# man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'ddtrace', u'ddtrace Documentation',\n author, 'ddtrace', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#\n# texinfo_appendices = []\n\n# If false, no module index is generated.\n#\n# texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#\n# texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#\n# texinfo_no_detailmenu = False\n",
"path": "docs/conf.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n#\n# ddtrace documentation build configuration file, created by\n# sphinx-quickstart on Thu Jul 7 17:25:05 2016.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n\nimport os\nimport sys\nfrom datetime import datetime\n\n\n# append the ddtrace path to syspath\nsys.path.insert(0, os.path.abspath('..'))\n\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.extlinks',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#\n# source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nyear = datetime.now().year\nproject = u'ddtrace'\ncopyright = u'2016-{}, Datadog, Inc.'.format(year)\nauthor = u'Datadog, Inc.'\n\n# document in order of source\nautodoc_member_order = 'bysource'\n\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = u'0.2'\n# The full version, including alpha/beta/rc tags.\nrelease = u'0.2'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#\n# today = ''\n#\n# Else, today_fmt is used as the format for a strftime call.\n#\n# today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = [\n '_build',\n 'Thumbs.db',\n '.DS_Store'\n]\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#\n# default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#\n# add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#\n# add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. 
They are ignored by default.\n#\n# show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n# modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n# keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'alabaster'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\n 'description': 'Datadog\\'s Python tracing client',\n}\n\n# Add any paths that contain custom themes here, relative to this directory.\n# html_theme_path = []\n\n# The name for this set of Sphinx documents.\n# \"<project> v<release> documentation\" by default.\n#\n# html_title = u'ddtrace v0.2'\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#\n# html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#\n# html_logo = None\n\n# The name of an image file (relative to this directory) to use as a favicon of\n# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#\n# html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n# html_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n#\n# html_extra_path = []\n\n# If not None, a 'Last updated on:' timestamp is inserted at every page\n# bottom, using the given strftime format.\n# The empty string is equivalent to '%b %d, %Y'.\n#\n# html_last_updated_fmt = None\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#\n# html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#\nhtml_sidebars = {\n '**': [\n 'about.html',\n 'nav.html',\n 'relations.html',\n 'searchbox.html',\n ]\n}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#\n# html_additional_pages = {}\n\n# If false, no module index is generated.\n#\n# html_domain_indices = True\n\n# If false, no index is generated.\n#\n# html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#\n# html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#\n# html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#\n# html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#\n# html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. 
The value of this option must be the\n# base URL from which the finished HTML is served.\n#\n# html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n# html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'\n#\n# html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# 'ja' uses this config value.\n# 'zh' user can custom change `jieba` dictionary path.\n#\n# html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. If empty, the default will be used.\n#\n# html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'ddtracedoc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'ddtrace.tex', u'ddtrace Documentation',\n u'Datadog, Inc', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#\n# latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#\n# latex_use_parts = False\n\n# If true, show page references after internal links.\n#\n# latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#\n# latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#\n# latex_appendices = []\n\n# If false, no module index is generated.\n#\n# latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'ddtrace', u'ddtrace Documentation',\n [author], 1)\n]\n\n# If true, show URL addresses after external links.\n#\n# man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'ddtrace', u'ddtrace Documentation',\n author, 'ddtrace', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#\n# texinfo_appendices = []\n\n# If false, no module index is generated.\n#\n# texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#\n# texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#\n# texinfo_no_detailmenu = False\n",
"path": "docs/conf.py"
}
] | diff --git a/docs/conf.py b/docs/conf.py
index 0ed85b47747..5abb255baaf 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -145,7 +145,6 @@
#
html_theme_options = {
'description': 'Datadog\'s Python tracing client',
- 'fixed_sidebar': True,
}
# Add any paths that contain custom themes here, relative to this directory.
|
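
For quick reference, the change captured in this record boils down to dropping alabaster's `fixed_sidebar` option from `docs/conf.py`, presumably because a fixed-position sidebar cannot scroll with the page in short browser windows. A minimal sketch of the resulting setting, taken from the record's after_files (all other Sphinx options omitted):

```python
# docs/conf.py -- relevant fragment only.
# Omitting 'fixed_sidebar': True lets the alabaster sidebar scroll with
# the rest of the page instead of staying pinned in place.
html_theme_options = {
    'description': "Datadog's Python tracing client",
}
```
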
opendatacube__datacube-core-875 | If DB_PORT is not set, config process sets port to an empty string
I have an existing environment that sets up the datacube connection using this:
```
- DB_HOSTNAME=host.docker.internal
- DB_USERNAME=opendatacube
- DB_PASSWORD=opendatacubepassword
- DB_DATABASE=opendatacube
```
and with the new changes that read config from environment variables in preference to the config file, the port now has to be set explicitly with `DB_PORT=5432`.
The expected behaviour is that if the port is blank, it falls back to the default Postgres port.
https://github.com/opendatacube/datacube-core/blob/8481d907b198a1c8946326b8b70625a9a8523a12/datacube/config.py#L265
| [
{
"content": "# coding=utf-8\n\"\"\"\nUser configuration.\n\"\"\"\n\nimport os\nfrom pathlib import Path\nimport configparser\nfrom urllib.parse import unquote_plus, urlparse\nfrom typing import Optional, Iterable, Union, Any, Tuple, Dict\n\nPathLike = Union[str, 'os.PathLike[Any]']\n\n\nENVIRONMENT_VARNAME = 'DATACUBE_CONFIG_PATH'\n#: Config locations in order. Properties found in latter locations override\n#: earlier ones.\n#:\n#: - `/etc/datacube.conf`\n#: - file at `$DATACUBE_CONFIG_PATH` environment variable\n#: - `~/.datacube.conf`\n#: - `datacube.conf`\nDEFAULT_CONF_PATHS = tuple(p for p in ['/etc/datacube.conf',\n os.environ.get(ENVIRONMENT_VARNAME, ''),\n str(os.path.expanduser(\"~/.datacube.conf\")),\n 'datacube.conf'] if len(p) > 0)\n\nDEFAULT_ENV = 'default'\n\n# Default configuration options.\n_DEFAULT_CONF = \"\"\"\n[DEFAULT]\n# Blank implies localhost\ndb_hostname:\ndb_database: datacube\nindex_driver: default\n# If a connection is unused for this length of time, expect it to be invalidated.\ndb_connection_timeout: 60\n\n[user]\n# Which environment to use when none is specified explicitly.\n# note: will fail if default_environment points to non-existent section\n# default_environment: datacube\n\"\"\"\n\n#: Used in place of None as a default, when None is a valid but not default parameter to a function\n_UNSET = object()\n\n\ndef read_config(default_text: Optional[str] = None) -> configparser.ConfigParser:\n config = configparser.ConfigParser()\n if default_text is not None:\n config.read_string(default_text)\n return config\n\n\nclass LocalConfig(object):\n \"\"\"\n System configuration for the user.\n\n This loads from a set of possible configuration files which define the available environments.\n An environment contains connection details for a Data Cube Index, which provides access to\n available data.\n\n \"\"\"\n\n def __init__(self, config: configparser.ConfigParser,\n files_loaded: Optional[Iterable[str]] = None,\n env: Optional[str] = None):\n \"\"\"\n Datacube environment resolution precedence is:\n 1. Supplied as a function argument `env`\n 2. DATACUBE_ENVIRONMENT environment variable\n 3. user.default_environment option in the config\n 4. 
'default' or 'datacube' whichever is present\n\n If environment is supplied by any of the first 3 methods is not present\n in the config, then throw an exception.\n \"\"\"\n self._config = config\n self.files_loaded = [] if files_loaded is None else list(iter(files_loaded))\n\n if env is None:\n env = os.environ.get('DATACUBE_ENVIRONMENT',\n config.get('user', 'default_environment', fallback=None))\n\n # If the user specifies a particular env, we either want to use it or Fail\n if env:\n if config.has_section(env):\n self._env = env\n # All is good\n return\n else:\n raise ValueError('No config section found for environment %r' % (env,))\n else:\n # If an env hasn't been specifically selected, we can fall back defaults\n fallbacks = [DEFAULT_ENV, 'datacube']\n for fallback_env in fallbacks:\n if config.has_section(fallback_env):\n self._env = fallback_env\n return\n raise ValueError('No ODC environment, checked configurations for %s' % fallbacks)\n\n @classmethod\n def find(cls,\n paths: Optional[Union[str, Iterable[PathLike]]] = None,\n env: Optional[str] = None) -> 'LocalConfig':\n \"\"\"\n Find config from environment variables or possible filesystem locations.\n\n 'env' is which environment to use from the config: it corresponds to the name of a\n config section\n \"\"\"\n config = read_config(_DEFAULT_CONF)\n\n if paths is None:\n if env is None:\n env_opts = parse_env_params()\n if env_opts:\n return _cfg_from_env_opts(env_opts, config)\n\n paths = DEFAULT_CONF_PATHS\n\n if isinstance(paths, str) or hasattr(paths, '__fspath__'): # Use os.PathLike in 3.6+\n paths = [str(paths)]\n\n files_loaded = config.read(str(p) for p in paths if p)\n\n return LocalConfig(\n config,\n files_loaded=files_loaded,\n env=env,\n )\n\n def get(self, item: str, fallback=_UNSET):\n if fallback == _UNSET:\n return self._config.get(self._env, item)\n else:\n return self._config.get(self._env, item, fallback=fallback)\n\n def __getitem__(self, item: str):\n return self.get(item, fallback=None)\n\n def __str__(self) -> str:\n return \"LocalConfig<loaded_from={}, environment={!r}, config={}>\".format(\n self.files_loaded or 'defaults',\n self._env,\n dict(self._config[self._env]),\n )\n\n def __repr__(self) -> str:\n return str(self)\n\n\nOPTIONS = {'reproject_threads': 4}\n\n\n#: pylint: disable=invalid-name\nclass set_options(object):\n \"\"\"Set global state within a controlled context\n\n Currently, the only supported options are:\n * reproject_threads: The number of threads to use when reprojecting\n\n You can use ``set_options`` either as a context manager::\n\n with datacube.set_options(reproject_threads=16):\n ...\n\n Or to set global options::\n\n datacube.set_options(reproject_threads=16)\n \"\"\"\n\n def __init__(self, **kwargs):\n self.old = OPTIONS.copy()\n OPTIONS.update(kwargs)\n\n def __enter__(self):\n return\n\n def __exit__(self, exc_type, value, traceback):\n OPTIONS.clear()\n OPTIONS.update(self.old)\n\n\nDB_KEYS = ('hostname', 'port', 'database', 'username', 'password')\n\n\ndef parse_connect_url(url: str) -> Dict[str, str]:\n \"\"\" Extract database,hostname,port,username,password from db URL.\n\n Example: postgresql://username:password@hostname:port/database\n\n For local password-less db use `postgresql:///<your db>`\n \"\"\"\n def split2(s: str, separator: str) -> Tuple[str, str]:\n i = s.find(separator)\n return (s, '') if i < 0 else (s[:i], s[i+1:])\n\n _, netloc, path, *_ = urlparse(url)\n\n db = path[1:] if path else ''\n if '@' in netloc:\n (user, password), (host, port) = 
(split2(p, ':') for p in split2(netloc, '@'))\n else:\n user, password = '', ''\n host, port = split2(netloc, ':')\n\n oo = dict(hostname=host, database=db)\n\n if port:\n oo['port'] = port\n if password:\n oo['password'] = unquote_plus(password)\n if user:\n oo['username'] = user\n return oo\n\n\ndef parse_env_params() -> Dict[str, str]:\n \"\"\"\n - Extract parameters from DATACUBE_DB_URL if present\n - Else look for DB_HOSTNAME, DB_USERNAME, DB_PASSWORD, DB_DATABASE\n - Return {} otherwise\n \"\"\"\n\n db_url = os.environ.get('DATACUBE_DB_URL', None)\n if db_url is not None:\n return parse_connect_url(db_url)\n\n params = {k: os.environ.get('DB_{}'.format(k.upper()), None)\n for k in DB_KEYS}\n return {k: v\n for k, v in params.items()\n if v is not None}\n\n\ndef _cfg_from_env_opts(opts: Dict[str, str],\n base: configparser.ConfigParser) -> LocalConfig:\n base['default'] = {'db_'+k: v for k, v in opts.items()}\n return LocalConfig(base, files_loaded=[], env='default')\n\n\ndef render_dc_config(params: Dict[str, Any],\n section_name: str = 'default') -> str:\n \"\"\" Render output of parse_env_params to a string that can be written to config file.\n \"\"\"\n oo = '[{}]\\n'.format(section_name)\n for k in DB_KEYS:\n v = params.get(k, None)\n if v is not None:\n oo += 'db_{k}: {v}\\n'.format(k=k, v=v)\n return oo\n\n\ndef auto_config() -> str:\n \"\"\"\n Render config to $DATACUBE_CONFIG_PATH or ~/.datacube.conf, but only if doesn't exist.\n\n option1:\n DATACUBE_DB_URL postgresql://user:password@host/database\n\n option2:\n DB_{HOSTNAME|PORT|USERNAME|PASSWORD|DATABASE}\n\n option3:\n default config\n \"\"\"\n cfg_path = os.environ.get('DATACUBE_CONFIG_PATH', None)\n cfg_path = Path(cfg_path) if cfg_path else Path.home()/'.datacube.conf'\n\n if cfg_path.exists():\n return str(cfg_path)\n\n opts = parse_env_params()\n\n if len(opts) == 0:\n opts['hostname'] = ''\n opts['database'] = 'datacube'\n\n cfg_text = render_dc_config(opts)\n with open(str(cfg_path), 'wt') as f:\n f.write(cfg_text)\n\n return str(cfg_path)\n",
"path": "datacube/config.py"
}
] | [
{
"content": "# coding=utf-8\n\"\"\"\nUser configuration.\n\"\"\"\n\nimport os\nfrom pathlib import Path\nimport configparser\nfrom urllib.parse import unquote_plus, urlparse\nfrom typing import Optional, Iterable, Union, Any, Tuple, Dict\n\nPathLike = Union[str, 'os.PathLike[Any]']\n\n\nENVIRONMENT_VARNAME = 'DATACUBE_CONFIG_PATH'\n#: Config locations in order. Properties found in latter locations override\n#: earlier ones.\n#:\n#: - `/etc/datacube.conf`\n#: - file at `$DATACUBE_CONFIG_PATH` environment variable\n#: - `~/.datacube.conf`\n#: - `datacube.conf`\nDEFAULT_CONF_PATHS = tuple(p for p in ['/etc/datacube.conf',\n os.environ.get(ENVIRONMENT_VARNAME, ''),\n str(os.path.expanduser(\"~/.datacube.conf\")),\n 'datacube.conf'] if len(p) > 0)\n\nDEFAULT_ENV = 'default'\n\n# Default configuration options.\n_DEFAULT_CONF = \"\"\"\n[DEFAULT]\n# Blank implies localhost\ndb_hostname:\ndb_database: datacube\nindex_driver: default\n# If a connection is unused for this length of time, expect it to be invalidated.\ndb_connection_timeout: 60\n\n[user]\n# Which environment to use when none is specified explicitly.\n# note: will fail if default_environment points to non-existent section\n# default_environment: datacube\n\"\"\"\n\n#: Used in place of None as a default, when None is a valid but not default parameter to a function\n_UNSET = object()\n\n\ndef read_config(default_text: Optional[str] = None) -> configparser.ConfigParser:\n config = configparser.ConfigParser()\n if default_text is not None:\n config.read_string(default_text)\n return config\n\n\nclass LocalConfig(object):\n \"\"\"\n System configuration for the user.\n\n This loads from a set of possible configuration files which define the available environments.\n An environment contains connection details for a Data Cube Index, which provides access to\n available data.\n\n \"\"\"\n\n def __init__(self, config: configparser.ConfigParser,\n files_loaded: Optional[Iterable[str]] = None,\n env: Optional[str] = None):\n \"\"\"\n Datacube environment resolution precedence is:\n 1. Supplied as a function argument `env`\n 2. DATACUBE_ENVIRONMENT environment variable\n 3. user.default_environment option in the config\n 4. 
'default' or 'datacube' whichever is present\n\n If environment is supplied by any of the first 3 methods is not present\n in the config, then throw an exception.\n \"\"\"\n self._config = config\n self.files_loaded = [] if files_loaded is None else list(iter(files_loaded))\n\n if env is None:\n env = os.environ.get('DATACUBE_ENVIRONMENT',\n config.get('user', 'default_environment', fallback=None))\n\n # If the user specifies a particular env, we either want to use it or Fail\n if env:\n if config.has_section(env):\n self._env = env\n # All is good\n return\n else:\n raise ValueError('No config section found for environment %r' % (env,))\n else:\n # If an env hasn't been specifically selected, we can fall back defaults\n fallbacks = [DEFAULT_ENV, 'datacube']\n for fallback_env in fallbacks:\n if config.has_section(fallback_env):\n self._env = fallback_env\n return\n raise ValueError('No ODC environment, checked configurations for %s' % fallbacks)\n\n @classmethod\n def find(cls,\n paths: Optional[Union[str, Iterable[PathLike]]] = None,\n env: Optional[str] = None) -> 'LocalConfig':\n \"\"\"\n Find config from environment variables or possible filesystem locations.\n\n 'env' is which environment to use from the config: it corresponds to the name of a\n config section\n \"\"\"\n config = read_config(_DEFAULT_CONF)\n\n if paths is None:\n if env is None:\n env_opts = parse_env_params()\n if env_opts:\n return _cfg_from_env_opts(env_opts, config)\n\n paths = DEFAULT_CONF_PATHS\n\n if isinstance(paths, str) or hasattr(paths, '__fspath__'): # Use os.PathLike in 3.6+\n paths = [str(paths)]\n\n files_loaded = config.read(str(p) for p in paths if p)\n\n return LocalConfig(\n config,\n files_loaded=files_loaded,\n env=env,\n )\n\n def get(self, item: str, fallback=_UNSET):\n if fallback == _UNSET:\n return self._config.get(self._env, item)\n else:\n return self._config.get(self._env, item, fallback=fallback)\n\n def __getitem__(self, item: str):\n return self.get(item, fallback=None)\n\n def __str__(self) -> str:\n return \"LocalConfig<loaded_from={}, environment={!r}, config={}>\".format(\n self.files_loaded or 'defaults',\n self._env,\n dict(self._config[self._env]),\n )\n\n def __repr__(self) -> str:\n return str(self)\n\n\nOPTIONS = {'reproject_threads': 4}\n\n\n#: pylint: disable=invalid-name\nclass set_options(object):\n \"\"\"Set global state within a controlled context\n\n Currently, the only supported options are:\n * reproject_threads: The number of threads to use when reprojecting\n\n You can use ``set_options`` either as a context manager::\n\n with datacube.set_options(reproject_threads=16):\n ...\n\n Or to set global options::\n\n datacube.set_options(reproject_threads=16)\n \"\"\"\n\n def __init__(self, **kwargs):\n self.old = OPTIONS.copy()\n OPTIONS.update(kwargs)\n\n def __enter__(self):\n return\n\n def __exit__(self, exc_type, value, traceback):\n OPTIONS.clear()\n OPTIONS.update(self.old)\n\n\nDB_KEYS = ('hostname', 'port', 'database', 'username', 'password')\n\n\ndef parse_connect_url(url: str) -> Dict[str, str]:\n \"\"\" Extract database,hostname,port,username,password from db URL.\n\n Example: postgresql://username:password@hostname:port/database\n\n For local password-less db use `postgresql:///<your db>`\n \"\"\"\n def split2(s: str, separator: str) -> Tuple[str, str]:\n i = s.find(separator)\n return (s, '') if i < 0 else (s[:i], s[i+1:])\n\n _, netloc, path, *_ = urlparse(url)\n\n db = path[1:] if path else ''\n if '@' in netloc:\n (user, password), (host, port) = 
(split2(p, ':') for p in split2(netloc, '@'))\n else:\n user, password = '', ''\n host, port = split2(netloc, ':')\n\n oo = dict(hostname=host, database=db)\n\n if port:\n oo['port'] = port\n if password:\n oo['password'] = unquote_plus(password)\n if user:\n oo['username'] = user\n return oo\n\n\ndef parse_env_params() -> Dict[str, str]:\n \"\"\"\n - Extract parameters from DATACUBE_DB_URL if present\n - Else look for DB_HOSTNAME, DB_USERNAME, DB_PASSWORD, DB_DATABASE\n - Return {} otherwise\n \"\"\"\n\n db_url = os.environ.get('DATACUBE_DB_URL', None)\n if db_url is not None:\n return parse_connect_url(db_url)\n\n params = {k: os.environ.get('DB_{}'.format(k.upper()), None)\n for k in DB_KEYS}\n return {k: v\n for k, v in params.items()\n if v is not None and v != \"\"}\n\n\ndef _cfg_from_env_opts(opts: Dict[str, str],\n base: configparser.ConfigParser) -> LocalConfig:\n base['default'] = {'db_'+k: v for k, v in opts.items()}\n return LocalConfig(base, files_loaded=[], env='default')\n\n\ndef render_dc_config(params: Dict[str, Any],\n section_name: str = 'default') -> str:\n \"\"\" Render output of parse_env_params to a string that can be written to config file.\n \"\"\"\n oo = '[{}]\\n'.format(section_name)\n for k in DB_KEYS:\n v = params.get(k, None)\n if v is not None:\n oo += 'db_{k}: {v}\\n'.format(k=k, v=v)\n return oo\n\n\ndef auto_config() -> str:\n \"\"\"\n Render config to $DATACUBE_CONFIG_PATH or ~/.datacube.conf, but only if doesn't exist.\n\n option1:\n DATACUBE_DB_URL postgresql://user:password@host/database\n\n option2:\n DB_{HOSTNAME|PORT|USERNAME|PASSWORD|DATABASE}\n\n option3:\n default config\n \"\"\"\n cfg_path = os.environ.get('DATACUBE_CONFIG_PATH', None)\n cfg_path = Path(cfg_path) if cfg_path else Path.home()/'.datacube.conf'\n\n if cfg_path.exists():\n return str(cfg_path)\n\n opts = parse_env_params()\n\n if len(opts) == 0:\n opts['hostname'] = ''\n opts['database'] = 'datacube'\n\n cfg_text = render_dc_config(opts)\n with open(str(cfg_path), 'wt') as f:\n f.write(cfg_text)\n\n return str(cfg_path)\n",
"path": "datacube/config.py"
}
] | diff --git a/datacube/config.py b/datacube/config.py
index 1bf4ce6371..fd10ff3c84 100755
--- a/datacube/config.py
+++ b/datacube/config.py
@@ -233,7 +233,7 @@ def parse_env_params() -> Dict[str, str]:
for k in DB_KEYS}
return {k: v
for k, v in params.items()
- if v is not None}
+ if v is not None and v != ""}
def _cfg_from_env_opts(opts: Dict[str, str],
diff --git a/tests/test_config.py b/tests/test_config.py
index 215310b4ea..330a331854 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -164,6 +164,16 @@ def check_env(**kw):
username='user',
password='pass@')
+ assert check_env(DB_DATABASE='db',
+ DB_HOSTNAME='host.tld',
+ DB_USERNAME='user',
+ DB_PORT='',
+ DB_PASSWORD='pass@') == dict(
+ database='db',
+ hostname='host.tld',
+ username='user',
+ password='pass@')
+
def test_cfg_from_env(monkeypatch):
def set_env(**kw):
|
openai__gym-2633 | [Bug Report] Empty print version warning
**Describe the bug**
When I import gym, there's an empty line printed.
It's because of this line: https://github.com/openai/gym/blob/master/gym/__init__.py#L30
Either it's a bug, because `notice` should never be an empty string, or the check should be `if notice:`, which is falsy for both `None` and `""` (the empty string).
At best this clutters the logs; at worst it masks some other issue.
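For context, a minimal standalone sketch of the proposed truthiness check (the notice strings here are placeholders, not gym's actual data):

```python
# Sketch: `if notice:` skips both None and the empty string, so nothing
# is printed unless there is a real notice for this version.
import sys

for notice in (None, "", "please upgrade to the latest gym release"):
    if notice:  # falsy for None and for ""
        print(notice, file=sys.stderr)  # only the non-empty notice is printed
```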
**Code example**
```python
import gym
```
**System Info**
Characteristics of the environment:
Latest gym installed from pip, Ubuntu 20.04, Python 3.9.7
### Checklist
- [x] I have checked that there is no similar [issue](https://github.com/openai/gym/issues) in the repo (**required**)
| [
{
"content": "from gym import error\nfrom gym.version import VERSION as __version__\n\nfrom gym.core import (\n Env,\n Wrapper,\n ObservationWrapper,\n ActionWrapper,\n RewardWrapper,\n)\nfrom gym.spaces import Space\nfrom gym.envs import make, spec, register\nfrom gym import logger\nfrom gym import vector\nfrom gym import wrappers\nimport os\n\n\n__all__ = [\"Env\", \"Space\", \"Wrapper\", \"make\", \"spec\", \"register\"]\n\nos.environ[\"PYGAME_HIDE_SUPPORT_PROMPT\"] = \"hide\"\n\ntry:\n import gym_notices.notices as notices\n import sys\n\n # print version warning if necessary\n notice = notices.notices.get(__version__)\n if notice is not None:\n print(notice, file=sys.stderr)\n\nexcept Exception: # nosec\n pass\n",
"path": "gym/__init__.py"
}
] | [
{
"content": "from gym import error\nfrom gym.version import VERSION as __version__\n\nfrom gym.core import (\n Env,\n Wrapper,\n ObservationWrapper,\n ActionWrapper,\n RewardWrapper,\n)\nfrom gym.spaces import Space\nfrom gym.envs import make, spec, register\nfrom gym import logger\nfrom gym import vector\nfrom gym import wrappers\nimport os\n\n\n__all__ = [\"Env\", \"Space\", \"Wrapper\", \"make\", \"spec\", \"register\"]\n\nos.environ[\"PYGAME_HIDE_SUPPORT_PROMPT\"] = \"hide\"\n\ntry:\n import gym_notices.notices as notices\n import sys\n\n # print version warning if necessary\n notice = notices.notices.get(__version__)\n if notice:\n print(notice, file=sys.stderr)\n\nexcept Exception: # nosec\n pass\n",
"path": "gym/__init__.py"
}
] | diff --git a/gym/__init__.py b/gym/__init__.py
index 71797c7e798..b44d1b419ad 100644
--- a/gym/__init__.py
+++ b/gym/__init__.py
@@ -26,7 +26,7 @@
# print version warning if necessary
notice = notices.notices.get(__version__)
- if notice is not None:
+ if notice:
print(notice, file=sys.stderr)
except Exception: # nosec
|
feast-dev__feast-1585 | Bump fastavro version
**Is your feature request related to a problem? Please describe.**
The version of Fastavro that we're using is quite old and likely to become a problem soon. It's also causing version conflicts with packages that have already upgraded to the newer (1.x.x) versions.
**Describe the solution you'd like**
Bump Fastavro to 1.x.x
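As a sketch of the requested change, the pin in `sdk/python/setup.py` would be loosened to the 1.x line; the lower bound below simply mirrors the diff further down and should be treated as illustrative:

```python
# Hypothetical fragment of sdk/python/setup.py after the bump.
REQUIRED = [
    # ... other dependencies ...
    "fastavro>=1.1.0",  # previously "fastavro>=0.22.11,<0.23"
    # ... other dependencies ...
]
```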
| [
{
"content": "# Copyright 2019 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport glob\nimport os\nimport re\nimport subprocess\n\nfrom distutils.cmd import Command\nfrom setuptools import find_packages\n\ntry:\n from setuptools import setup\n from setuptools.command.install import install\n from setuptools.command.develop import develop\n from setuptools.command.egg_info import egg_info\n from setuptools.command.sdist import sdist\n from setuptools.command.build_py import build_py\nexcept ImportError:\n from distutils.core import setup\n from distutils.command.install import install\n from distutils.command.build_py import build_py\n\nNAME = \"feast\"\nDESCRIPTION = \"Python SDK for Feast\"\nURL = \"https://github.com/feast-dev/feast\"\nAUTHOR = \"Feast\"\nREQUIRES_PYTHON = \">=3.7.0\"\n\nREQUIRED = [\n \"Click==7.*\",\n \"colorama>=0.3.9\",\n \"fastavro>=0.22.11,<0.23\",\n \"google-api-core>=1.23.0\",\n \"googleapis-common-protos==1.52.*\",\n \"grpcio>=1.34.0\",\n \"Jinja2>=2.0.0\",\n \"jsonschema\",\n \"mmh3\",\n \"pandas>=1.0.0\",\n \"pandavro==1.5.*\",\n \"protobuf>=3.10\",\n \"pyarrow>=2.0.0\",\n \"pydantic>=1.0.0\",\n \"PyYAML==5.3.*\",\n \"tabulate==0.8.*\",\n \"toml==0.10.*\",\n \"tqdm==4.*\",\n]\n\nGCP_REQUIRED = [\n \"google-cloud-bigquery>=2.0.*\",\n \"google-cloud-bigquery-storage >= 2.0.0\",\n \"google-cloud-datastore>=2.1.*\",\n \"google-cloud-storage>=1.20.*\",\n \"google-cloud-core==1.4.*\",\n]\n\nREDIS_REQUIRED = [\n \"redis-py-cluster==2.1.2\",\n]\n\nCI_REQUIRED = [\n \"cryptography==3.3.2\",\n \"flake8\",\n \"black==19.10b0\",\n \"isort>=5\",\n \"grpcio-tools==1.34.0\",\n \"grpcio-testing==1.34.0\",\n \"mock==2.0.0\",\n \"moto\",\n \"mypy==0.790\",\n \"mypy-protobuf==1.24\",\n \"avro==1.10.0\",\n \"gcsfs\",\n \"urllib3>=1.25.4\",\n \"pytest==6.0.0\",\n \"pytest-cov\",\n \"pytest-lazy-fixture==0.6.3\",\n \"pytest-timeout==1.4.2\",\n \"pytest-ordering==0.6.*\",\n \"pytest-mock==1.10.4\",\n \"Sphinx!=4.0.0\",\n \"sphinx-rtd-theme\",\n \"tenacity\",\n \"adlfs==0.5.9\",\n \"firebase-admin==4.5.2\",\n \"pre-commit\",\n \"assertpy==1.1\",\n \"google-cloud-bigquery>=2.0.*\",\n \"google-cloud-bigquery-storage >= 2.0.0\",\n \"google-cloud-datastore>=2.1.*\",\n \"google-cloud-storage>=1.20.*\",\n \"google-cloud-core==1.4.*\",\n \"redis-py-cluster==2.1.2\",\n]\n\n# README file from Feast repo root directory\nrepo_root = (\n subprocess.Popen([\"git\", \"rev-parse\", \"--show-toplevel\"], stdout=subprocess.PIPE)\n .communicate()[0]\n .rstrip()\n .decode(\"utf-8\")\n)\nREADME_FILE = os.path.join(repo_root, \"README.md\")\nwith open(README_FILE, \"r\") as f:\n LONG_DESCRIPTION = f.read()\n\n# Add Support for parsing tags that have a prefix containing '/' (ie 'sdk/go') to setuptools_scm.\n# Regex modified from default tag regex in:\n# https://github.com/pypa/setuptools_scm/blob/2a1b46d38fb2b8aeac09853e660bcd0d7c1bc7be/src/setuptools_scm/config.py#L9\nTAG_REGEX = re.compile(\n 
r\"^(?:[\\/\\w-]+)?(?P<version>[vV]?\\d+(?:\\.\\d+){0,2}[^\\+]*)(?:\\+.*)?$\"\n)\n\n\nclass BuildProtoCommand(Command):\n description = \"Builds the proto files into python files.\"\n\n def initialize_options(self):\n self.protoc = [\"python\", \"-m\", \"grpc_tools.protoc\"] # find_executable(\"protoc\")\n self.proto_folder = os.path.join(repo_root, \"protos\")\n self.this_package = os.path.join(os.path.dirname(__file__) or os.getcwd(), 'feast/protos')\n self.sub_folders = [\"core\", \"serving\", \"types\", \"storage\"]\n\n def finalize_options(self):\n pass\n\n def _generate_protos(self, path):\n proto_files = glob.glob(os.path.join(self.proto_folder, path))\n\n subprocess.check_call(self.protoc + [\n '-I', self.proto_folder,\n '--python_out', self.this_package,\n '--grpc_python_out', self.this_package,\n '--mypy_out', self.this_package] + proto_files)\n\n def run(self):\n for sub_folder in self.sub_folders:\n self._generate_protos(f'feast/{sub_folder}/*.proto')\n\n from pathlib import Path\n\n for path in Path('feast/protos').rglob('*.py'):\n for folder in self.sub_folders:\n # Read in the file\n with open(path, 'r') as file:\n filedata = file.read()\n\n # Replace the target string\n filedata = filedata.replace(f'from feast.{folder}', f'from feast.protos.feast.{folder}')\n\n # Write the file out again\n with open(path, 'w') as file:\n file.write(filedata)\n\n\nclass BuildCommand(build_py):\n \"\"\"Custom build command.\"\"\"\n\n def run(self):\n self.run_command('build_proto')\n build_py.run(self)\n\n\nclass DevelopCommand(develop):\n \"\"\"Custom develop command.\"\"\"\n\n def run(self):\n self.run_command('build_proto')\n develop.run(self)\n\n\nsetup(\n name=NAME,\n author=AUTHOR,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n python_requires=REQUIRES_PYTHON,\n url=URL,\n packages=find_packages(exclude=(\"tests\",)),\n install_requires=REQUIRED,\n # https://stackoverflow.com/questions/28509965/setuptools-development-requirements\n # Install dev requirements with: pip install -e .[dev]\n extras_require={\n \"dev\": [\"mypy-protobuf==1.*\", \"grpcio-testing==1.*\"],\n \"ci\": CI_REQUIRED,\n \"gcp\": GCP_REQUIRED,\n \"redis\": REDIS_REQUIRED,\n },\n include_package_data=True,\n license=\"Apache\",\n classifiers=[\n # Trove classifiers\n # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n ],\n entry_points={\"console_scripts\": [\"feast=feast.cli:cli\"]},\n use_scm_version={\"root\": \"../..\", \"relative_to\": __file__, \"tag_regex\": TAG_REGEX},\n setup_requires=[\"setuptools_scm\", \"grpcio\", \"grpcio-tools==1.34.0\", \"mypy-protobuf\", \"sphinx!=4.0.0\"],\n package_data={\n \"\": [\n \"protos/feast/**/*.proto\",\n \"protos/feast/third_party/grpc/health/v1/*.proto\",\n \"protos/tensorflow_metadata/proto/v0/*.proto\",\n \"feast/protos/feast/**/*.py\",\n \"tensorflow_metadata/proto/v0/*.py\"\n ],\n },\n cmdclass={\n \"build_proto\": BuildProtoCommand,\n \"build_py\": BuildCommand,\n \"develop\": DevelopCommand,\n },\n)\n",
"path": "sdk/python/setup.py"
}
] | [
{
"content": "# Copyright 2019 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport glob\nimport os\nimport re\nimport subprocess\n\nfrom distutils.cmd import Command\nfrom setuptools import find_packages\n\ntry:\n from setuptools import setup\n from setuptools.command.install import install\n from setuptools.command.develop import develop\n from setuptools.command.egg_info import egg_info\n from setuptools.command.sdist import sdist\n from setuptools.command.build_py import build_py\nexcept ImportError:\n from distutils.core import setup\n from distutils.command.install import install\n from distutils.command.build_py import build_py\n\nNAME = \"feast\"\nDESCRIPTION = \"Python SDK for Feast\"\nURL = \"https://github.com/feast-dev/feast\"\nAUTHOR = \"Feast\"\nREQUIRES_PYTHON = \">=3.7.0\"\n\nREQUIRED = [\n \"Click==7.*\",\n \"colorama>=0.3.9\",\n \"fastavro>=1.1.0\",\n \"google-api-core>=1.23.0\",\n \"googleapis-common-protos==1.52.*\",\n \"grpcio>=1.34.0\",\n \"Jinja2>=2.0.0\",\n \"jsonschema\",\n \"mmh3\",\n \"pandas>=1.0.0\",\n \"pandavro==1.5.*\",\n \"protobuf>=3.10\",\n \"pyarrow>=2.0.0\",\n \"pydantic>=1.0.0\",\n \"PyYAML==5.3.*\",\n \"tabulate==0.8.*\",\n \"toml==0.10.*\",\n \"tqdm==4.*\",\n]\n\nGCP_REQUIRED = [\n \"google-cloud-bigquery>=2.0.*\",\n \"google-cloud-bigquery-storage >= 2.0.0\",\n \"google-cloud-datastore>=2.1.*\",\n \"google-cloud-storage>=1.20.*\",\n \"google-cloud-core==1.4.*\",\n]\n\nCI_REQUIRED = [\n \"cryptography==3.3.2\",\n \"flake8\",\n \"black==19.10b0\",\n \"isort>=5\",\n \"grpcio-tools==1.34.0\",\n \"grpcio-testing==1.34.0\",\n \"mock==2.0.0\",\n \"moto\",\n \"mypy==0.790\",\n \"mypy-protobuf==1.24\",\n \"avro==1.10.0\",\n \"gcsfs\",\n \"urllib3>=1.25.4\",\n \"pytest==6.0.0\",\n \"pytest-cov\",\n \"pytest-lazy-fixture==0.6.3\",\n \"pytest-timeout==1.4.2\",\n \"pytest-ordering==0.6.*\",\n \"pytest-mock==1.10.4\",\n \"Sphinx!=4.0.0\",\n \"sphinx-rtd-theme\",\n \"tenacity\",\n \"adlfs==0.5.9\",\n \"firebase-admin==4.5.2\",\n \"pre-commit\",\n \"assertpy==1.1\",\n \"google-cloud-bigquery>=2.0.*\",\n \"google-cloud-bigquery-storage >= 2.0.0\",\n \"google-cloud-datastore>=2.1.*\",\n \"google-cloud-storage>=1.20.*\",\n \"google-cloud-core==1.4.*\",\n]\n\n# README file from Feast repo root directory\nrepo_root = (\n subprocess.Popen([\"git\", \"rev-parse\", \"--show-toplevel\"], stdout=subprocess.PIPE)\n .communicate()[0]\n .rstrip()\n .decode(\"utf-8\")\n)\nREADME_FILE = os.path.join(repo_root, \"README.md\")\nwith open(README_FILE, \"r\") as f:\n LONG_DESCRIPTION = f.read()\n\n# Add Support for parsing tags that have a prefix containing '/' (ie 'sdk/go') to setuptools_scm.\n# Regex modified from default tag regex in:\n# https://github.com/pypa/setuptools_scm/blob/2a1b46d38fb2b8aeac09853e660bcd0d7c1bc7be/src/setuptools_scm/config.py#L9\nTAG_REGEX = re.compile(\n r\"^(?:[\\/\\w-]+)?(?P<version>[vV]?\\d+(?:\\.\\d+){0,2}[^\\+]*)(?:\\+.*)?$\"\n)\n\n\nclass BuildProtoCommand(Command):\n description = \"Builds the proto files into python files.\"\n\n 
def initialize_options(self):\n self.protoc = [\"python\", \"-m\", \"grpc_tools.protoc\"] # find_executable(\"protoc\")\n self.proto_folder = os.path.join(repo_root, \"protos\")\n self.this_package = os.path.join(os.path.dirname(__file__) or os.getcwd(), 'feast/protos')\n self.sub_folders = [\"core\", \"serving\", \"types\", \"storage\"]\n\n def finalize_options(self):\n pass\n\n def _generate_protos(self, path):\n proto_files = glob.glob(os.path.join(self.proto_folder, path))\n\n subprocess.check_call(self.protoc + [\n '-I', self.proto_folder,\n '--python_out', self.this_package,\n '--grpc_python_out', self.this_package,\n '--mypy_out', self.this_package] + proto_files)\n\n def run(self):\n for sub_folder in self.sub_folders:\n self._generate_protos(f'feast/{sub_folder}/*.proto')\n\n from pathlib import Path\n\n for path in Path('feast/protos').rglob('*.py'):\n for folder in self.sub_folders:\n # Read in the file\n with open(path, 'r') as file:\n filedata = file.read()\n\n # Replace the target string\n filedata = filedata.replace(f'from feast.{folder}', f'from feast.protos.feast.{folder}')\n\n # Write the file out again\n with open(path, 'w') as file:\n file.write(filedata)\n\n\nclass BuildCommand(build_py):\n \"\"\"Custom build command.\"\"\"\n\n def run(self):\n self.run_command('build_proto')\n build_py.run(self)\n\n\nclass DevelopCommand(develop):\n \"\"\"Custom develop command.\"\"\"\n\n def run(self):\n self.run_command('build_proto')\n develop.run(self)\n\n\nsetup(\n name=NAME,\n author=AUTHOR,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n python_requires=REQUIRES_PYTHON,\n url=URL,\n packages=find_packages(exclude=(\"tests\",)),\n install_requires=REQUIRED,\n # https://stackoverflow.com/questions/28509965/setuptools-development-requirements\n # Install dev requirements with: pip install -e .[dev]\n extras_require={\n \"dev\": [\"mypy-protobuf==1.*\", \"grpcio-testing==1.*\"],\n \"ci\": CI_REQUIRED,\n \"gcp\": GCP_REQUIRED,\n },\n include_package_data=True,\n license=\"Apache\",\n classifiers=[\n # Trove classifiers\n # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n ],\n entry_points={\"console_scripts\": [\"feast=feast.cli:cli\"]},\n use_scm_version={\"root\": \"../..\", \"relative_to\": __file__, \"tag_regex\": TAG_REGEX},\n setup_requires=[\"setuptools_scm\", \"grpcio\", \"grpcio-tools==1.34.0\", \"mypy-protobuf\", \"sphinx!=4.0.0\"],\n package_data={\n \"\": [\n \"protos/feast/**/*.proto\",\n \"protos/feast/third_party/grpc/health/v1/*.proto\",\n \"protos/tensorflow_metadata/proto/v0/*.proto\",\n \"feast/protos/feast/**/*.py\",\n \"tensorflow_metadata/proto/v0/*.py\"\n ],\n },\n cmdclass={\n \"build_proto\": BuildProtoCommand,\n \"build_py\": BuildCommand,\n \"develop\": DevelopCommand,\n },\n)\n",
"path": "sdk/python/setup.py"
}
] | diff --git a/sdk/python/setup.py b/sdk/python/setup.py
index e2bb02f10d0..2c40d7ec4ac 100644
--- a/sdk/python/setup.py
+++ b/sdk/python/setup.py
@@ -40,7 +40,7 @@
REQUIRED = [
"Click==7.*",
"colorama>=0.3.9",
- "fastavro>=0.22.11,<0.23",
+ "fastavro>=1.1.0",
"google-api-core>=1.23.0",
"googleapis-common-protos==1.52.*",
"grpcio>=1.34.0",
|
apache__airflow-12386 | [ldap] section in configuration is not applicable anymore in 2.0
**Apache Airflow version**: 2.0.0b* / master
**What happened**:
The `[ldap]` section in `airflow.cfg` is no longer applicable in 2.0 and `master`, because LDAP authentication (for the webserver and API) is handled by FAB, and its configuration lives in the `webserver_config.py` file.

**What you expected to happen**:
The `[ldap]` section should be removed from `airflow/config_templates/default_airflow.cfg` and `airflow/config_templates/config.yml` (and some other applicable files).
Otherwise, leaving this section in place will cause significant confusion for users.
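For illustration, a hedged sketch of where LDAP settings now live: a FAB-style `webserver_config.py`. The option names follow Flask-AppBuilder's security configuration; the hostnames and credentials are placeholders:

```python
# Hypothetical webserver_config.py fragment: LDAP auth is configured through
# Flask-AppBuilder settings instead of an [ldap] section in airflow.cfg.
from flask_appbuilder.security.manager import AUTH_LDAP

AUTH_TYPE = AUTH_LDAP
AUTH_LDAP_SERVER = "ldap://ldap.example.com"        # placeholder server
AUTH_LDAP_SEARCH = "dc=example,dc=com"              # placeholder base DN
AUTH_LDAP_BIND_USER = "cn=admin,dc=example,dc=com"  # placeholder bind user
AUTH_LDAP_BIND_PASSWORD = "change-me"               # placeholder password
AUTH_LDAP_UID_FIELD = "uid"
```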
| [
{
"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport copy\nimport json\nimport logging\nimport multiprocessing\nimport os\nimport pathlib\nimport re\nimport shlex\nimport subprocess\nimport sys\nimport warnings\nfrom base64 import b64encode\nfrom collections import OrderedDict\n\n# Ignored Mypy on configparser because it thinks the configparser module has no _UNSET attribute\nfrom configparser import _UNSET, ConfigParser, NoOptionError, NoSectionError # type: ignore\nfrom json.decoder import JSONDecodeError\nfrom typing import Dict, List, Optional, Tuple, Union\n\nimport yaml\nfrom cryptography.fernet import Fernet\n\nfrom airflow.exceptions import AirflowConfigException\nfrom airflow.secrets import DEFAULT_SECRETS_SEARCH_PATH, BaseSecretsBackend\nfrom airflow.utils.module_loading import import_string\n\nlog = logging.getLogger(__name__)\n\n# show Airflow's deprecation warnings\nif not sys.warnoptions:\n warnings.filterwarnings(action='default', category=DeprecationWarning, module='airflow')\n warnings.filterwarnings(action='default', category=PendingDeprecationWarning, module='airflow')\n\n\ndef expand_env_var(env_var):\n \"\"\"\n Expands (potentially nested) env vars by repeatedly applying\n `expandvars` and `expanduser` until interpolation stops having\n any effect.\n \"\"\"\n if not env_var:\n return env_var\n while True:\n interpolated = os.path.expanduser(os.path.expandvars(str(env_var)))\n if interpolated == env_var:\n return interpolated\n else:\n env_var = interpolated\n\n\ndef run_command(command):\n \"\"\"Runs command and returns stdout\"\"\"\n process = subprocess.Popen(\n shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True\n )\n output, stderr = [stream.decode(sys.getdefaultencoding(), 'ignore') for stream in process.communicate()]\n\n if process.returncode != 0:\n raise AirflowConfigException(\n f\"Cannot execute {command}. Error code is: {process.returncode}. 
\"\n f\"Output: {output}, Stderr: {stderr}\"\n )\n\n return output\n\n\ndef _get_config_value_from_secret_backend(config_key):\n \"\"\"Get Config option values from Secret Backend\"\"\"\n secrets_client = get_custom_secret_backend()\n if not secrets_client:\n return None\n return secrets_client.get_config(config_key)\n\n\ndef _read_default_config_file(file_name: str) -> Tuple[str, str]:\n templates_dir = os.path.join(os.path.dirname(__file__), 'config_templates')\n file_path = os.path.join(templates_dir, file_name)\n with open(file_path, encoding='utf-8') as config_file:\n return config_file.read(), file_path\n\n\nDEFAULT_CONFIG, DEFAULT_CONFIG_FILE_PATH = _read_default_config_file('default_airflow.cfg')\nTEST_CONFIG, TEST_CONFIG_FILE_PATH = _read_default_config_file('default_test.cfg')\n\n\ndef default_config_yaml() -> dict:\n \"\"\"\n Read Airflow configs from YAML file\n\n :return: Python dictionary containing configs & their info\n \"\"\"\n templates_dir = os.path.join(os.path.dirname(__file__), 'config_templates')\n file_path = os.path.join(templates_dir, \"config.yml\")\n\n with open(file_path) as config_file:\n return yaml.safe_load(config_file)\n\n\nclass AirflowConfigParser(ConfigParser): # pylint: disable=too-many-ancestors\n \"\"\"Custom Airflow Configparser supporting defaults and deprecated options\"\"\"\n\n # These configuration elements can be fetched as the stdout of commands\n # following the \"{section}__{name}__cmd\" pattern, the idea behind this\n # is to not store password on boxes in text files.\n # These configs can also be fetched from Secrets backend\n # following the \"{section}__{name}__secret\" pattern\n sensitive_config_values = {\n ('core', 'sql_alchemy_conn'),\n ('core', 'fernet_key'),\n ('celery', 'broker_url'),\n ('celery', 'flower_basic_auth'),\n ('celery', 'result_backend'),\n ('atlas', 'password'),\n ('smtp', 'smtp_password'),\n ('ldap', 'bind_password'),\n ('kubernetes', 'git_password'),\n }\n\n # A mapping of (new option -> old option). where option is a tuple of section name and key.\n # When reading new option, the old option will be checked to see if it exists. 
If it does a\n # DeprecationWarning will be issued and the old option will be used instead\n deprecated_options = {\n ('logging', 'base_log_folder'): ('core', 'base_log_folder'),\n ('logging', 'remote_logging'): ('core', 'remote_logging'),\n ('logging', 'remote_log_conn_id'): ('core', 'remote_log_conn_id'),\n ('logging', 'remote_base_log_folder'): ('core', 'remote_base_log_folder'),\n ('logging', 'encrypt_s3_logs'): ('core', 'encrypt_s3_logs'),\n ('logging', 'logging_level'): ('core', 'logging_level'),\n ('logging', 'fab_logging_level'): ('core', 'fab_logging_level'),\n ('logging', 'logging_config_class'): ('core', 'logging_config_class'),\n ('logging', 'colored_console_log'): ('core', 'colored_console_log'),\n ('logging', 'colored_log_format'): ('core', 'colored_log_format'),\n ('logging', 'colored_formatter_class'): ('core', 'colored_formatter_class'),\n ('logging', 'log_format'): ('core', 'log_format'),\n ('logging', 'simple_log_format'): ('core', 'simple_log_format'),\n ('logging', 'task_log_prefix_template'): ('core', 'task_log_prefix_template'),\n ('logging', 'log_filename_template'): ('core', 'log_filename_template'),\n ('logging', 'log_processor_filename_template'): ('core', 'log_processor_filename_template'),\n ('logging', 'dag_processor_manager_log_location'): ('core', 'dag_processor_manager_log_location'),\n ('logging', 'task_log_reader'): ('core', 'task_log_reader'),\n ('metrics', 'statsd_on'): ('scheduler', 'statsd_on'),\n ('metrics', 'statsd_host'): ('scheduler', 'statsd_host'),\n ('metrics', 'statsd_port'): ('scheduler', 'statsd_port'),\n ('metrics', 'statsd_prefix'): ('scheduler', 'statsd_prefix'),\n ('metrics', 'statsd_allow_list'): ('scheduler', 'statsd_allow_list'),\n ('metrics', 'stat_name_handler'): ('scheduler', 'stat_name_handler'),\n ('metrics', 'statsd_datadog_enabled'): ('scheduler', 'statsd_datadog_enabled'),\n ('metrics', 'statsd_datadog_tags'): ('scheduler', 'statsd_datadog_tags'),\n ('metrics', 'statsd_custom_client_path'): ('scheduler', 'statsd_custom_client_path'),\n }\n\n # A mapping of old default values that we want to change and warn the user\n # about. 
Mapping of section -> setting -> { old, replace, by_version }\n deprecated_values = {\n 'core': {\n 'hostname_callable': (re.compile(r':'), r'.', '2.1'),\n },\n 'webserver': {\n 'navbar_color': (re.compile(r'\\A#007A87\\Z', re.IGNORECASE), '#fff', '2.1'),\n },\n 'email': {\n 'email_backend': (\n re.compile(r'^airflow\\.contrib\\.utils\\.sendgrid\\.send_email$'),\n r'airflow.providers.sendgrid.utils.emailer.send_email',\n '2.1',\n ),\n },\n }\n\n # This method transforms option names on every read, get, or set operation.\n # This changes from the default behaviour of ConfigParser from lowercasing\n # to instead be case-preserving\n def optionxform(self, optionstr: str) -> str:\n return optionstr\n\n def __init__(self, default_config=None, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.airflow_defaults = ConfigParser(*args, **kwargs)\n if default_config is not None:\n self.airflow_defaults.read_string(default_config)\n\n self.is_validated = False\n\n def _validate(self):\n\n self._validate_config_dependencies()\n\n for section, replacement in self.deprecated_values.items():\n for name, info in replacement.items():\n old, new, version = info\n current_value = self.get(section, name, fallback=None)\n if self._using_old_value(old, current_value):\n new_value = re.sub(old, new, current_value)\n self._update_env_var(section=section, name=name, new_value=new_value)\n self._create_future_warning(\n name=name,\n section=section,\n current_value=current_value,\n new_value=new_value,\n version=version,\n )\n\n self.is_validated = True\n\n def _validate_config_dependencies(self):\n \"\"\"\n Validate that config values aren't invalid given other config values\n or system-level limitations and requirements.\n \"\"\"\n is_executor_without_sqlite_support = self.get(\"core\", \"executor\") not in (\n 'DebugExecutor',\n 'SequentialExecutor',\n )\n is_sqlite = \"sqlite\" in self.get('core', 'sql_alchemy_conn')\n if is_executor_without_sqlite_support and is_sqlite:\n raise AirflowConfigException(\n \"error: cannot use sqlite with the {}\".format(self.get('core', 'executor'))\n )\n\n if self.has_option('core', 'mp_start_method'):\n mp_start_method = self.get('core', 'mp_start_method')\n start_method_options = multiprocessing.get_all_start_methods()\n\n if mp_start_method not in start_method_options:\n raise AirflowConfigException(\n \"mp_start_method should not be \"\n + mp_start_method\n + \". Possible values are \"\n + \", \".join(start_method_options)\n )\n\n def _using_old_value(self, old, current_value): # noqa\n return old.search(current_value) is not None\n\n def _update_env_var(self, section, name, new_value):\n # Make sure the env var option is removed, otherwise it\n # would be read and used instead of the value we set\n env_var = self._env_var_name(section, name)\n os.environ.pop(env_var, None)\n self.set(section, name, new_value)\n\n @staticmethod\n def _create_future_warning(name, section, current_value, new_value, version):\n warnings.warn(\n 'The {name} setting in [{section}] has the old default value '\n 'of {current_value!r}. 
This value has been changed to {new_value!r} in the '\n 'running config, but please update your config before Apache '\n 'Airflow {version}.'.format(\n name=name, section=section, current_value=current_value, new_value=new_value, version=version\n ),\n FutureWarning,\n )\n\n @staticmethod\n def _env_var_name(section, key):\n return f'AIRFLOW__{section.upper()}__{key.upper()}'\n\n def _get_env_var_option(self, section, key):\n # must have format AIRFLOW__{SECTION}__{KEY} (note double underscore)\n env_var = self._env_var_name(section, key)\n if env_var in os.environ:\n return expand_env_var(os.environ[env_var])\n # alternatively AIRFLOW__{SECTION}__{KEY}_CMD (for a command)\n env_var_cmd = env_var + '_CMD'\n if env_var_cmd in os.environ:\n # if this is a valid command key...\n if (section, key) in self.sensitive_config_values:\n return run_command(os.environ[env_var_cmd])\n # alternatively AIRFLOW__{SECTION}__{KEY}_SECRET (to get from Secrets Backend)\n env_var_secret_path = env_var + '_SECRET'\n if env_var_secret_path in os.environ:\n # if this is a valid secret path...\n if (section, key) in self.sensitive_config_values:\n return _get_config_value_from_secret_backend(os.environ[env_var_secret_path])\n return None\n\n def _get_cmd_option(self, section, key):\n fallback_key = key + '_cmd'\n # if this is a valid command key...\n if (section, key) in self.sensitive_config_values:\n if super().has_option(section, fallback_key):\n command = super().get(section, fallback_key)\n return run_command(command)\n return None\n\n def _get_secret_option(self, section, key):\n \"\"\"Get Config option values from Secret Backend\"\"\"\n fallback_key = key + '_secret'\n # if this is a valid secret key...\n if (section, key) in self.sensitive_config_values:\n if super().has_option(section, fallback_key):\n secrets_path = super().get(section, fallback_key)\n return _get_config_value_from_secret_backend(secrets_path)\n return None\n\n def get(self, section, key, **kwargs):\n section = str(section).lower()\n key = str(key).lower()\n\n deprecated_section, deprecated_key = self.deprecated_options.get((section, key), (None, None))\n\n option = self._get_environment_variables(deprecated_key, deprecated_section, key, section)\n if option is not None:\n return option\n\n option = self._get_option_from_config_file(deprecated_key, deprecated_section, key, kwargs, section)\n if option is not None:\n return option\n\n option = self._get_option_from_commands(deprecated_key, deprecated_section, key, section)\n if option is not None:\n return option\n\n option = self._get_option_from_secrets(deprecated_key, deprecated_section, key, section)\n if option is not None:\n return option\n\n return self._get_option_from_default_config(section, key, **kwargs)\n\n def _get_option_from_default_config(self, section, key, **kwargs):\n # ...then the default config\n if self.airflow_defaults.has_option(section, key) or 'fallback' in kwargs:\n return expand_env_var(self.airflow_defaults.get(section, key, **kwargs))\n\n else:\n log.warning(\"section/key [%s/%s] not found in config\", section, key)\n\n raise AirflowConfigException(f\"section/key [{section}/{key}] not found in config\")\n\n def _get_option_from_secrets(self, deprecated_key, deprecated_section, key, section):\n # ...then from secret backends\n option = self._get_secret_option(section, key)\n if option:\n return option\n if deprecated_section:\n option = self._get_secret_option(deprecated_section, deprecated_key)\n if option:\n self._warn_deprecate(section, key, 
deprecated_section, deprecated_key)\n return option\n return None\n\n def _get_option_from_commands(self, deprecated_key, deprecated_section, key, section):\n # ...then commands\n option = self._get_cmd_option(section, key)\n if option:\n return option\n if deprecated_section:\n option = self._get_cmd_option(deprecated_section, deprecated_key)\n if option:\n self._warn_deprecate(section, key, deprecated_section, deprecated_key)\n return option\n return None\n\n def _get_option_from_config_file(self, deprecated_key, deprecated_section, key, kwargs, section):\n # ...then the config file\n if super().has_option(section, key):\n # Use the parent's methods to get the actual config here to be able to\n # separate the config from default config.\n return expand_env_var(super().get(section, key, **kwargs))\n if deprecated_section:\n if super().has_option(deprecated_section, deprecated_key):\n self._warn_deprecate(section, key, deprecated_section, deprecated_key)\n return expand_env_var(super().get(deprecated_section, deprecated_key, **kwargs))\n return None\n\n def _get_environment_variables(self, deprecated_key, deprecated_section, key, section):\n # first check environment variables\n option = self._get_env_var_option(section, key)\n if option is not None:\n return option\n if deprecated_section:\n option = self._get_env_var_option(deprecated_section, deprecated_key)\n if option is not None:\n self._warn_deprecate(section, key, deprecated_section, deprecated_key)\n return option\n return None\n\n def getboolean(self, section, key, **kwargs):\n val = str(self.get(section, key, **kwargs)).lower().strip()\n if '#' in val:\n val = val.split('#')[0].strip()\n if val in ('t', 'true', '1'):\n return True\n elif val in ('f', 'false', '0'):\n return False\n else:\n raise AirflowConfigException(\n f'Failed to convert value to bool. Please check \"{key}\" key in \"{section}\" section. '\n f'Current value: \"{val}\".'\n )\n\n def getint(self, section, key, **kwargs):\n val = self.get(section, key, **kwargs)\n\n try:\n return int(val)\n except ValueError:\n raise AirflowConfigException(\n f'Failed to convert value to int. Please check \"{key}\" key in \"{section}\" section. '\n f'Current value: \"{val}\".'\n )\n\n def getfloat(self, section, key, **kwargs):\n val = self.get(section, key, **kwargs)\n\n try:\n return float(val)\n except ValueError:\n raise AirflowConfigException(\n f'Failed to convert value to float. Please check \"{key}\" key in \"{section}\" section. '\n f'Current value: \"{val}\".'\n )\n\n def getimport(self, section, key, **kwargs): # noqa\n \"\"\"\n Reads options, imports the full qualified name, and returns the object.\n\n In case of failure, it throws an exception a clear message with the key aad the section names\n\n :return: The object or None, if the option is empty\n \"\"\"\n full_qualified_path = conf.get(section=section, key=key, **kwargs)\n if not full_qualified_path:\n return None\n\n try:\n return import_string(full_qualified_path)\n except ImportError as e:\n log.error(e)\n raise AirflowConfigException(\n f'The object could not be loaded. Please check \"{key}\" key in \"{section}\" section. 
'\n f'Current value: \"{full_qualified_path}\".'\n )\n\n def read(self, filenames, encoding=None):\n super().read(filenames=filenames, encoding=encoding)\n self._validate()\n\n def read_dict(self, dictionary, source='<dict>'):\n super().read_dict(dictionary=dictionary, source=source)\n self._validate()\n\n def has_option(self, section, option):\n try:\n # Using self.get() to avoid reimplementing the priority order\n # of config variables (env, config, cmd, defaults)\n # UNSET to avoid logging a warning about missing values\n self.get(section, option, fallback=_UNSET)\n return True\n except (NoOptionError, NoSectionError):\n return False\n\n def remove_option(self, section, option, remove_default=True):\n \"\"\"\n Remove an option if it exists in config from a file or\n default config. If both of config have the same option, this removes\n the option in both configs unless remove_default=False.\n \"\"\"\n if super().has_option(section, option):\n super().remove_option(section, option)\n\n if self.airflow_defaults.has_option(section, option) and remove_default:\n self.airflow_defaults.remove_option(section, option)\n\n # noinspection PyProtectedMember\n def getsection(self, section: str) -> Optional[Dict[str, Union[str, int, float, bool]]]:\n \"\"\"\n Returns the section as a dict. Values are converted to int, float, bool\n as required.\n\n :param section: section from the config\n :rtype: dict\n \"\"\"\n # pylint: disable=protected-access\n if section not in self._sections and section not in self.airflow_defaults._sections: # type: ignore\n return None\n # pylint: enable=protected-access\n\n _section = copy.deepcopy(self.airflow_defaults._sections[section]) # pylint: disable=protected-access\n\n if section in self._sections: # type: ignore\n _section.update(copy.deepcopy(self._sections[section])) # type: ignore\n\n section_prefix = f'AIRFLOW__{section.upper()}__'\n for env_var in sorted(os.environ.keys()):\n if env_var.startswith(section_prefix):\n key = env_var.replace(section_prefix, '')\n if key.endswith(\"_CMD\"):\n key = key[:-4]\n key = key.lower()\n _section[key] = self._get_env_var_option(section, key)\n\n for key, val in _section.items():\n try:\n val = int(val)\n except ValueError:\n try:\n val = float(val)\n except ValueError:\n if val.lower() in ('t', 'true'):\n val = True\n elif val.lower() in ('f', 'false'):\n val = False\n _section[key] = val\n return _section\n\n def write(self, fp, space_around_delimiters=True):\n # This is based on the configparser.RawConfigParser.write method code to add support for\n # reading options from environment variables.\n if space_around_delimiters:\n delimiter = \" {} \".format(self._delimiters[0])\n else:\n delimiter = self._delimiters[0]\n if self._defaults:\n self._write_section(fp, self.default_section, self._defaults.items(), delimiter)\n for section in self._sections:\n self._write_section(fp, section, self.getsection(section).items(), delimiter)\n\n def as_dict(\n self,\n display_source=False,\n display_sensitive=False,\n raw=False,\n include_env=True,\n include_cmds=True,\n include_secret=True,\n ) -> Dict[str, Dict[str, str]]:\n \"\"\"\n Returns the current configuration as an OrderedDict of OrderedDicts.\n\n :param display_source: If False, the option value is returned. If True,\n a tuple of (option_value, source) is returned. Source is either\n 'airflow.cfg', 'default', 'env var', or 'cmd'.\n :type display_source: bool\n :param display_sensitive: If True, the values of options set by env\n vars and bash commands will be displayed. 
If False, those options\n are shown as '< hidden >'\n :type display_sensitive: bool\n :param raw: Should the values be output as interpolated values, or the\n \"raw\" form that can be fed back in to ConfigParser\n :type raw: bool\n :param include_env: Should the value of configuration from AIRFLOW__\n environment variables be included or not\n :type include_env: bool\n :param include_cmds: Should the result of calling any *_cmd config be\n set (True, default), or should the _cmd options be left as the\n command to run (False)\n :type include_cmds: bool\n :param include_secret: Should the result of calling any *_secret config be\n set (True, default), or should the _secret options be left as the\n path to get the secret from (False)\n :type include_secret: bool\n :rtype: Dict[str, Dict[str, str]]\n :return: Dictionary, where the key is the name of the section and the content is\n the dictionary with the name of the parameter and its value.\n \"\"\"\n config_sources: Dict[str, Dict[str, str]] = {}\n configs = [\n ('default', self.airflow_defaults),\n ('airflow.cfg', self),\n ]\n\n self._replace_config_with_display_sources(config_sources, configs, display_source, raw)\n\n # add env vars and overwrite because they have priority\n if include_env:\n self._include_envs(config_sources, display_sensitive, display_source, raw)\n\n # add bash commands\n if include_cmds:\n self._include_commands(config_sources, display_sensitive, display_source, raw)\n\n # add config from secret backends\n if include_secret:\n self._include_secrets(config_sources, display_sensitive, display_source, raw)\n return config_sources\n\n def _include_secrets(self, config_sources, display_sensitive, display_source, raw):\n for (section, key) in self.sensitive_config_values:\n opt = self._get_secret_option(section, key)\n if opt:\n if not display_sensitive:\n opt = '< hidden >'\n if display_source:\n opt = (opt, 'secret')\n elif raw:\n opt = opt.replace('%', '%%')\n config_sources.setdefault(section, OrderedDict()).update({key: opt})\n del config_sources[section][key + '_secret']\n\n def _include_commands(self, config_sources, display_sensitive, display_source, raw):\n for (section, key) in self.sensitive_config_values:\n opt = self._get_cmd_option(section, key)\n if not opt:\n continue\n if not display_sensitive:\n opt = '< hidden >'\n if display_source:\n opt = (opt, 'cmd')\n elif raw:\n opt = opt.replace('%', '%%')\n config_sources.setdefault(section, OrderedDict()).update({key: opt})\n del config_sources[section][key + '_cmd']\n\n def _include_envs(self, config_sources, display_sensitive, display_source, raw):\n for env_var in [\n os_environment for os_environment in os.environ if os_environment.startswith('AIRFLOW__')\n ]:\n try:\n _, section, key = env_var.split('__', 2)\n opt = self._get_env_var_option(section, key)\n except ValueError:\n continue\n if not display_sensitive and env_var != 'AIRFLOW__CORE__UNIT_TEST_MODE':\n opt = '< hidden >'\n elif raw:\n opt = opt.replace('%', '%%')\n if display_source:\n opt = (opt, 'env var')\n\n section = section.lower()\n # if we lower key for kubernetes_environment_variables section,\n # then we won't be able to set any Airflow environment\n # variables. Airflow only parse environment variables starts\n # with AIRFLOW_. 
Therefore, we need to make it a special case.\n if section != 'kubernetes_environment_variables':\n key = key.lower()\n config_sources.setdefault(section, OrderedDict()).update({key: opt})\n\n @staticmethod\n def _replace_config_with_display_sources(config_sources, configs, display_source, raw):\n for (source_name, config) in configs:\n for section in config.sections():\n AirflowConfigParser._replace_section_config_with_display_sources(\n config, config_sources, display_source, raw, section, source_name\n )\n\n @staticmethod\n def _replace_section_config_with_display_sources(\n config, config_sources, display_source, raw, section, source_name\n ):\n sect = config_sources.setdefault(section, OrderedDict())\n for (k, val) in config.items(section=section, raw=raw):\n if display_source:\n val = (val, source_name)\n sect[k] = val\n\n def load_test_config(self):\n \"\"\"\n Load the unit test configuration.\n\n Note: this is not reversible.\n \"\"\"\n # override any custom settings with defaults\n log.info(\"Overriding settings with defaults from %s\", DEFAULT_CONFIG_FILE_PATH)\n self.read_string(parameterized_config(DEFAULT_CONFIG))\n # then read test config\n log.info(\"Reading default test configuration from %s\", TEST_CONFIG_FILE_PATH)\n self.read_string(parameterized_config(TEST_CONFIG))\n # then read any \"custom\" test settings\n log.info(\"Reading test configuration from %s\", TEST_CONFIG_FILE)\n self.read(TEST_CONFIG_FILE)\n\n @staticmethod\n def _warn_deprecate(section, key, deprecated_section, deprecated_name):\n if section == deprecated_section:\n warnings.warn(\n 'The {old} option in [{section}] has been renamed to {new} - the old '\n 'setting has been used, but please update your config.'.format(\n old=deprecated_name,\n new=key,\n section=section,\n ),\n DeprecationWarning,\n stacklevel=3,\n )\n else:\n warnings.warn(\n 'The {old_key} option in [{old_section}] has been moved to the {new_key} option in '\n '[{new_section}] - the old setting has been used, but please update your config.'.format(\n old_section=deprecated_section,\n old_key=deprecated_name,\n new_key=key,\n new_section=section,\n ),\n DeprecationWarning,\n stacklevel=3,\n )\n\n\ndef get_airflow_home():\n \"\"\"Get path to Airflow Home\"\"\"\n return expand_env_var(os.environ.get('AIRFLOW_HOME', '~/airflow'))\n\n\ndef get_airflow_config(airflow_home):\n \"\"\"Get Path to airflow.cfg path\"\"\"\n if 'AIRFLOW_CONFIG' not in os.environ:\n return os.path.join(airflow_home, 'airflow.cfg')\n return expand_env_var(os.environ['AIRFLOW_CONFIG'])\n\n\n# Setting AIRFLOW_HOME and AIRFLOW_CONFIG from environment variables, using\n# \"~/airflow\" and \"$AIRFLOW_HOME/airflow.cfg\" respectively as defaults.\n\nAIRFLOW_HOME = get_airflow_home()\nAIRFLOW_CONFIG = get_airflow_config(AIRFLOW_HOME)\npathlib.Path(AIRFLOW_HOME).mkdir(parents=True, exist_ok=True)\n\n\n# Set up dags folder for unit tests\n# this directory won't exist if users install via pip\n_TEST_DAGS_FOLDER = os.path.join(\n os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'tests', 'dags'\n)\nif os.path.exists(_TEST_DAGS_FOLDER):\n TEST_DAGS_FOLDER = _TEST_DAGS_FOLDER\nelse:\n TEST_DAGS_FOLDER = os.path.join(AIRFLOW_HOME, 'dags')\n\n# Set up plugins folder for unit tests\n_TEST_PLUGINS_FOLDER = os.path.join(\n os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'tests', 'plugins'\n)\nif os.path.exists(_TEST_PLUGINS_FOLDER):\n TEST_PLUGINS_FOLDER = _TEST_PLUGINS_FOLDER\nelse:\n TEST_PLUGINS_FOLDER = os.path.join(AIRFLOW_HOME, 'plugins')\n\n\ndef 
parameterized_config(template):\n \"\"\"\n Generates a configuration from the provided template + variables defined in\n current scope\n\n :param template: a config content templated with {{variables}}\n \"\"\"\n all_vars = {k: v for d in [globals(), locals()] for k, v in d.items()}\n return template.format(**all_vars) # noqa\n\n\ndef get_airflow_test_config(airflow_home):\n \"\"\"Get path to unittests.cfg\"\"\"\n if 'AIRFLOW_TEST_CONFIG' not in os.environ:\n return os.path.join(airflow_home, 'unittests.cfg')\n return expand_env_var(os.environ['AIRFLOW_TEST_CONFIG'])\n\n\nTEST_CONFIG_FILE = get_airflow_test_config(AIRFLOW_HOME)\n\n# only generate a Fernet key if we need to create a new config file\nif not os.path.isfile(TEST_CONFIG_FILE) or not os.path.isfile(AIRFLOW_CONFIG):\n FERNET_KEY = Fernet.generate_key().decode()\nelse:\n FERNET_KEY = ''\n\nSECRET_KEY = b64encode(os.urandom(16)).decode('utf-8')\n\nTEMPLATE_START = '# ----------------------- TEMPLATE BEGINS HERE -----------------------'\nif not os.path.isfile(TEST_CONFIG_FILE):\n log.info('Creating new Airflow config file for unit tests in: %s', TEST_CONFIG_FILE)\n with open(TEST_CONFIG_FILE, 'w') as file:\n cfg = parameterized_config(TEST_CONFIG)\n file.write(cfg.split(TEMPLATE_START)[-1].strip())\nif not os.path.isfile(AIRFLOW_CONFIG):\n log.info('Creating new Airflow config file in: %s', AIRFLOW_CONFIG)\n with open(AIRFLOW_CONFIG, 'w') as file:\n cfg = parameterized_config(DEFAULT_CONFIG)\n cfg = cfg.split(TEMPLATE_START)[-1].strip()\n file.write(cfg)\n\nlog.info(\"Reading the config from %s\", AIRFLOW_CONFIG)\n\nconf = AirflowConfigParser(default_config=parameterized_config(DEFAULT_CONFIG))\n\nconf.read(AIRFLOW_CONFIG)\n\nif conf.has_option('core', 'AIRFLOW_HOME'):\n msg = (\n 'Specifying both AIRFLOW_HOME environment variable and airflow_home '\n 'in the config file is deprecated. Please use only the AIRFLOW_HOME '\n 'environment variable and remove the config file entry.'\n )\n if 'AIRFLOW_HOME' in os.environ:\n warnings.warn(msg, category=DeprecationWarning)\n elif conf.get('core', 'airflow_home') == AIRFLOW_HOME:\n warnings.warn(\n 'Specifying airflow_home in the config file is deprecated. As you '\n 'have left it at the default value you should remove the setting '\n 'from your airflow.cfg and suffer no change in behaviour.',\n category=DeprecationWarning,\n )\n else:\n AIRFLOW_HOME = conf.get('core', 'airflow_home')\n warnings.warn(msg, category=DeprecationWarning)\n\n\nWEBSERVER_CONFIG = AIRFLOW_HOME + '/webserver_config.py'\n\nif not os.path.isfile(WEBSERVER_CONFIG):\n log.info('Creating new FAB webserver config file in: %s', WEBSERVER_CONFIG)\n DEFAULT_WEBSERVER_CONFIG, _ = _read_default_config_file('default_webserver_config.py')\n with open(WEBSERVER_CONFIG, 'w') as file:\n file.write(DEFAULT_WEBSERVER_CONFIG)\n\nif conf.getboolean('core', 'unit_test_mode'):\n conf.load_test_config()\n\n\n# Historical convenience functions to access config entries\ndef load_test_config(): # noqa: D103\n \"\"\"Historical load_test_config\"\"\"\n warnings.warn(\n \"Accessing configuration method 'load_test_config' directly from the configuration module is \"\n \"deprecated. Please access the configuration from the 'configuration.conf' object via \"\n \"'conf.load_test_config'\",\n DeprecationWarning,\n stacklevel=2,\n )\n conf.load_test_config()\n\n\ndef get(*args, **kwargs): # noqa: D103\n \"\"\"Historical get\"\"\"\n warnings.warn(\n \"Accessing configuration method 'get' directly from the configuration module is \"\n \"deprecated. 
Please access the configuration from the 'configuration.conf' object via \"\n \"'conf.get'\",\n DeprecationWarning,\n stacklevel=2,\n )\n return conf.get(*args, **kwargs)\n\n\ndef getboolean(*args, **kwargs): # noqa: D103\n \"\"\"Historical getboolean\"\"\"\n warnings.warn(\n \"Accessing configuration method 'getboolean' directly from the configuration module is \"\n \"deprecated. Please access the configuration from the 'configuration.conf' object via \"\n \"'conf.getboolean'\",\n DeprecationWarning,\n stacklevel=2,\n )\n return conf.getboolean(*args, **kwargs)\n\n\ndef getfloat(*args, **kwargs): # noqa: D103\n \"\"\"Historical getfloat\"\"\"\n warnings.warn(\n \"Accessing configuration method 'getfloat' directly from the configuration module is \"\n \"deprecated. Please access the configuration from the 'configuration.conf' object via \"\n \"'conf.getfloat'\",\n DeprecationWarning,\n stacklevel=2,\n )\n return conf.getfloat(*args, **kwargs)\n\n\ndef getint(*args, **kwargs): # noqa: D103\n \"\"\"Historical getint\"\"\"\n warnings.warn(\n \"Accessing configuration method 'getint' directly from the configuration module is \"\n \"deprecated. Please access the configuration from the 'configuration.conf' object via \"\n \"'conf.getint'\",\n DeprecationWarning,\n stacklevel=2,\n )\n return conf.getint(*args, **kwargs)\n\n\ndef getsection(*args, **kwargs): # noqa: D103\n \"\"\"Historical getsection\"\"\"\n warnings.warn(\n \"Accessing configuration method 'getsection' directly from the configuration module is \"\n \"deprecated. Please access the configuration from the 'configuration.conf' object via \"\n \"'conf.getsection'\",\n DeprecationWarning,\n stacklevel=2,\n )\n return conf.getint(*args, **kwargs)\n\n\ndef has_option(*args, **kwargs): # noqa: D103\n \"\"\"Historical has_option\"\"\"\n warnings.warn(\n \"Accessing configuration method 'has_option' directly from the configuration module is \"\n \"deprecated. Please access the configuration from the 'configuration.conf' object via \"\n \"'conf.has_option'\",\n DeprecationWarning,\n stacklevel=2,\n )\n return conf.has_option(*args, **kwargs)\n\n\ndef remove_option(*args, **kwargs): # noqa: D103\n \"\"\"Historical remove_option\"\"\"\n warnings.warn(\n \"Accessing configuration method 'remove_option' directly from the configuration module is \"\n \"deprecated. Please access the configuration from the 'configuration.conf' object via \"\n \"'conf.remove_option'\",\n DeprecationWarning,\n stacklevel=2,\n )\n return conf.remove_option(*args, **kwargs)\n\n\ndef as_dict(*args, **kwargs): # noqa: D103\n \"\"\"Historical as_dict\"\"\"\n warnings.warn(\n \"Accessing configuration method 'as_dict' directly from the configuration module is \"\n \"deprecated. Please access the configuration from the 'configuration.conf' object via \"\n \"'conf.as_dict'\",\n DeprecationWarning,\n stacklevel=2,\n )\n return conf.as_dict(*args, **kwargs)\n\n\ndef set(*args, **kwargs): # noqa pylint: disable=redefined-builtin\n \"\"\"Historical set\"\"\"\n warnings.warn(\n \"Accessing configuration method 'set' directly from the configuration module is \"\n \"deprecated. 
Please access the configuration from the 'configuration.conf' object via \"\n \"'conf.set'\",\n DeprecationWarning,\n stacklevel=2,\n )\n return conf.set(*args, **kwargs)\n\n\ndef ensure_secrets_loaded() -> List[BaseSecretsBackend]:\n \"\"\"\n Ensure that all secrets backends are loaded.\n If the secrets_backend_list contains only 2 default backends, reload it.\n \"\"\"\n # Check if the secrets_backend_list contains only 2 default backends\n if len(secrets_backend_list) == 2:\n return initialize_secrets_backends()\n return secrets_backend_list\n\n\ndef get_custom_secret_backend() -> Optional[BaseSecretsBackend]:\n \"\"\"Get Secret Backend if defined in airflow.cfg\"\"\"\n secrets_backend_cls = conf.getimport(section='secrets', key='backend')\n\n if secrets_backend_cls:\n try:\n alternative_secrets_config_dict = json.loads(\n conf.get(section='secrets', key='backend_kwargs', fallback='{}')\n )\n except JSONDecodeError:\n alternative_secrets_config_dict = {}\n\n return secrets_backend_cls(**alternative_secrets_config_dict)\n return None\n\n\ndef initialize_secrets_backends() -> List[BaseSecretsBackend]:\n \"\"\"\n * import secrets backend classes\n * instantiate them and return them in a list\n \"\"\"\n backend_list = []\n\n custom_secret_backend = get_custom_secret_backend()\n\n if custom_secret_backend is not None:\n backend_list.append(custom_secret_backend)\n\n for class_name in DEFAULT_SECRETS_SEARCH_PATH:\n secrets_backend_cls = import_string(class_name)\n backend_list.append(secrets_backend_cls())\n\n return backend_list\n\n\nsecrets_backend_list = initialize_secrets_backends()\n",
"path": "airflow/configuration.py"
}
] | [
{
"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport copy\nimport json\nimport logging\nimport multiprocessing\nimport os\nimport pathlib\nimport re\nimport shlex\nimport subprocess\nimport sys\nimport warnings\nfrom base64 import b64encode\nfrom collections import OrderedDict\n\n# Ignored Mypy on configparser because it thinks the configparser module has no _UNSET attribute\nfrom configparser import _UNSET, ConfigParser, NoOptionError, NoSectionError # type: ignore\nfrom json.decoder import JSONDecodeError\nfrom typing import Dict, List, Optional, Tuple, Union\n\nimport yaml\nfrom cryptography.fernet import Fernet\n\nfrom airflow.exceptions import AirflowConfigException\nfrom airflow.secrets import DEFAULT_SECRETS_SEARCH_PATH, BaseSecretsBackend\nfrom airflow.utils.module_loading import import_string\n\nlog = logging.getLogger(__name__)\n\n# show Airflow's deprecation warnings\nif not sys.warnoptions:\n warnings.filterwarnings(action='default', category=DeprecationWarning, module='airflow')\n warnings.filterwarnings(action='default', category=PendingDeprecationWarning, module='airflow')\n\n\ndef expand_env_var(env_var):\n \"\"\"\n Expands (potentially nested) env vars by repeatedly applying\n `expandvars` and `expanduser` until interpolation stops having\n any effect.\n \"\"\"\n if not env_var:\n return env_var\n while True:\n interpolated = os.path.expanduser(os.path.expandvars(str(env_var)))\n if interpolated == env_var:\n return interpolated\n else:\n env_var = interpolated\n\n\ndef run_command(command):\n \"\"\"Runs command and returns stdout\"\"\"\n process = subprocess.Popen(\n shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True\n )\n output, stderr = [stream.decode(sys.getdefaultencoding(), 'ignore') for stream in process.communicate()]\n\n if process.returncode != 0:\n raise AirflowConfigException(\n f\"Cannot execute {command}. Error code is: {process.returncode}. 
\"\n f\"Output: {output}, Stderr: {stderr}\"\n )\n\n return output\n\n\ndef _get_config_value_from_secret_backend(config_key):\n \"\"\"Get Config option values from Secret Backend\"\"\"\n secrets_client = get_custom_secret_backend()\n if not secrets_client:\n return None\n return secrets_client.get_config(config_key)\n\n\ndef _read_default_config_file(file_name: str) -> Tuple[str, str]:\n templates_dir = os.path.join(os.path.dirname(__file__), 'config_templates')\n file_path = os.path.join(templates_dir, file_name)\n with open(file_path, encoding='utf-8') as config_file:\n return config_file.read(), file_path\n\n\nDEFAULT_CONFIG, DEFAULT_CONFIG_FILE_PATH = _read_default_config_file('default_airflow.cfg')\nTEST_CONFIG, TEST_CONFIG_FILE_PATH = _read_default_config_file('default_test.cfg')\n\n\ndef default_config_yaml() -> dict:\n \"\"\"\n Read Airflow configs from YAML file\n\n :return: Python dictionary containing configs & their info\n \"\"\"\n templates_dir = os.path.join(os.path.dirname(__file__), 'config_templates')\n file_path = os.path.join(templates_dir, \"config.yml\")\n\n with open(file_path) as config_file:\n return yaml.safe_load(config_file)\n\n\nclass AirflowConfigParser(ConfigParser): # pylint: disable=too-many-ancestors\n \"\"\"Custom Airflow Configparser supporting defaults and deprecated options\"\"\"\n\n # These configuration elements can be fetched as the stdout of commands\n # following the \"{section}__{name}__cmd\" pattern, the idea behind this\n # is to not store password on boxes in text files.\n # These configs can also be fetched from Secrets backend\n # following the \"{section}__{name}__secret\" pattern\n sensitive_config_values = {\n ('core', 'sql_alchemy_conn'),\n ('core', 'fernet_key'),\n ('celery', 'broker_url'),\n ('celery', 'flower_basic_auth'),\n ('celery', 'result_backend'),\n ('atlas', 'password'),\n ('smtp', 'smtp_password'),\n ('kubernetes', 'git_password'),\n }\n\n # A mapping of (new option -> old option). where option is a tuple of section name and key.\n # When reading new option, the old option will be checked to see if it exists. 
If it does a\n # DeprecationWarning will be issued and the old option will be used instead\n deprecated_options = {\n ('logging', 'base_log_folder'): ('core', 'base_log_folder'),\n ('logging', 'remote_logging'): ('core', 'remote_logging'),\n ('logging', 'remote_log_conn_id'): ('core', 'remote_log_conn_id'),\n ('logging', 'remote_base_log_folder'): ('core', 'remote_base_log_folder'),\n ('logging', 'encrypt_s3_logs'): ('core', 'encrypt_s3_logs'),\n ('logging', 'logging_level'): ('core', 'logging_level'),\n ('logging', 'fab_logging_level'): ('core', 'fab_logging_level'),\n ('logging', 'logging_config_class'): ('core', 'logging_config_class'),\n ('logging', 'colored_console_log'): ('core', 'colored_console_log'),\n ('logging', 'colored_log_format'): ('core', 'colored_log_format'),\n ('logging', 'colored_formatter_class'): ('core', 'colored_formatter_class'),\n ('logging', 'log_format'): ('core', 'log_format'),\n ('logging', 'simple_log_format'): ('core', 'simple_log_format'),\n ('logging', 'task_log_prefix_template'): ('core', 'task_log_prefix_template'),\n ('logging', 'log_filename_template'): ('core', 'log_filename_template'),\n ('logging', 'log_processor_filename_template'): ('core', 'log_processor_filename_template'),\n ('logging', 'dag_processor_manager_log_location'): ('core', 'dag_processor_manager_log_location'),\n ('logging', 'task_log_reader'): ('core', 'task_log_reader'),\n ('metrics', 'statsd_on'): ('scheduler', 'statsd_on'),\n ('metrics', 'statsd_host'): ('scheduler', 'statsd_host'),\n ('metrics', 'statsd_port'): ('scheduler', 'statsd_port'),\n ('metrics', 'statsd_prefix'): ('scheduler', 'statsd_prefix'),\n ('metrics', 'statsd_allow_list'): ('scheduler', 'statsd_allow_list'),\n ('metrics', 'stat_name_handler'): ('scheduler', 'stat_name_handler'),\n ('metrics', 'statsd_datadog_enabled'): ('scheduler', 'statsd_datadog_enabled'),\n ('metrics', 'statsd_datadog_tags'): ('scheduler', 'statsd_datadog_tags'),\n ('metrics', 'statsd_custom_client_path'): ('scheduler', 'statsd_custom_client_path'),\n }\n\n # A mapping of old default values that we want to change and warn the user\n # about. 
Mapping of section -> setting -> { old, replace, by_version }\n deprecated_values = {\n 'core': {\n 'hostname_callable': (re.compile(r':'), r'.', '2.1'),\n },\n 'webserver': {\n 'navbar_color': (re.compile(r'\\A#007A87\\Z', re.IGNORECASE), '#fff', '2.1'),\n },\n 'email': {\n 'email_backend': (\n re.compile(r'^airflow\\.contrib\\.utils\\.sendgrid\\.send_email$'),\n r'airflow.providers.sendgrid.utils.emailer.send_email',\n '2.1',\n ),\n },\n }\n\n # This method transforms option names on every read, get, or set operation.\n # This changes from the default behaviour of ConfigParser from lowercasing\n # to instead be case-preserving\n def optionxform(self, optionstr: str) -> str:\n return optionstr\n\n def __init__(self, default_config=None, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.airflow_defaults = ConfigParser(*args, **kwargs)\n if default_config is not None:\n self.airflow_defaults.read_string(default_config)\n\n self.is_validated = False\n\n def _validate(self):\n\n self._validate_config_dependencies()\n\n for section, replacement in self.deprecated_values.items():\n for name, info in replacement.items():\n old, new, version = info\n current_value = self.get(section, name, fallback=None)\n if self._using_old_value(old, current_value):\n new_value = re.sub(old, new, current_value)\n self._update_env_var(section=section, name=name, new_value=new_value)\n self._create_future_warning(\n name=name,\n section=section,\n current_value=current_value,\n new_value=new_value,\n version=version,\n )\n\n self.is_validated = True\n\n def _validate_config_dependencies(self):\n \"\"\"\n Validate that config values aren't invalid given other config values\n or system-level limitations and requirements.\n \"\"\"\n is_executor_without_sqlite_support = self.get(\"core\", \"executor\") not in (\n 'DebugExecutor',\n 'SequentialExecutor',\n )\n is_sqlite = \"sqlite\" in self.get('core', 'sql_alchemy_conn')\n if is_executor_without_sqlite_support and is_sqlite:\n raise AirflowConfigException(\n \"error: cannot use sqlite with the {}\".format(self.get('core', 'executor'))\n )\n\n if self.has_option('core', 'mp_start_method'):\n mp_start_method = self.get('core', 'mp_start_method')\n start_method_options = multiprocessing.get_all_start_methods()\n\n if mp_start_method not in start_method_options:\n raise AirflowConfigException(\n \"mp_start_method should not be \"\n + mp_start_method\n + \". Possible values are \"\n + \", \".join(start_method_options)\n )\n\n def _using_old_value(self, old, current_value): # noqa\n return old.search(current_value) is not None\n\n def _update_env_var(self, section, name, new_value):\n # Make sure the env var option is removed, otherwise it\n # would be read and used instead of the value we set\n env_var = self._env_var_name(section, name)\n os.environ.pop(env_var, None)\n self.set(section, name, new_value)\n\n @staticmethod\n def _create_future_warning(name, section, current_value, new_value, version):\n warnings.warn(\n 'The {name} setting in [{section}] has the old default value '\n 'of {current_value!r}. 
This value has been changed to {new_value!r} in the '\n 'running config, but please update your config before Apache '\n 'Airflow {version}.'.format(\n name=name, section=section, current_value=current_value, new_value=new_value, version=version\n ),\n FutureWarning,\n )\n\n @staticmethod\n def _env_var_name(section, key):\n return f'AIRFLOW__{section.upper()}__{key.upper()}'\n\n def _get_env_var_option(self, section, key):\n # must have format AIRFLOW__{SECTION}__{KEY} (note double underscore)\n env_var = self._env_var_name(section, key)\n if env_var in os.environ:\n return expand_env_var(os.environ[env_var])\n # alternatively AIRFLOW__{SECTION}__{KEY}_CMD (for a command)\n env_var_cmd = env_var + '_CMD'\n if env_var_cmd in os.environ:\n # if this is a valid command key...\n if (section, key) in self.sensitive_config_values:\n return run_command(os.environ[env_var_cmd])\n # alternatively AIRFLOW__{SECTION}__{KEY}_SECRET (to get from Secrets Backend)\n env_var_secret_path = env_var + '_SECRET'\n if env_var_secret_path in os.environ:\n # if this is a valid secret path...\n if (section, key) in self.sensitive_config_values:\n return _get_config_value_from_secret_backend(os.environ[env_var_secret_path])\n return None\n\n def _get_cmd_option(self, section, key):\n fallback_key = key + '_cmd'\n # if this is a valid command key...\n if (section, key) in self.sensitive_config_values:\n if super().has_option(section, fallback_key):\n command = super().get(section, fallback_key)\n return run_command(command)\n return None\n\n def _get_secret_option(self, section, key):\n \"\"\"Get Config option values from Secret Backend\"\"\"\n fallback_key = key + '_secret'\n # if this is a valid secret key...\n if (section, key) in self.sensitive_config_values:\n if super().has_option(section, fallback_key):\n secrets_path = super().get(section, fallback_key)\n return _get_config_value_from_secret_backend(secrets_path)\n return None\n\n def get(self, section, key, **kwargs):\n section = str(section).lower()\n key = str(key).lower()\n\n deprecated_section, deprecated_key = self.deprecated_options.get((section, key), (None, None))\n\n option = self._get_environment_variables(deprecated_key, deprecated_section, key, section)\n if option is not None:\n return option\n\n option = self._get_option_from_config_file(deprecated_key, deprecated_section, key, kwargs, section)\n if option is not None:\n return option\n\n option = self._get_option_from_commands(deprecated_key, deprecated_section, key, section)\n if option is not None:\n return option\n\n option = self._get_option_from_secrets(deprecated_key, deprecated_section, key, section)\n if option is not None:\n return option\n\n return self._get_option_from_default_config(section, key, **kwargs)\n\n def _get_option_from_default_config(self, section, key, **kwargs):\n # ...then the default config\n if self.airflow_defaults.has_option(section, key) or 'fallback' in kwargs:\n return expand_env_var(self.airflow_defaults.get(section, key, **kwargs))\n\n else:\n log.warning(\"section/key [%s/%s] not found in config\", section, key)\n\n raise AirflowConfigException(f\"section/key [{section}/{key}] not found in config\")\n\n def _get_option_from_secrets(self, deprecated_key, deprecated_section, key, section):\n # ...then from secret backends\n option = self._get_secret_option(section, key)\n if option:\n return option\n if deprecated_section:\n option = self._get_secret_option(deprecated_section, deprecated_key)\n if option:\n self._warn_deprecate(section, key, 
deprecated_section, deprecated_key)\n return option\n return None\n\n def _get_option_from_commands(self, deprecated_key, deprecated_section, key, section):\n # ...then commands\n option = self._get_cmd_option(section, key)\n if option:\n return option\n if deprecated_section:\n option = self._get_cmd_option(deprecated_section, deprecated_key)\n if option:\n self._warn_deprecate(section, key, deprecated_section, deprecated_key)\n return option\n return None\n\n def _get_option_from_config_file(self, deprecated_key, deprecated_section, key, kwargs, section):\n # ...then the config file\n if super().has_option(section, key):\n # Use the parent's methods to get the actual config here to be able to\n # separate the config from default config.\n return expand_env_var(super().get(section, key, **kwargs))\n if deprecated_section:\n if super().has_option(deprecated_section, deprecated_key):\n self._warn_deprecate(section, key, deprecated_section, deprecated_key)\n return expand_env_var(super().get(deprecated_section, deprecated_key, **kwargs))\n return None\n\n def _get_environment_variables(self, deprecated_key, deprecated_section, key, section):\n # first check environment variables\n option = self._get_env_var_option(section, key)\n if option is not None:\n return option\n if deprecated_section:\n option = self._get_env_var_option(deprecated_section, deprecated_key)\n if option is not None:\n self._warn_deprecate(section, key, deprecated_section, deprecated_key)\n return option\n return None\n\n def getboolean(self, section, key, **kwargs):\n val = str(self.get(section, key, **kwargs)).lower().strip()\n if '#' in val:\n val = val.split('#')[0].strip()\n if val in ('t', 'true', '1'):\n return True\n elif val in ('f', 'false', '0'):\n return False\n else:\n raise AirflowConfigException(\n f'Failed to convert value to bool. Please check \"{key}\" key in \"{section}\" section. '\n f'Current value: \"{val}\".'\n )\n\n def getint(self, section, key, **kwargs):\n val = self.get(section, key, **kwargs)\n\n try:\n return int(val)\n except ValueError:\n raise AirflowConfigException(\n f'Failed to convert value to int. Please check \"{key}\" key in \"{section}\" section. '\n f'Current value: \"{val}\".'\n )\n\n def getfloat(self, section, key, **kwargs):\n val = self.get(section, key, **kwargs)\n\n try:\n return float(val)\n except ValueError:\n raise AirflowConfigException(\n f'Failed to convert value to float. Please check \"{key}\" key in \"{section}\" section. '\n f'Current value: \"{val}\".'\n )\n\n def getimport(self, section, key, **kwargs): # noqa\n \"\"\"\n Reads options, imports the full qualified name, and returns the object.\n\n In case of failure, it throws an exception a clear message with the key aad the section names\n\n :return: The object or None, if the option is empty\n \"\"\"\n full_qualified_path = conf.get(section=section, key=key, **kwargs)\n if not full_qualified_path:\n return None\n\n try:\n return import_string(full_qualified_path)\n except ImportError as e:\n log.error(e)\n raise AirflowConfigException(\n f'The object could not be loaded. Please check \"{key}\" key in \"{section}\" section. 
'\n f'Current value: \"{full_qualified_path}\".'\n )\n\n def read(self, filenames, encoding=None):\n super().read(filenames=filenames, encoding=encoding)\n self._validate()\n\n def read_dict(self, dictionary, source='<dict>'):\n super().read_dict(dictionary=dictionary, source=source)\n self._validate()\n\n def has_option(self, section, option):\n try:\n # Using self.get() to avoid reimplementing the priority order\n # of config variables (env, config, cmd, defaults)\n # UNSET to avoid logging a warning about missing values\n self.get(section, option, fallback=_UNSET)\n return True\n except (NoOptionError, NoSectionError):\n return False\n\n def remove_option(self, section, option, remove_default=True):\n \"\"\"\n Remove an option if it exists in config from a file or\n default config. If both of config have the same option, this removes\n the option in both configs unless remove_default=False.\n \"\"\"\n if super().has_option(section, option):\n super().remove_option(section, option)\n\n if self.airflow_defaults.has_option(section, option) and remove_default:\n self.airflow_defaults.remove_option(section, option)\n\n # noinspection PyProtectedMember\n def getsection(self, section: str) -> Optional[Dict[str, Union[str, int, float, bool]]]:\n \"\"\"\n Returns the section as a dict. Values are converted to int, float, bool\n as required.\n\n :param section: section from the config\n :rtype: dict\n \"\"\"\n # pylint: disable=protected-access\n if section not in self._sections and section not in self.airflow_defaults._sections: # type: ignore\n return None\n # pylint: enable=protected-access\n\n _section = copy.deepcopy(self.airflow_defaults._sections[section]) # pylint: disable=protected-access\n\n if section in self._sections: # type: ignore\n _section.update(copy.deepcopy(self._sections[section])) # type: ignore\n\n section_prefix = f'AIRFLOW__{section.upper()}__'\n for env_var in sorted(os.environ.keys()):\n if env_var.startswith(section_prefix):\n key = env_var.replace(section_prefix, '')\n if key.endswith(\"_CMD\"):\n key = key[:-4]\n key = key.lower()\n _section[key] = self._get_env_var_option(section, key)\n\n for key, val in _section.items():\n try:\n val = int(val)\n except ValueError:\n try:\n val = float(val)\n except ValueError:\n if val.lower() in ('t', 'true'):\n val = True\n elif val.lower() in ('f', 'false'):\n val = False\n _section[key] = val\n return _section\n\n def write(self, fp, space_around_delimiters=True):\n # This is based on the configparser.RawConfigParser.write method code to add support for\n # reading options from environment variables.\n if space_around_delimiters:\n delimiter = \" {} \".format(self._delimiters[0])\n else:\n delimiter = self._delimiters[0]\n if self._defaults:\n self._write_section(fp, self.default_section, self._defaults.items(), delimiter)\n for section in self._sections:\n self._write_section(fp, section, self.getsection(section).items(), delimiter)\n\n def as_dict(\n self,\n display_source=False,\n display_sensitive=False,\n raw=False,\n include_env=True,\n include_cmds=True,\n include_secret=True,\n ) -> Dict[str, Dict[str, str]]:\n \"\"\"\n Returns the current configuration as an OrderedDict of OrderedDicts.\n\n :param display_source: If False, the option value is returned. If True,\n a tuple of (option_value, source) is returned. Source is either\n 'airflow.cfg', 'default', 'env var', or 'cmd'.\n :type display_source: bool\n :param display_sensitive: If True, the values of options set by env\n vars and bash commands will be displayed. 
If False, those options\n are shown as '< hidden >'\n :type display_sensitive: bool\n :param raw: Should the values be output as interpolated values, or the\n \"raw\" form that can be fed back in to ConfigParser\n :type raw: bool\n :param include_env: Should the value of configuration from AIRFLOW__\n environment variables be included or not\n :type include_env: bool\n :param include_cmds: Should the result of calling any *_cmd config be\n set (True, default), or should the _cmd options be left as the\n command to run (False)\n :type include_cmds: bool\n :param include_secret: Should the result of calling any *_secret config be\n set (True, default), or should the _secret options be left as the\n path to get the secret from (False)\n :type include_secret: bool\n :rtype: Dict[str, Dict[str, str]]\n :return: Dictionary, where the key is the name of the section and the content is\n the dictionary with the name of the parameter and its value.\n \"\"\"\n config_sources: Dict[str, Dict[str, str]] = {}\n configs = [\n ('default', self.airflow_defaults),\n ('airflow.cfg', self),\n ]\n\n self._replace_config_with_display_sources(config_sources, configs, display_source, raw)\n\n # add env vars and overwrite because they have priority\n if include_env:\n self._include_envs(config_sources, display_sensitive, display_source, raw)\n\n # add bash commands\n if include_cmds:\n self._include_commands(config_sources, display_sensitive, display_source, raw)\n\n # add config from secret backends\n if include_secret:\n self._include_secrets(config_sources, display_sensitive, display_source, raw)\n return config_sources\n\n def _include_secrets(self, config_sources, display_sensitive, display_source, raw):\n for (section, key) in self.sensitive_config_values:\n opt = self._get_secret_option(section, key)\n if opt:\n if not display_sensitive:\n opt = '< hidden >'\n if display_source:\n opt = (opt, 'secret')\n elif raw:\n opt = opt.replace('%', '%%')\n config_sources.setdefault(section, OrderedDict()).update({key: opt})\n del config_sources[section][key + '_secret']\n\n def _include_commands(self, config_sources, display_sensitive, display_source, raw):\n for (section, key) in self.sensitive_config_values:\n opt = self._get_cmd_option(section, key)\n if not opt:\n continue\n if not display_sensitive:\n opt = '< hidden >'\n if display_source:\n opt = (opt, 'cmd')\n elif raw:\n opt = opt.replace('%', '%%')\n config_sources.setdefault(section, OrderedDict()).update({key: opt})\n del config_sources[section][key + '_cmd']\n\n def _include_envs(self, config_sources, display_sensitive, display_source, raw):\n for env_var in [\n os_environment for os_environment in os.environ if os_environment.startswith('AIRFLOW__')\n ]:\n try:\n _, section, key = env_var.split('__', 2)\n opt = self._get_env_var_option(section, key)\n except ValueError:\n continue\n if not display_sensitive and env_var != 'AIRFLOW__CORE__UNIT_TEST_MODE':\n opt = '< hidden >'\n elif raw:\n opt = opt.replace('%', '%%')\n if display_source:\n opt = (opt, 'env var')\n\n section = section.lower()\n # if we lower key for kubernetes_environment_variables section,\n # then we won't be able to set any Airflow environment\n # variables. Airflow only parse environment variables starts\n # with AIRFLOW_. 
Therefore, we need to make it a special case.\n if section != 'kubernetes_environment_variables':\n key = key.lower()\n config_sources.setdefault(section, OrderedDict()).update({key: opt})\n\n @staticmethod\n def _replace_config_with_display_sources(config_sources, configs, display_source, raw):\n for (source_name, config) in configs:\n for section in config.sections():\n AirflowConfigParser._replace_section_config_with_display_sources(\n config, config_sources, display_source, raw, section, source_name\n )\n\n @staticmethod\n def _replace_section_config_with_display_sources(\n config, config_sources, display_source, raw, section, source_name\n ):\n sect = config_sources.setdefault(section, OrderedDict())\n for (k, val) in config.items(section=section, raw=raw):\n if display_source:\n val = (val, source_name)\n sect[k] = val\n\n def load_test_config(self):\n \"\"\"\n Load the unit test configuration.\n\n Note: this is not reversible.\n \"\"\"\n # override any custom settings with defaults\n log.info(\"Overriding settings with defaults from %s\", DEFAULT_CONFIG_FILE_PATH)\n self.read_string(parameterized_config(DEFAULT_CONFIG))\n # then read test config\n log.info(\"Reading default test configuration from %s\", TEST_CONFIG_FILE_PATH)\n self.read_string(parameterized_config(TEST_CONFIG))\n # then read any \"custom\" test settings\n log.info(\"Reading test configuration from %s\", TEST_CONFIG_FILE)\n self.read(TEST_CONFIG_FILE)\n\n @staticmethod\n def _warn_deprecate(section, key, deprecated_section, deprecated_name):\n if section == deprecated_section:\n warnings.warn(\n 'The {old} option in [{section}] has been renamed to {new} - the old '\n 'setting has been used, but please update your config.'.format(\n old=deprecated_name,\n new=key,\n section=section,\n ),\n DeprecationWarning,\n stacklevel=3,\n )\n else:\n warnings.warn(\n 'The {old_key} option in [{old_section}] has been moved to the {new_key} option in '\n '[{new_section}] - the old setting has been used, but please update your config.'.format(\n old_section=deprecated_section,\n old_key=deprecated_name,\n new_key=key,\n new_section=section,\n ),\n DeprecationWarning,\n stacklevel=3,\n )\n\n\ndef get_airflow_home():\n \"\"\"Get path to Airflow Home\"\"\"\n return expand_env_var(os.environ.get('AIRFLOW_HOME', '~/airflow'))\n\n\ndef get_airflow_config(airflow_home):\n \"\"\"Get Path to airflow.cfg path\"\"\"\n if 'AIRFLOW_CONFIG' not in os.environ:\n return os.path.join(airflow_home, 'airflow.cfg')\n return expand_env_var(os.environ['AIRFLOW_CONFIG'])\n\n\n# Setting AIRFLOW_HOME and AIRFLOW_CONFIG from environment variables, using\n# \"~/airflow\" and \"$AIRFLOW_HOME/airflow.cfg\" respectively as defaults.\n\nAIRFLOW_HOME = get_airflow_home()\nAIRFLOW_CONFIG = get_airflow_config(AIRFLOW_HOME)\npathlib.Path(AIRFLOW_HOME).mkdir(parents=True, exist_ok=True)\n\n\n# Set up dags folder for unit tests\n# this directory won't exist if users install via pip\n_TEST_DAGS_FOLDER = os.path.join(\n os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'tests', 'dags'\n)\nif os.path.exists(_TEST_DAGS_FOLDER):\n TEST_DAGS_FOLDER = _TEST_DAGS_FOLDER\nelse:\n TEST_DAGS_FOLDER = os.path.join(AIRFLOW_HOME, 'dags')\n\n# Set up plugins folder for unit tests\n_TEST_PLUGINS_FOLDER = os.path.join(\n os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'tests', 'plugins'\n)\nif os.path.exists(_TEST_PLUGINS_FOLDER):\n TEST_PLUGINS_FOLDER = _TEST_PLUGINS_FOLDER\nelse:\n TEST_PLUGINS_FOLDER = os.path.join(AIRFLOW_HOME, 'plugins')\n\n\ndef 
parameterized_config(template):\n \"\"\"\n Generates a configuration from the provided template + variables defined in\n current scope\n\n :param template: a config content templated with {{variables}}\n \"\"\"\n all_vars = {k: v for d in [globals(), locals()] for k, v in d.items()}\n return template.format(**all_vars) # noqa\n\n\ndef get_airflow_test_config(airflow_home):\n \"\"\"Get path to unittests.cfg\"\"\"\n if 'AIRFLOW_TEST_CONFIG' not in os.environ:\n return os.path.join(airflow_home, 'unittests.cfg')\n return expand_env_var(os.environ['AIRFLOW_TEST_CONFIG'])\n\n\nTEST_CONFIG_FILE = get_airflow_test_config(AIRFLOW_HOME)\n\n# only generate a Fernet key if we need to create a new config file\nif not os.path.isfile(TEST_CONFIG_FILE) or not os.path.isfile(AIRFLOW_CONFIG):\n FERNET_KEY = Fernet.generate_key().decode()\nelse:\n FERNET_KEY = ''\n\nSECRET_KEY = b64encode(os.urandom(16)).decode('utf-8')\n\nTEMPLATE_START = '# ----------------------- TEMPLATE BEGINS HERE -----------------------'\nif not os.path.isfile(TEST_CONFIG_FILE):\n log.info('Creating new Airflow config file for unit tests in: %s', TEST_CONFIG_FILE)\n with open(TEST_CONFIG_FILE, 'w') as file:\n cfg = parameterized_config(TEST_CONFIG)\n file.write(cfg.split(TEMPLATE_START)[-1].strip())\nif not os.path.isfile(AIRFLOW_CONFIG):\n log.info('Creating new Airflow config file in: %s', AIRFLOW_CONFIG)\n with open(AIRFLOW_CONFIG, 'w') as file:\n cfg = parameterized_config(DEFAULT_CONFIG)\n cfg = cfg.split(TEMPLATE_START)[-1].strip()\n file.write(cfg)\n\nlog.info(\"Reading the config from %s\", AIRFLOW_CONFIG)\n\nconf = AirflowConfigParser(default_config=parameterized_config(DEFAULT_CONFIG))\n\nconf.read(AIRFLOW_CONFIG)\n\nif conf.has_option('core', 'AIRFLOW_HOME'):\n msg = (\n 'Specifying both AIRFLOW_HOME environment variable and airflow_home '\n 'in the config file is deprecated. Please use only the AIRFLOW_HOME '\n 'environment variable and remove the config file entry.'\n )\n if 'AIRFLOW_HOME' in os.environ:\n warnings.warn(msg, category=DeprecationWarning)\n elif conf.get('core', 'airflow_home') == AIRFLOW_HOME:\n warnings.warn(\n 'Specifying airflow_home in the config file is deprecated. As you '\n 'have left it at the default value you should remove the setting '\n 'from your airflow.cfg and suffer no change in behaviour.',\n category=DeprecationWarning,\n )\n else:\n AIRFLOW_HOME = conf.get('core', 'airflow_home')\n warnings.warn(msg, category=DeprecationWarning)\n\n\nWEBSERVER_CONFIG = AIRFLOW_HOME + '/webserver_config.py'\n\nif not os.path.isfile(WEBSERVER_CONFIG):\n log.info('Creating new FAB webserver config file in: %s', WEBSERVER_CONFIG)\n DEFAULT_WEBSERVER_CONFIG, _ = _read_default_config_file('default_webserver_config.py')\n with open(WEBSERVER_CONFIG, 'w') as file:\n file.write(DEFAULT_WEBSERVER_CONFIG)\n\nif conf.getboolean('core', 'unit_test_mode'):\n conf.load_test_config()\n\n\n# Historical convenience functions to access config entries\ndef load_test_config(): # noqa: D103\n \"\"\"Historical load_test_config\"\"\"\n warnings.warn(\n \"Accessing configuration method 'load_test_config' directly from the configuration module is \"\n \"deprecated. Please access the configuration from the 'configuration.conf' object via \"\n \"'conf.load_test_config'\",\n DeprecationWarning,\n stacklevel=2,\n )\n conf.load_test_config()\n\n\ndef get(*args, **kwargs): # noqa: D103\n \"\"\"Historical get\"\"\"\n warnings.warn(\n \"Accessing configuration method 'get' directly from the configuration module is \"\n \"deprecated. 
Please access the configuration from the 'configuration.conf' object via \"\n \"'conf.get'\",\n DeprecationWarning,\n stacklevel=2,\n )\n return conf.get(*args, **kwargs)\n\n\ndef getboolean(*args, **kwargs): # noqa: D103\n \"\"\"Historical getboolean\"\"\"\n warnings.warn(\n \"Accessing configuration method 'getboolean' directly from the configuration module is \"\n \"deprecated. Please access the configuration from the 'configuration.conf' object via \"\n \"'conf.getboolean'\",\n DeprecationWarning,\n stacklevel=2,\n )\n return conf.getboolean(*args, **kwargs)\n\n\ndef getfloat(*args, **kwargs): # noqa: D103\n \"\"\"Historical getfloat\"\"\"\n warnings.warn(\n \"Accessing configuration method 'getfloat' directly from the configuration module is \"\n \"deprecated. Please access the configuration from the 'configuration.conf' object via \"\n \"'conf.getfloat'\",\n DeprecationWarning,\n stacklevel=2,\n )\n return conf.getfloat(*args, **kwargs)\n\n\ndef getint(*args, **kwargs): # noqa: D103\n \"\"\"Historical getint\"\"\"\n warnings.warn(\n \"Accessing configuration method 'getint' directly from the configuration module is \"\n \"deprecated. Please access the configuration from the 'configuration.conf' object via \"\n \"'conf.getint'\",\n DeprecationWarning,\n stacklevel=2,\n )\n return conf.getint(*args, **kwargs)\n\n\ndef getsection(*args, **kwargs): # noqa: D103\n \"\"\"Historical getsection\"\"\"\n warnings.warn(\n \"Accessing configuration method 'getsection' directly from the configuration module is \"\n \"deprecated. Please access the configuration from the 'configuration.conf' object via \"\n \"'conf.getsection'\",\n DeprecationWarning,\n stacklevel=2,\n )\n return conf.getint(*args, **kwargs)\n\n\ndef has_option(*args, **kwargs): # noqa: D103\n \"\"\"Historical has_option\"\"\"\n warnings.warn(\n \"Accessing configuration method 'has_option' directly from the configuration module is \"\n \"deprecated. Please access the configuration from the 'configuration.conf' object via \"\n \"'conf.has_option'\",\n DeprecationWarning,\n stacklevel=2,\n )\n return conf.has_option(*args, **kwargs)\n\n\ndef remove_option(*args, **kwargs): # noqa: D103\n \"\"\"Historical remove_option\"\"\"\n warnings.warn(\n \"Accessing configuration method 'remove_option' directly from the configuration module is \"\n \"deprecated. Please access the configuration from the 'configuration.conf' object via \"\n \"'conf.remove_option'\",\n DeprecationWarning,\n stacklevel=2,\n )\n return conf.remove_option(*args, **kwargs)\n\n\ndef as_dict(*args, **kwargs): # noqa: D103\n \"\"\"Historical as_dict\"\"\"\n warnings.warn(\n \"Accessing configuration method 'as_dict' directly from the configuration module is \"\n \"deprecated. Please access the configuration from the 'configuration.conf' object via \"\n \"'conf.as_dict'\",\n DeprecationWarning,\n stacklevel=2,\n )\n return conf.as_dict(*args, **kwargs)\n\n\ndef set(*args, **kwargs): # noqa pylint: disable=redefined-builtin\n \"\"\"Historical set\"\"\"\n warnings.warn(\n \"Accessing configuration method 'set' directly from the configuration module is \"\n \"deprecated. 
Please access the configuration from the 'configuration.conf' object via \"\n \"'conf.set'\",\n DeprecationWarning,\n stacklevel=2,\n )\n return conf.set(*args, **kwargs)\n\n\ndef ensure_secrets_loaded() -> List[BaseSecretsBackend]:\n \"\"\"\n Ensure that all secrets backends are loaded.\n If the secrets_backend_list contains only 2 default backends, reload it.\n \"\"\"\n # Check if the secrets_backend_list contains only 2 default backends\n if len(secrets_backend_list) == 2:\n return initialize_secrets_backends()\n return secrets_backend_list\n\n\ndef get_custom_secret_backend() -> Optional[BaseSecretsBackend]:\n \"\"\"Get Secret Backend if defined in airflow.cfg\"\"\"\n secrets_backend_cls = conf.getimport(section='secrets', key='backend')\n\n if secrets_backend_cls:\n try:\n alternative_secrets_config_dict = json.loads(\n conf.get(section='secrets', key='backend_kwargs', fallback='{}')\n )\n except JSONDecodeError:\n alternative_secrets_config_dict = {}\n\n return secrets_backend_cls(**alternative_secrets_config_dict)\n return None\n\n\ndef initialize_secrets_backends() -> List[BaseSecretsBackend]:\n \"\"\"\n * import secrets backend classes\n * instantiate them and return them in a list\n \"\"\"\n backend_list = []\n\n custom_secret_backend = get_custom_secret_backend()\n\n if custom_secret_backend is not None:\n backend_list.append(custom_secret_backend)\n\n for class_name in DEFAULT_SECRETS_SEARCH_PATH:\n secrets_backend_cls = import_string(class_name)\n backend_list.append(secrets_backend_cls())\n\n return backend_list\n\n\nsecrets_backend_list = initialize_secrets_backends()\n",
"path": "airflow/configuration.py"
}
] | diff --git a/airflow/config_templates/config.yml b/airflow/config_templates/config.yml
index df5a53abe7282..516bb6284e1c0 100644
--- a/airflow/config_templates/config.yml
+++ b/airflow/config_templates/config.yml
@@ -1747,84 +1747,6 @@
type: string
example: ~
default: "False"
-- name: ldap
- description: ~
- options:
- - name: uri
- description: |
- set this to ldaps://<your.ldap.server>:<port>
- version_added: ~
- type: string
- example: ~
- default: ""
- - name: user_filter
- description: ~
- version_added: ~
- type: string
- example: ~
- default: "objectClass=*"
- - name: user_name_attr
- description: ~
- version_added: ~
- type: string
- example: ~
- default: "uid"
- - name: group_member_attr
- description: ~
- version_added: ~
- type: string
- example: ~
- default: "memberOf"
- - name: superuser_filter
- description: ~
- version_added: ~
- type: string
- example: ~
- default: ""
- - name: data_profiler_filter
- description: ~
- version_added: ~
- type: string
- example: ~
- default: ""
- - name: bind_user
- description: ~
- version_added: ~
- type: string
- example: ~
- default: "cn=Manager,dc=example,dc=com"
- - name: bind_password
- description: ~
- version_added: ~
- type: string
- example: ~
- default: "insecure"
- - name: basedn
- description: ~
- version_added: ~
- type: string
- example: ~
- default: "dc=example,dc=com"
- - name: cacert
- description: ~
- version_added: ~
- type: string
- example: ~
- default: "/etc/ca/ldap_ca.crt"
- - name: search_scope
- description: ~
- version_added: ~
- type: string
- example: ~
- default: "LEVEL"
- - name: ignore_malformed_schema
- description: |
- This setting allows the use of LDAP servers that either return a
- broken schema, or do not return a schema.
- version_added: 1.10.3
- type: string
- example: ~
- default: "False"
- name: kerberos
description: ~
options:
diff --git a/airflow/config_templates/default_airflow.cfg b/airflow/config_templates/default_airflow.cfg
index 8a9a6a62b6ceb..cebbfd955489f 100644
--- a/airflow/config_templates/default_airflow.cfg
+++ b/airflow/config_templates/default_airflow.cfg
@@ -873,24 +873,6 @@ use_job_schedule = True
# Only has effect if schedule_interval is set to None in DAG
allow_trigger_in_future = False
-[ldap]
-# set this to ldaps://<your.ldap.server>:<port>
-uri =
-user_filter = objectClass=*
-user_name_attr = uid
-group_member_attr = memberOf
-superuser_filter =
-data_profiler_filter =
-bind_user = cn=Manager,dc=example,dc=com
-bind_password = insecure
-basedn = dc=example,dc=com
-cacert = /etc/ca/ldap_ca.crt
-search_scope = LEVEL
-
-# This setting allows the use of LDAP servers that either return a
-# broken schema, or do not return a schema.
-ignore_malformed_schema = False
-
[kerberos]
ccache = /tmp/airflow_krb5_ccache
diff --git a/airflow/configuration.py b/airflow/configuration.py
index 92790d1fb763b..338526b06c62d 100644
--- a/airflow/configuration.py
+++ b/airflow/configuration.py
@@ -129,7 +129,6 @@ class AirflowConfigParser(ConfigParser): # pylint: disable=too-many-ancestors
('celery', 'result_backend'),
('atlas', 'password'),
('smtp', 'smtp_password'),
- ('ldap', 'bind_password'),
('kubernetes', 'git_password'),
}
diff --git a/docs/howto/set-config.rst b/docs/howto/set-config.rst
index 090a6f9352d44..3ba7d9fd2ab02 100644
--- a/docs/howto/set-config.rst
+++ b/docs/howto/set-config.rst
@@ -69,7 +69,6 @@ The following config options support this ``_cmd`` and ``_secret`` version:
* ``result_backend`` in ``[celery]`` section
* ``password`` in ``[atlas]`` section
* ``smtp_password`` in ``[smtp]`` section
-* ``bind_password`` in ``[ldap]`` section
* ``git_password`` in ``[kubernetes]`` section
The ``_cmd`` config options can also be set using a corresponding environment variable
diff --git a/docs/spelling_wordlist.txt b/docs/spelling_wordlist.txt
index 0a1571a59c78f..eaf8c2e15d60c 100644
--- a/docs/spelling_wordlist.txt
+++ b/docs/spelling_wordlist.txt
@@ -478,7 +478,6 @@ backticks
balancer
balancers
baseOperator
-basedn
basestring
basetaskrunner
bashrc
diff --git a/tests/core/test_config_templates.py b/tests/core/test_config_templates.py
index 9c09c318c678c..42ba99133028a 100644
--- a/tests/core/test_config_templates.py
+++ b/tests/core/test_config_templates.py
@@ -45,7 +45,6 @@
'celery_broker_transport_options',
'dask',
'scheduler',
- 'ldap',
'kerberos',
'github_enterprise',
'admin',
|
archlinux__archinstall-555 | Version Bump in conf.py?
https://github.com/archlinux/archinstall/blob/a4033a7d3a94916f2b4972d212f9d0069fca39cd/docs/conf.py#L44
| [
{
"content": "import os\nimport re\nimport sys\n\nsys.path.insert(0, os.path.abspath('..'))\n\n\ndef process_docstring(app, what, name, obj, options, lines):\n\tspaces_pat = re.compile(r\"( {8})\")\n\tll = []\n\tfor line in lines:\n\t\tll.append(spaces_pat.sub(\" \", line))\n\tlines[:] = ll\n\n\ndef setup(app):\n\tapp.connect('autodoc-process-docstring', process_docstring)\n\n\n# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\n\n# -- Project information -----------------------------------------------------\n\nproject = 'python-archinstall'\ncopyright = '2020, Anton Hvornum'\nauthor = 'Anton Hvornum'\n\n# The full version, including alpha/beta/rc tags\nrelease = 'v2.1.0'\n\n# -- General configuration ---------------------------------------------------\n\nmaster_doc = 'index'\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n\t'sphinx.ext.autodoc',\n\t'sphinx.ext.inheritance_diagram',\n\t'sphinx.ext.todo'\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\n# html_theme = 'alabaster'\nhtml_theme = 'sphinx_rtd_theme'\n\nhtml_logo = \"_static/logo.png\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If false, no module index is generated.\nhtml_domain_indices = True\n\n# If false, no index is generated.\nhtml_use_index = True\n\n# If true, the index is split into individual pages for each letter.\nhtml_split_index = True\n\n# If true, links to the reST sources are added to the pages.\nhtml_show_sourcelink = False\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n# html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n# html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n# html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. 
\".xhtml\").\n# html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'archinstalldoc'\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(\"index\", \"archinstall\", u\"archinstall Documentation\", [u\"Anton Hvornum\"], 1)]\n\n# If true, show URL addresses after external links.\n# man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n\t(\"index\", \"archinstall\", u\"archinstall Documentation\", u\"Anton Hvornum\", \"archinstall\", \"Simple and minimal HTTP server.\"),\n]\n",
"path": "docs/conf.py"
}
] | [
{
"content": "import os\nimport re\nimport sys\n\nsys.path.insert(0, os.path.abspath('..'))\n\n\ndef process_docstring(app, what, name, obj, options, lines):\n\tspaces_pat = re.compile(r\"( {8})\")\n\tll = []\n\tfor line in lines:\n\t\tll.append(spaces_pat.sub(\" \", line))\n\tlines[:] = ll\n\n\ndef setup(app):\n\tapp.connect('autodoc-process-docstring', process_docstring)\n\n\n# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\n\n# -- Project information -----------------------------------------------------\n\nproject = 'python-archinstall'\ncopyright = '2020, Anton Hvornum'\nauthor = 'Anton Hvornum'\n\n# The full version, including alpha/beta/rc tags\nrelease = 'v2.3.0.dev0'\n\n# -- General configuration ---------------------------------------------------\n\nmaster_doc = 'index'\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n\t'sphinx.ext.autodoc',\n\t'sphinx.ext.inheritance_diagram',\n\t'sphinx.ext.todo'\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\n# html_theme = 'alabaster'\nhtml_theme = 'sphinx_rtd_theme'\n\nhtml_logo = \"_static/logo.png\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If false, no module index is generated.\nhtml_domain_indices = True\n\n# If false, no index is generated.\nhtml_use_index = True\n\n# If true, the index is split into individual pages for each letter.\nhtml_split_index = True\n\n# If true, links to the reST sources are added to the pages.\nhtml_show_sourcelink = False\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n# html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n# html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n# html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. 
\".xhtml\").\n# html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'archinstalldoc'\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(\"index\", \"archinstall\", u\"archinstall Documentation\", [u\"Anton Hvornum\"], 1)]\n\n# If true, show URL addresses after external links.\n# man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n\t(\"index\", \"archinstall\", u\"archinstall Documentation\", u\"Anton Hvornum\", \"archinstall\", \"Simple and minimal HTTP server.\"),\n]\n",
"path": "docs/conf.py"
}
] | diff --git a/docs/conf.py b/docs/conf.py
index 375ff434de..add1c5e749 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -41,7 +41,7 @@ def setup(app):
author = 'Anton Hvornum'
# The full version, including alpha/beta/rc tags
-release = 'v2.1.0'
+release = 'v2.3.0.dev0'
# -- General configuration ---------------------------------------------------
|
comic__grand-challenge.org-758 | grandchallenge.cases.tasks.build_images should use a separate queue
This process can take a long time
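One possible way to do this (a minimal sketch only, not necessarily the project's actual change; the queue name `images` and the worker command are assumptions for illustration) is to route the task to a dedicated Celery queue via the `CELERY_TASK_ROUTES` setting that already exists in `config/settings.py`, and then run a separate worker that consumes only that queue:

```python
# Hypothetical sketch for config/settings.py: send the long-running image
# building task to its own queue so it does not block the default workers.
CELERY_TASK_ROUTES = {
    "grandchallenge.cases.tasks.build_images": {"queue": "images"},
}
```

A dedicated worker would then be started for that queue only, e.g. `celery worker -A config -Q images` (command shown as an assumption; adjust to the project's Celery app module and deployment setup).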
| [
{
"content": "# Django settings for comic project.\nimport glob\nimport os\nimport re\nimport uuid\nfrom datetime import timedelta\nfrom distutils.util import strtobool as strtobool_i\n\nfrom django.contrib.messages import constants as messages\nfrom django.core.exceptions import ImproperlyConfigured\n\nfrom config.denylist import USERNAME_DENYLIST\n\n\ndef strtobool(val) -> bool:\n \"\"\" Returns disutils.util.strtobool as a boolean \"\"\"\n return bool(strtobool_i(val))\n\n\n# Default COMIC settings, to be included by settings.py\nDEBUG = strtobool(os.environ.get(\"DEBUG\", \"True\"))\n\nADMINS = (\n # ('Your Name', '[email protected]'),\n)\n\n# Who gets the 404 notifications?\nmanager_email = os.environ.get(\"MANAGER_EMAIL\", None)\nif manager_email:\n MANAGERS = [(\"Manager\", manager_email)]\n\nIGNORABLE_404_URLS = [\n re.compile(r\".*\\.(php|cgi|asp).*\"),\n re.compile(r\"^/phpmyadmin.*\"),\n re.compile(r\"^/gen204.*\"),\n re.compile(r\"^/wp-content.*\"),\n re.compile(r\".*/trackback.*\"),\n]\n\n# Used as starting points for various other paths. realpath(__file__) starts in\n# the \"Comic\" app dir. We need to go one dir higher so path.join(\"..\")\nSITE_ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\nAPPS_DIR = os.path.join(SITE_ROOT, \"grandchallenge\")\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql_psycopg2\",\n \"NAME\": os.environ.get(\"POSTGRES_DB\", \"comic\"),\n \"USER\": os.environ.get(\"POSTGRES_USER\", \"comic\"),\n \"PASSWORD\": os.environ.get(\"POSTGRES_PASSWORD\", \"secretpassword\"),\n \"HOST\": os.environ.get(\"POSTGRES_HOST\", \"postgres\"),\n \"PORT\": \"\",\n }\n}\n\nEMAIL_BACKEND = \"djcelery_email.backends.CeleryEmailBackend\"\nEMAIL_HOST = os.environ.get(\"EMAIL_HOST\", \"\")\nEMAIL_HOST_USER = os.environ.get(\"EMAIL_HOST_USER\", \"\")\nEMAIL_HOST_PASSWORD = os.environ.get(\"EMAIL_HOST_PASSWORD\", \"\")\nEMAIL_PORT = int(os.environ.get(\"EMAIL_PORT\", \"25\"))\nEMAIL_USE_TLS = strtobool(os.environ.get(\"EMAIL_USE_TLS\", \"False\"))\nDEFAULT_FROM_EMAIL = os.environ.get(\n \"DEFAULT_FROM_EMAIL\", \"webmaster@localhost\"\n)\nSERVER_EMAIL = os.environ.get(\"SERVER_EMAIL\", \"root@localhost\")\n\nANONYMOUS_USER_NAME = \"AnonymousUser\"\n\nAUTH_PROFILE_MODULE = \"profiles.UserProfile\"\nUSERENA_USE_HTTPS = False\nUSERENA_DEFAULT_PRIVACY = \"open\"\nLOGIN_URL = \"/accounts/signin/\"\nLOGOUT_URL = \"/accounts/signout/\"\n\nLOGIN_REDIRECT_URL = \"/accounts/login-redirect/\"\nSOCIAL_AUTH_LOGIN_REDIRECT_URL = LOGIN_REDIRECT_URL\n\n# Do not give message popups saying \"you have been logged out\". Users are expected\n# to know they have been logged out when they click the logout button\nUSERENA_USE_MESSAGES = (False,)\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# On Unix systems, a value of None will cause Django to use the same\n# timezone as the operating system.\n# If running in a Windows environment this must be set to the same as your\n# system time zone.\nTIME_ZONE = \"UTC\"\n\n# Language code for this installation. 
All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = \"en-us\"\n\nSITE_ID = int(os.environ.get(\"SITE_ID\", \"1\"))\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale.\nUSE_L10N = True\n\n# If you set this to False, Django will not use timezone-aware datetimes.\nUSE_TZ = True\n\n# the name of the main project: this project is shown when url is loaded without\n# arguments, and pages in this project appear as menu items throughout the site\nMAIN_PROJECT_NAME = os.environ.get(\"MAIN_PROJECT_NAME\", \"comic\")\n\n##############################################################################\n#\n# Storage\n#\n##############################################################################\nDEFAULT_FILE_STORAGE = \"django.core.files.storage.FileSystemStorage\"\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: \"/home/media/media.lawrence.com/media/\"\nMEDIA_ROOT = os.environ.get(\"MEDIA_ROOT\", \"/dbox/Dropbox/media/\")\n\n# URL that handles the media served from MEDIA_ROOT. Make sure to use a\n# trailing slash.\n# Examples: \"http://media.lawrence.com/media/\", \"http://example.com/media/\"\nMEDIA_URL = \"/media/\"\n\n# In each challenge there can be a single directory out of which files can be\n# downloaded without logging in.\nCOMIC_PUBLIC_FOLDER_NAME = \"public_html\"\nCOMIC_ADDITIONAL_PUBLIC_FOLDER_NAMES = [\"results/public\"]\n\n# In each challenge there can be a single directory from which files can only\n# be downloaded by registered participants of that project\nCOMIC_REGISTERED_ONLY_FOLDER_NAME = \"datasets\"\n\n# Subdirectories on root for various files\nJQFILEUPLOAD_UPLOAD_SUBIDRECTORY = \"jqfileupload\"\nIMAGE_FILES_SUBDIRECTORY = \"images\"\nEVALUATION_FILES_SUBDIRECTORY = \"evaluation\"\n\n# This is for storing files that should not be served to the public\nAWS_DEFAULT_ACL = None\nPRIVATE_S3_STORAGE_KWARGS = {\n \"access_key\": os.environ.get(\"PRIVATE_S3_STORAGE_ACCESS_KEY\", \"\"),\n \"secret_key\": os.environ.get(\"PRIVATE_S3_STORAGE_SECRET_KEY\", \"\"),\n \"bucket_name\": os.environ.get(\n \"PRIVATE_S3_STORAGE_BUCKET_NAME\", \"grand-challenge-private\"\n ),\n \"auto_create_bucket\": True,\n \"endpoint_url\": os.environ.get(\n \"PRIVATE_S3_STORAGE_ENDPOINT_URL\", \"http://minio-private:9000\"\n ),\n # Do not overwrite files, we get problems with jqfileupload otherwise\n \"file_overwrite\": False,\n}\nPROTECTED_S3_STORAGE_KWARGS = {\n \"access_key\": os.environ.get(\"PROTECTED_S3_STORAGE_ACCESS_KEY\", \"\"),\n \"secret_key\": os.environ.get(\"PROTECTED_S3_STORAGE_SECRET_KEY\", \"\"),\n \"bucket_name\": os.environ.get(\n \"PROTECTED_S3_STORAGE_BUCKET_NAME\", \"grand-challenge-protected\"\n ),\n \"auto_create_bucket\": True,\n \"endpoint_url\": os.environ.get(\n \"PROTECTED_S3_STORAGE_ENDPOINT_URL\", \"http://minio-protected:9000\"\n ),\n # This is the domain where people will be able to go to download data\n # from this bucket. 
Usually we would use reverse to find this out,\n # but this needs to be defined before the database is populated\n \"custom_domain\": os.environ.get(\n \"PROTECTED_S3_CUSTOM_DOMAIN\", \"gc.localhost/media\"\n ),\n}\n\n##############################################################################\n#\n# Caching\n#\n##############################################################################\n\nCACHES = {\n \"default\": {\n \"BACKEND\": \"django.core.cache.backends.memcached.MemcachedCache\",\n \"LOCATION\": \"memcached:11211\",\n }\n}\n\n\nROOT_URLCONF = \"config.urls\"\nSUBDOMAIN_URL_CONF = \"grandchallenge.subdomains.urls\"\nDEFAULT_SCHEME = os.environ.get(\"DEFAULT_SCHEME\", \"https\")\n\nSESSION_COOKIE_DOMAIN = os.environ.get(\n \"SESSION_COOKIE_DOMAIN\", \".gc.localhost\"\n)\n# We're always running behind a proxy so set these to true\nSESSION_COOKIE_SECURE = True\nCSRF_COOKIE_SECURE = True\nSECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n\n# Set the allowed hosts to the cookie domain\nALLOWED_HOSTS = [SESSION_COOKIE_DOMAIN, \"web\"]\n\n# Security options\nSECURE_HSTS_SECONDS = int(os.environ.get(\"SECURE_HSTS_SECONDS\", \"0\"))\nSECURE_HSTS_INCLUDE_SUBDOMAINS = strtobool(\n os.environ.get(\"SECURE_HSTS_INCLUDE_SUBDOMAINS\", \"False\")\n)\nSECURE_CONTENT_TYPE_NOSNIFF = strtobool(\n os.environ.get(\"SECURE_CONTENT_TYPE_NOSNIFF\", \"False\")\n)\nSECURE_BROWSER_XSS_FILTER = strtobool(\n os.environ.get(\"SECURE_BROWSER_XSS_FILTER\", \"False\")\n)\nX_FRAME_OPTIONS = os.environ.get(\"X_FRAME_OPTIONS\", \"SAMEORIGIN\")\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/home/media/media.lawrence.com/static/\"\nSTATIC_ROOT = \"/static/\"\n\nSTATIC_HOST = os.environ.get(\"DJANGO_STATIC_HOST\", \"\")\nSTATIC_URL = f\"{STATIC_HOST}/static/\"\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n \"django.contrib.staticfiles.finders.FileSystemFinder\",\n \"django.contrib.staticfiles.finders.AppDirectoriesFinder\",\n)\n\n# Vendored static files will be put here\nSTATICFILES_DIRS = [\"/opt/static/\"]\n\nSTATICFILES_STORAGE = \"whitenoise.storage.CompressedManifestStaticFilesStorage\"\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = os.environ.get(\n \"SECRET_KEY\", \"d=%^l=xa02an9jn-$!*hy1)5yox$a-$2(ejt-2smimh=j4%8*b\"\n)\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [str(APPS_DIR)],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.contrib.auth.context_processors.auth\",\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.i18n\",\n \"django.template.context_processors.media\",\n \"django.template.context_processors.static\",\n \"django.template.context_processors.tz\",\n \"django.template.context_processors.request\",\n \"django.contrib.messages.context_processors.messages\",\n \"grandchallenge.core.contextprocessors.contextprocessors.comic_site\",\n \"grandchallenge.core.contextprocessors.contextprocessors.google_analytics_id\",\n ]\n },\n }\n]\n\nMIDDLEWARE = (\n \"django.middleware.security.SecurityMiddleware\", # Keep security at top\n \"whitenoise.middleware.WhiteNoiseMiddleware\", # Keep whitenoise after security and before all else\n \"django.middleware.common.BrokenLinkEmailsMiddleware\",\n # Keep 
BrokenLinkEmailsMiddleware near the top\n \"raven.contrib.django.raven_compat.middleware.SentryResponseErrorIdMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"grandchallenge.subdomains.middleware.subdomain_middleware\",\n \"grandchallenge.subdomains.middleware.challenge_subdomain_middleware\",\n \"grandchallenge.subdomains.middleware.subdomain_urlconf_middleware\",\n)\n\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = \"config.wsgi.application\"\n\nDJANGO_APPS = [\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.sites\",\n \"django.contrib.messages\",\n \"whitenoise.runserver_nostatic\", # Keep whitenoise above staticfiles\n \"django.contrib.staticfiles\",\n \"django.contrib.humanize\",\n \"django.contrib.admin\",\n \"django.contrib.postgres\",\n]\n\nTHIRD_PARTY_APPS = [\n \"raven.contrib.django.raven_compat\", # error logging\n \"django_celery_results\", # database results backend\n \"django_celery_beat\", # periodic tasks\n \"djcelery_email\", # asynchronous emails\n \"userena\", # user profiles\n \"guardian\", # userena dependency, per object permissions\n \"easy_thumbnails\", # userena dependency\n \"social_django\", # social authentication with oauth2\n \"rest_framework\", # provides REST API\n \"rest_framework.authtoken\", # token auth for REST API\n \"crispy_forms\", # bootstrap forms\n \"favicon\", # favicon management\n \"django_select2\", # for multiple choice widgets\n \"django_summernote\", # for WYSIWYG page editing\n \"sorl.thumbnail\", # for dynamic thumbnails\n \"dal\", # for autocompletion of selection fields\n \"dal_select2\", # for autocompletion of selection fields\n]\n\nLOCAL_APPS = [\n \"grandchallenge.admins\",\n \"grandchallenge.api\",\n \"grandchallenge.challenges\",\n \"grandchallenge.core\",\n \"grandchallenge.evaluation\",\n \"grandchallenge.jqfileupload\",\n \"grandchallenge.pages\",\n \"grandchallenge.participants\",\n \"grandchallenge.profiles\",\n \"grandchallenge.teams\",\n \"grandchallenge.uploads\",\n \"grandchallenge.cases\",\n \"grandchallenge.algorithms\",\n \"grandchallenge.container_exec\",\n \"grandchallenge.datasets\",\n \"grandchallenge.submission_conversion\",\n \"grandchallenge.statistics\",\n \"grandchallenge.archives\",\n \"grandchallenge.patients\",\n \"grandchallenge.studies\",\n \"grandchallenge.registrations\",\n \"grandchallenge.annotations\",\n \"grandchallenge.retina_core\",\n \"grandchallenge.retina_importers\",\n \"grandchallenge.retina_api\",\n]\n\nINSTALLED_APPS = DJANGO_APPS + LOCAL_APPS + THIRD_PARTY_APPS\n\nADMIN_URL = f'{os.environ.get(\"DJANGO_ADMIN_URL\", \"django-admin\")}/'\n\nAUTHENTICATION_BACKENDS = (\n \"social_core.backends.google.GoogleOAuth2\",\n \"userena.backends.UserenaAuthenticationBackend\",\n \"guardian.backends.ObjectPermissionBackend\",\n \"django.contrib.auth.backends.ModelBackend\",\n)\n\nGOOGLE_MAPS_API_KEY = os.environ.get(\"GOOGLE_MAPS_API_KEY\", \"\")\nGOOGLE_ANALYTICS_ID = os.environ.get(\"GOOGLE_ANALYTICS_ID\", \"GA_TRACKING_ID\")\n\nSOCIAL_AUTH_GOOGLE_OAUTH2_KEY = os.environ.get(\n \"SOCIAL_AUTH_GOOGLE_OAUTH2_KEY\", \"\"\n)\nSOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = os.environ.get(\n 
\"SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET\", \"\"\n)\n\n# TODO: JM - Add the profile filling as a partial\nSOCIAL_AUTH_PIPELINE = (\n \"social_core.pipeline.social_auth.social_details\",\n \"social_core.pipeline.social_auth.social_uid\",\n \"social_core.pipeline.social_auth.auth_allowed\",\n \"social_core.pipeline.social_auth.social_user\",\n \"social_core.pipeline.social_auth.associate_by_email\",\n \"social_core.pipeline.user.get_username\",\n \"social_core.pipeline.user.create_user\",\n \"grandchallenge.profiles.social_auth.pipeline.profile.create_profile\",\n \"social_core.pipeline.social_auth.associate_user\",\n \"social_core.pipeline.social_auth.load_extra_data\",\n \"social_core.pipeline.user.user_details\",\n)\n\n# Do not sanitize redirects for social auth so we can redirect back to\n# other subdomains\nSOCIAL_AUTH_SANITIZE_REDIRECTS = False\nSOCIAL_AUTH_REDIRECT_IS_HTTPS = True\n\n# Django 1.6 introduced a new test runner, use it\nTEST_RUNNER = \"django.test.runner.DiscoverRunner\"\n\n# WYSIWYG editing with Summernote\nSUMMERNOTE_THEME = \"bs4\"\nSUMMERNOTE_CONFIG = {\n \"attachment_model\": \"uploads.SummernoteAttachment\",\n \"attachment_require_authentication\": True,\n \"summernote\": {\n \"width\": \"100%\",\n \"toolbar\": [\n [\"style\", [\"style\"]],\n [\n \"font\",\n [\"bold\", \"italic\", \"underline\", \"strikethrough\", \"clear\"],\n ],\n [\"para\", [\"ul\", \"ol\", \"paragraph\"]],\n [\"insert\", [\"link\", \"picture\", \"hr\"]],\n [\"view\", [\"fullscreen\", \"codeview\"]],\n [\"help\", [\"help\"]],\n ],\n },\n}\n\n# Settings for allowed HTML\nBLEACH_ALLOWED_TAGS = [\n \"a\",\n \"abbr\",\n \"acronym\",\n \"b\",\n \"blockquote\",\n \"br\",\n \"code\",\n \"col\",\n \"div\",\n \"em\",\n \"h1\",\n \"h2\",\n \"h3\",\n \"h4\",\n \"h5\",\n \"h6\",\n \"hr\",\n \"i\",\n \"iframe\", # Allowed for now for continuous registration challenge\n \"img\",\n \"li\",\n \"ol\",\n \"p\",\n \"pre\",\n \"span\",\n \"strike\",\n \"strong\",\n \"table\",\n \"tbody\",\n \"thead\",\n \"td\",\n \"th\",\n \"tr\",\n \"u\",\n \"ul\",\n]\nBLEACH_ALLOWED_ATTRIBUTES = {\n \"*\": [\"class\", \"data-toggle\", \"id\", \"style\", \"role\"],\n \"a\": [\"href\", \"title\"],\n \"abbr\": [\"title\"],\n \"acronym\": [\"title\"],\n \"div\": [\"data-geochart\"], # Required for geocharts\n \"iframe\": [\n \"src\",\n \"sandbox\",\n \"data-groupname\",\n \"scrolling\",\n \"height\",\n ], # For continuous registration challenge and google group\n \"img\": [\"height\", \"src\", \"width\"],\n}\nBLEACH_ALLOWED_STYLES = [\"height\", \"margin-left\", \"text-align\", \"width\"]\nBLEACH_ALLOWED_PROTOCOLS = [\"http\", \"https\", \"mailto\"]\nBLEACH_STRIP = strtobool(os.environ.get(\"BLEACH_STRIP\", \"True\"))\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\"\n },\n {\"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\"},\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\"\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\"\n },\n]\n\n# A sample logging configuration. More info in configuration can be found at\n# https://docs.djangoproject.com/en/dev/topics/logging/ .\n# This configuration writes WARNING and worse errors to an error log file, and\n# sends an email to all admins. 
It also writes INFO logmessages and worse to a\n# regular log file.\nLOG_FILEPATH = \"/tmp/django.log\"\nLOG_FILEPATH_ERROR = \"/tmp/django_error.log\"\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": True,\n \"root\": {\"level\": \"WARNING\", \"handlers\": [\"sentry\"]},\n \"formatters\": {\n \"verbose\": {\n \"format\": \"%(levelname)s %(asctime)s %(module)s \"\n \"%(process)d %(thread)d %(message)s\"\n }\n },\n \"handlers\": {\n \"sentry\": {\n \"level\": \"ERROR\",\n # To capture more than ERROR, change to WARNING, INFO, etc.\n \"class\": \"raven.contrib.django.raven_compat.handlers.SentryHandler\",\n },\n \"console\": {\n \"level\": \"DEBUG\",\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"verbose\",\n },\n },\n \"loggers\": {\n \"grandchallenge\": {\n \"level\": \"WARNING\",\n \"handlers\": [\"console\"],\n \"propagate\": True,\n },\n \"django.db.backends\": {\n \"level\": \"ERROR\",\n \"handlers\": [\"console\"],\n \"propagate\": False,\n },\n \"raven\": {\n \"level\": \"DEBUG\",\n \"handlers\": [\"console\"],\n \"propagate\": False,\n },\n \"sentry.errors\": {\n \"level\": \"DEBUG\",\n \"handlers\": [\"console\"],\n \"propagate\": False,\n },\n },\n}\n\nRAVEN_CONFIG = {\"dsn\": os.environ.get(\"DJANGO_SENTRY_DSN\", \"\")}\n\nREST_FRAMEWORK = {\n \"DEFAULT_PERMISSION_CLASSES\": (\"rest_framework.permissions.IsAdminUser\",),\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"rest_framework.authentication.TokenAuthentication\",\n ),\n}\n\nCELERY_BROKER_URL = os.environ.get(\"CELERY_BROKER_URL\", \"redis://redis:6379/0\")\nCELERY_RESULT_BACKEND = os.environ.get(\"CELERY_RESULT_BACKEND\", \"django-db\")\nCELERY_RESULT_PERSISTENT = True\nCELERY_TASK_SOFT_TIME_LIMIT = int(\n os.environ.get(\"CELERY_TASK_SOFT_TIME_LIMIT\", \"7200\")\n)\nCELERY_TASK_TIME_LIMIT = int(os.environ.get(\"CELERY_TASK_TIME_LIMIT\", \"7260\"))\n\nCONTAINER_EXEC_DOCKER_BASE_URL = os.environ.get(\n \"CONTAINER_EXEC_DOCKER_BASE_URL\", \"unix://var/run/docker.sock\"\n)\nCONTAINER_EXEC_DOCKER_TLSVERIFY = strtobool(\n os.environ.get(\"CONTAINER_EXEC_DOCKER_TLSVERIFY\", \"False\")\n)\nCONTAINER_EXEC_DOCKER_TLSCACERT = os.environ.get(\n \"CONTAINER_EXEC_DOCKER_TLSCACERT\", \"\"\n)\nCONTAINER_EXEC_DOCKER_TLSCERT = os.environ.get(\n \"CONTAINER_EXEC_DOCKER_TLSCERT\", \"\"\n)\nCONTAINER_EXEC_DOCKER_TLSKEY = os.environ.get(\n \"CONTAINER_EXEC_DOCKER_TLSKEY\", \"\"\n)\nCONTAINER_EXEC_MEMORY_LIMIT = os.environ.get(\n \"CONTAINER_EXEC_MEMORY_LIMIT\", \"4g\"\n)\nCONTAINER_EXEC_IO_IMAGE = \"alpine:3.9\"\nCONTAINER_EXEC_IO_SHA256 = (\n \"sha256:5cb3aa00f89934411ffba5c063a9bc98ace875d8f92e77d0029543d9f2ef4ad0\"\n)\nCONTAINER_EXEC_CPU_QUOTA = int(\n os.environ.get(\"CONTAINER_EXEC_CPU_QUOTA\", \"100000\")\n)\nCONTAINER_EXEC_CPU_PERIOD = int(\n os.environ.get(\"CONTAINER_EXEC_CPU_PERIOD\", \"100000\")\n)\nCONTAINER_EXEC_DOCKER_RUNTIME = os.environ.get(\n \"CONTAINER_EXEC_DOCKER_RUNTIME\", None\n)\n\nCELERY_BEAT_SCHEDULE = {\n \"cleanup_stale_uploads\": {\n \"task\": \"grandchallenge.jqfileupload.tasks.cleanup_stale_uploads\",\n \"schedule\": timedelta(hours=1),\n },\n \"clear_sessions\": {\n \"task\": \"grandchallenge.core.tasks.clear_sessions\",\n \"schedule\": timedelta(days=1),\n },\n \"update_filter_classes\": {\n \"task\": \"grandchallenge.challenges.tasks.update_filter_classes\",\n \"schedule\": timedelta(minutes=5),\n },\n \"validate_external_challenges\": {\n \"task\": \"grandchallenge.challenges.tasks.check_external_challenge_urls\",\n \"schedule\": timedelta(days=1),\n },\n}\n\nCELERY_TASK_ROUTES = {\n 
\"grandchallenge.container_exec.tasks.execute_job\": \"evaluation\"\n}\n\n# Set which template pack to use for forms\nCRISPY_TEMPLATE_PACK = \"bootstrap4\"\n\n# When using bootstrap error messages need to be renamed to danger\nMESSAGE_TAGS = {messages.ERROR: \"danger\"}\n\n# CIRRUS Is an external application that can view images\nCIRRUS_APPLICATION = \"https://apps.diagnijmegen.nl/Applications/CIRRUSWeb_master_98d13770/#!/?workstation=BasicWorkstation\"\nCIRRUS_BASE_IMAGE_QUERY_PARAM = \"grand_challenge_image\"\nCIRRUS_ANNOATION_QUERY_PARAM = \"grand_challenge_overlay\"\n\n# Disallow some challenge names due to subdomain or media folder clashes\nDISALLOWED_CHALLENGE_NAMES = [\n \"m\",\n IMAGE_FILES_SUBDIRECTORY,\n \"logos\",\n \"banners\",\n \"mugshots\",\n \"docker\",\n EVALUATION_FILES_SUBDIRECTORY,\n \"evaluation-supplementary\",\n \"favicon\",\n \"i\",\n \"cache\", # for sorl-thumbnails\n JQFILEUPLOAD_UPLOAD_SUBIDRECTORY,\n *USERNAME_DENYLIST,\n]\n\nif MEDIA_ROOT[-1] != \"/\":\n msg = (\n \"MEDIA_ROOT setting should end in a slash. Found '\"\n + MEDIA_ROOT\n + \"'. Please add a slash\"\n )\n raise ImproperlyConfigured(msg)\n\nENABLE_DEBUG_TOOLBAR = False\n\nif DEBUG:\n EMAIL_BACKEND = \"django.core.mail.backends.dummy.EmailBackend\"\n\n if ENABLE_DEBUG_TOOLBAR:\n INSTALLED_APPS += (\"debug_toolbar\",)\n\n MIDDLEWARE += (\"debug_toolbar.middleware.DebugToolbarMiddleware\",)\n\n DEBUG_TOOLBAR_CONFIG = {\n \"SHOW_TOOLBAR_CALLBACK\": \"config.toolbar_callback\"\n }\n\nif not COMIC_PUBLIC_FOLDER_NAME:\n raise ImproperlyConfigured(\n \"Don't know from which folder serving publiv files\"\n \"is allowed. Please add a setting like \"\n '\\'COMIC_PUBLIC_FOLDER_NAME = \"public_html\"'\n \" to your .conf file.\"\n )\n\nif not COMIC_REGISTERED_ONLY_FOLDER_NAME:\n raise ImproperlyConfigured(\n \"Don't know from which folder serving protected files\"\n \"is allowed. Please add a setting like \"\n '\\'COMIC_REGISTERED_ONLY_FOLDER_NAME = \"datasets\"'\n \" to your .conf file.\"\n )\n\n# Modality name constants\nMODALITY_OCT = \"OCT\" # Optical coherence tomography\nMODALITY_CF = \"Fundus Photography\" # Color fundus photography\nMODALITY_FA = \"Flurescein Angiography\" # Fluorescein angiography\nMODALITY_IR = \"Infrared Reflectance Imaging\" # Infrared Reflectance imaging\n\n# Maximum file size in bytes to be opened by SimpleITK.ReadImage in cases.models.Image.get_sitk_image()\nMAX_SITK_FILE_SIZE = 268435456 # == 256 mb\n\n# Retina specific settings\nRETINA_IMAGE_CACHE_TIME = 60 * 60 * 24\nRETINA_GRADERS_GROUP_NAME = \"retina_graders\"\nRETINA_ADMINS_GROUP_NAME = \"retina_admins\"\nRETINA_IMPORT_USER_NAME = \"retina_import_user\"\nRETINA_EXCEPTION_ARCHIVE = \"Australia\"\n",
"path": "app/config/settings.py"
}
] | [
{
"content": "# Django settings for comic project.\nimport glob\nimport os\nimport re\nimport uuid\nfrom datetime import timedelta\nfrom distutils.util import strtobool as strtobool_i\n\nfrom django.contrib.messages import constants as messages\nfrom django.core.exceptions import ImproperlyConfigured\n\nfrom config.denylist import USERNAME_DENYLIST\n\n\ndef strtobool(val) -> bool:\n \"\"\" Returns disutils.util.strtobool as a boolean \"\"\"\n return bool(strtobool_i(val))\n\n\n# Default COMIC settings, to be included by settings.py\nDEBUG = strtobool(os.environ.get(\"DEBUG\", \"True\"))\n\nADMINS = (\n # ('Your Name', '[email protected]'),\n)\n\n# Who gets the 404 notifications?\nmanager_email = os.environ.get(\"MANAGER_EMAIL\", None)\nif manager_email:\n MANAGERS = [(\"Manager\", manager_email)]\n\nIGNORABLE_404_URLS = [\n re.compile(r\".*\\.(php|cgi|asp).*\"),\n re.compile(r\"^/phpmyadmin.*\"),\n re.compile(r\"^/gen204.*\"),\n re.compile(r\"^/wp-content.*\"),\n re.compile(r\".*/trackback.*\"),\n]\n\n# Used as starting points for various other paths. realpath(__file__) starts in\n# the \"Comic\" app dir. We need to go one dir higher so path.join(\"..\")\nSITE_ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\nAPPS_DIR = os.path.join(SITE_ROOT, \"grandchallenge\")\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql_psycopg2\",\n \"NAME\": os.environ.get(\"POSTGRES_DB\", \"comic\"),\n \"USER\": os.environ.get(\"POSTGRES_USER\", \"comic\"),\n \"PASSWORD\": os.environ.get(\"POSTGRES_PASSWORD\", \"secretpassword\"),\n \"HOST\": os.environ.get(\"POSTGRES_HOST\", \"postgres\"),\n \"PORT\": \"\",\n }\n}\n\nEMAIL_BACKEND = \"djcelery_email.backends.CeleryEmailBackend\"\nEMAIL_HOST = os.environ.get(\"EMAIL_HOST\", \"\")\nEMAIL_HOST_USER = os.environ.get(\"EMAIL_HOST_USER\", \"\")\nEMAIL_HOST_PASSWORD = os.environ.get(\"EMAIL_HOST_PASSWORD\", \"\")\nEMAIL_PORT = int(os.environ.get(\"EMAIL_PORT\", \"25\"))\nEMAIL_USE_TLS = strtobool(os.environ.get(\"EMAIL_USE_TLS\", \"False\"))\nDEFAULT_FROM_EMAIL = os.environ.get(\n \"DEFAULT_FROM_EMAIL\", \"webmaster@localhost\"\n)\nSERVER_EMAIL = os.environ.get(\"SERVER_EMAIL\", \"root@localhost\")\n\nANONYMOUS_USER_NAME = \"AnonymousUser\"\n\nAUTH_PROFILE_MODULE = \"profiles.UserProfile\"\nUSERENA_USE_HTTPS = False\nUSERENA_DEFAULT_PRIVACY = \"open\"\nLOGIN_URL = \"/accounts/signin/\"\nLOGOUT_URL = \"/accounts/signout/\"\n\nLOGIN_REDIRECT_URL = \"/accounts/login-redirect/\"\nSOCIAL_AUTH_LOGIN_REDIRECT_URL = LOGIN_REDIRECT_URL\n\n# Do not give message popups saying \"you have been logged out\". Users are expected\n# to know they have been logged out when they click the logout button\nUSERENA_USE_MESSAGES = (False,)\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# On Unix systems, a value of None will cause Django to use the same\n# timezone as the operating system.\n# If running in a Windows environment this must be set to the same as your\n# system time zone.\nTIME_ZONE = \"UTC\"\n\n# Language code for this installation. 
All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = \"en-us\"\n\nSITE_ID = int(os.environ.get(\"SITE_ID\", \"1\"))\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale.\nUSE_L10N = True\n\n# If you set this to False, Django will not use timezone-aware datetimes.\nUSE_TZ = True\n\n# the name of the main project: this project is shown when url is loaded without\n# arguments, and pages in this project appear as menu items throughout the site\nMAIN_PROJECT_NAME = os.environ.get(\"MAIN_PROJECT_NAME\", \"comic\")\n\n##############################################################################\n#\n# Storage\n#\n##############################################################################\nDEFAULT_FILE_STORAGE = \"django.core.files.storage.FileSystemStorage\"\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: \"/home/media/media.lawrence.com/media/\"\nMEDIA_ROOT = os.environ.get(\"MEDIA_ROOT\", \"/dbox/Dropbox/media/\")\n\n# URL that handles the media served from MEDIA_ROOT. Make sure to use a\n# trailing slash.\n# Examples: \"http://media.lawrence.com/media/\", \"http://example.com/media/\"\nMEDIA_URL = \"/media/\"\n\n# In each challenge there can be a single directory out of which files can be\n# downloaded without logging in.\nCOMIC_PUBLIC_FOLDER_NAME = \"public_html\"\nCOMIC_ADDITIONAL_PUBLIC_FOLDER_NAMES = [\"results/public\"]\n\n# In each challenge there can be a single directory from which files can only\n# be downloaded by registered participants of that project\nCOMIC_REGISTERED_ONLY_FOLDER_NAME = \"datasets\"\n\n# Subdirectories on root for various files\nJQFILEUPLOAD_UPLOAD_SUBIDRECTORY = \"jqfileupload\"\nIMAGE_FILES_SUBDIRECTORY = \"images\"\nEVALUATION_FILES_SUBDIRECTORY = \"evaluation\"\n\n# This is for storing files that should not be served to the public\nAWS_DEFAULT_ACL = None\nPRIVATE_S3_STORAGE_KWARGS = {\n \"access_key\": os.environ.get(\"PRIVATE_S3_STORAGE_ACCESS_KEY\", \"\"),\n \"secret_key\": os.environ.get(\"PRIVATE_S3_STORAGE_SECRET_KEY\", \"\"),\n \"bucket_name\": os.environ.get(\n \"PRIVATE_S3_STORAGE_BUCKET_NAME\", \"grand-challenge-private\"\n ),\n \"auto_create_bucket\": True,\n \"endpoint_url\": os.environ.get(\n \"PRIVATE_S3_STORAGE_ENDPOINT_URL\", \"http://minio-private:9000\"\n ),\n # Do not overwrite files, we get problems with jqfileupload otherwise\n \"file_overwrite\": False,\n}\nPROTECTED_S3_STORAGE_KWARGS = {\n \"access_key\": os.environ.get(\"PROTECTED_S3_STORAGE_ACCESS_KEY\", \"\"),\n \"secret_key\": os.environ.get(\"PROTECTED_S3_STORAGE_SECRET_KEY\", \"\"),\n \"bucket_name\": os.environ.get(\n \"PROTECTED_S3_STORAGE_BUCKET_NAME\", \"grand-challenge-protected\"\n ),\n \"auto_create_bucket\": True,\n \"endpoint_url\": os.environ.get(\n \"PROTECTED_S3_STORAGE_ENDPOINT_URL\", \"http://minio-protected:9000\"\n ),\n # This is the domain where people will be able to go to download data\n # from this bucket. 
Usually we would use reverse to find this out,\n # but this needs to be defined before the database is populated\n \"custom_domain\": os.environ.get(\n \"PROTECTED_S3_CUSTOM_DOMAIN\", \"gc.localhost/media\"\n ),\n}\n\n##############################################################################\n#\n# Caching\n#\n##############################################################################\n\nCACHES = {\n \"default\": {\n \"BACKEND\": \"django.core.cache.backends.memcached.MemcachedCache\",\n \"LOCATION\": \"memcached:11211\",\n }\n}\n\n\nROOT_URLCONF = \"config.urls\"\nSUBDOMAIN_URL_CONF = \"grandchallenge.subdomains.urls\"\nDEFAULT_SCHEME = os.environ.get(\"DEFAULT_SCHEME\", \"https\")\n\nSESSION_COOKIE_DOMAIN = os.environ.get(\n \"SESSION_COOKIE_DOMAIN\", \".gc.localhost\"\n)\n# We're always running behind a proxy so set these to true\nSESSION_COOKIE_SECURE = True\nCSRF_COOKIE_SECURE = True\nSECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n\n# Set the allowed hosts to the cookie domain\nALLOWED_HOSTS = [SESSION_COOKIE_DOMAIN, \"web\"]\n\n# Security options\nSECURE_HSTS_SECONDS = int(os.environ.get(\"SECURE_HSTS_SECONDS\", \"0\"))\nSECURE_HSTS_INCLUDE_SUBDOMAINS = strtobool(\n os.environ.get(\"SECURE_HSTS_INCLUDE_SUBDOMAINS\", \"False\")\n)\nSECURE_CONTENT_TYPE_NOSNIFF = strtobool(\n os.environ.get(\"SECURE_CONTENT_TYPE_NOSNIFF\", \"False\")\n)\nSECURE_BROWSER_XSS_FILTER = strtobool(\n os.environ.get(\"SECURE_BROWSER_XSS_FILTER\", \"False\")\n)\nX_FRAME_OPTIONS = os.environ.get(\"X_FRAME_OPTIONS\", \"SAMEORIGIN\")\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/home/media/media.lawrence.com/static/\"\nSTATIC_ROOT = \"/static/\"\n\nSTATIC_HOST = os.environ.get(\"DJANGO_STATIC_HOST\", \"\")\nSTATIC_URL = f\"{STATIC_HOST}/static/\"\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n \"django.contrib.staticfiles.finders.FileSystemFinder\",\n \"django.contrib.staticfiles.finders.AppDirectoriesFinder\",\n)\n\n# Vendored static files will be put here\nSTATICFILES_DIRS = [\"/opt/static/\"]\n\nSTATICFILES_STORAGE = \"whitenoise.storage.CompressedManifestStaticFilesStorage\"\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = os.environ.get(\n \"SECRET_KEY\", \"d=%^l=xa02an9jn-$!*hy1)5yox$a-$2(ejt-2smimh=j4%8*b\"\n)\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [str(APPS_DIR)],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.contrib.auth.context_processors.auth\",\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.i18n\",\n \"django.template.context_processors.media\",\n \"django.template.context_processors.static\",\n \"django.template.context_processors.tz\",\n \"django.template.context_processors.request\",\n \"django.contrib.messages.context_processors.messages\",\n \"grandchallenge.core.contextprocessors.contextprocessors.comic_site\",\n \"grandchallenge.core.contextprocessors.contextprocessors.google_analytics_id\",\n ]\n },\n }\n]\n\nMIDDLEWARE = (\n \"django.middleware.security.SecurityMiddleware\", # Keep security at top\n \"whitenoise.middleware.WhiteNoiseMiddleware\", # Keep whitenoise after security and before all else\n \"django.middleware.common.BrokenLinkEmailsMiddleware\",\n # Keep 
BrokenLinkEmailsMiddleware near the top\n \"raven.contrib.django.raven_compat.middleware.SentryResponseErrorIdMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"grandchallenge.subdomains.middleware.subdomain_middleware\",\n \"grandchallenge.subdomains.middleware.challenge_subdomain_middleware\",\n \"grandchallenge.subdomains.middleware.subdomain_urlconf_middleware\",\n)\n\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = \"config.wsgi.application\"\n\nDJANGO_APPS = [\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.sites\",\n \"django.contrib.messages\",\n \"whitenoise.runserver_nostatic\", # Keep whitenoise above staticfiles\n \"django.contrib.staticfiles\",\n \"django.contrib.humanize\",\n \"django.contrib.admin\",\n \"django.contrib.postgres\",\n]\n\nTHIRD_PARTY_APPS = [\n \"raven.contrib.django.raven_compat\", # error logging\n \"django_celery_results\", # database results backend\n \"django_celery_beat\", # periodic tasks\n \"djcelery_email\", # asynchronous emails\n \"userena\", # user profiles\n \"guardian\", # userena dependency, per object permissions\n \"easy_thumbnails\", # userena dependency\n \"social_django\", # social authentication with oauth2\n \"rest_framework\", # provides REST API\n \"rest_framework.authtoken\", # token auth for REST API\n \"crispy_forms\", # bootstrap forms\n \"favicon\", # favicon management\n \"django_select2\", # for multiple choice widgets\n \"django_summernote\", # for WYSIWYG page editing\n \"sorl.thumbnail\", # for dynamic thumbnails\n \"dal\", # for autocompletion of selection fields\n \"dal_select2\", # for autocompletion of selection fields\n]\n\nLOCAL_APPS = [\n \"grandchallenge.admins\",\n \"grandchallenge.api\",\n \"grandchallenge.challenges\",\n \"grandchallenge.core\",\n \"grandchallenge.evaluation\",\n \"grandchallenge.jqfileupload\",\n \"grandchallenge.pages\",\n \"grandchallenge.participants\",\n \"grandchallenge.profiles\",\n \"grandchallenge.teams\",\n \"grandchallenge.uploads\",\n \"grandchallenge.cases\",\n \"grandchallenge.algorithms\",\n \"grandchallenge.container_exec\",\n \"grandchallenge.datasets\",\n \"grandchallenge.submission_conversion\",\n \"grandchallenge.statistics\",\n \"grandchallenge.archives\",\n \"grandchallenge.patients\",\n \"grandchallenge.studies\",\n \"grandchallenge.registrations\",\n \"grandchallenge.annotations\",\n \"grandchallenge.retina_core\",\n \"grandchallenge.retina_importers\",\n \"grandchallenge.retina_api\",\n]\n\nINSTALLED_APPS = DJANGO_APPS + LOCAL_APPS + THIRD_PARTY_APPS\n\nADMIN_URL = f'{os.environ.get(\"DJANGO_ADMIN_URL\", \"django-admin\")}/'\n\nAUTHENTICATION_BACKENDS = (\n \"social_core.backends.google.GoogleOAuth2\",\n \"userena.backends.UserenaAuthenticationBackend\",\n \"guardian.backends.ObjectPermissionBackend\",\n \"django.contrib.auth.backends.ModelBackend\",\n)\n\nGOOGLE_MAPS_API_KEY = os.environ.get(\"GOOGLE_MAPS_API_KEY\", \"\")\nGOOGLE_ANALYTICS_ID = os.environ.get(\"GOOGLE_ANALYTICS_ID\", \"GA_TRACKING_ID\")\n\nSOCIAL_AUTH_GOOGLE_OAUTH2_KEY = os.environ.get(\n \"SOCIAL_AUTH_GOOGLE_OAUTH2_KEY\", \"\"\n)\nSOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = os.environ.get(\n 
\"SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET\", \"\"\n)\n\n# TODO: JM - Add the profile filling as a partial\nSOCIAL_AUTH_PIPELINE = (\n \"social_core.pipeline.social_auth.social_details\",\n \"social_core.pipeline.social_auth.social_uid\",\n \"social_core.pipeline.social_auth.auth_allowed\",\n \"social_core.pipeline.social_auth.social_user\",\n \"social_core.pipeline.social_auth.associate_by_email\",\n \"social_core.pipeline.user.get_username\",\n \"social_core.pipeline.user.create_user\",\n \"grandchallenge.profiles.social_auth.pipeline.profile.create_profile\",\n \"social_core.pipeline.social_auth.associate_user\",\n \"social_core.pipeline.social_auth.load_extra_data\",\n \"social_core.pipeline.user.user_details\",\n)\n\n# Do not sanitize redirects for social auth so we can redirect back to\n# other subdomains\nSOCIAL_AUTH_SANITIZE_REDIRECTS = False\nSOCIAL_AUTH_REDIRECT_IS_HTTPS = True\n\n# Django 1.6 introduced a new test runner, use it\nTEST_RUNNER = \"django.test.runner.DiscoverRunner\"\n\n# WYSIWYG editing with Summernote\nSUMMERNOTE_THEME = \"bs4\"\nSUMMERNOTE_CONFIG = {\n \"attachment_model\": \"uploads.SummernoteAttachment\",\n \"attachment_require_authentication\": True,\n \"summernote\": {\n \"width\": \"100%\",\n \"toolbar\": [\n [\"style\", [\"style\"]],\n [\n \"font\",\n [\"bold\", \"italic\", \"underline\", \"strikethrough\", \"clear\"],\n ],\n [\"para\", [\"ul\", \"ol\", \"paragraph\"]],\n [\"insert\", [\"link\", \"picture\", \"hr\"]],\n [\"view\", [\"fullscreen\", \"codeview\"]],\n [\"help\", [\"help\"]],\n ],\n },\n}\n\n# Settings for allowed HTML\nBLEACH_ALLOWED_TAGS = [\n \"a\",\n \"abbr\",\n \"acronym\",\n \"b\",\n \"blockquote\",\n \"br\",\n \"code\",\n \"col\",\n \"div\",\n \"em\",\n \"h1\",\n \"h2\",\n \"h3\",\n \"h4\",\n \"h5\",\n \"h6\",\n \"hr\",\n \"i\",\n \"iframe\", # Allowed for now for continuous registration challenge\n \"img\",\n \"li\",\n \"ol\",\n \"p\",\n \"pre\",\n \"span\",\n \"strike\",\n \"strong\",\n \"table\",\n \"tbody\",\n \"thead\",\n \"td\",\n \"th\",\n \"tr\",\n \"u\",\n \"ul\",\n]\nBLEACH_ALLOWED_ATTRIBUTES = {\n \"*\": [\"class\", \"data-toggle\", \"id\", \"style\", \"role\"],\n \"a\": [\"href\", \"title\"],\n \"abbr\": [\"title\"],\n \"acronym\": [\"title\"],\n \"div\": [\"data-geochart\"], # Required for geocharts\n \"iframe\": [\n \"src\",\n \"sandbox\",\n \"data-groupname\",\n \"scrolling\",\n \"height\",\n ], # For continuous registration challenge and google group\n \"img\": [\"height\", \"src\", \"width\"],\n}\nBLEACH_ALLOWED_STYLES = [\"height\", \"margin-left\", \"text-align\", \"width\"]\nBLEACH_ALLOWED_PROTOCOLS = [\"http\", \"https\", \"mailto\"]\nBLEACH_STRIP = strtobool(os.environ.get(\"BLEACH_STRIP\", \"True\"))\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\"\n },\n {\"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\"},\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\"\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\"\n },\n]\n\n# A sample logging configuration. More info in configuration can be found at\n# https://docs.djangoproject.com/en/dev/topics/logging/ .\n# This configuration writes WARNING and worse errors to an error log file, and\n# sends an email to all admins. 
It also writes INFO logmessages and worse to a\n# regular log file.\nLOG_FILEPATH = \"/tmp/django.log\"\nLOG_FILEPATH_ERROR = \"/tmp/django_error.log\"\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": True,\n \"root\": {\"level\": \"WARNING\", \"handlers\": [\"sentry\"]},\n \"formatters\": {\n \"verbose\": {\n \"format\": \"%(levelname)s %(asctime)s %(module)s \"\n \"%(process)d %(thread)d %(message)s\"\n }\n },\n \"handlers\": {\n \"sentry\": {\n \"level\": \"ERROR\",\n # To capture more than ERROR, change to WARNING, INFO, etc.\n \"class\": \"raven.contrib.django.raven_compat.handlers.SentryHandler\",\n },\n \"console\": {\n \"level\": \"DEBUG\",\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"verbose\",\n },\n },\n \"loggers\": {\n \"grandchallenge\": {\n \"level\": \"WARNING\",\n \"handlers\": [\"console\"],\n \"propagate\": True,\n },\n \"django.db.backends\": {\n \"level\": \"ERROR\",\n \"handlers\": [\"console\"],\n \"propagate\": False,\n },\n \"raven\": {\n \"level\": \"DEBUG\",\n \"handlers\": [\"console\"],\n \"propagate\": False,\n },\n \"sentry.errors\": {\n \"level\": \"DEBUG\",\n \"handlers\": [\"console\"],\n \"propagate\": False,\n },\n },\n}\n\nRAVEN_CONFIG = {\"dsn\": os.environ.get(\"DJANGO_SENTRY_DSN\", \"\")}\n\nREST_FRAMEWORK = {\n \"DEFAULT_PERMISSION_CLASSES\": (\"rest_framework.permissions.IsAdminUser\",),\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"rest_framework.authentication.TokenAuthentication\",\n ),\n}\n\nCELERY_BROKER_URL = os.environ.get(\"CELERY_BROKER_URL\", \"redis://redis:6379/0\")\nCELERY_RESULT_BACKEND = os.environ.get(\"CELERY_RESULT_BACKEND\", \"django-db\")\nCELERY_RESULT_PERSISTENT = True\nCELERY_TASK_SOFT_TIME_LIMIT = int(\n os.environ.get(\"CELERY_TASK_SOFT_TIME_LIMIT\", \"7200\")\n)\nCELERY_TASK_TIME_LIMIT = int(os.environ.get(\"CELERY_TASK_TIME_LIMIT\", \"7260\"))\n\nCONTAINER_EXEC_DOCKER_BASE_URL = os.environ.get(\n \"CONTAINER_EXEC_DOCKER_BASE_URL\", \"unix://var/run/docker.sock\"\n)\nCONTAINER_EXEC_DOCKER_TLSVERIFY = strtobool(\n os.environ.get(\"CONTAINER_EXEC_DOCKER_TLSVERIFY\", \"False\")\n)\nCONTAINER_EXEC_DOCKER_TLSCACERT = os.environ.get(\n \"CONTAINER_EXEC_DOCKER_TLSCACERT\", \"\"\n)\nCONTAINER_EXEC_DOCKER_TLSCERT = os.environ.get(\n \"CONTAINER_EXEC_DOCKER_TLSCERT\", \"\"\n)\nCONTAINER_EXEC_DOCKER_TLSKEY = os.environ.get(\n \"CONTAINER_EXEC_DOCKER_TLSKEY\", \"\"\n)\nCONTAINER_EXEC_MEMORY_LIMIT = os.environ.get(\n \"CONTAINER_EXEC_MEMORY_LIMIT\", \"4g\"\n)\nCONTAINER_EXEC_IO_IMAGE = \"alpine:3.9\"\nCONTAINER_EXEC_IO_SHA256 = (\n \"sha256:5cb3aa00f89934411ffba5c063a9bc98ace875d8f92e77d0029543d9f2ef4ad0\"\n)\nCONTAINER_EXEC_CPU_QUOTA = int(\n os.environ.get(\"CONTAINER_EXEC_CPU_QUOTA\", \"100000\")\n)\nCONTAINER_EXEC_CPU_PERIOD = int(\n os.environ.get(\"CONTAINER_EXEC_CPU_PERIOD\", \"100000\")\n)\nCONTAINER_EXEC_DOCKER_RUNTIME = os.environ.get(\n \"CONTAINER_EXEC_DOCKER_RUNTIME\", None\n)\n\nCELERY_BEAT_SCHEDULE = {\n \"cleanup_stale_uploads\": {\n \"task\": \"grandchallenge.jqfileupload.tasks.cleanup_stale_uploads\",\n \"schedule\": timedelta(hours=1),\n },\n \"clear_sessions\": {\n \"task\": \"grandchallenge.core.tasks.clear_sessions\",\n \"schedule\": timedelta(days=1),\n },\n \"update_filter_classes\": {\n \"task\": \"grandchallenge.challenges.tasks.update_filter_classes\",\n \"schedule\": timedelta(minutes=5),\n },\n \"validate_external_challenges\": {\n \"task\": \"grandchallenge.challenges.tasks.check_external_challenge_urls\",\n \"schedule\": timedelta(days=1),\n },\n}\n\nCELERY_TASK_ROUTES = {\n 
\"grandchallenge.container_exec.tasks.execute_job\": \"evaluation\",\n \"grandchallenge.cases.tasks.build_images\": \"images\",\n}\n\n# Set which template pack to use for forms\nCRISPY_TEMPLATE_PACK = \"bootstrap4\"\n\n# When using bootstrap error messages need to be renamed to danger\nMESSAGE_TAGS = {messages.ERROR: \"danger\"}\n\n# CIRRUS Is an external application that can view images\nCIRRUS_APPLICATION = \"https://apps.diagnijmegen.nl/Applications/CIRRUSWeb_master_98d13770/#!/?workstation=BasicWorkstation\"\nCIRRUS_BASE_IMAGE_QUERY_PARAM = \"grand_challenge_image\"\nCIRRUS_ANNOATION_QUERY_PARAM = \"grand_challenge_overlay\"\n\n# Disallow some challenge names due to subdomain or media folder clashes\nDISALLOWED_CHALLENGE_NAMES = [\n \"m\",\n IMAGE_FILES_SUBDIRECTORY,\n \"logos\",\n \"banners\",\n \"mugshots\",\n \"docker\",\n EVALUATION_FILES_SUBDIRECTORY,\n \"evaluation-supplementary\",\n \"favicon\",\n \"i\",\n \"cache\", # for sorl-thumbnails\n JQFILEUPLOAD_UPLOAD_SUBIDRECTORY,\n *USERNAME_DENYLIST,\n]\n\nif MEDIA_ROOT[-1] != \"/\":\n msg = (\n \"MEDIA_ROOT setting should end in a slash. Found '\"\n + MEDIA_ROOT\n + \"'. Please add a slash\"\n )\n raise ImproperlyConfigured(msg)\n\nENABLE_DEBUG_TOOLBAR = False\n\nif DEBUG:\n EMAIL_BACKEND = \"django.core.mail.backends.dummy.EmailBackend\"\n\n if ENABLE_DEBUG_TOOLBAR:\n INSTALLED_APPS += (\"debug_toolbar\",)\n\n MIDDLEWARE += (\"debug_toolbar.middleware.DebugToolbarMiddleware\",)\n\n DEBUG_TOOLBAR_CONFIG = {\n \"SHOW_TOOLBAR_CALLBACK\": \"config.toolbar_callback\"\n }\n\nif not COMIC_PUBLIC_FOLDER_NAME:\n raise ImproperlyConfigured(\n \"Don't know from which folder serving publiv files\"\n \"is allowed. Please add a setting like \"\n '\\'COMIC_PUBLIC_FOLDER_NAME = \"public_html\"'\n \" to your .conf file.\"\n )\n\nif not COMIC_REGISTERED_ONLY_FOLDER_NAME:\n raise ImproperlyConfigured(\n \"Don't know from which folder serving protected files\"\n \"is allowed. Please add a setting like \"\n '\\'COMIC_REGISTERED_ONLY_FOLDER_NAME = \"datasets\"'\n \" to your .conf file.\"\n )\n\n# Modality name constants\nMODALITY_OCT = \"OCT\" # Optical coherence tomography\nMODALITY_CF = \"Fundus Photography\" # Color fundus photography\nMODALITY_FA = \"Flurescein Angiography\" # Fluorescein angiography\nMODALITY_IR = \"Infrared Reflectance Imaging\" # Infrared Reflectance imaging\n\n# Maximum file size in bytes to be opened by SimpleITK.ReadImage in cases.models.Image.get_sitk_image()\nMAX_SITK_FILE_SIZE = 268435456 # == 256 mb\n\n# Retina specific settings\nRETINA_IMAGE_CACHE_TIME = 60 * 60 * 24\nRETINA_GRADERS_GROUP_NAME = \"retina_graders\"\nRETINA_ADMINS_GROUP_NAME = \"retina_admins\"\nRETINA_IMPORT_USER_NAME = \"retina_import_user\"\nRETINA_EXCEPTION_ARCHIVE = \"Australia\"\n",
"path": "app/config/settings.py"
}
] | diff --git a/app/config/settings.py b/app/config/settings.py
index 85a8a2cd6b..f2b24552f2 100644
--- a/app/config/settings.py
+++ b/app/config/settings.py
@@ -601,7 +601,8 @@ def strtobool(val) -> bool:
}
CELERY_TASK_ROUTES = {
- "grandchallenge.container_exec.tasks.execute_job": "evaluation"
+ "grandchallenge.container_exec.tasks.execute_job": "evaluation",
+ "grandchallenge.cases.tasks.build_images": "images",
}
# Set which template pack to use for forms
diff --git a/docker-compose.yml b/docker-compose.yml
index 5a2e841960..f722e77cbd 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -160,7 +160,7 @@ services:
<<: *protected_storage_credentials
<<: *protected_storage_connections
restart: always
- command: "celery -A config worker -l info -Q evaluation -c 1"
+ command: "celery -A config worker -l info -Q evaluation,images -c 1"
scale: 1
depends_on:
web:
|
weecology__retriever-950 | Check MySQL and Postgres credential files
In addition to allowing users to directly provide their MySQL and PostgreSQL credentials, it should also be possible for them to store these credentials in the usual places.
We should check information given by the user to the retriever first, and then fall back on the configuration files for usernames and passwords if they are not provided.
For PostgreSQL this is `~/.pgpass` with the format:
```
hostname:port:database:username:password
```
See: https://wiki.postgresql.org/wiki/Pgpass. `*`s can be used in place of any of the `:` separated values.
For MySQL this is `~/.my.cnf` with the format:
```
[client]
user = root
password = yourpassword
```
See: https://dev.mysql.com/doc/refman/5.5/en/option-files.html. `.my.cnf` can contain a lot of additional configuration information so we'll need to look explicitly for `user =` and `password =`.
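A rough sketch of the lookup order described above is shown below. It is only an illustration — the helper names, defaults, and return shapes are assumptions, not the retriever's actual code (the change recorded later in this entry simply passes `read_default_file='~/.my.cnf'` to the `pymysql` connection call).

```
# Illustrative sketch only: explicit options win, the config files are a fallback.
import os
import configparser


def mysql_credentials_from_cnf(path="~/.my.cnf"):
    """Read user/password from the [client] section of ~/.my.cnf, if present."""
    # Note: real .my.cnf files can contain directives (e.g. !includedir) that
    # configparser does not understand; a robust version would guard for that.
    parser = configparser.ConfigParser(allow_no_value=True)
    if not parser.read(os.path.expanduser(path)) or "client" not in parser:
        return {}
    client = parser["client"]
    return {key: client.get(key) for key in ("user", "password") if client.get(key)}


def pgpass_password(host, port, database, user, path="~/.pgpass"):
    """Return the first ~/.pgpass password whose fields match; '*' matches anything."""
    full_path = os.path.expanduser(path)
    if not os.path.exists(full_path):
        return None
    wanted = [host, str(port), database, user]
    with open(full_path) as pgpass:
        for line in pgpass:
            line = line.strip()
            if not line or line.startswith("#"):
                continue
            fields = line.split(":")
            if len(fields) == 5 and all(f in ("*", w) for f, w in zip(fields[:4], wanted)):
                return fields[4]
    return None


# Values supplied directly to the retriever take precedence over the files.
opts = {"user": "", "password": ""}  # what the user provided (empty here)
cnf = mysql_credentials_from_cnf()
user = opts["user"] or cnf.get("user", "root")
password = opts["password"] or cnf.get("password", "")
```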
| [
{
"content": "from __future__ import print_function\nfrom builtins import str\nimport os\nfrom retriever.lib.models import Engine, no_cleanup\nfrom retriever import ENCODING\n\n\nclass engine(Engine):\n \"\"\"Engine instance for MySQL.\"\"\"\n name = \"MySQL\"\n abbreviation = \"mysql\"\n datatypes = {\n \"auto\": \"INT(5) NOT NULL AUTO_INCREMENT\",\n \"int\": \"INT\",\n \"bigint\": \"BIGINT\",\n \"double\": \"DOUBLE\",\n \"decimal\": \"DECIMAL\",\n \"char\": (\"TEXT\", \"VARCHAR\"),\n \"bool\": \"BOOL\",\n }\n max_int = 4294967295\n placeholder = \"%s\"\n required_opts = [(\"user\",\n \"Enter your MySQL username\",\n \"root\"),\n (\"password\",\n \"Enter your password\",\n \"\"),\n (\"host\",\n \"Enter your MySQL host\",\n \"localhost\"),\n (\"port\",\n \"Enter your MySQL port\",\n 3306),\n (\"database_name\",\n \"Format of database name\",\n \"{db}\"),\n (\"table_name\",\n \"Format of table name\",\n \"{db}.{table}\"),\n ]\n\n def create_db_statement(self):\n \"\"\"Returns a SQL statement to create a database.\"\"\"\n createstatement = \"CREATE DATABASE IF NOT EXISTS \" + self.database_name()\n return createstatement\n\n def insert_data_from_file(self, filename):\n \"\"\"Calls MySQL \"LOAD DATA LOCAL INFILE\" statement to perform a bulk\n insert.\"\"\"\n\n mysql_set_autocommit_off = \"\"\"SET autocommit=0; SET UNIQUE_CHECKS=0; SET FOREIGN_KEY_CHECKS=0; SET sql_log_bin=0;\"\"\"\n mysql_set_autocommit_on = \"\"\"SET GLOBAL innodb_flush_log_at_trx_commit=1; COMMIT; SET autocommit=1; SET unique_checks=1; SET foreign_key_checks=1;\"\"\"\n \n self.get_cursor()\n ct = len([True for c in self.table.columns if c[1][0][:3] == \"ct-\"]) != 0\n if (self.table.cleanup.function == no_cleanup and\n not self.table.fixed_width and\n not ct and\n (not hasattr(self.table, \"do_not_bulk_insert\") or not self.table.do_not_bulk_insert)):\n\n print (\"Inserting data from \" + os.path.basename(filename) + \"...\")\n\n columns = self.table.get_insert_columns()\n statement = \"\"\"\nLOAD DATA LOCAL INFILE '\"\"\" + filename.replace(\"\\\\\", \"\\\\\\\\\") + \"\"\"'\nINTO TABLE \"\"\" + self.table_name() + \"\"\"\nFIELDS TERMINATED BY '\"\"\" + self.table.delimiter + \"\"\"'\nOPTIONALLY ENCLOSED BY '\"'\nLINES TERMINATED BY '\\\\n'\nIGNORE \"\"\" + str(self.table.header_rows) + \"\"\" LINES\n(\"\"\" + columns + \")\"\n try:\n self.cursor.execute(mysql_set_autocommit_off)\n self.cursor.execute(statement)\n\n self.cursor.execute(mysql_set_autocommit_on)\n except Exception as e:\n self.disconnect() # If the execute fails the database connection can get hung up\n self.cursor.execute(mysql_set_autocommit_on)\n return Engine.insert_data_from_file(self, filename)\n else:\n return Engine.insert_data_from_file(self, filename)\n\n def table_exists(self, dbname, tablename):\n \"\"\"Checks to see if the given table exists\"\"\"\n if not hasattr(self, 'existing_table_names'):\n self.cursor.execute(\n \"SELECT table_schema, table_name \"\n \"FROM information_schema.tables WHERE table_schema NOT IN \"\n \"('mysql', 'information_schema', 'performance_schema');\")\n self.existing_table_names = set()\n for schema, table in self.cursor:\n self.existing_table_names.add((schema.lower(), table.lower()))\n return (dbname.lower(), tablename.lower()) in self.existing_table_names\n\n def set_engine_encoding(self):\n \"\"\"Set MySQL database encoding to match data encoding\n\n Please update the encoding lookup table if the required encoding is not present.\n \"\"\"\n encoding = ENCODING.lower()\n if self.script.encoding:\n encoding = 
self.script.encoding.lower()\n encoding_lookup = {'iso-8859-1': 'latin1', 'latin-1': 'latin1', 'utf-8': 'utf8'}\n db_encoding = encoding_lookup.get(encoding)\n self.execute(\"SET NAMES '{0}';\".format(db_encoding))\n\n def get_connection(self):\n \"\"\"Gets the db connection.\"\"\"\n args = {'host': self.opts['host'],\n 'port': int(self.opts['port']),\n 'user': self.opts['user'],\n 'passwd': self.opts['password']}\n import pymysql as dbapi\n import pymysql.constants.CLIENT as client\n args['client_flag'] = client.LOCAL_FILES\n self.get_input()\n return dbapi.connect(**args)\n",
"path": "retriever/engines/mysql.py"
}
] | [
{
"content": "from __future__ import print_function\nfrom builtins import str\nimport os\nfrom retriever.lib.models import Engine, no_cleanup\nfrom retriever import ENCODING\n\n\nclass engine(Engine):\n \"\"\"Engine instance for MySQL.\"\"\"\n name = \"MySQL\"\n abbreviation = \"mysql\"\n datatypes = {\n \"auto\": \"INT(5) NOT NULL AUTO_INCREMENT\",\n \"int\": \"INT\",\n \"bigint\": \"BIGINT\",\n \"double\": \"DOUBLE\",\n \"decimal\": \"DECIMAL\",\n \"char\": (\"TEXT\", \"VARCHAR\"),\n \"bool\": \"BOOL\",\n }\n max_int = 4294967295\n placeholder = \"%s\"\n required_opts = [(\"user\",\n \"Enter your MySQL username\",\n \"root\"),\n (\"password\",\n \"Enter your password\",\n \"\"),\n (\"host\",\n \"Enter your MySQL host\",\n \"localhost\"),\n (\"port\",\n \"Enter your MySQL port\",\n 3306),\n (\"database_name\",\n \"Format of database name\",\n \"{db}\"),\n (\"table_name\",\n \"Format of table name\",\n \"{db}.{table}\"),\n ]\n\n def create_db_statement(self):\n \"\"\"Returns a SQL statement to create a database.\"\"\"\n createstatement = \"CREATE DATABASE IF NOT EXISTS \" + self.database_name()\n return createstatement\n\n def insert_data_from_file(self, filename):\n \"\"\"Calls MySQL \"LOAD DATA LOCAL INFILE\" statement to perform a bulk\n insert.\"\"\"\n\n mysql_set_autocommit_off = \"\"\"SET autocommit=0; SET UNIQUE_CHECKS=0; SET FOREIGN_KEY_CHECKS=0; SET sql_log_bin=0;\"\"\"\n mysql_set_autocommit_on = \"\"\"SET GLOBAL innodb_flush_log_at_trx_commit=1; COMMIT; SET autocommit=1; SET unique_checks=1; SET foreign_key_checks=1;\"\"\"\n \n self.get_cursor()\n ct = len([True for c in self.table.columns if c[1][0][:3] == \"ct-\"]) != 0\n if (self.table.cleanup.function == no_cleanup and\n not self.table.fixed_width and\n not ct and\n (not hasattr(self.table, \"do_not_bulk_insert\") or not self.table.do_not_bulk_insert)):\n\n print (\"Inserting data from \" + os.path.basename(filename) + \"...\")\n\n columns = self.table.get_insert_columns()\n statement = \"\"\"\nLOAD DATA LOCAL INFILE '\"\"\" + filename.replace(\"\\\\\", \"\\\\\\\\\") + \"\"\"'\nINTO TABLE \"\"\" + self.table_name() + \"\"\"\nFIELDS TERMINATED BY '\"\"\" + self.table.delimiter + \"\"\"'\nOPTIONALLY ENCLOSED BY '\"'\nLINES TERMINATED BY '\\\\n'\nIGNORE \"\"\" + str(self.table.header_rows) + \"\"\" LINES\n(\"\"\" + columns + \")\"\n try:\n self.cursor.execute(mysql_set_autocommit_off)\n self.cursor.execute(statement)\n\n self.cursor.execute(mysql_set_autocommit_on)\n except Exception as e:\n self.disconnect() # If the execute fails the database connection can get hung up\n self.cursor.execute(mysql_set_autocommit_on)\n return Engine.insert_data_from_file(self, filename)\n else:\n return Engine.insert_data_from_file(self, filename)\n\n def table_exists(self, dbname, tablename):\n \"\"\"Checks to see if the given table exists\"\"\"\n if not hasattr(self, 'existing_table_names'):\n self.cursor.execute(\n \"SELECT table_schema, table_name \"\n \"FROM information_schema.tables WHERE table_schema NOT IN \"\n \"('mysql', 'information_schema', 'performance_schema');\")\n self.existing_table_names = set()\n for schema, table in self.cursor:\n self.existing_table_names.add((schema.lower(), table.lower()))\n return (dbname.lower(), tablename.lower()) in self.existing_table_names\n\n def set_engine_encoding(self):\n \"\"\"Set MySQL database encoding to match data encoding\n\n Please update the encoding lookup table if the required encoding is not present.\n \"\"\"\n encoding = ENCODING.lower()\n if self.script.encoding:\n encoding = 
self.script.encoding.lower()\n encoding_lookup = {'iso-8859-1': 'latin1', 'latin-1': 'latin1', 'utf-8': 'utf8'}\n db_encoding = encoding_lookup.get(encoding)\n self.execute(\"SET NAMES '{0}';\".format(db_encoding))\n\n def get_connection(self):\n \"\"\"Gets the db connection.\"\"\"\n args = {'host': self.opts['host'],\n 'port': int(self.opts['port']),\n 'user': self.opts['user'],\n 'passwd': self.opts['password']}\n import pymysql as dbapi\n import pymysql.constants.CLIENT as client\n args['client_flag'] = client.LOCAL_FILES\n self.get_input()\n return dbapi.connect(read_default_file='~/.my.cnf', **args)\n",
"path": "retriever/engines/mysql.py"
}
] | diff --git a/docs/introduction.rst b/docs/introduction.rst
index 3458a4577..57f8c7fbf 100644
--- a/docs/introduction.rst
+++ b/docs/introduction.rst
@@ -241,6 +241,39 @@ The ``citation`` command show the citation for the retriever and for the scripts
**To create new, edit, delete scripts please read the documentation on scripts**
+
+Storing database connection details
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The retriever reads from the standard configuration files for the database
+management systems. If you want to store connection details they should be
+stored in those files. Make sure to secure these files appropriately.
+
+For postgreSQL, create or modify `~/.pgpass`. This is a file named `.pgpass`
+located in the users home directory. It should take the general form:
+
+``hostname:port:database:username:password``
+
+where each word is replaced with the correct information for your database
+connection or replaced with an ``*`` to apply to all values for that section.
+
+For MySQL, create or modify `~/.my.cnf`. This is a file named `.my.cnf` located
+in the users home directory. The relevant portion of this file for the retriever
+is the `client` section which should take the general form:
+
+::
+
+ [client]
+ host=hostname
+ port=port
+ user=username
+ password=password
+
+where each word to the right of the `=` is replaced with the correct information
+for your database connection. Remove or comment out the lines for any values you
+don't want to set.
+
+
Acknowledgments
~~~~~~~~~~~~~~~
diff --git a/retriever/engines/mysql.py b/retriever/engines/mysql.py
index a6c0db7eb..dbaf3a87b 100644
--- a/retriever/engines/mysql.py
+++ b/retriever/engines/mysql.py
@@ -116,4 +116,4 @@ def get_connection(self):
import pymysql.constants.CLIENT as client
args['client_flag'] = client.LOCAL_FILES
self.get_input()
- return dbapi.connect(**args)
+ return dbapi.connect(read_default_file='~/.my.cnf', **args)
|
deepchecks__deepchecks-1291 | [BUG] Tables have no style rendered on `check_result.save_as_html()`
The tables are not looking good when exporting a single check as HTML:

```
from deepchecks.tabular.datasets.regression import avocado
from deepchecks.tabular.checks import TrainTestFeatureDrift
train, test = avocado.load_data(as_train_test=True)
result = TrainTestFeatureDrift().add_condition_drift_score_not_greater_than().run(train, test)
result.save_as_html()
```
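One plausible way to see the mechanism behind this (not a confirmed diagnosis of where the bug lives): a pandas table only carries its styling when the `Styler` object itself is rendered to HTML, so an export path that serializes the bare table loses the CSS. The snippet below assumes pandas >= 1.3 for `Styler.to_html()`.

```
import pandas as pd

df = pd.DataFrame({"Column": ["numerical", "categorical"], "Drift score": [0.23, 0.01]})
styled = df.style.set_properties(**{"background-color": "#fff0f0"})

plain_html = df.to_html()        # bare <table>, no styling at all
styled_html = styled.to_html()   # includes a <style> block with the table CSS

print("<style" in plain_html, "<style" in styled_html)  # False True
```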
| [
{
"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"Module containing the check results classes.\"\"\"\n# pylint: disable=broad-except\nimport base64\nimport io\nimport traceback\nimport warnings\nfrom typing import Any, Callable, List, Tuple, Union, TYPE_CHECKING\n\nimport jsonpickle\nimport jsonpickle.ext.pandas as jsonpickle_pd\nimport matplotlib\nimport pandas as pd\nimport numpy as np\nimport ipywidgets as widgets\nimport plotly.graph_objects as go\nfrom plotly.basedatatypes import BaseFigure\nimport plotly.io as pio\nimport plotly\nfrom matplotlib import pyplot as plt\nfrom IPython.display import display_html\nfrom pandas.io.formats.style import Styler\n\nfrom deepchecks.core.condition import Condition, ConditionCategory, ConditionResult\nfrom deepchecks.core.display_pandas import dataframe_to_html, get_conditions_table\nfrom deepchecks.core.errors import DeepchecksValueError\nfrom deepchecks.utils.dataframes import un_numpy\nfrom deepchecks.utils.strings import create_new_file_name, get_docs_summary, widget_to_html\nfrom deepchecks.utils.ipython import is_notebook\nfrom deepchecks.utils.wandb_utils import set_wandb_run_state\n\n# registers jsonpickle pandas extension for pandas support in the to_json function\njsonpickle_pd.register_handlers()\n\nif TYPE_CHECKING:\n from deepchecks.core.checks import BaseCheck\n\ntry:\n import wandb\n\n assert hasattr(wandb, '__version__') # verify package import not local dir\nexcept (ImportError, AssertionError):\n wandb = None\n\n__all__ = [\n 'CheckResult',\n 'CheckFailure',\n]\n\n\ndef _save_all_open_figures():\n figs = [plt.figure(n) for n in plt.get_fignums()]\n images = []\n for fig in figs:\n bio = io.BytesIO()\n fig.savefig(bio, format='png')\n encoded = base64.b64encode(bio.getvalue()).decode('utf-8')\n images.append(encoded)\n fig.clear()\n return images\n\n\n_CONDITIONS_HEADER = '<h5>Conditions Summary</h5>'\n_ADDITIONAL_OUTPUTS_HEADER = '<h5>Additional Outputs</h5>'\n\n\nclass CheckResult:\n \"\"\"Class which returns from a check with result that can later be used for automatic pipelines and display value.\n\n Class containing the result of a check\n\n The class stores the results and display of the check. Evaluating the result in an IPython console / notebook\n will show the result display output.\n\n Parameters\n ----------\n value : Any\n Value calculated by check. Can be used to decide if decidable check passed.\n display : List[Union[Callable, str, pd.DataFrame, Styler]] , default: None\n Dictionary with formatters for display. 
possible formatters are: 'text/html', 'image/png'\n header : str , default: None\n Header to be displayed in python notebook.\n \"\"\"\n\n value: Any\n header: str\n display: List[Union[Callable, str, pd.DataFrame, Styler]]\n conditions_results: List[ConditionResult]\n check: 'BaseCheck'\n\n def __init__(self, value, header: str = None, display: Any = None):\n self.value = value\n self.header = header\n self.conditions_results = []\n\n if display is not None and not isinstance(display, List):\n self.display = [display]\n else:\n self.display = display or []\n\n for item in self.display:\n if not isinstance(item, (str, pd.DataFrame, Styler, Callable, BaseFigure)):\n raise DeepchecksValueError(f'Can\\'t display item of type: {type(item)}')\n\n def display_check(self, unique_id: str = None, as_widget: bool = False,\n show_additional_outputs=True):\n \"\"\"Display the check result or return the display as widget.\n\n Parameters\n ----------\n unique_id : str\n The unique id given by the suite that displays the check.\n as_widget : bool\n Boolean that controls if to display the check regulary or if to return a widget.\n show_additional_outputs : bool\n Boolean that controls if to show additional outputs.\n Returns\n -------\n Widget\n Widget representation of the display if as_widget is True.\n \"\"\"\n if as_widget:\n box = widgets.VBox()\n box_children = []\n check_html = ''\n if unique_id:\n check_html += f'<h4 id=\"{self.get_check_id(unique_id)}\">{self.get_header()}</h4>'\n else:\n check_html += f'<h4>{self.get_header()}</h4>'\n if hasattr(self.check.__class__, '__doc__'):\n summary = get_docs_summary(self.check)\n check_html += f'<p>{summary}</p>'\n if self.conditions_results:\n check_html += _CONDITIONS_HEADER\n check_html += dataframe_to_html(get_conditions_table(self, unique_id))\n if show_additional_outputs:\n check_html += _ADDITIONAL_OUTPUTS_HEADER\n for item in self.display:\n if isinstance(item, (pd.DataFrame, Styler)):\n check_html += dataframe_to_html(item)\n elif isinstance(item, str):\n check_html += f'<div>{item}</div>'\n elif isinstance(item, BaseFigure):\n if as_widget:\n box_children.append(widgets.HTML(check_html))\n box_children.append(go.FigureWidget(data=item))\n else:\n display_html(check_html, raw=True)\n item.show()\n check_html = ''\n elif callable(item):\n try:\n if as_widget:\n plt_out = widgets.Output()\n with plt_out:\n item()\n plt.show()\n box_children.append(widgets.HTML(check_html))\n box_children.append(plt_out)\n else:\n display_html(check_html, raw=True)\n item()\n plt.show()\n check_html = ''\n except Exception as exc:\n check_html += f'Error in display {str(exc)}'\n else:\n raise Exception(f'Unable to display item of type: {type(item)}')\n if not self.display:\n check_html += '<p><b>✓</b> Nothing found</p>'\n if unique_id:\n check_html += f'<br><a href=\"#summary_{unique_id}\" style=\"font-size: 14px\">Go to top</a>'\n if as_widget:\n box_children.append(widgets.HTML(check_html))\n box.children = box_children\n return box\n display_html(check_html, raw=True)\n\n def _repr_html_(self, unique_id=None,\n show_additional_outputs=True, requirejs: bool = False):\n \"\"\"Return html representation of check result.\"\"\"\n html_out = io.StringIO()\n self.save_as_html(html_out, unique_id=unique_id,\n show_additional_outputs=show_additional_outputs, requirejs=requirejs)\n return html_out.getvalue()\n\n def save_as_html(self, file=None, unique_id=None,\n show_additional_outputs=True, requirejs: bool = True):\n \"\"\"Save output as html file.\n\n Parameters\n 
----------\n file : filename or file-like object\n The file to write the HTML output to. If None writes to output.html\n requirejs: bool , default: True\n If to save with all javascript dependencies\n \"\"\"\n if file is None:\n file = 'output.html'\n widgeted_output = self.display_check(unique_id=unique_id,\n show_additional_outputs=show_additional_outputs,\n as_widget=True)\n if isinstance(file, str):\n file = create_new_file_name(file, 'html')\n widget_to_html(widgeted_output, html_out=file, title=self.get_header(), requirejs=requirejs)\n\n def _display_to_json(self) -> List[Tuple[str, str]]:\n displays = []\n old_backend = matplotlib.get_backend()\n for item in self.display:\n if isinstance(item, Styler):\n displays.append(('dataframe', item.data.to_json(orient='records')))\n elif isinstance(item, pd.DataFrame):\n displays.append(('dataframe', item.to_json(orient='records')))\n elif isinstance(item, str):\n displays.append(('html', item))\n elif isinstance(item, BaseFigure):\n displays.append(('plotly', item.to_json()))\n elif callable(item):\n try:\n matplotlib.use('Agg')\n item()\n displays.append(('plt', _save_all_open_figures()))\n except Exception:\n displays.append(('plt', ''))\n else:\n matplotlib.use(old_backend)\n raise Exception(f'Unable to create json for item of type: {type(item)}')\n matplotlib.use(old_backend)\n return displays\n\n def to_wandb(self, dedicated_run: bool = True, **kwargs: Any):\n \"\"\"Export check result to wandb.\n\n Parameters\n ----------\n dedicated_run : bool , default: None\n If to initiate and finish a new wandb run.\n If None it will be dedicated if wandb.run is None.\n kwargs: Keyword arguments to pass to wandb.init.\n Default project name is deepchecks.\n Default config is the check metadata (params, train/test/ name etc.).\n \"\"\"\n check_metadata = self._get_metadata()\n section_suffix = check_metadata['header'] + '/'\n if isinstance(self.value, pd.DataFrame):\n value = self.value.to_json()\n elif isinstance(self.value, Styler):\n value = self.value.data.to_json()\n elif isinstance(self.value, np.ndarray):\n value = self.value.tolist()\n elif isinstance(self.value, (np.ndarray, np.generic)):\n value = un_numpy(self.value)\n else:\n value = jsonpickle.dumps(self.value, unpicklable=False)\n check_metadata['value'] = value\n dedicated_run = set_wandb_run_state(dedicated_run, check_metadata, **kwargs)\n if self.conditions_results:\n cond_df = get_conditions_table([self], icon_html=False)\n cond_table = wandb.Table(dataframe=cond_df.data, allow_mixed_types=True)\n wandb.log({f'{section_suffix}conditions_table': cond_table}, commit=False)\n table_i = 0\n plot_i = 0\n old_backend = matplotlib.get_backend()\n for item in self.display:\n if isinstance(item, Styler):\n wandb.log({f'{section_suffix}display_table_{table_i}':\n wandb.Table(dataframe=item.data.reset_index(), allow_mixed_types=True)}, commit=False)\n table_i += 1\n elif isinstance(item, pd.DataFrame):\n wandb.log({f'{section_suffix}display_table_{table_i}':\n wandb.Table(dataframe=item.reset_index(), allow_mixed_types=True)}, commit=False)\n table_i += 1\n elif isinstance(item, str):\n pass\n elif isinstance(item, BaseFigure):\n wandb.log({f'{section_suffix}plot_{plot_i}': wandb.Plotly(item)})\n plot_i += 1\n elif callable(item):\n try:\n matplotlib.use('Agg')\n item()\n wandb.log({f'{section_suffix}plot_{plot_i}': plt})\n plot_i += 1\n except Exception:\n pass\n else:\n matplotlib.use(old_backend)\n raise Exception(f'Unable to process display for item of type: {type(item)}')\n\n 
matplotlib.use(old_backend)\n data = [check_metadata['header'],\n str(check_metadata['params']),\n check_metadata['summary'],\n value]\n final_table = wandb.Table(columns=['header', 'params', 'summary', 'value'])\n final_table.add_data(*data)\n wandb.log({f'{section_suffix}results': final_table}, commit=False)\n if dedicated_run:\n wandb.finish()\n\n def to_json(self, with_display: bool = True) -> str:\n \"\"\"Return check result as json.\n\n Parameters\n ----------\n with_display : bool\n controls if to serialize display or not\n\n Returns\n -------\n str\n {'name': .., 'params': .., 'header': ..,\n 'summary': .., 'conditions_table': .., 'value', 'display': ..}\n \"\"\"\n result_json = self._get_metadata()\n if self.conditions_results:\n cond_df = get_conditions_table(self, icon_html=False)\n result_json['conditions_table'] = cond_df.data.to_json(orient='records')\n if isinstance(self.value, pd.DataFrame):\n result_json['value'] = self.value.to_json()\n elif isinstance(self.value, Styler):\n result_json['value'] = self.value.data.to_json()\n elif isinstance(self.value, np.ndarray):\n result_json['value'] = self.value.tolist()\n elif isinstance(self.value, (np.ndarray, np.generic)):\n result_json['value'] = un_numpy(self.value)\n else:\n result_json['value'] = self.value\n if with_display:\n display_json = self._display_to_json()\n result_json['display'] = display_json\n return jsonpickle.dumps(result_json, unpicklable=False)\n\n @staticmethod\n def display_from_json(json_data):\n \"\"\"Display the check result from a json received from a to_json.\"\"\"\n json_data = jsonpickle.loads(json_data)\n if json_data.get('display') is None:\n return\n header = json_data['header']\n summary = json_data['summary']\n display_html(f'<h4>{header}</h4>', raw=True)\n display_html(f'<p>{summary}</p>', raw=True)\n if json_data.get('conditions_table'):\n display_html(_CONDITIONS_HEADER, raw=True)\n conditions_table = pd.read_json(json_data['conditions_table'], orient='records')\n with warnings.catch_warnings():\n warnings.simplefilter(action='ignore', category=FutureWarning)\n display_html(dataframe_to_html(conditions_table.style.hide_index()), raw=True)\n display_html(_ADDITIONAL_OUTPUTS_HEADER, raw=True)\n for display_type, value in json_data['display']:\n if display_type == 'html':\n display_html(value, raw=True)\n elif display_type in ['conditions', 'dataframe']:\n df: pd.DataFrame = pd.read_json(value, orient='records')\n display_html(dataframe_to_html(df), raw=True)\n elif display_type == 'plotly':\n plotly_json = io.StringIO(value)\n plotly.io.read_json(plotly_json).show()\n elif display_type == 'plt':\n display_html(f'<img src=\\'data:image/png;base64,{value}\\'>', raw=True)\n else:\n raise ValueError(f'Unexpected type of display received: {display_type}')\n\n def _get_metadata(self, with_doc_link: bool = False):\n check_name = self.check.name()\n parameters = self.check.params(True)\n header = self.get_header()\n return {'name': check_name, 'params': parameters, 'header': header,\n 'summary': get_docs_summary(self.check, with_doc_link=with_doc_link)}\n\n def _ipython_display_(self, unique_id=None, as_widget=False,\n show_additional_outputs=True):\n check_widget = self.display_check(unique_id=unique_id, as_widget=as_widget,\n show_additional_outputs=show_additional_outputs)\n if as_widget:\n display_html(check_widget)\n\n def __repr__(self):\n \"\"\"Return default __repr__ function uses value.\"\"\"\n return f'{self.get_header()}: {self.value}'\n\n def get_header(self) -> str:\n \"\"\"Return header 
for display. if header was defined return it, else extract name of check class.\"\"\"\n return self.header or self.check.name()\n\n def get_check_id(self, unique_id: str = '') -> str:\n \"\"\"Return check id (used for href).\"\"\"\n header = self.get_header().replace(' ', '')\n return f'{header}_{unique_id}'\n\n def process_conditions(self) -> List[Condition]:\n \"\"\"Process the conditions results from current result and check.\"\"\"\n self.conditions_results = self.check.conditions_decision(self)\n\n def have_conditions(self) -> bool:\n \"\"\"Return if this check has condition results.\"\"\"\n return bool(self.conditions_results)\n\n def have_display(self) -> bool:\n \"\"\"Return if this check has display.\"\"\"\n return bool(self.display)\n\n def passed_conditions(self) -> bool:\n \"\"\"Return if this check has no passing condition results.\"\"\"\n return all((r.is_pass for r in self.conditions_results))\n\n @property\n def priority(self) -> int:\n \"\"\"Return priority of the current result.\n\n This value is primarly used to determine suite output order.\n The logic is next:\n\n * if at least one condition did not pass and is of category 'FAIL', return 1.\n * if at least one condition did not pass and is of category 'WARN', return 2.\n * if check result do not have assigned conditions, return 3.\n * if all conditions passed, return 4.\n\n Returns\n -------\n int\n priority of the check result.\n \"\"\"\n if not self.have_conditions:\n return 3\n\n for c in self.conditions_results:\n if c.is_pass is False and c.category == ConditionCategory.FAIL:\n return 1\n if c.is_pass is False and c.category == ConditionCategory.WARN:\n return 2\n\n return 4\n\n def show(self, show_additional_outputs=True, unique_id=None):\n \"\"\"Display the check result.\n\n Parameters\n ----------\n show_additional_outputs : bool\n Boolean that controls if to show additional outputs.\n unique_id : str\n The unique id given by the suite that displays the check.\n \"\"\"\n if is_notebook():\n self.display_check(unique_id=unique_id,\n show_additional_outputs=show_additional_outputs)\n elif 'sphinx_gallery' in pio.renderers.default:\n html = self._repr_html_(unique_id=unique_id,\n show_additional_outputs=show_additional_outputs)\n\n class TempSphinx:\n def _repr_html_(self):\n return html\n return TempSphinx()\n else:\n warnings.warn('You are running in a non-interactive python shell. 
in order to show result you have to use '\n 'an IPython shell (etc Jupyter)')\n\n\nclass CheckFailure:\n \"\"\"Class which holds a check run exception.\n\n Parameters\n ----------\n check : BaseCheck\n exception : Exception\n header_suffix : str , default ``\n\n \"\"\"\n\n def __init__(self, check: 'BaseCheck', exception: Exception, header_suffix: str = ''):\n self.check = check\n self.exception = exception\n self.header = check.name() + header_suffix\n\n def to_json(self, with_display: bool = True):\n \"\"\"Return check failure as json.\n\n Parameters\n ----------\n with_display : bool\n controls if to serialize display or not\n\n Returns\n -------\n dict\n {'name': .., 'params': .., 'header': .., 'display': ..}\n \"\"\"\n result_json = self._get_metadata()\n if with_display:\n result_json['display'] = [('str', str(self.exception))]\n return jsonpickle.dumps(result_json, unpicklable=False)\n\n def to_wandb(self, dedicated_run: bool = True, **kwargs: Any):\n \"\"\"Export check result to wandb.\n\n Parameters\n ----------\n dedicated_run : bool , default: None\n If to initiate and finish a new wandb run.\n If None it will be dedicated if wandb.run is None.\n kwargs: Keyword arguments to pass to wandb.init.\n Default project name is deepchecks.\n Default config is the check metadata (params, train/test/ name etc.).\n \"\"\"\n check_metadata = self._get_metadata()\n section_suffix = check_metadata['header'] + '/'\n data = [check_metadata['header'],\n str(check_metadata['params']),\n check_metadata['summary'],\n str(self.exception)]\n check_metadata['value'] = str(self.exception)\n dedicated_run = set_wandb_run_state(dedicated_run, check_metadata, **kwargs)\n final_table = wandb.Table(columns=['header', 'params', 'summary', 'value'])\n final_table.add_data(*data)\n wandb.log({f'{section_suffix}results': final_table}, commit=False)\n if dedicated_run:\n wandb.finish()\n\n def _get_metadata(self, with_doc_link: bool = False):\n check_name = self.check.name()\n parameters = self.check.params(True)\n summary = get_docs_summary(self.check, with_doc_link=with_doc_link)\n return {'name': check_name, 'params': parameters, 'header': self.header, 'summary': summary}\n\n def __repr__(self):\n \"\"\"Return string representation.\"\"\"\n return self.header + ': ' + str(self.exception)\n\n def _ipython_display_(self):\n \"\"\"Display the check failure.\"\"\"\n check_html = f'<h4>{self.header}</h4>'\n if hasattr(self.check.__class__, '__doc__'):\n summary = get_docs_summary(self.check)\n check_html += f'<p>{summary}</p>'\n check_html += f'<p style=\"color:red\"> {self.exception}</p>'\n display_html(check_html, raw=True)\n\n def print_traceback(self):\n \"\"\"Print the traceback of the failure.\"\"\"\n tb_str = traceback.format_exception(etype=type(self.exception), value=self.exception,\n tb=self.exception.__traceback__)\n print(''.join(tb_str))\n",
"path": "deepchecks/core/check_result.py"
}
] | [
{
"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"Module containing the check results classes.\"\"\"\n# pylint: disable=broad-except\nimport base64\nimport io\nimport traceback\nimport warnings\nfrom typing import Any, Callable, List, Tuple, Union, TYPE_CHECKING\n\nimport jsonpickle\nimport jsonpickle.ext.pandas as jsonpickle_pd\nimport matplotlib\nimport pandas as pd\nimport numpy as np\nimport ipywidgets as widgets\nimport plotly.graph_objects as go\nfrom plotly.basedatatypes import BaseFigure\nimport plotly.io as pio\nimport plotly\nfrom matplotlib import pyplot as plt\nfrom IPython.display import display_html\nfrom pandas.io.formats.style import Styler\n\nfrom deepchecks.core.condition import Condition, ConditionCategory, ConditionResult\nfrom deepchecks.core.display_pandas import dataframe_to_html, get_conditions_table\nfrom deepchecks.core.errors import DeepchecksValueError\nfrom deepchecks.utils.dataframes import un_numpy\nfrom deepchecks.utils.strings import create_new_file_name, get_docs_summary, widget_to_html\nfrom deepchecks.utils.ipython import is_notebook\nfrom deepchecks.utils.wandb_utils import set_wandb_run_state\n\n# registers jsonpickle pandas extension for pandas support in the to_json function\njsonpickle_pd.register_handlers()\n\nif TYPE_CHECKING:\n from deepchecks.core.checks import BaseCheck\n\ntry:\n import wandb\n\n assert hasattr(wandb, '__version__') # verify package import not local dir\nexcept (ImportError, AssertionError):\n wandb = None\n\n__all__ = [\n 'CheckResult',\n 'CheckFailure',\n]\n\n\ndef _save_all_open_figures():\n figs = [plt.figure(n) for n in plt.get_fignums()]\n images = []\n for fig in figs:\n bio = io.BytesIO()\n fig.savefig(bio, format='png')\n encoded = base64.b64encode(bio.getvalue()).decode('utf-8')\n images.append(encoded)\n fig.clear()\n return images\n\n\n_CONDITIONS_HEADER = '<h5>Conditions Summary</h5>'\n_ADDITIONAL_OUTPUTS_HEADER = '<h5>Additional Outputs</h5>'\n\n\nclass CheckResult:\n \"\"\"Class which returns from a check with result that can later be used for automatic pipelines and display value.\n\n Class containing the result of a check\n\n The class stores the results and display of the check. Evaluating the result in an IPython console / notebook\n will show the result display output.\n\n Parameters\n ----------\n value : Any\n Value calculated by check. Can be used to decide if decidable check passed.\n display : List[Union[Callable, str, pd.DataFrame, Styler]] , default: None\n Dictionary with formatters for display. 
possible formatters are: 'text/html', 'image/png'\n header : str , default: None\n Header to be displayed in python notebook.\n \"\"\"\n\n value: Any\n header: str\n display: List[Union[Callable, str, pd.DataFrame, Styler]]\n conditions_results: List[ConditionResult]\n check: 'BaseCheck'\n\n def __init__(self, value, header: str = None, display: Any = None):\n self.value = value\n self.header = header\n self.conditions_results = []\n\n if display is not None and not isinstance(display, List):\n self.display = [display]\n else:\n self.display = display or []\n\n for item in self.display:\n if not isinstance(item, (str, pd.DataFrame, Styler, Callable, BaseFigure)):\n raise DeepchecksValueError(f'Can\\'t display item of type: {type(item)}')\n\n def display_check(self, unique_id: str = None, as_widget: bool = False,\n show_additional_outputs=True):\n \"\"\"Display the check result or return the display as widget.\n\n Parameters\n ----------\n unique_id : str\n The unique id given by the suite that displays the check.\n as_widget : bool\n Boolean that controls if to display the check regulary or if to return a widget.\n show_additional_outputs : bool\n Boolean that controls if to show additional outputs.\n Returns\n -------\n Widget\n Widget representation of the display if as_widget is True.\n \"\"\"\n if as_widget:\n box = widgets.VBox()\n box.add_class('rendered_html')\n box_children = []\n check_html = ''\n if unique_id:\n check_html += f'<h4 id=\"{self.get_check_id(unique_id)}\">{self.get_header()}</h4>'\n else:\n check_html += f'<h4>{self.get_header()}</h4>'\n if hasattr(self.check.__class__, '__doc__'):\n summary = get_docs_summary(self.check)\n check_html += f'<p>{summary}</p>'\n if self.conditions_results:\n check_html += _CONDITIONS_HEADER\n check_html += dataframe_to_html(get_conditions_table(self, unique_id))\n if show_additional_outputs:\n check_html += _ADDITIONAL_OUTPUTS_HEADER\n for item in self.display:\n if isinstance(item, (pd.DataFrame, Styler)):\n check_html += dataframe_to_html(item)\n elif isinstance(item, str):\n check_html += f'<div>{item}</div>'\n elif isinstance(item, BaseFigure):\n if as_widget:\n box_children.append(widgets.HTML(check_html))\n box_children.append(go.FigureWidget(data=item))\n else:\n display_html(check_html, raw=True)\n item.show()\n check_html = ''\n elif callable(item):\n try:\n if as_widget:\n plt_out = widgets.Output()\n with plt_out:\n item()\n plt.show()\n box_children.append(widgets.HTML(check_html))\n box_children.append(plt_out)\n else:\n display_html(check_html, raw=True)\n item()\n plt.show()\n check_html = ''\n except Exception as exc:\n check_html += f'Error in display {str(exc)}'\n else:\n raise Exception(f'Unable to display item of type: {type(item)}')\n if not self.display:\n check_html += '<p><b>✓</b> Nothing found</p>'\n if unique_id:\n check_html += f'<br><a href=\"#summary_{unique_id}\" style=\"font-size: 14px\">Go to top</a>'\n if as_widget:\n box_children.append(widgets.HTML(check_html))\n box.children = box_children\n return box\n display_html(check_html, raw=True)\n\n def _repr_html_(self, unique_id=None,\n show_additional_outputs=True, requirejs: bool = False):\n \"\"\"Return html representation of check result.\"\"\"\n html_out = io.StringIO()\n self.save_as_html(html_out, unique_id=unique_id,\n show_additional_outputs=show_additional_outputs, requirejs=requirejs)\n return html_out.getvalue()\n\n def save_as_html(self, file=None, unique_id=None,\n show_additional_outputs=True, requirejs: bool = True):\n \"\"\"Save output as 
html file.\n\n Parameters\n ----------\n file : filename or file-like object\n The file to write the HTML output to. If None writes to output.html\n requirejs: bool , default: True\n If to save with all javascript dependencies\n \"\"\"\n if file is None:\n file = 'output.html'\n widgeted_output = self.display_check(unique_id=unique_id,\n show_additional_outputs=show_additional_outputs,\n as_widget=True)\n if isinstance(file, str):\n file = create_new_file_name(file, 'html')\n widget_to_html(widgeted_output, html_out=file, title=self.get_header(), requirejs=requirejs)\n\n def _display_to_json(self) -> List[Tuple[str, str]]:\n displays = []\n old_backend = matplotlib.get_backend()\n for item in self.display:\n if isinstance(item, Styler):\n displays.append(('dataframe', item.data.to_json(orient='records')))\n elif isinstance(item, pd.DataFrame):\n displays.append(('dataframe', item.to_json(orient='records')))\n elif isinstance(item, str):\n displays.append(('html', item))\n elif isinstance(item, BaseFigure):\n displays.append(('plotly', item.to_json()))\n elif callable(item):\n try:\n matplotlib.use('Agg')\n item()\n displays.append(('plt', _save_all_open_figures()))\n except Exception:\n displays.append(('plt', ''))\n else:\n matplotlib.use(old_backend)\n raise Exception(f'Unable to create json for item of type: {type(item)}')\n matplotlib.use(old_backend)\n return displays\n\n def to_wandb(self, dedicated_run: bool = True, **kwargs: Any):\n \"\"\"Export check result to wandb.\n\n Parameters\n ----------\n dedicated_run : bool , default: None\n If to initiate and finish a new wandb run.\n If None it will be dedicated if wandb.run is None.\n kwargs: Keyword arguments to pass to wandb.init.\n Default project name is deepchecks.\n Default config is the check metadata (params, train/test/ name etc.).\n \"\"\"\n check_metadata = self._get_metadata()\n section_suffix = check_metadata['header'] + '/'\n if isinstance(self.value, pd.DataFrame):\n value = self.value.to_json()\n elif isinstance(self.value, Styler):\n value = self.value.data.to_json()\n elif isinstance(self.value, np.ndarray):\n value = self.value.tolist()\n elif isinstance(self.value, (np.ndarray, np.generic)):\n value = un_numpy(self.value)\n else:\n value = jsonpickle.dumps(self.value, unpicklable=False)\n check_metadata['value'] = value\n dedicated_run = set_wandb_run_state(dedicated_run, check_metadata, **kwargs)\n if self.conditions_results:\n cond_df = get_conditions_table([self], icon_html=False)\n cond_table = wandb.Table(dataframe=cond_df.data, allow_mixed_types=True)\n wandb.log({f'{section_suffix}conditions_table': cond_table}, commit=False)\n table_i = 0\n plot_i = 0\n old_backend = matplotlib.get_backend()\n for item in self.display:\n if isinstance(item, Styler):\n wandb.log({f'{section_suffix}display_table_{table_i}':\n wandb.Table(dataframe=item.data.reset_index(), allow_mixed_types=True)}, commit=False)\n table_i += 1\n elif isinstance(item, pd.DataFrame):\n wandb.log({f'{section_suffix}display_table_{table_i}':\n wandb.Table(dataframe=item.reset_index(), allow_mixed_types=True)}, commit=False)\n table_i += 1\n elif isinstance(item, str):\n pass\n elif isinstance(item, BaseFigure):\n wandb.log({f'{section_suffix}plot_{plot_i}': wandb.Plotly(item)})\n plot_i += 1\n elif callable(item):\n try:\n matplotlib.use('Agg')\n item()\n wandb.log({f'{section_suffix}plot_{plot_i}': plt})\n plot_i += 1\n except Exception:\n pass\n else:\n matplotlib.use(old_backend)\n raise Exception(f'Unable to process display for item of type: 
{type(item)}')\n\n matplotlib.use(old_backend)\n data = [check_metadata['header'],\n str(check_metadata['params']),\n check_metadata['summary'],\n value]\n final_table = wandb.Table(columns=['header', 'params', 'summary', 'value'])\n final_table.add_data(*data)\n wandb.log({f'{section_suffix}results': final_table}, commit=False)\n if dedicated_run:\n wandb.finish()\n\n def to_json(self, with_display: bool = True) -> str:\n \"\"\"Return check result as json.\n\n Parameters\n ----------\n with_display : bool\n controls if to serialize display or not\n\n Returns\n -------\n str\n {'name': .., 'params': .., 'header': ..,\n 'summary': .., 'conditions_table': .., 'value', 'display': ..}\n \"\"\"\n result_json = self._get_metadata()\n if self.conditions_results:\n cond_df = get_conditions_table(self, icon_html=False)\n result_json['conditions_table'] = cond_df.data.to_json(orient='records')\n if isinstance(self.value, pd.DataFrame):\n result_json['value'] = self.value.to_json()\n elif isinstance(self.value, Styler):\n result_json['value'] = self.value.data.to_json()\n elif isinstance(self.value, np.ndarray):\n result_json['value'] = self.value.tolist()\n elif isinstance(self.value, (np.ndarray, np.generic)):\n result_json['value'] = un_numpy(self.value)\n else:\n result_json['value'] = self.value\n if with_display:\n display_json = self._display_to_json()\n result_json['display'] = display_json\n return jsonpickle.dumps(result_json, unpicklable=False)\n\n @staticmethod\n def display_from_json(json_data):\n \"\"\"Display the check result from a json received from a to_json.\"\"\"\n json_data = jsonpickle.loads(json_data)\n if json_data.get('display') is None:\n return\n header = json_data['header']\n summary = json_data['summary']\n display_html(f'<h4>{header}</h4>', raw=True)\n display_html(f'<p>{summary}</p>', raw=True)\n if json_data.get('conditions_table'):\n display_html(_CONDITIONS_HEADER, raw=True)\n conditions_table = pd.read_json(json_data['conditions_table'], orient='records')\n with warnings.catch_warnings():\n warnings.simplefilter(action='ignore', category=FutureWarning)\n display_html(dataframe_to_html(conditions_table.style.hide_index()), raw=True)\n display_html(_ADDITIONAL_OUTPUTS_HEADER, raw=True)\n for display_type, value in json_data['display']:\n if display_type == 'html':\n display_html(value, raw=True)\n elif display_type in ['conditions', 'dataframe']:\n df: pd.DataFrame = pd.read_json(value, orient='records')\n display_html(dataframe_to_html(df), raw=True)\n elif display_type == 'plotly':\n plotly_json = io.StringIO(value)\n plotly.io.read_json(plotly_json).show()\n elif display_type == 'plt':\n display_html(f'<img src=\\'data:image/png;base64,{value}\\'>', raw=True)\n else:\n raise ValueError(f'Unexpected type of display received: {display_type}')\n\n def _get_metadata(self, with_doc_link: bool = False):\n check_name = self.check.name()\n parameters = self.check.params(True)\n header = self.get_header()\n return {'name': check_name, 'params': parameters, 'header': header,\n 'summary': get_docs_summary(self.check, with_doc_link=with_doc_link)}\n\n def _ipython_display_(self, unique_id=None, as_widget=False,\n show_additional_outputs=True):\n check_widget = self.display_check(unique_id=unique_id, as_widget=as_widget,\n show_additional_outputs=show_additional_outputs)\n if as_widget:\n display_html(check_widget)\n\n def __repr__(self):\n \"\"\"Return default __repr__ function uses value.\"\"\"\n return f'{self.get_header()}: {self.value}'\n\n def get_header(self) -> str:\n 
\"\"\"Return header for display. if header was defined return it, else extract name of check class.\"\"\"\n return self.header or self.check.name()\n\n def get_check_id(self, unique_id: str = '') -> str:\n \"\"\"Return check id (used for href).\"\"\"\n header = self.get_header().replace(' ', '')\n return f'{header}_{unique_id}'\n\n def process_conditions(self) -> List[Condition]:\n \"\"\"Process the conditions results from current result and check.\"\"\"\n self.conditions_results = self.check.conditions_decision(self)\n\n def have_conditions(self) -> bool:\n \"\"\"Return if this check has condition results.\"\"\"\n return bool(self.conditions_results)\n\n def have_display(self) -> bool:\n \"\"\"Return if this check has display.\"\"\"\n return bool(self.display)\n\n def passed_conditions(self) -> bool:\n \"\"\"Return if this check has no passing condition results.\"\"\"\n return all((r.is_pass for r in self.conditions_results))\n\n @property\n def priority(self) -> int:\n \"\"\"Return priority of the current result.\n\n This value is primarly used to determine suite output order.\n The logic is next:\n\n * if at least one condition did not pass and is of category 'FAIL', return 1.\n * if at least one condition did not pass and is of category 'WARN', return 2.\n * if check result do not have assigned conditions, return 3.\n * if all conditions passed, return 4.\n\n Returns\n -------\n int\n priority of the check result.\n \"\"\"\n if not self.have_conditions:\n return 3\n\n for c in self.conditions_results:\n if c.is_pass is False and c.category == ConditionCategory.FAIL:\n return 1\n if c.is_pass is False and c.category == ConditionCategory.WARN:\n return 2\n\n return 4\n\n def show(self, show_additional_outputs=True, unique_id=None):\n \"\"\"Display the check result.\n\n Parameters\n ----------\n show_additional_outputs : bool\n Boolean that controls if to show additional outputs.\n unique_id : str\n The unique id given by the suite that displays the check.\n \"\"\"\n if is_notebook():\n self.display_check(unique_id=unique_id,\n show_additional_outputs=show_additional_outputs)\n elif 'sphinx_gallery' in pio.renderers.default:\n html = self._repr_html_(unique_id=unique_id,\n show_additional_outputs=show_additional_outputs)\n\n class TempSphinx:\n def _repr_html_(self):\n return html\n return TempSphinx()\n else:\n warnings.warn('You are running in a non-interactive python shell. 
in order to show result you have to use '\n 'an IPython shell (etc Jupyter)')\n\n\nclass CheckFailure:\n \"\"\"Class which holds a check run exception.\n\n Parameters\n ----------\n check : BaseCheck\n exception : Exception\n header_suffix : str , default ``\n\n \"\"\"\n\n def __init__(self, check: 'BaseCheck', exception: Exception, header_suffix: str = ''):\n self.check = check\n self.exception = exception\n self.header = check.name() + header_suffix\n\n def to_json(self, with_display: bool = True):\n \"\"\"Return check failure as json.\n\n Parameters\n ----------\n with_display : bool\n controls if to serialize display or not\n\n Returns\n -------\n dict\n {'name': .., 'params': .., 'header': .., 'display': ..}\n \"\"\"\n result_json = self._get_metadata()\n if with_display:\n result_json['display'] = [('str', str(self.exception))]\n return jsonpickle.dumps(result_json, unpicklable=False)\n\n def to_wandb(self, dedicated_run: bool = True, **kwargs: Any):\n \"\"\"Export check result to wandb.\n\n Parameters\n ----------\n dedicated_run : bool , default: None\n If to initiate and finish a new wandb run.\n If None it will be dedicated if wandb.run is None.\n kwargs: Keyword arguments to pass to wandb.init.\n Default project name is deepchecks.\n Default config is the check metadata (params, train/test/ name etc.).\n \"\"\"\n check_metadata = self._get_metadata()\n section_suffix = check_metadata['header'] + '/'\n data = [check_metadata['header'],\n str(check_metadata['params']),\n check_metadata['summary'],\n str(self.exception)]\n check_metadata['value'] = str(self.exception)\n dedicated_run = set_wandb_run_state(dedicated_run, check_metadata, **kwargs)\n final_table = wandb.Table(columns=['header', 'params', 'summary', 'value'])\n final_table.add_data(*data)\n wandb.log({f'{section_suffix}results': final_table}, commit=False)\n if dedicated_run:\n wandb.finish()\n\n def _get_metadata(self, with_doc_link: bool = False):\n check_name = self.check.name()\n parameters = self.check.params(True)\n summary = get_docs_summary(self.check, with_doc_link=with_doc_link)\n return {'name': check_name, 'params': parameters, 'header': self.header, 'summary': summary}\n\n def __repr__(self):\n \"\"\"Return string representation.\"\"\"\n return self.header + ': ' + str(self.exception)\n\n def _ipython_display_(self):\n \"\"\"Display the check failure.\"\"\"\n check_html = f'<h4>{self.header}</h4>'\n if hasattr(self.check.__class__, '__doc__'):\n summary = get_docs_summary(self.check)\n check_html += f'<p>{summary}</p>'\n check_html += f'<p style=\"color:red\"> {self.exception}</p>'\n display_html(check_html, raw=True)\n\n def print_traceback(self):\n \"\"\"Print the traceback of the failure.\"\"\"\n tb_str = traceback.format_exception(etype=type(self.exception), value=self.exception,\n tb=self.exception.__traceback__)\n print(''.join(tb_str))\n",
"path": "deepchecks/core/check_result.py"
}
] | diff --git a/deepchecks/core/check_result.py b/deepchecks/core/check_result.py
index d9d26d9290..7426865571 100644
--- a/deepchecks/core/check_result.py
+++ b/deepchecks/core/check_result.py
@@ -130,6 +130,7 @@ def display_check(self, unique_id: str = None, as_widget: bool = False,
"""
if as_widget:
box = widgets.VBox()
+ box.add_class('rendered_html')
box_children = []
check_html = ''
if unique_id:
|
ray-project__ray-1471 | Travis test failures in test_catalog.py.
The Travis builds all seem to be failing in `test_catalog.py`.
I can reproduce some failures locally with `gym` version `0.9.5`.
Gym pushed a new version today, so that may be the issue https://pypi.python.org/pypi/gym.
For example,
```
$ python -m pytest python/ray/rllib/test/test_catalog.py
============================= test session starts ==============================
platform linux2 -- Python 2.7.14, pytest-3.3.2, py-1.5.2, pluggy-0.6.0
rootdir: /home/travis/build/robertnishihara/ray-private-travis/python, inifile:
collecting 0 items
collecting 5 items
collecting 5 items
collected 5 items
python/ray/rllib/test/test_catalog.py ...FF                              [100%]
=================================== FAILURES ===================================
____________________ ModelCatalogTest.testGymPreprocessors _____________________
self = <ray.rllib.test.test_catalog.ModelCatalogTest testMethod=testGymPreprocessors>
    def testGymPreprocessors(self):
        p1 = ModelCatalog.get_preprocessor(
            get_registry(), gym.make("CartPole-v0"))
        self.assertEqual(type(p1), NoPreprocessor)

        p2 = ModelCatalog.get_preprocessor(
>           get_registry(), gym.make("FrozenLake-v0"))
python/ray/rllib/test/test_catalog.py:41:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/ray/rllib/models/catalog.py:215: in get_preprocessor
    return preprocessor(env.observation_space, options)
python/ray/rllib/models/preprocessors.py:23: in __init__
    self._init()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ray.rllib.models.preprocessors.OneHotPreprocessor object at 0x7fad2df67dd0>
    def _init(self):
>       assert self._obs_space.shape == ()
E       AssertionError
python/ray/rllib/models/preprocessors.py:81: AssertionError
----------------------------- Captured stdout call -----------------------------
Observation shape is (4,)
Not using any observation preprocessor.
Observation shape is (16,)
Using one-hot preprocessor for discrete envs.
----------------------------- Captured stderr call -----------------------------
[2018-01-25 07:26:43,537] Making new env: CartPole-v0
[2018-01-25 07:26:43,540] Making new env: FrozenLake-v0
------------------------------ Captured log call -------------------------------
registration.py 120 INFO Making new env: CartPole-v0
registration.py 120 INFO Making new env: FrozenLake-v0
____________________ ModelCatalogTest.testTuplePreprocessor ____________________
self = <ray.rllib.test.test_catalog.ModelCatalogTest testMethod=testTuplePreprocessor>
    def testTuplePreprocessor(self):
        ray.init()

        class TupleEnv(object):
            def __init__(self):
                self.observation_space = Tuple(
                    [Discrete(5), Box(0, 1, shape=(3,))])
        p1 = ModelCatalog.get_preprocessor(
>           get_registry(), TupleEnv())
python/ray/rllib/test/test_catalog.py:52:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
python/ray/rllib/models/catalog.py:215: in get_preprocessor
    return preprocessor(env.observation_space, options)
python/ray/rllib/models/preprocessors.py:23: in __init__
    self._init()
python/ray/rllib/models/preprocessors.py:112: in _init
    preprocessor = get_preprocessor(space)(space, self._options)
python/ray/rllib/models/preprocessors.py:23: in __init__
    self._init()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ray.rllib.models.preprocessors.OneHotPreprocessor object at 0x7fad4ff234d0>
    def _init(self):
>       assert self._obs_space.shape == ()
E       AssertionError
python/ray/rllib/models/preprocessors.py:81: AssertionError
----------------------------- Captured stdout call -----------------------------
Waiting for redis server at 127.0.0.1:44545 to respond...
Waiting for redis server at 127.0.0.1:60007 to respond...
Starting local scheduler with the following resources: {'GPU': 0, 'CPU': 2}.
Failed to start the UI, you may need to run 'pip install jupyter'.
Observation shape is ((5,), (3,))
Using a TupleFlatteningPreprocessor
Creating sub-preprocessor for Discrete(5)
Observation shape is (5,)
Using one-hot preprocessor for discrete envs.
----------------------------- Captured stderr call -----------------------------
Allowing the Plasma store to use up to 3.13728GB of memory.
Starting object store with directory /dev/shm and huge page support disabled
Disconnecting client on fd 22
[INFO] (/home/travis/build/robertnishihara/ray-private-travis/src/local_scheduler/local_scheduler.cc:171) Killed worker pid 14098 which hadn't started yet.
[INFO] (/home/travis/build/robertnishihara/ray-private-travis/src/local_scheduler/local_scheduler.cc:171) Killed worker pid 14099 which hadn't started yet.
Disconnecting client on fd 20
Disconnecting client on fd 18
====================== 2 failed, 3 passed in 7.09 seconds ======================
The command "python -m pytest python/ray/rllib/test/test_catalog.py" exited with 1.
```
| [
{
"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport cv2\nimport numpy as np\nimport gym\n\nATARI_OBS_SHAPE = (210, 160, 3)\nATARI_RAM_OBS_SHAPE = (128,)\n\n\nclass Preprocessor(object):\n \"\"\"Defines an abstract observation preprocessor function.\n\n Attributes:\n shape (obj): Shape of the preprocessed output.\n \"\"\"\n\n def __init__(self, obs_space, options):\n legacy_patch_shapes(obs_space)\n self._obs_space = obs_space\n self._options = options\n self._init()\n\n def _init(self):\n pass\n\n def transform(self, observation):\n \"\"\"Returns the preprocessed observation.\"\"\"\n raise NotImplementedError\n\n\nclass AtariPixelPreprocessor(Preprocessor):\n def _init(self):\n self._grayscale = self._options.get(\"grayscale\", False)\n self._zero_mean = self._options.get(\"zero_mean\", True)\n self._dim = self._options.get(\"dim\", 80)\n self._channel_major = self._options.get(\"channel_major\", False)\n if self._grayscale:\n self.shape = (self._dim, self._dim, 1)\n else:\n self.shape = (self._dim, self._dim, 3)\n\n # channel_major requires (# in-channels, row dim, col dim)\n if self._channel_major:\n self.shape = self.shape[-1:] + self.shape[:-1]\n\n def transform(self, observation):\n \"\"\"Downsamples images from (210, 160, 3) by the configured factor.\"\"\"\n scaled = observation[25:-25, :, :]\n if self._dim < 80:\n scaled = cv2.resize(scaled, (80, 80))\n # OpenAI: Resize by half, then down to 42x42 (essentially mipmapping).\n # If we resize directly we lose pixels that, when mapped to 42x42,\n # aren't close enough to the pixel boundary.\n scaled = cv2.resize(scaled, (self._dim, self._dim))\n if self._grayscale:\n scaled = scaled.mean(2)\n scaled = scaled.astype(np.float32)\n # Rescale needed for maintaining 1 channel\n scaled = np.reshape(scaled, [self._dim, self._dim, 1])\n if self._zero_mean:\n scaled = (scaled - 128) / 128\n else:\n scaled *= 1.0 / 255.0\n if self._channel_major:\n scaled = np.reshape(scaled, self.shape)\n return scaled\n\n\nclass AtariRamPreprocessor(Preprocessor):\n def _init(self):\n self.shape = (128,)\n\n def transform(self, observation):\n return (observation - 128) / 128\n\n\nclass OneHotPreprocessor(Preprocessor):\n def _init(self):\n assert self._obs_space.shape == ()\n self.shape = (self._obs_space.n,)\n\n def transform(self, observation):\n arr = np.zeros(self._obs_space.n)\n arr[observation] = 1\n return arr\n\n\nclass NoPreprocessor(Preprocessor):\n def _init(self):\n self.shape = self._obs_space.shape\n\n def transform(self, observation):\n return observation\n\n\nclass TupleFlatteningPreprocessor(Preprocessor):\n \"\"\"Preprocesses each tuple element, then flattens it all into a vector.\n\n If desired, the vector output can be unpacked via tf.reshape() within a\n custom model to handle each component separately.\n \"\"\"\n\n def _init(self):\n assert isinstance(self._obs_space, gym.spaces.Tuple)\n size = 0\n self.preprocessors = []\n for i in range(len(self._obs_space.spaces)):\n space = self._obs_space.spaces[i]\n print(\"Creating sub-preprocessor for\", space)\n preprocessor = get_preprocessor(space)(space, self._options)\n self.preprocessors.append(preprocessor)\n size += np.product(preprocessor.shape)\n self.shape = (size,)\n\n def transform(self, observation):\n assert len(observation) == len(self.preprocessors), observation\n return np.concatenate([\n np.reshape(p.transform(o), [np.product(p.shape)])\n for (o, p) in zip(observation, self.preprocessors)])\n\n\ndef 
get_preprocessor(space):\n \"\"\"Returns an appropriate preprocessor class for the given space.\"\"\"\n\n legacy_patch_shapes(space)\n obs_shape = space.shape\n print(\"Observation shape is {}\".format(obs_shape))\n\n if isinstance(space, gym.spaces.Discrete):\n print(\"Using one-hot preprocessor for discrete envs.\")\n preprocessor = OneHotPreprocessor\n elif obs_shape == ATARI_OBS_SHAPE:\n print(\"Assuming Atari pixel env, using AtariPixelPreprocessor.\")\n preprocessor = AtariPixelPreprocessor\n elif obs_shape == ATARI_RAM_OBS_SHAPE:\n print(\"Assuming Atari ram env, using AtariRamPreprocessor.\")\n preprocessor = AtariRamPreprocessor\n elif isinstance(space, gym.spaces.Tuple):\n print(\"Using a TupleFlatteningPreprocessor\")\n preprocessor = TupleFlatteningPreprocessor\n else:\n print(\"Not using any observation preprocessor.\")\n preprocessor = NoPreprocessor\n\n return preprocessor\n\n\ndef legacy_patch_shapes(space):\n \"\"\"Assigns shapes to spaces that don't have shapes.\n\n This is only needed for older gym versions that don't set shapes properly\n for Tuple and Discrete spaces.\n \"\"\"\n\n if not hasattr(space, \"shape\"):\n if isinstance(space, gym.spaces.Discrete):\n space.shape = ()\n elif isinstance(space, gym.spaces.Tuple):\n shapes = []\n for s in space.spaces:\n shape = legacy_patch_shapes(s)\n shapes.append(shape)\n space.shape = tuple(shapes)\n\n return space.shape\n",
"path": "python/ray/rllib/models/preprocessors.py"
}
] | [
{
"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport cv2\nimport numpy as np\nimport gym\n\nATARI_OBS_SHAPE = (210, 160, 3)\nATARI_RAM_OBS_SHAPE = (128,)\n\n\nclass Preprocessor(object):\n \"\"\"Defines an abstract observation preprocessor function.\n\n Attributes:\n shape (obj): Shape of the preprocessed output.\n \"\"\"\n\n def __init__(self, obs_space, options):\n legacy_patch_shapes(obs_space)\n self._obs_space = obs_space\n self._options = options\n self._init()\n\n def _init(self):\n pass\n\n def transform(self, observation):\n \"\"\"Returns the preprocessed observation.\"\"\"\n raise NotImplementedError\n\n\nclass AtariPixelPreprocessor(Preprocessor):\n def _init(self):\n self._grayscale = self._options.get(\"grayscale\", False)\n self._zero_mean = self._options.get(\"zero_mean\", True)\n self._dim = self._options.get(\"dim\", 80)\n self._channel_major = self._options.get(\"channel_major\", False)\n if self._grayscale:\n self.shape = (self._dim, self._dim, 1)\n else:\n self.shape = (self._dim, self._dim, 3)\n\n # channel_major requires (# in-channels, row dim, col dim)\n if self._channel_major:\n self.shape = self.shape[-1:] + self.shape[:-1]\n\n def transform(self, observation):\n \"\"\"Downsamples images from (210, 160, 3) by the configured factor.\"\"\"\n scaled = observation[25:-25, :, :]\n if self._dim < 80:\n scaled = cv2.resize(scaled, (80, 80))\n # OpenAI: Resize by half, then down to 42x42 (essentially mipmapping).\n # If we resize directly we lose pixels that, when mapped to 42x42,\n # aren't close enough to the pixel boundary.\n scaled = cv2.resize(scaled, (self._dim, self._dim))\n if self._grayscale:\n scaled = scaled.mean(2)\n scaled = scaled.astype(np.float32)\n # Rescale needed for maintaining 1 channel\n scaled = np.reshape(scaled, [self._dim, self._dim, 1])\n if self._zero_mean:\n scaled = (scaled - 128) / 128\n else:\n scaled *= 1.0 / 255.0\n if self._channel_major:\n scaled = np.reshape(scaled, self.shape)\n return scaled\n\n\nclass AtariRamPreprocessor(Preprocessor):\n def _init(self):\n self.shape = (128,)\n\n def transform(self, observation):\n return (observation - 128) / 128\n\n\nclass OneHotPreprocessor(Preprocessor):\n def _init(self):\n self.shape = (self._obs_space.n,)\n\n def transform(self, observation):\n arr = np.zeros(self._obs_space.n)\n arr[observation] = 1\n return arr\n\n\nclass NoPreprocessor(Preprocessor):\n def _init(self):\n self.shape = self._obs_space.shape\n\n def transform(self, observation):\n return observation\n\n\nclass TupleFlatteningPreprocessor(Preprocessor):\n \"\"\"Preprocesses each tuple element, then flattens it all into a vector.\n\n If desired, the vector output can be unpacked via tf.reshape() within a\n custom model to handle each component separately.\n \"\"\"\n\n def _init(self):\n assert isinstance(self._obs_space, gym.spaces.Tuple)\n size = 0\n self.preprocessors = []\n for i in range(len(self._obs_space.spaces)):\n space = self._obs_space.spaces[i]\n print(\"Creating sub-preprocessor for\", space)\n preprocessor = get_preprocessor(space)(space, self._options)\n self.preprocessors.append(preprocessor)\n size += np.product(preprocessor.shape)\n self.shape = (size,)\n\n def transform(self, observation):\n assert len(observation) == len(self.preprocessors), observation\n return np.concatenate([\n np.reshape(p.transform(o), [np.product(p.shape)])\n for (o, p) in zip(observation, self.preprocessors)])\n\n\ndef get_preprocessor(space):\n 
\"\"\"Returns an appropriate preprocessor class for the given space.\"\"\"\n\n legacy_patch_shapes(space)\n obs_shape = space.shape\n print(\"Observation shape is {}\".format(obs_shape))\n\n if isinstance(space, gym.spaces.Discrete):\n print(\"Using one-hot preprocessor for discrete envs.\")\n preprocessor = OneHotPreprocessor\n elif obs_shape == ATARI_OBS_SHAPE:\n print(\"Assuming Atari pixel env, using AtariPixelPreprocessor.\")\n preprocessor = AtariPixelPreprocessor\n elif obs_shape == ATARI_RAM_OBS_SHAPE:\n print(\"Assuming Atari ram env, using AtariRamPreprocessor.\")\n preprocessor = AtariRamPreprocessor\n elif isinstance(space, gym.spaces.Tuple):\n print(\"Using a TupleFlatteningPreprocessor\")\n preprocessor = TupleFlatteningPreprocessor\n else:\n print(\"Not using any observation preprocessor.\")\n preprocessor = NoPreprocessor\n\n return preprocessor\n\n\ndef legacy_patch_shapes(space):\n \"\"\"Assigns shapes to spaces that don't have shapes.\n\n This is only needed for older gym versions that don't set shapes properly\n for Tuple and Discrete spaces.\n \"\"\"\n\n if not hasattr(space, \"shape\"):\n if isinstance(space, gym.spaces.Discrete):\n space.shape = ()\n elif isinstance(space, gym.spaces.Tuple):\n shapes = []\n for s in space.spaces:\n shape = legacy_patch_shapes(s)\n shapes.append(shape)\n space.shape = tuple(shapes)\n\n return space.shape\n",
"path": "python/ray/rllib/models/preprocessors.py"
}
] | diff --git a/python/ray/rllib/models/preprocessors.py b/python/ray/rllib/models/preprocessors.py
index 01e9db16fbc3..f6ec1fa5e071 100644
--- a/python/ray/rllib/models/preprocessors.py
+++ b/python/ray/rllib/models/preprocessors.py
@@ -78,7 +78,6 @@ def transform(self, observation):
class OneHotPreprocessor(Preprocessor):
def _init(self):
- assert self._obs_space.shape == ()
self.shape = (self._obs_space.n,)
def transform(self, observation):
|
CTPUG__wafer-643 | Add support for Django 4.0
Currently failing tests (See #632)
| [
{
"content": "from glob import glob\nimport subprocess\n\nfrom setuptools import find_packages, setup\n\nREQUIRES = [\n 'Django>=2.2,<4',\n 'bleach',\n 'bleach-allowlist',\n 'diff-match-patch',\n 'django-bakery>=0.12.0',\n 'django-crispy-forms',\n 'django-markitup>=4.0.0',\n 'django-registration-redux',\n 'django-reversion',\n 'django-select2',\n 'djangorestframework',\n 'drf-extensions>=0.5.0',\n 'icalendar>=4.0',\n 'jsonfield',\n 'markdown>=2.5',\n 'pillow',\n 'py3dns',\n 'pyLibravatar',\n 'pytz',\n 'requests',\n]\n\nSOURCES = []\n\n\nwith open('README.rst', 'r') as f:\n long_description = f.read()\n\n\ndef compile_translations():\n try:\n subprocess.check_call(['./manage.py', 'compilemessages'])\n except subprocess.CalledProcessError:\n print(\"WARNING: cannot compile translations.\")\n return glob('wafer/locale/*/LC_MESSAGES/django.mo')\n\n\nsetup(\n name=\"wafer\",\n version=\"0.13.1a\",\n url='http://github.com/CTPUG/wafer',\n license='ISC',\n description=\"A wafer-thin Django library for running small conferences.\",\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n author='CTPUG',\n author_email='[email protected]',\n packages=find_packages(),\n include_package_data=True,\n install_requires=REQUIRES,\n dependency_links=SOURCES,\n data_files=[\n ('locale', compile_translations()),\n ],\n setup_requires=[\n # Add setuptools-git, so we get correct behaviour for\n # include_package_data\n 'setuptools_git >= 1.0',\n ],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: ISC License (ISCL)',\n 'Operating System :: POSIX',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Framework :: Django',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Internet :: WWW/HTTP',\n ],\n)\n",
"path": "setup.py"
}
] | [
{
"content": "from glob import glob\nimport subprocess\n\nfrom setuptools import find_packages, setup\n\nREQUIRES = [\n 'Django>=2.2,<4',\n 'bleach',\n 'bleach-allowlist',\n 'diff-match-patch',\n 'django-bakery>=0.13.0',\n 'django-crispy-forms',\n 'django-markitup>=4.0.0',\n 'django-registration-redux',\n 'django-reversion',\n 'django-select2',\n 'djangorestframework',\n 'drf-extensions>=0.5.0',\n 'icalendar>=4.0',\n 'jsonfield',\n 'markdown>=2.5',\n 'pillow',\n 'py3dns',\n 'pyLibravatar',\n 'pytz',\n 'requests',\n]\n\nSOURCES = []\n\n\nwith open('README.rst', 'r') as f:\n long_description = f.read()\n\n\ndef compile_translations():\n try:\n subprocess.check_call(['./manage.py', 'compilemessages'])\n except subprocess.CalledProcessError:\n print(\"WARNING: cannot compile translations.\")\n return glob('wafer/locale/*/LC_MESSAGES/django.mo')\n\n\nsetup(\n name=\"wafer\",\n version=\"0.13.1a\",\n url='http://github.com/CTPUG/wafer',\n license='ISC',\n description=\"A wafer-thin Django library for running small conferences.\",\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n author='CTPUG',\n author_email='[email protected]',\n packages=find_packages(),\n include_package_data=True,\n install_requires=REQUIRES,\n dependency_links=SOURCES,\n data_files=[\n ('locale', compile_translations()),\n ],\n setup_requires=[\n # Add setuptools-git, so we get correct behaviour for\n # include_package_data\n 'setuptools_git >= 1.0',\n ],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: ISC License (ISCL)',\n 'Operating System :: POSIX',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Framework :: Django',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Internet :: WWW/HTTP',\n ],\n)\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index 87913769..223b9c61 100644
--- a/setup.py
+++ b/setup.py
@@ -8,7 +8,7 @@
'bleach',
'bleach-allowlist',
'diff-match-patch',
- 'django-bakery>=0.12.0',
+ 'django-bakery>=0.13.0',
'django-crispy-forms',
'django-markitup>=4.0.0',
'django-registration-redux',
|
Nitrate__Nitrate-564 | Remove Django 2.0
Django 2.0 is no longer supported and is marked as insecure. Refer to https://docs.djangoproject.com/en/2.0/
| [
{
"content": "# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\ninstall_requires = [\n 'beautifulsoup4 >= 4.1.1',\n 'django >= 2.0,<3.0',\n 'django-contrib-comments == 1.9.1',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n 'html2text',\n 'odfpy >= 0.9.6',\n 'python-bugzilla',\n 'xmltodict',\n 'kobo == 0.9.0'\n]\n\nextras_require = {\n 'mysql': ['mysqlclient >= 1.2.3'],\n 'pgsql': ['psycopg2 == 2.7.5'],\n\n # Required for tcms.auth.backends.KerberosBackend\n 'krbauth': [\n 'kerberos == 1.2.5'\n ],\n\n # Packages for building documentation\n 'docs': [\n 'Sphinx >= 1.1.2',\n 'sphinx_rtd_theme',\n ],\n\n # Necessary packages for running tests\n 'tests': [\n 'beautifulsoup4',\n 'coverage',\n 'factory_boy',\n 'flake8',\n 'pytest',\n 'pytest-cov',\n 'pytest-django',\n ],\n\n # Contain tools that assists the development\n 'devtools': [\n 'django-debug-toolbar',\n 'tox',\n 'django-extensions',\n 'pygraphviz',\n ],\n\n # Required packages required to run async tasks\n 'async': [\n 'celery == 4.2.0',\n ],\n\n 'multiauth': [\n 'social-auth-app-django == 3.1.0',\n ]\n}\n\nsetup(\n name='nitrate-tcms',\n version=pkg_version,\n description='A full-featured Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n install_requires=install_requires,\n extras_require=extras_require,\n python_requires='>=3.6',\n package_dir={'': 'src'},\n packages=find_packages('src', exclude=['test*']),\n include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Framework :: Django',\n 'Framework :: Django :: 2.0',\n 'Framework :: Django :: 2.1',\n 'Framework :: Django :: 2.2',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n project_urls={\n 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues',\n 'Source Code': 'https://github.com/Nitrate/Nitrate',\n 'Documentation': 'https://nitrate.readthedocs.io/',\n },\n)\n",
"path": "setup.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\ninstall_requires = [\n 'beautifulsoup4 >= 4.1.1',\n 'django >= 2.1,<3.0',\n 'django-contrib-comments == 1.9.1',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n 'html2text',\n 'odfpy >= 0.9.6',\n 'python-bugzilla',\n 'xmltodict',\n 'kobo == 0.9.0'\n]\n\nextras_require = {\n 'mysql': ['mysqlclient >= 1.2.3'],\n 'pgsql': ['psycopg2 == 2.7.5'],\n\n # Required for tcms.auth.backends.KerberosBackend\n 'krbauth': [\n 'kerberos == 1.2.5'\n ],\n\n # Packages for building documentation\n 'docs': [\n 'Sphinx >= 1.1.2',\n 'sphinx_rtd_theme',\n ],\n\n # Necessary packages for running tests\n 'tests': [\n 'beautifulsoup4',\n 'coverage',\n 'factory_boy',\n 'flake8',\n 'pytest',\n 'pytest-cov',\n 'pytest-django',\n ],\n\n # Contain tools that assists the development\n 'devtools': [\n 'django-debug-toolbar',\n 'tox',\n 'django-extensions',\n 'pygraphviz',\n ],\n\n # Required packages required to run async tasks\n 'async': [\n 'celery == 4.2.0',\n ],\n\n 'multiauth': [\n 'social-auth-app-django == 3.1.0',\n ]\n}\n\nsetup(\n name='nitrate-tcms',\n version=pkg_version,\n description='A full-featured Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n install_requires=install_requires,\n extras_require=extras_require,\n python_requires='>=3.6',\n package_dir={'': 'src'},\n packages=find_packages('src', exclude=['test*']),\n include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Framework :: Django',\n 'Framework :: Django :: 2.0',\n 'Framework :: Django :: 2.1',\n 'Framework :: Django :: 2.2',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n project_urls={\n 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues',\n 'Source Code': 'https://github.com/Nitrate/Nitrate',\n 'Documentation': 'https://nitrate.readthedocs.io/',\n },\n)\n",
"path": "setup.py"
}
] | diff --git a/.travis.yml b/.travis.yml
index 18075323..2e06adea 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -4,11 +4,6 @@ services:
- docker
matrix:
include:
- # Django 2.0.x with Python 3.6 and 3.7
- - python: "3.6"
- env: DJANGO_REL="django>=2.0,<2.1"
- - python: "3.7"
- env: DJANGO_REL="django>=2.0,<2.1"
# Django 2.1.x with Python 3.6 and 3.7
# Also, run tests against Django 2.1.x, Python 3.6 and several database backends
- python: "3.6"
diff --git a/setup.py b/setup.py
index f0b2be6b..566a6623 100644
--- a/setup.py
+++ b/setup.py
@@ -14,7 +14,7 @@ def get_long_description():
install_requires = [
'beautifulsoup4 >= 4.1.1',
- 'django >= 2.0,<3.0',
+ 'django >= 2.1,<3.0',
'django-contrib-comments == 1.9.1',
'django-tinymce == 2.7.0',
'django-uuslug == 1.1.8',
|
pymodbus-dev__pymodbus-2065 | ModbusException 0x07 is missing in pdu.py
The ModbusException NegativeAcknowledge is missing from pdu.py. Is it possible to add NegativeAcknowledge = 0x07?
class ModbusExceptions:
IllegalFunction = 0x01
IllegalAddress = 0x02
IllegalValue = 0x03
SlaveFailure = 0x04
Acknowledge = 0x05
SlaveBusy = 0x06
MemoryParityError = 0x08
GatewayPathUnavailable = 0x0A
GatewayNoResponse = 0x0B
| [
{
"content": "\"\"\"Contains base classes for modbus request/response/error packets.\"\"\"\n\n__all__ = [\n \"ModbusRequest\",\n \"ModbusResponse\",\n \"ModbusExceptions\",\n \"ExceptionResponse\",\n \"IllegalFunctionRequest\",\n]\n\n# pylint: disable=missing-type-doc\nimport struct\n\nfrom pymodbus.exceptions import NotImplementedException\nfrom pymodbus.logging import Log\nfrom pymodbus.utilities import rtuFrameSize\n\n\n# --------------------------------------------------------------------------- #\n# Base PDUs\n# --------------------------------------------------------------------------- #\nclass ModbusPDU:\n \"\"\"Base class for all Modbus messages.\n\n .. attribute:: transaction_id\n\n This value is used to uniquely identify a request\n response pair. It can be implemented as a simple counter\n\n .. attribute:: protocol_id\n\n This is a constant set at 0 to indicate Modbus. It is\n put here for ease of expansion.\n\n .. attribute:: slave_id\n\n This is used to route the request to the correct child. In\n the TCP modbus, it is used for routing (or not used at all. However,\n for the serial versions, it is used to specify which child to perform\n the requests against. The value 0x00 represents the broadcast address\n (also 0xff).\n\n .. attribute:: check\n\n This is used for LRC/CRC in the serial modbus protocols\n\n .. attribute:: skip_encode\n\n This is used when the message payload has already been encoded.\n Generally this will occur when the PayloadBuilder is being used\n to create a complicated message. By setting this to True, the\n request will pass the currently encoded message through instead\n of encoding it again.\n \"\"\"\n\n def __init__(self, slave=0, **kwargs):\n \"\"\"Initialize the base data for a modbus request.\n\n :param slave: Modbus slave slave ID\n\n \"\"\"\n self.transaction_id = kwargs.get(\"transaction\", 0)\n self.protocol_id = kwargs.get(\"protocol\", 0)\n self.slave_id = slave\n self.skip_encode = kwargs.get(\"skip_encode\", False)\n self.check = 0x0000\n\n def encode(self):\n \"\"\"Encode the message.\n\n :raises: A not implemented exception\n \"\"\"\n raise NotImplementedException()\n\n def decode(self, data):\n \"\"\"Decode data part of the message.\n\n :param data: is a string object\n :raises NotImplementedException:\n \"\"\"\n raise NotImplementedException()\n\n @classmethod\n def calculateRtuFrameSize(cls, buffer):\n \"\"\"Calculate the size of a PDU.\n\n :param buffer: A buffer containing the data that have been received.\n :returns: The number of bytes in the PDU.\n :raises NotImplementedException:\n \"\"\"\n if hasattr(cls, \"_rtu_frame_size\"):\n return cls._rtu_frame_size\n if hasattr(cls, \"_rtu_byte_count_pos\"):\n return rtuFrameSize(buffer, cls._rtu_byte_count_pos)\n raise NotImplementedException(\n f\"Cannot determine RTU frame size for {cls.__name__}\"\n )\n\n\nclass ModbusRequest(ModbusPDU):\n \"\"\"Base class for a modbus request PDU.\"\"\"\n\n function_code = -1\n\n def __init__(self, slave=0, **kwargs): # pylint: disable=useless-parent-delegation\n \"\"\"Proxy to the lower level initializer.\n\n :param slave: Modbus slave slave ID\n \"\"\"\n super().__init__(slave, **kwargs)\n\n def doException(self, exception):\n \"\"\"Build an error response based on the function.\n\n :param exception: The exception to return\n :raises: An exception response\n \"\"\"\n exc = ExceptionResponse(self.function_code, exception)\n Log.error(\"Exception response {}\", exc)\n return exc\n\n\nclass ModbusResponse(ModbusPDU):\n \"\"\"Base class for a modbus 
response PDU.\n\n .. attribute:: should_respond\n\n A flag that indicates if this response returns a result back\n to the client issuing the request\n\n .. attribute:: _rtu_frame_size\n\n Indicates the size of the modbus rtu response used for\n calculating how much to read.\n \"\"\"\n\n should_respond = True\n function_code = 0x00\n\n def __init__(self, slave=0, **kwargs):\n \"\"\"Proxy the lower level initializer.\n\n :param slave: Modbus slave slave ID\n\n \"\"\"\n super().__init__(slave, **kwargs)\n self.bits = []\n self.registers = []\n\n def isError(self) -> bool:\n \"\"\"Check if the error is a success or failure.\"\"\"\n return self.function_code > 0x80\n\n\n# --------------------------------------------------------------------------- #\n# Exception PDUs\n# --------------------------------------------------------------------------- #\nclass ModbusExceptions: # pylint: disable=too-few-public-methods\n \"\"\"An enumeration of the valid modbus exceptions.\"\"\"\n\n IllegalFunction = 0x01\n IllegalAddress = 0x02\n IllegalValue = 0x03\n SlaveFailure = 0x04\n Acknowledge = 0x05\n SlaveBusy = 0x06\n MemoryParityError = 0x08\n GatewayPathUnavailable = 0x0A\n GatewayNoResponse = 0x0B\n\n @classmethod\n def decode(cls, code):\n \"\"\"Give an error code, translate it to a string error name.\n\n :param code: The code number to translate\n \"\"\"\n values = {\n v: k\n for k, v in iter(cls.__dict__.items())\n if not k.startswith(\"__\") and not callable(v)\n }\n return values.get(code, None)\n\n\nclass ExceptionResponse(ModbusResponse):\n \"\"\"Base class for a modbus exception PDU.\"\"\"\n\n ExceptionOffset = 0x80\n _rtu_frame_size = 5\n\n def __init__(self, function_code, exception_code=None, **kwargs):\n \"\"\"Initialize the modbus exception response.\n\n :param function_code: The function to build an exception response for\n :param exception_code: The specific modbus exception to return\n \"\"\"\n super().__init__(**kwargs)\n self.original_code = function_code\n self.function_code = function_code | self.ExceptionOffset\n self.exception_code = exception_code\n\n def encode(self):\n \"\"\"Encode a modbus exception response.\n\n :returns: The encoded exception packet\n \"\"\"\n return struct.pack(\">B\", self.exception_code)\n\n def decode(self, data):\n \"\"\"Decode a modbus exception response.\n\n :param data: The packet data to decode\n \"\"\"\n self.exception_code = int(data[0])\n\n def __str__(self):\n \"\"\"Build a representation of an exception response.\n\n :returns: The string representation of an exception response\n \"\"\"\n message = ModbusExceptions.decode(self.exception_code)\n parameters = (self.function_code, self.original_code, message)\n return (\n \"Exception Response(%d, %d, %s)\" # pylint: disable=consider-using-f-string\n % parameters\n )\n\n\nclass IllegalFunctionRequest(ModbusRequest):\n \"\"\"Define the Modbus slave exception type \"Illegal Function\".\n\n This exception code is returned if the slave::\n\n - does not implement the function code **or**\n - is not in a state that allows it to process the function\n \"\"\"\n\n ErrorCode = 1\n\n def __init__(self, function_code, **kwargs):\n \"\"\"Initialize a IllegalFunctionRequest.\n\n :param function_code: The function we are erroring on\n \"\"\"\n super().__init__(**kwargs)\n self.function_code = function_code\n\n def decode(self, _data):\n \"\"\"Decode so this failure will run correctly.\"\"\"\n\n def execute(self, _context):\n \"\"\"Build an illegal function request error response.\n\n :returns: The error response 
packet\n \"\"\"\n return ExceptionResponse(self.function_code, self.ErrorCode)\n",
"path": "pymodbus/pdu.py"
}
] | [
{
"content": "\"\"\"Contains base classes for modbus request/response/error packets.\"\"\"\n\n__all__ = [\n \"ModbusRequest\",\n \"ModbusResponse\",\n \"ModbusExceptions\",\n \"ExceptionResponse\",\n \"IllegalFunctionRequest\",\n]\n\n# pylint: disable=missing-type-doc\nimport struct\n\nfrom pymodbus.exceptions import NotImplementedException\nfrom pymodbus.logging import Log\nfrom pymodbus.utilities import rtuFrameSize\n\n\n# --------------------------------------------------------------------------- #\n# Base PDUs\n# --------------------------------------------------------------------------- #\nclass ModbusPDU:\n \"\"\"Base class for all Modbus messages.\n\n .. attribute:: transaction_id\n\n This value is used to uniquely identify a request\n response pair. It can be implemented as a simple counter\n\n .. attribute:: protocol_id\n\n This is a constant set at 0 to indicate Modbus. It is\n put here for ease of expansion.\n\n .. attribute:: slave_id\n\n This is used to route the request to the correct child. In\n the TCP modbus, it is used for routing (or not used at all. However,\n for the serial versions, it is used to specify which child to perform\n the requests against. The value 0x00 represents the broadcast address\n (also 0xff).\n\n .. attribute:: check\n\n This is used for LRC/CRC in the serial modbus protocols\n\n .. attribute:: skip_encode\n\n This is used when the message payload has already been encoded.\n Generally this will occur when the PayloadBuilder is being used\n to create a complicated message. By setting this to True, the\n request will pass the currently encoded message through instead\n of encoding it again.\n \"\"\"\n\n def __init__(self, slave=0, **kwargs):\n \"\"\"Initialize the base data for a modbus request.\n\n :param slave: Modbus slave slave ID\n\n \"\"\"\n self.transaction_id = kwargs.get(\"transaction\", 0)\n self.protocol_id = kwargs.get(\"protocol\", 0)\n self.slave_id = slave\n self.skip_encode = kwargs.get(\"skip_encode\", False)\n self.check = 0x0000\n\n def encode(self):\n \"\"\"Encode the message.\n\n :raises: A not implemented exception\n \"\"\"\n raise NotImplementedException()\n\n def decode(self, data):\n \"\"\"Decode data part of the message.\n\n :param data: is a string object\n :raises NotImplementedException:\n \"\"\"\n raise NotImplementedException()\n\n @classmethod\n def calculateRtuFrameSize(cls, buffer):\n \"\"\"Calculate the size of a PDU.\n\n :param buffer: A buffer containing the data that have been received.\n :returns: The number of bytes in the PDU.\n :raises NotImplementedException:\n \"\"\"\n if hasattr(cls, \"_rtu_frame_size\"):\n return cls._rtu_frame_size\n if hasattr(cls, \"_rtu_byte_count_pos\"):\n return rtuFrameSize(buffer, cls._rtu_byte_count_pos)\n raise NotImplementedException(\n f\"Cannot determine RTU frame size for {cls.__name__}\"\n )\n\n\nclass ModbusRequest(ModbusPDU):\n \"\"\"Base class for a modbus request PDU.\"\"\"\n\n function_code = -1\n\n def __init__(self, slave=0, **kwargs): # pylint: disable=useless-parent-delegation\n \"\"\"Proxy to the lower level initializer.\n\n :param slave: Modbus slave slave ID\n \"\"\"\n super().__init__(slave, **kwargs)\n\n def doException(self, exception):\n \"\"\"Build an error response based on the function.\n\n :param exception: The exception to return\n :raises: An exception response\n \"\"\"\n exc = ExceptionResponse(self.function_code, exception)\n Log.error(\"Exception response {}\", exc)\n return exc\n\n\nclass ModbusResponse(ModbusPDU):\n \"\"\"Base class for a modbus 
response PDU.\n\n .. attribute:: should_respond\n\n A flag that indicates if this response returns a result back\n to the client issuing the request\n\n .. attribute:: _rtu_frame_size\n\n Indicates the size of the modbus rtu response used for\n calculating how much to read.\n \"\"\"\n\n should_respond = True\n function_code = 0x00\n\n def __init__(self, slave=0, **kwargs):\n \"\"\"Proxy the lower level initializer.\n\n :param slave: Modbus slave slave ID\n\n \"\"\"\n super().__init__(slave, **kwargs)\n self.bits = []\n self.registers = []\n\n def isError(self) -> bool:\n \"\"\"Check if the error is a success or failure.\"\"\"\n return self.function_code > 0x80\n\n\n# --------------------------------------------------------------------------- #\n# Exception PDUs\n# --------------------------------------------------------------------------- #\nclass ModbusExceptions: # pylint: disable=too-few-public-methods\n \"\"\"An enumeration of the valid modbus exceptions.\"\"\"\n\n IllegalFunction = 0x01\n IllegalAddress = 0x02\n IllegalValue = 0x03\n SlaveFailure = 0x04\n Acknowledge = 0x05\n SlaveBusy = 0x06\n NegativeAcknowledge = 0x07\n MemoryParityError = 0x08\n GatewayPathUnavailable = 0x0A\n GatewayNoResponse = 0x0B\n\n @classmethod\n def decode(cls, code):\n \"\"\"Give an error code, translate it to a string error name.\n\n :param code: The code number to translate\n \"\"\"\n values = {\n v: k\n for k, v in iter(cls.__dict__.items())\n if not k.startswith(\"__\") and not callable(v)\n }\n return values.get(code, None)\n\n\nclass ExceptionResponse(ModbusResponse):\n \"\"\"Base class for a modbus exception PDU.\"\"\"\n\n ExceptionOffset = 0x80\n _rtu_frame_size = 5\n\n def __init__(self, function_code, exception_code=None, **kwargs):\n \"\"\"Initialize the modbus exception response.\n\n :param function_code: The function to build an exception response for\n :param exception_code: The specific modbus exception to return\n \"\"\"\n super().__init__(**kwargs)\n self.original_code = function_code\n self.function_code = function_code | self.ExceptionOffset\n self.exception_code = exception_code\n\n def encode(self):\n \"\"\"Encode a modbus exception response.\n\n :returns: The encoded exception packet\n \"\"\"\n return struct.pack(\">B\", self.exception_code)\n\n def decode(self, data):\n \"\"\"Decode a modbus exception response.\n\n :param data: The packet data to decode\n \"\"\"\n self.exception_code = int(data[0])\n\n def __str__(self):\n \"\"\"Build a representation of an exception response.\n\n :returns: The string representation of an exception response\n \"\"\"\n message = ModbusExceptions.decode(self.exception_code)\n parameters = (self.function_code, self.original_code, message)\n return (\n \"Exception Response(%d, %d, %s)\" # pylint: disable=consider-using-f-string\n % parameters\n )\n\n\nclass IllegalFunctionRequest(ModbusRequest):\n \"\"\"Define the Modbus slave exception type \"Illegal Function\".\n\n This exception code is returned if the slave::\n\n - does not implement the function code **or**\n - is not in a state that allows it to process the function\n \"\"\"\n\n ErrorCode = 1\n\n def __init__(self, function_code, **kwargs):\n \"\"\"Initialize a IllegalFunctionRequest.\n\n :param function_code: The function we are erroring on\n \"\"\"\n super().__init__(**kwargs)\n self.function_code = function_code\n\n def decode(self, _data):\n \"\"\"Decode so this failure will run correctly.\"\"\"\n\n def execute(self, _context):\n \"\"\"Build an illegal function request error response.\n\n 
:returns: The error response packet\n \"\"\"\n return ExceptionResponse(self.function_code, self.ErrorCode)\n",
"path": "pymodbus/pdu.py"
}
] | diff --git a/pymodbus/pdu.py b/pymodbus/pdu.py
index d73b841cd..64c48b1e1 100644
--- a/pymodbus/pdu.py
+++ b/pymodbus/pdu.py
@@ -164,6 +164,7 @@ class ModbusExceptions: # pylint: disable=too-few-public-methods
SlaveFailure = 0x04
Acknowledge = 0x05
SlaveBusy = 0x06
+ NegativeAcknowledge = 0x07
MemoryParityError = 0x08
GatewayPathUnavailable = 0x0A
GatewayNoResponse = 0x0B
|
wandb__wandb-424 | Install issue on DLAMI images, conflict with PyYAML
wandb has a dependency conflict when installing on AWS Deep Learning images -- DLAMI v23
You can get around it with 'pip install wandb --ignore-installed', but perhaps wandb could also relax the PyYAML version requirement to make life easier (i.e., I can't put wandb in requirements.txt because of this)
```
(pytorch_p36) ubuntu@ip-172-31-28-233:~$ pip install wandb
Collecting wandb
Using cached https://files.pythonhosted.org/packages/6a/d1/af8371f39d9383f4f1e9ba76c8894f75c01d5eddf4ec57bd45952fefab74/wandb-0.8.3-py2.py3-none-any.whl
Collecting watchdog>=0.8.3 (from wandb)
Requirement already satisfied: psutil>=5.0.0 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from wandb) (5.4.5)
Collecting backports.tempfile>=1.0 (from wandb)
Using cached https://files.pythonhosted.org/packages/b4/5c/077f910632476281428fe254807952eb47ca78e720d059a46178c541e669/backports.tempfile-1.0-py2.py3-none-any.whl
Requirement already satisfied: requests>=2.0.0 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from wandb) (2.20.0)
Requirement already satisfied: sentry-sdk>=0.4.0 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from wandb) (0.9.5)
Requirement already satisfied: six>=1.10.0 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from wandb) (1.11.0)
Collecting shortuuid>=0.5.0 (from wandb)
Collecting gql>=0.1.0 (from wandb)
Requirement already satisfied: subprocess32>=3.5.3 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from wandb) (3.5.4)
Collecting GitPython>=1.0.0 (from wandb)
Using cached https://files.pythonhosted.org/packages/fe/e5/fafe827507644c32d6dc553a1c435cdf882e0c28918a5bab29f7fbebfb70/GitPython-2.1.11-py2.py3-none-any.whl
Requirement already satisfied: docker-pycreds>=0.4.0 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from wandb) (0.4.0)
Requirement already satisfied: nvidia-ml-py3>=7.352.0 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from wandb) (7.352.0)
Requirement already satisfied: Click>=7.0 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from wandb) (7.0)
Requirement already satisfied: python-dateutil>=2.6.1 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from wandb) (2.7.3)
Collecting PyYAML>=4.2b4 (from wandb)
Requirement already satisfied: argh>=0.24.1 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from watchdog>=0.8.3->wandb) (0.26.2)
Collecting pathtools>=0.1.1 (from watchdog>=0.8.3->wandb)
Collecting backports.weakref (from backports.tempfile>=1.0->wandb)
Using cached https://files.pythonhosted.org/packages/88/ec/f598b633c3d5ffe267aaada57d961c94fdfa183c5c3ebda2b6d151943db6/backports.weakref-1.0.post1-py2.py3-none-any.whl
Requirement already satisfied: urllib3<1.25,>=1.21.1 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from requests>=2.0.0->wandb) (1.23)
Requirement already satisfied: certifi>=2017.4.17 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from requests>=2.0.0->wandb) (2019.3.9)
Requirement already satisfied: idna<2.8,>=2.5 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from requests>=2.0.0->wandb) (2.6)
Requirement already satisfied: chardet<3.1.0,>=3.0.2 in ./anaconda3/envs/pytorch_p36/lib/python3.6/site-packages (from requests>=2.0.0->wandb) (3.0.4)
Collecting graphql-core>=0.5.0 (from gql>=0.1.0->wandb)
Using cached https://files.pythonhosted.org/packages/f1/88/a4a7bf8ab66c35b146e44d77a1f9fd2c36e0ec9fb1a51581608c16deb6e3/graphql_core-2.2-py2.py3-none-any.whl
Collecting promise>=0.4.0 (from gql>=0.1.0->wandb)
Collecting gitdb2>=2.0.0 (from GitPython>=1.0.0->wandb)
Using cached https://files.pythonhosted.org/packages/da/30/a407568aa8d8f25db817cf50121a958722f3fc5f87e3a6fba1f40c0633e3/gitdb2-2.0.5-py2.py3-none-any.whl
Collecting rx>=1.6.0 (from graphql-core>=0.5.0->gql>=0.1.0->wandb)
Using cached https://files.pythonhosted.org/packages/33/0f/5ef4ac78e2a538cc1b054eb86285fe0bf7a5dbaeaac2c584757c300515e2/Rx-1.6.1-py2.py3-none-any.whl
Collecting smmap2>=2.0.0 (from gitdb2>=2.0.0->GitPython>=1.0.0->wandb)
Using cached https://files.pythonhosted.org/packages/55/d2/866d45e3a121ee15a1dc013824d58072fd5c7799c9c34d01378eb262ca8f/smmap2-2.0.5-py2.py3-none-any.whl
thinc 6.12.1 has requirement msgpack<0.6.0,>=0.5.6, but you'll have msgpack 0.6.0 which is incompatible.
tensorflow 1.13.1 has requirement protobuf>=3.6.1, but you'll have protobuf 3.5.2 which is incompatible.
tensorboard 1.13.1 has requirement protobuf>=3.6.0, but you'll have protobuf 3.5.2 which is incompatible.
docker-compose 1.24.0 has requirement PyYAML<4.3,>=3.10, but you'll have pyyaml 5.1.1 which is incompatible.
Installing collected packages: PyYAML, pathtools, watchdog, backports.weakref, backports.tempfile, shortuuid, rx, promise, graphql-core, gql, smmap2, gitdb2, GitPython, wandb
Found existing installation: PyYAML 3.12
Cannot uninstall 'PyYAML'. It is a distutils installed project and thus we cannot accurately determine which files belong to it which would lead to only a partial uninstall.
You are using pip version 10.0.1, however version 19.1.1 is available.
You should consider upgrading via the 'pip install --upgrade pip' command.
(pytorch_p36) ubuntu@ip-172-31-28-233:~$ echo $?
```
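
For what it's worth, here is a hedged sketch of what the suggested relaxation could look like in wandb's `setup.py` (only the relevant slice of the requirements list is shown; this is an illustration, not the actual patch). Dropping the explicit PyYAML pin means pip no longer needs to uninstall the distutils-installed PyYAML 3.12 that ships with the DLAMI conda environments, while watchdog still declares its own PyYAML dependency:

```python
# Illustration only: the requirements list without the hard PyYAML pin.
# watchdog already depends on PyYAML, so removing the explicit entry lets pip
# keep the PyYAML that is already present instead of trying to replace it.
requirements = [
    'watchdog>=0.8.3',
    # 'PyYAML>=4.2b4',  # dropped: this pin forced pip to uninstall the system PyYAML
    'psutil>=5.0.0',
    'sentry-sdk>=0.4.0',
]
```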
| [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom setuptools import setup\n\nwith open('README.md') as readme_file:\n readme = readme_file.read()\n\nrequirements = [\n 'backports.tempfile>=1.0',\n 'Click>=7.0',\n 'GitPython>=1.0.0',\n 'gql>=0.1.0',\n 'nvidia-ml-py3>=7.352.0',\n 'python-dateutil>=2.6.1',\n 'requests>=2.0.0',\n 'shortuuid>=0.5.0',\n 'six>=1.10.0',\n 'watchdog>=0.8.3',\n 'PyYAML>=4.2b4', # watchdog depends on pyyaml but doesnt specify safe version\n 'psutil>=5.0.0',\n 'sentry-sdk>=0.4.0',\n 'subprocess32>=3.5.3',\n 'docker-pycreds>=0.4.0',\n # Removed until we bring back the board\n # 'flask-cors>=3.0.3',\n # 'flask-graphql>=1.4.0',\n # 'graphene>=2.0.0',\n]\n\ntest_requirements = [\n 'mock>=2.0.0',\n 'tox-pyenv>=1.0.3'\n]\n\nkubeflow_requirements = ['kubernetes', 'minio', 'google-cloud-storage', 'sh']\n\nsetup(\n name='wandb',\n version='0.8.4',\n description=\"A CLI and library for interacting with the Weights and Biases API.\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n author=\"Weights & Biases\",\n author_email='[email protected]',\n url='https://github.com/wandb/client',\n packages=[\n 'wandb'\n ],\n package_dir={'wandb': 'wandb'},\n entry_points={\n 'console_scripts': [\n 'wandb=wandb.cli:cli',\n 'wb=wandb.cli:cli',\n 'wanbd=wandb.cli:cli',\n 'wandb-docker-run=wandb.cli:docker_run'\n ]\n },\n include_package_data=True,\n install_requires=requirements,\n license=\"MIT license\",\n zip_safe=False,\n keywords='wandb',\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: System :: Logging',\n 'Topic :: System :: Monitoring'\n ],\n test_suite='tests',\n tests_require=test_requirements,\n extras_require={\n 'kubeflow': kubeflow_requirements\n }\n)\n",
"path": "setup.py"
}
] | [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom setuptools import setup\n\nwith open('README.md') as readme_file:\n readme = readme_file.read()\n\nrequirements = [\n 'backports.tempfile>=1.0',\n 'Click>=7.0',\n 'GitPython>=1.0.0',\n 'gql>=0.1.0',\n 'nvidia-ml-py3>=7.352.0',\n 'python-dateutil>=2.6.1',\n 'requests>=2.0.0',\n 'shortuuid>=0.5.0',\n 'six>=1.10.0',\n 'watchdog>=0.8.3',\n 'psutil>=5.0.0',\n 'sentry-sdk>=0.4.0',\n 'subprocess32>=3.5.3',\n 'docker-pycreds>=0.4.0',\n # Removed until we bring back the board\n # 'flask-cors>=3.0.3',\n # 'flask-graphql>=1.4.0',\n # 'graphene>=2.0.0',\n]\n\ntest_requirements = [\n 'mock>=2.0.0',\n 'tox-pyenv>=1.0.3'\n]\n\nkubeflow_requirements = ['kubernetes', 'minio', 'google-cloud-storage', 'sh']\n\nsetup(\n name='wandb',\n version='0.8.4',\n description=\"A CLI and library for interacting with the Weights and Biases API.\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n author=\"Weights & Biases\",\n author_email='[email protected]',\n url='https://github.com/wandb/client',\n packages=[\n 'wandb'\n ],\n package_dir={'wandb': 'wandb'},\n entry_points={\n 'console_scripts': [\n 'wandb=wandb.cli:cli',\n 'wb=wandb.cli:cli',\n 'wanbd=wandb.cli:cli',\n 'wandb-docker-run=wandb.cli:docker_run'\n ]\n },\n include_package_data=True,\n install_requires=requirements,\n license=\"MIT license\",\n zip_safe=False,\n keywords='wandb',\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: System :: Logging',\n 'Topic :: System :: Monitoring'\n ],\n test_suite='tests',\n tests_require=test_requirements,\n extras_require={\n 'kubeflow': kubeflow_requirements\n }\n)\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index 4f157098539..9613dc19495 100644
--- a/setup.py
+++ b/setup.py
@@ -17,7 +17,6 @@
'shortuuid>=0.5.0',
'six>=1.10.0',
'watchdog>=0.8.3',
- 'PyYAML>=4.2b4', # watchdog depends on pyyaml but doesnt specify safe version
'psutil>=5.0.0',
'sentry-sdk>=0.4.0',
'subprocess32>=3.5.3',
|
ceph__ceph-ansible-3614 | Python3 seems to break TASK [ceph-mon : create monitor initial keyring]
**Bug Report**
# What happened:
Using stable-3.2 to control Fedora ARM 29 nodes: when I use Python3 on those ARM nodes, the firewall gets set up as expected but I get a failure on `TASK [ceph-mon : create monitor initial keyring]`.
To be able to run a copy of `site.yml.sample`, I have to use the default of Python2 on those Fedora ARM 29 nodes and can thus not configure the firewall (it is not ceph-ansible's problem that F29 offers no python2-firewall).
## details with Python3
While `ansible_python_interpreter=/usr/bin/python3` allows me to configure the firewall (`configure_firewall: True`), it fails on `TASK [ceph-mon : create monitor initial keyring]`:
```
TASK [ceph-mon : create monitor initial keyring] ****************************************************************************************
Saturday 02 February 2019 13:22:05 +0100 (0:00:00.578) 0:03:51.103 *****
An exception occurred during task execution. To see the full traceback, use -vvv. The error was: TypeError: rstrip arg must be None or str
fatal: [odroid-hc2-00]: FAILED! => {"changed": false, "module_stderr": "Traceback (most recent call last):\n File \"/tmp/ansible_wt9j1z5d/ansible_module_ceph_key.py\", line 697, in <module>\n main()\n File \"/tmp/ansible_wt9j1z5d/ansible_module_ceph_key.py\", line 693, in main\n run_module()\n File \"/tmp/ansible_wt9j1z5d/ansible_module_ceph_key.py\", line 681, in run_module\n stdout=out.rstrip(b\"\\r\\n\"),\nTypeError: rstrip arg must be None or str\n", "module_stdout": "", "msg": "MODULE FAILURE", "rc": 1}
An exception occurred during task execution. To see the full traceback, use -vvv. The error was: TypeError: rstrip arg must be None or str
fatal: [odroid-hc2-02]: FAILED! => {"changed": false, "module_stderr": "Traceback (most recent call last):\n File \"/tmp/ansible_fvc_9har/ansible_module_ceph_key.py\", line 697, in <module>\n main()\n File \"/tmp/ansible_fvc_9har/ansible_module_ceph_key.py\", line 693, in main\n run_module()\n File \"/tmp/ansible_fvc_9har/ansible_module_ceph_key.py\", line 681, in run_module\n stdout=out.rstrip(b\"\\r\\n\"),\nTypeError: rstrip arg must be None or str\n", "module_stdout": "", "msg": "MODULE FAILURE", "rc": 1}
An exception occurred during task execution. To see the full traceback, use -vvv. The error was: TypeError: rstrip arg must be None or str
fatal: [odroid-hc2-01]: FAILED! => {"changed": false, "module_stderr": "Traceback (most recent call last):\n File \"/tmp/ansible_77ptji0m/ansible_module_ceph_key.py\", line 697, in <module>\n main()\n File \"/tmp/ansible_77ptji0m/ansible_module_ceph_key.py\", line 693, in main\n run_module()\n File \"/tmp/ansible_77ptji0m/ansible_module_ceph_key.py\", line 681, in run_module\n stdout=out.rstrip(b\"\\r\\n\"),\nTypeError: rstrip arg must be None or str\n", "module_stdout": "", "msg": "MODULE FAILURE", "rc": 1}
```
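The traceback itself looks like a plain Python 3 str/bytes mismatch rather than anything ARM-specific: with default arguments `module.run_command()` hands the module text (str) output, and under Python 3 `str.rstrip()` refuses a bytes argument such as `b"\r\n"`. A minimal sketch (illustration only, not ceph-ansible code):

```python
# Reproduces the TypeError from the traceback outside of Ansible.
out = "imported keyring\r\n"        # run_command() returns text (str) by default

try:
    out.rstrip(b"\r\n")             # what library/ceph_key.py currently does
except TypeError as exc:
    print(exc)                      # Python 3: "rstrip arg must be None or str"

print(repr(out.rstrip("\r\n")))     # a str argument works on Python 2 and Python 3
```

So presumably dropping the `b` prefix (or decoding the output first) would make the module Python3-clean; I have not tested such a patch myself.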
## note on Python2
Without overriding the `ansible_python_interpreter`, I must set `configure_firewall: False` as there is no `python2-firewall.noarch` for Fedora 29. A copy of `site.yml.sample` runs through just fine with Python2 and I get a working cluster. Obviously I need to deal with the firewall myself.
```bash
[root@odroid-hc2-00 ~]# ceph -s
cluster:
id: d4fe8da4-bad1-4564-bfaa-358e1ab8e02c
health: HEALTH_OK
services:
mon: 3 daemons, quorum odroid-hc2-00,odroid-hc2-01,odroid-hc2-02
mgr: odroid-hc2-00(active), standbys: odroid-hc2-02, odroid-hc2-01
osd: 5 osds: 5 up, 5 in
data:
pools: 0 pools, 0 pgs
objects: 0 objects, 0B
usage: 5.01GiB used, 8.18TiB / 8.19TiB avail
pgs:
```
I verified with `ansible -m setup odroid-hc2-00|less` that Python 2 gets used in that case. `2.7.15` to be precise.
# What you expected to happen:
Being able to have `ceph-ansible` set up the firewall on Fedora 29 nodes, ideally by being able to use `ansible_python_interpreter=/usr/bin/python3` (allowing the ansible firewall module to be used).
# How to reproduce it (minimal and precise):
1. Have a RHEL 7 x86_64 machine to run `ceph-ansible`. Be it `ceph-ansible-3.2.4-1.el7cp.noarch` or branch `stable-3.2` from origin `[email protected]:ceph/ceph-ansible.git`; I can reproduce the problem with both. (While I could have run ceph-ansible from one of the Fedora ARM 29 nodes, using a RHSM-registered RHEL7 VM simply made it easy for me to `yum install ceph-ansible`.)
2. Have 5 OSD hosts, one disk each, running Fedora ARM 29 (mine are ODROID-HC2, sadly no RHEL7 for that platform)
3. `cp site.yml.sample site.yml`
4. `ansible-playbook site.yml`
# Share your group_vars files, inventory
This is my play cluster while learning Ceph, so there are `ceph_conf_overrides`, silly small journal sizes, etc.; don't mind those.
```bash
[ansible@ceph-ansible-rhel7 ceph-ansible]$ pwd
/usr/share/ceph-ansible
[ansible@ceph-ansible-rhel7 ceph-ansible]$ rpm -qf /usr/share/ceph-ansible
ceph-ansible-3.2.4-1.el7cp.noarch
```
`/etc/ansible/hosts` is as follows; obviously I toggle the `ansible_python_interpreter=…` line on or off while reproducing for this bug report. And yes, I just noticed I set the ansible_user needlessly twice ;-)
```ini
[ceph-arm-nodes]
odroid-hc2-[00:04]
[ceph-arm-nodes:vars]
ansible_user=ansible
#ansible_python_interpreter=/usr/bin/python3
[ceph-housenet]
ceph-ansible-rhel7
odroid-hc2-[00:04]
[ceph-housenet:vars]
ansible_user=ansible
[mons]
odroid-hc2-[00:02]
# MGRs are typically collocated with MONs
[mgrs]
odroid-hc2-[00:02]
[osds]
odroid-hc2-[00:04]
[clients]
ceph-ansible-rhel7
odroid-hc2-00
```
```bash
[ansible@ceph-ansible-rhel7 group_vars]$ diff all.yml all.yml.sample
45c45
< cluster: ceph
---
> #cluster: ceph
63d62
< #configure_firewall: False
110d108
< ntp_daemon_type: chronyd
139c137
< ceph_origin: distro
---
> ceph_origin: repository
197d194
< ceph_repository_type: cdn
301d297
< rbd_cache_writethrough_until_flush: "false"
305d300
< rbd_client_directories: false # as per CEPH125-RHCS3.0-en-1-20180517 pages 45 and 60
350,351d344
< monitor_interface: eth0
<
374d366
< journal_size: 1024 # As per CEPH125-RHCS3.0-en-1-20180517 page 45
377,378c369
< public_network: 192.168.50.0/24 # HouseNet
< cluster_network: "{{ public_network | regex_replace(' ', '') }}"
---
> #cluster_network: "{{ public_network | regex_replace(' ', '') }}"
528,537d518
< # Overrides from CEPH125-RHCS3.0-en-1-20180517
< ceph_conf_overrides:
< global:
< mon_osd_allow_primary_affinity: 1
< mon_clock_drift_allowed: 0.5
< mon_pg_warn_min_per_osd: 0
< mon_allow_pool_delete: true
< client:
< rbd_default_features: 1
<
585a567,570
>
> # this is only here for usage with the switch-from-non-containerized-to-containerized-ceph-daemons.yml playbook
> # do not ever change this here
> #switch_to_container: false
```
```bash
[ansible@ceph-ansible-rhel7 ceph-ansible]$ diff /usr/share/ceph-ansible/group_vars/osds.yml.sample /usr/share/ceph-ansible/group_vars/osds.yml
22a23
> copy_admin_key: true
46a48,49
> devices:
> - /dev/sda
61a65
> dmcrypt: True
89a94
> osd_scenario: non-collocated # collocated was as per CEPH125-RHCS3.0-en-1-20180517 page 36, this is for my fiddlings
131,133c136,137
< # - The devices in 'dedicated_devices' will get one partition for RocksDB DB, called 'block.db'
< # and one for RocksDB WAL, called 'block.wal'. To use a single partition for RocksDB and WAL together
< # set bluestore_wal_devices to [].
---
> # - The devices in 'dedicated_devices' will get 1 partition for RocksDB DB, called 'block.db'
> # and one for RocksDB WAL, called 'block.wal'
147a152,153
> dedicated_devices:
> - /dev/mmcblk0
156,157d161
< #
< # Set bluestore_wal_devices: [] to use the same partition for RocksDB and WAL.
```
```bash
[ansible@ceph-ansible-rhel7 ceph-ansible]$ diff /usr/share/ceph-ansible/group_vars/clients.yml.sample /usr/share/ceph-ansible/group_vars/clients.yml
18a19
> copy_admin_key: true
```
# Environment details
**Environment of RHEL7 x86_64 VM running `ceph-ansible`**:
* OS (e.g. from /etc/os-release): Red Hat Enterprise Linux Server release 7.6 (Maipo)
* Kernel (e.g. `uname -a`): Linux ceph-ansible-rhel7.internal.pcfe.net 3.10.0-862.el7.x86_64 #1 SMP Wed Mar 21 18:14:51 EDT 2018 x86_64 x86_64 x86_64 GNU/Linux
* Docker version if applicable (e.g. `docker version`): n/a
* Ansible version (e.g. `ansible-playbook --version`): ansible-playbook 2.6.12
config file = /usr/share/ceph-ansible/ansible.cfg
configured module search path = [u'/home/ansible/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python2.7/site-packages/ansible
executable location = /usr/bin/ansible-playbook
python version = 2.7.5 (default, Sep 12 2018, 05:31:16) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)]
* ceph-ansible version (e.g. `git head or tag or stable branch`): ceph-ansible-3.2.4-1.el7cp.noarch and `stable-3.2` from git both allow me to reproduce the problem
* Ceph version (e.g. `ceph -v`): ceph version 12.2.8-52.el7cp (3af3ca15b68572a357593c261f95038d02f46201) luminous (stable)
**Environment of Fedora ARM 29 OSD nodes**:
* OS (e.g. from /etc/os-release): Fedora release 29 (Twenty Nine)
* Kernel (e.g. `uname -a`): Linux odroid-hc2-00.fritz.box 4.20.3-200.fc29.armv7hl #1 SMP Thu Jan 17 17:09:08 UTC 2019 armv7l armv7l armv7l GNU/Linux
* Docker version if applicable (e.g. `docker version`): n/a
* Ansible version (e.g. `ansible-playbook --version`): ansible-playbook 2.7.5
`-m setup` run on the RHEL7 box:
```
"ansible_python": {
"executable": "/usr/bin/python",
"has_sslcontext": true,
"type": "CPython",
"version": {
"major": 2,
"micro": 15,
"minor": 7,
"releaselevel": "final",
"serial": 0
},
"version_info": [
2,
7,
15,
"final",
0
]
},
```
* ceph-ansible version (e.g. `git head or tag or stable branch`):
* Ceph version (e.g. `ceph -v`): ceph version 12.2.10 (177915764b752804194937482a39e95e0ca3de94) luminous (stable)
# additional info
I do not expect this to get fixed in stable-3.2; after all, the firewall config functionality in ceph-ansible is quite recent. But it would be nice if it were fixed in the next release.
| [
{
"content": "#!/usr/bin/python\n# Copyright 2018, Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {\n 'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'\n}\n\nDOCUMENTATION = '''\n---\nmodule: ceph_key\n\nauthor: Sebastien Han <[email protected]>\n\nshort_description: Manage Cephx key(s)\n\nversion_added: \"2.6\"\n\ndescription:\n - Manage CephX creation, deletion and updates.\n It can also list and get information about keyring(s).\noptions:\n cluster:\n description:\n - The ceph cluster name.\n required: false\n default: ceph\n name:\n description:\n - name of the CephX key\n required: true\n state:\n description:\n - If 'present' is used, the module creates a keyring\n with the associated capabilities.\n If 'present' is used and a secret is provided the module\n will always add the key. Which means it will update\n the keyring if the secret changes, the same goes for\n the capabilities.\n If 'absent' is used, the module will simply delete the keyring.\n If 'list' is used, the module will list all the keys and will\n return a json output.\n If 'update' is used, the module will **only** update\n the capabilities of a given keyring.\n If 'info' is used, the module will return in a json format the\n description of a given keyring.\n required: true\n choices: ['present', 'absent', 'list', 'update', 'info']\n default: list\n caps:\n description:\n - CephX key capabilities\n default: None\n required: false\n secret:\n description:\n - keyring's secret value\n required: false\n default: None\n containerized:\n description:\n - Wether or not this is a containerized cluster. The value is\n assigned or not depending on how the playbook runs.\n required: false\n default: None\n import_key:\n description:\n - Wether or not to import the created keyring into Ceph.\n This can be useful for someone that only wants to generate keyrings\n but not add them into Ceph.\n required: false\n default: True\n auid:\n description:\n - Sets the auid (authenticated user id) for the specified keyring\n required: false\n default: None\n dest:\n description:\n - Destination to write the keyring\n required: false\n default: /etc/ceph/\n fetch_initial_keys:\n description:\n - Fetch client.admin and bootstrap key.\n This is only needed for Nautilus and above.\n Writes down to the filesystem the initial keys generated by the monitor. 
# noqa E501\n This command can ONLY run from a monitor node.\n required: false\n default: false\n'''\n\nEXAMPLES = '''\n\nkeys_to_create:\n - { name: client.key, key: \"AQAin8tUUK84ExAA/QgBtI7gEMWdmnvKBzlXdQ==\", caps: { mon: \"allow rwx\", mds: \"allow *\" } , mode: \"0600\" } # noqa e501\n - { name: client.cle, caps: { mon: \"allow r\", osd: \"allow *\" } , mode: \"0600\" } # noqa e501\n\ncaps:\n mon: \"allow rwx\"\n mds: \"allow *\"\n\n- name: create ceph admin key\n ceph_key:\n name: client.admin\n state: present\n secret: AQAin8tU2DsKFBAAFIAzVTzkL3+gtAjjpQiomw==\n auid: 0\n caps:\n mon: allow *\n osd: allow *\n mgr: allow *\n mds: allow\n mode: 0400\n import_key: False\n\n- name: create monitor initial keyring\n ceph_key:\n name: mon.\n state: present\n secret: AQAin8tUMICVFBAALRHNrV0Z4MXupRw4v9JQ6Q==\n caps:\n mon: allow *\n dest: \"/var/lib/ceph/tmp/\"\n import_key: False\n\n- name: create cephx key\n ceph_key:\n name: \"{{ keys_to_create }}\"\n state: present\n caps: \"{{ caps }}\"\n\n- name: create cephx key but don't import it in Ceph\n ceph_key:\n name: \"{{ keys_to_create }}\"\n state: present\n caps: \"{{ caps }}\"\n import_key: False\n\n- name: update cephx key\n ceph_key:\n name: \"my_key\"\n state: update\n caps: \"{{ caps }}\"\n\n- name: delete cephx key\n ceph_key:\n name: \"my_key\"\n state: absent\n\n- name: info cephx key\n ceph_key:\n name: \"my_key\"\"\n state: info\n\n- name: list cephx keys\n ceph_key:\n state: list\n\n- name: fetch cephx keys\n ceph_key:\n state: fetch_initial_keys\n'''\n\nRETURN = '''# '''\n\nfrom ansible.module_utils.basic import AnsibleModule # noqa E402\nimport datetime # noqa E402\nimport grp # noqa E402\nimport json # noqa E402\nimport os # noqa E402\nimport pwd # noqa E402\nimport stat # noqa E402\nimport struct # noqa E402\nimport time # noqa E402\nimport base64 # noqa E402\nimport socket # noqa E402\n\nCEPH_INITIAL_KEYS = ['client.admin', 'client.bootstrap-mds', 'client.bootstrap-mgr', # noqa E501\n 'client.bootstrap-osd', 'client.bootstrap-rbd', 'client.bootstrap-rbd-mirror', 'client.bootstrap-rgw'] # noqa E501\n\n\ndef fatal(message, module):\n '''\n Report a fatal error and exit\n '''\n\n if module:\n module.fail_json(msg=message, rc=1)\n else:\n raise(Exception(message))\n\n\ndef generate_secret():\n '''\n Generate a CephX secret\n '''\n\n key = os.urandom(16)\n header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))\n secret = base64.b64encode(header + key)\n\n return secret\n\n\ndef generate_caps(cmd, _type, caps):\n '''\n Generate CephX capabilities list\n '''\n\n for k, v in caps.items():\n # makes sure someone didn't pass an empty var,\n # we don't want to add an empty cap\n if len(k) == 0:\n continue\n if _type == \"ceph-authtool\":\n cmd.extend([\"--cap\"])\n cmd.extend([k, v])\n\n return cmd\n\n\ndef generate_ceph_cmd(cluster, args, user, user_key, containerized=None):\n '''\n Generate 'ceph' command line to execute\n '''\n\n cmd = []\n\n base_cmd = [\n 'ceph',\n '-n',\n user,\n '-k',\n user_key,\n '--cluster',\n cluster,\n 'auth',\n ]\n\n cmd.extend(base_cmd + args)\n\n if containerized:\n cmd = containerized.split() + cmd\n\n return cmd\n\n\ndef generate_ceph_authtool_cmd(cluster, name, secret, caps, auid, dest, containerized=None): # noqa E501\n '''\n Generate 'ceph-authtool' command line to execute\n '''\n\n cmd = [\n 'ceph-authtool',\n '--create-keyring',\n dest,\n '--name',\n name,\n '--add-key',\n secret,\n ]\n\n if auid:\n cmd.extend(['--set-uid', auid])\n\n cmd = generate_caps(cmd, \"ceph-authtool\", caps)\n\n 
if containerized:\n cmd = containerized.split() + cmd\n\n return cmd\n\n\ndef create_key(module, result, cluster, name, secret, caps, import_key, auid, dest, containerized=None): # noqa E501\n '''\n Create a CephX key\n '''\n\n args = [\n 'import',\n '-i',\n dest,\n ]\n cmd_list = []\n\n if not secret:\n secret = generate_secret()\n\n cmd_list.append(generate_ceph_authtool_cmd(\n cluster, name, secret, caps, auid, dest, containerized))\n\n if import_key:\n user = \"client.admin\"\n user = \"client.admin\"\n user_key = os.path.join(\n \"/etc/ceph/\" + cluster + \".client.admin.keyring\")\n cmd_list.append(generate_ceph_cmd(\n cluster, args, user, user_key, containerized))\n\n return cmd_list\n\n\ndef update_key(cluster, name, caps, containerized=None):\n '''\n Update a CephX key's capabilities\n '''\n\n cmd_list = []\n\n args = [\n 'caps',\n name,\n ]\n\n args = generate_caps(args, \"ceph\", caps)\n user = \"client.admin\"\n user_key = os.path.join(\n \"/etc/ceph/\" + cluster + \".client.admin.keyring\")\n cmd_list.append(generate_ceph_cmd(\n cluster, args, user, user_key, containerized))\n\n return cmd_list\n\n\ndef delete_key(cluster, name, containerized=None):\n '''\n Delete a CephX key\n '''\n\n cmd_list = []\n\n args = [\n 'del',\n name,\n ]\n\n user = \"client.admin\"\n user_key = os.path.join(\n \"/etc/ceph/\" + cluster + \".client.admin.keyring\")\n cmd_list.append(generate_ceph_cmd(\n cluster, args, user, user_key, containerized))\n\n return cmd_list\n\n\ndef get_key(cluster, name, dest, containerized=None):\n '''\n Get a CephX key (write on the filesystem)\n '''\n\n cmd_list = []\n\n args = [\n 'get',\n name,\n '-o',\n dest,\n ]\n\n user = \"client.admin\"\n user_key = os.path.join(\n \"/etc/ceph/\" + cluster + \".client.admin.keyring\")\n cmd_list.append(generate_ceph_cmd(\n cluster, args, user, user_key, containerized))\n\n return cmd_list\n\n\ndef info_key(cluster, name, user, user_key, output_format, containerized=None):\n '''\n Get information about a CephX key\n '''\n\n cmd_list = []\n\n args = [\n 'get',\n name,\n '-f',\n output_format,\n ]\n\n cmd_list.append(generate_ceph_cmd(\n cluster, args, user, user_key, containerized))\n\n return cmd_list\n\n\ndef list_keys(cluster, user, user_key, containerized=None):\n '''\n List all CephX keys\n '''\n\n cmd_list = []\n\n args = [\n 'ls',\n '-f',\n 'json',\n ]\n\n cmd_list.append(generate_ceph_cmd(\n cluster, args, user, user_key, containerized))\n\n return cmd_list\n\n\ndef exec_commands(module, cmd_list):\n '''\n Execute command(s)\n '''\n\n for cmd in cmd_list:\n rc, out, err = module.run_command(cmd)\n if rc != 0:\n return rc, cmd, out, err\n\n return rc, cmd, out, err\n\n\ndef lookup_ceph_initial_entities(module, out):\n '''\n Lookup Ceph initial keys entries in the auth map\n '''\n\n # convert out to json, ansible returns a string...\n try:\n out_dict = json.loads(out)\n except ValueError as e:\n fatal(\"Could not decode 'ceph auth list' json output: {}\".format(e), module) # noqa E501\n\n entities = []\n if \"auth_dump\" in out_dict:\n for key in out_dict[\"auth_dump\"]:\n for k, v in key.items():\n if k == \"entity\":\n if v in CEPH_INITIAL_KEYS:\n entities.append(v)\n else:\n fatal(\"'auth_dump' key not present in json output:\", module) # noqa E501\n\n if len(entities) != len(CEPH_INITIAL_KEYS):\n return None\n\n return entities\n\n\ndef build_key_path(cluster, entity):\n '''\n Build key path depending on the key type\n '''\n\n if \"admin\" in entity:\n path = \"/etc/ceph\"\n key_path = os.path.join(\n path + \"/\" + 
cluster + \".\" + entity + \".keyring\")\n elif \"bootstrap\" in entity:\n path = \"/var/lib/ceph\"\n # bootstrap keys show up as 'client.boostrap-osd'\n # however the directory is called '/var/lib/ceph/bootstrap-osd'\n # so we need to substring 'client.'\n entity_split = entity.split('.')[1]\n key_path = os.path.join(\n path + \"/\" + entity_split + \"/\" + cluster + \".keyring\")\n else:\n return None\n\n return key_path\n\n\ndef run_module():\n module_args = dict(\n cluster=dict(type='str', required=False, default='ceph'),\n name=dict(type='str', required=False),\n state=dict(type='str', required=True),\n containerized=dict(type='str', required=False, default=None),\n caps=dict(type='dict', required=False, default=None),\n secret=dict(type='str', required=False, default=None),\n import_key=dict(type='bool', required=False, default=True),\n auid=dict(type='str', required=False, default=None),\n dest=dict(type='str', required=False, default='/etc/ceph'),\n )\n\n module = AnsibleModule(\n argument_spec=module_args,\n supports_check_mode=True,\n add_file_common_args=True,\n )\n\n # Gather module parameters in variables\n state = module.params['state']\n name = module.params.get('name')\n cluster = module.params.get('cluster')\n containerized = module.params.get('containerized')\n caps = module.params.get('caps')\n secret = module.params.get('secret')\n import_key = module.params.get('import_key')\n auid = module.params.get('auid')\n dest = module.params.get('dest')\n\n result = dict(\n changed=False,\n stdout='',\n stderr='',\n rc='',\n start='',\n end='',\n delta='',\n )\n\n if module.check_mode:\n return result\n\n startd = datetime.datetime.now()\n\n # Test if the key exists, if it does we skip its creation\n # We only want to run this check when a key needs to be added\n # There is no guarantee that any cluster is running and we don't need one\n if import_key:\n user = \"client.admin\"\n user_key = os.path.join(\n \"/etc/ceph/\" + cluster + \".client.admin.keyring\")\n output_format = \"json\"\n rc, cmd, out, err = exec_commands(\n module, info_key(cluster, name, user, user_key, output_format, containerized)) # noqa E501\n\n if state == \"present\":\n if not caps:\n fatal(\"Capabilities must be provided when state is 'present'\", module) # noqa E501\n\n # Build a different path for bootstrap keys as there are stored as\n # /var/lib/ceph/bootstrap-rbd/ceph.keyring\n if 'bootstrap' in dest:\n file_path = os.path.join(dest + \"/\" + cluster + \".keyring\")\n else:\n file_path = os.path.join(dest + \"/\" + cluster +\n \".\" + name + \".keyring\")\n\n # We allow 'present' to override any existing key\n # ONLY if a secret is provided\n # if not we skip the creation\n if import_key:\n if rc == 0 and not secret:\n # If the key exists in Ceph we must fetch it on the system\n # because nothing tells us it exists on the fs or not\n rc, cmd, out, err = exec_commands(module, get_key(cluster, name, file_path, containerized)) # noqa E501\n result[\"stdout\"] = \"skipped, since {0} already exists, we only fetched the key at {1}. 
If you want to update a key use 'state: update'\".format( # noqa E501\n name, file_path)\n result['rc'] = rc\n module.exit_json(**result)\n\n rc, cmd, out, err = exec_commands(module, create_key(\n module, result, cluster, name, secret, caps, import_key, auid, file_path, containerized)) # noqa E501\n\n file_args = module.load_file_common_arguments(module.params)\n file_args['path'] = file_path\n module.set_fs_attributes_if_different(file_args, False)\n elif state == \"update\":\n if not caps:\n fatal(\"Capabilities must be provided when state is 'update'\", module) # noqa E501\n\n if rc != 0:\n result[\"stdout\"] = \"skipped, since {0} does not exist\".format(name)\n result['rc'] = 0\n module.exit_json(**result)\n\n rc, cmd, out, err = exec_commands(\n module, update_key(cluster, name, caps, containerized))\n # After the update we don't need to overwrite the key on the filesystem\n # since the secret has not changed\n\n elif state == \"absent\":\n rc, cmd, out, err = exec_commands(\n module, delete_key(cluster, name, containerized))\n\n elif state == \"info\":\n if rc != 0:\n result[\"stdout\"] = \"skipped, since {0} does not exist\".format(name)\n result['rc'] = 0\n module.exit_json(**result)\n\n user = \"client.admin\"\n user_key = os.path.join(\n \"/etc/ceph/\" + cluster + \".client.admin.keyring\")\n output_format = \"json\"\n rc, cmd, out, err = exec_commands(\n module, info_key(cluster, name, user, user_key, output_format, containerized)) # noqa E501\n\n elif state == \"list\":\n user = \"client.admin\"\n user_key = os.path.join(\n \"/etc/ceph/\" + cluster + \".client.admin.keyring\")\n rc, cmd, out, err = exec_commands(\n module, list_keys(cluster, user, user_key, containerized))\n\n elif state == \"fetch_initial_keys\":\n hostname = socket.gethostname().split('.', 1)[0]\n user = \"mon.\"\n user_key = os.path.join(\n \"/var/lib/ceph/mon/\" + cluster + \"-\" + hostname + \"/keyring\")\n rc, cmd, out, err = exec_commands(\n module, list_keys(cluster, user, user_key, containerized))\n if rc != 0:\n result[\"stdout\"] = \"failed to retrieve ceph keys\".format(name)\n result['rc'] = 0\n module.exit_json(**result)\n\n entities = lookup_ceph_initial_entities(module, out)\n if entities is None:\n fatal(\"Failed to find some of the initial entities\", module)\n\n # get ceph's group and user id\n ceph_uid = pwd.getpwnam('ceph').pw_uid\n ceph_grp = grp.getgrnam('ceph').gr_gid\n\n output_format = \"plain\"\n for entity in entities:\n key_path = build_key_path(cluster, entity)\n if key_path is None:\n fatal(\"Failed to build key path, no entity yet?\", module)\n elif os.path.isfile(key_path):\n # if the key is already on the filesystem\n # there is no need to fetch it again\n continue\n\n extra_args = [\n '-o',\n key_path,\n ]\n\n info_cmd = info_key(cluster, entity, user,\n user_key, output_format, containerized)\n # we use info_cmd[0] because info_cmd is an array made of an array\n info_cmd[0].extend(extra_args)\n rc, cmd, out, err = exec_commands(\n module, info_cmd) # noqa E501\n\n # apply ceph:ceph ownership and mode 0400 on keys\n # FIXME by using\n # file_args = module.load_file_common_arguments(module.params)\n # file_args['path'] = dest\n # module.set_fs_attributes_if_different(file_args, False)\n try:\n os.chown(key_path, ceph_uid, ceph_grp)\n os.chmod(key_path, stat.S_IRUSR)\n except OSError as e:\n fatal(\"Failed to set owner/group/permissions of %s: %s\" % (\n key_path, str(e)), module)\n\n else:\n module.fail_json(\n msg='State must either be \"present\" or \"absent\" or \"update\" 
or \"list\" or \"info\" or \"fetch_initial_keys\".', changed=False, rc=1) # noqa E501\n\n endd = datetime.datetime.now()\n delta = endd - startd\n\n result = dict(\n cmd=cmd,\n start=str(startd),\n end=str(endd),\n delta=str(delta),\n rc=rc,\n stdout=out.rstrip(b\"\\r\\n\"),\n stderr=err.rstrip(b\"\\r\\n\"),\n changed=True,\n )\n\n if rc != 0:\n module.fail_json(msg='non-zero return code', **result)\n\n module.exit_json(**result)\n\n\ndef main():\n run_module()\n\n\nif __name__ == '__main__':\n main()\n",
"path": "library/ceph_key.py"
}
] | [
{
"content": "#!/usr/bin/python\n# Copyright 2018, Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {\n 'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'\n}\n\nDOCUMENTATION = '''\n---\nmodule: ceph_key\n\nauthor: Sebastien Han <[email protected]>\n\nshort_description: Manage Cephx key(s)\n\nversion_added: \"2.6\"\n\ndescription:\n - Manage CephX creation, deletion and updates.\n It can also list and get information about keyring(s).\noptions:\n cluster:\n description:\n - The ceph cluster name.\n required: false\n default: ceph\n name:\n description:\n - name of the CephX key\n required: true\n state:\n description:\n - If 'present' is used, the module creates a keyring\n with the associated capabilities.\n If 'present' is used and a secret is provided the module\n will always add the key. Which means it will update\n the keyring if the secret changes, the same goes for\n the capabilities.\n If 'absent' is used, the module will simply delete the keyring.\n If 'list' is used, the module will list all the keys and will\n return a json output.\n If 'update' is used, the module will **only** update\n the capabilities of a given keyring.\n If 'info' is used, the module will return in a json format the\n description of a given keyring.\n required: true\n choices: ['present', 'absent', 'list', 'update', 'info']\n default: list\n caps:\n description:\n - CephX key capabilities\n default: None\n required: false\n secret:\n description:\n - keyring's secret value\n required: false\n default: None\n containerized:\n description:\n - Wether or not this is a containerized cluster. The value is\n assigned or not depending on how the playbook runs.\n required: false\n default: None\n import_key:\n description:\n - Wether or not to import the created keyring into Ceph.\n This can be useful for someone that only wants to generate keyrings\n but not add them into Ceph.\n required: false\n default: True\n auid:\n description:\n - Sets the auid (authenticated user id) for the specified keyring\n required: false\n default: None\n dest:\n description:\n - Destination to write the keyring\n required: false\n default: /etc/ceph/\n fetch_initial_keys:\n description:\n - Fetch client.admin and bootstrap key.\n This is only needed for Nautilus and above.\n Writes down to the filesystem the initial keys generated by the monitor. 
# noqa E501\n This command can ONLY run from a monitor node.\n required: false\n default: false\n'''\n\nEXAMPLES = '''\n\nkeys_to_create:\n - { name: client.key, key: \"AQAin8tUUK84ExAA/QgBtI7gEMWdmnvKBzlXdQ==\", caps: { mon: \"allow rwx\", mds: \"allow *\" } , mode: \"0600\" } # noqa e501\n - { name: client.cle, caps: { mon: \"allow r\", osd: \"allow *\" } , mode: \"0600\" } # noqa e501\n\ncaps:\n mon: \"allow rwx\"\n mds: \"allow *\"\n\n- name: create ceph admin key\n ceph_key:\n name: client.admin\n state: present\n secret: AQAin8tU2DsKFBAAFIAzVTzkL3+gtAjjpQiomw==\n auid: 0\n caps:\n mon: allow *\n osd: allow *\n mgr: allow *\n mds: allow\n mode: 0400\n import_key: False\n\n- name: create monitor initial keyring\n ceph_key:\n name: mon.\n state: present\n secret: AQAin8tUMICVFBAALRHNrV0Z4MXupRw4v9JQ6Q==\n caps:\n mon: allow *\n dest: \"/var/lib/ceph/tmp/\"\n import_key: False\n\n- name: create cephx key\n ceph_key:\n name: \"{{ keys_to_create }}\"\n state: present\n caps: \"{{ caps }}\"\n\n- name: create cephx key but don't import it in Ceph\n ceph_key:\n name: \"{{ keys_to_create }}\"\n state: present\n caps: \"{{ caps }}\"\n import_key: False\n\n- name: update cephx key\n ceph_key:\n name: \"my_key\"\n state: update\n caps: \"{{ caps }}\"\n\n- name: delete cephx key\n ceph_key:\n name: \"my_key\"\n state: absent\n\n- name: info cephx key\n ceph_key:\n name: \"my_key\"\"\n state: info\n\n- name: list cephx keys\n ceph_key:\n state: list\n\n- name: fetch cephx keys\n ceph_key:\n state: fetch_initial_keys\n'''\n\nRETURN = '''# '''\n\nfrom ansible.module_utils.basic import AnsibleModule # noqa E402\nimport datetime # noqa E402\nimport grp # noqa E402\nimport json # noqa E402\nimport os # noqa E402\nimport pwd # noqa E402\nimport stat # noqa E402\nimport struct # noqa E402\nimport time # noqa E402\nimport base64 # noqa E402\nimport socket # noqa E402\n\nCEPH_INITIAL_KEYS = ['client.admin', 'client.bootstrap-mds', 'client.bootstrap-mgr', # noqa E501\n 'client.bootstrap-osd', 'client.bootstrap-rbd', 'client.bootstrap-rbd-mirror', 'client.bootstrap-rgw'] # noqa E501\n\n\ndef fatal(message, module):\n '''\n Report a fatal error and exit\n '''\n\n if module:\n module.fail_json(msg=message, rc=1)\n else:\n raise(Exception(message))\n\n\ndef generate_secret():\n '''\n Generate a CephX secret\n '''\n\n key = os.urandom(16)\n header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))\n secret = base64.b64encode(header + key)\n\n return secret\n\n\ndef generate_caps(cmd, _type, caps):\n '''\n Generate CephX capabilities list\n '''\n\n for k, v in caps.items():\n # makes sure someone didn't pass an empty var,\n # we don't want to add an empty cap\n if len(k) == 0:\n continue\n if _type == \"ceph-authtool\":\n cmd.extend([\"--cap\"])\n cmd.extend([k, v])\n\n return cmd\n\n\ndef generate_ceph_cmd(cluster, args, user, user_key, containerized=None):\n '''\n Generate 'ceph' command line to execute\n '''\n\n cmd = []\n\n base_cmd = [\n 'ceph',\n '-n',\n user,\n '-k',\n user_key,\n '--cluster',\n cluster,\n 'auth',\n ]\n\n cmd.extend(base_cmd + args)\n\n if containerized:\n cmd = containerized.split() + cmd\n\n return cmd\n\n\ndef generate_ceph_authtool_cmd(cluster, name, secret, caps, auid, dest, containerized=None): # noqa E501\n '''\n Generate 'ceph-authtool' command line to execute\n '''\n\n cmd = [\n 'ceph-authtool',\n '--create-keyring',\n dest,\n '--name',\n name,\n '--add-key',\n secret,\n ]\n\n if auid:\n cmd.extend(['--set-uid', auid])\n\n cmd = generate_caps(cmd, \"ceph-authtool\", caps)\n\n 
if containerized:\n cmd = containerized.split() + cmd\n\n return cmd\n\n\ndef create_key(module, result, cluster, name, secret, caps, import_key, auid, dest, containerized=None): # noqa E501\n '''\n Create a CephX key\n '''\n\n args = [\n 'import',\n '-i',\n dest,\n ]\n cmd_list = []\n\n if not secret:\n secret = generate_secret()\n\n cmd_list.append(generate_ceph_authtool_cmd(\n cluster, name, secret, caps, auid, dest, containerized))\n\n if import_key:\n user = \"client.admin\"\n user = \"client.admin\"\n user_key = os.path.join(\n \"/etc/ceph/\" + cluster + \".client.admin.keyring\")\n cmd_list.append(generate_ceph_cmd(\n cluster, args, user, user_key, containerized))\n\n return cmd_list\n\n\ndef update_key(cluster, name, caps, containerized=None):\n '''\n Update a CephX key's capabilities\n '''\n\n cmd_list = []\n\n args = [\n 'caps',\n name,\n ]\n\n args = generate_caps(args, \"ceph\", caps)\n user = \"client.admin\"\n user_key = os.path.join(\n \"/etc/ceph/\" + cluster + \".client.admin.keyring\")\n cmd_list.append(generate_ceph_cmd(\n cluster, args, user, user_key, containerized))\n\n return cmd_list\n\n\ndef delete_key(cluster, name, containerized=None):\n '''\n Delete a CephX key\n '''\n\n cmd_list = []\n\n args = [\n 'del',\n name,\n ]\n\n user = \"client.admin\"\n user_key = os.path.join(\n \"/etc/ceph/\" + cluster + \".client.admin.keyring\")\n cmd_list.append(generate_ceph_cmd(\n cluster, args, user, user_key, containerized))\n\n return cmd_list\n\n\ndef get_key(cluster, name, dest, containerized=None):\n '''\n Get a CephX key (write on the filesystem)\n '''\n\n cmd_list = []\n\n args = [\n 'get',\n name,\n '-o',\n dest,\n ]\n\n user = \"client.admin\"\n user_key = os.path.join(\n \"/etc/ceph/\" + cluster + \".client.admin.keyring\")\n cmd_list.append(generate_ceph_cmd(\n cluster, args, user, user_key, containerized))\n\n return cmd_list\n\n\ndef info_key(cluster, name, user, user_key, output_format, containerized=None):\n '''\n Get information about a CephX key\n '''\n\n cmd_list = []\n\n args = [\n 'get',\n name,\n '-f',\n output_format,\n ]\n\n cmd_list.append(generate_ceph_cmd(\n cluster, args, user, user_key, containerized))\n\n return cmd_list\n\n\ndef list_keys(cluster, user, user_key, containerized=None):\n '''\n List all CephX keys\n '''\n\n cmd_list = []\n\n args = [\n 'ls',\n '-f',\n 'json',\n ]\n\n cmd_list.append(generate_ceph_cmd(\n cluster, args, user, user_key, containerized))\n\n return cmd_list\n\n\ndef exec_commands(module, cmd_list):\n '''\n Execute command(s)\n '''\n\n for cmd in cmd_list:\n rc, out, err = module.run_command(cmd)\n if rc != 0:\n return rc, cmd, out, err\n\n return rc, cmd, out, err\n\n\ndef lookup_ceph_initial_entities(module, out):\n '''\n Lookup Ceph initial keys entries in the auth map\n '''\n\n # convert out to json, ansible returns a string...\n try:\n out_dict = json.loads(out)\n except ValueError as e:\n fatal(\"Could not decode 'ceph auth list' json output: {}\".format(e), module) # noqa E501\n\n entities = []\n if \"auth_dump\" in out_dict:\n for key in out_dict[\"auth_dump\"]:\n for k, v in key.items():\n if k == \"entity\":\n if v in CEPH_INITIAL_KEYS:\n entities.append(v)\n else:\n fatal(\"'auth_dump' key not present in json output:\", module) # noqa E501\n\n if len(entities) != len(CEPH_INITIAL_KEYS):\n return None\n\n return entities\n\n\ndef build_key_path(cluster, entity):\n '''\n Build key path depending on the key type\n '''\n\n if \"admin\" in entity:\n path = \"/etc/ceph\"\n key_path = os.path.join(\n path + \"/\" + 
cluster + \".\" + entity + \".keyring\")\n elif \"bootstrap\" in entity:\n path = \"/var/lib/ceph\"\n # bootstrap keys show up as 'client.boostrap-osd'\n # however the directory is called '/var/lib/ceph/bootstrap-osd'\n # so we need to substring 'client.'\n entity_split = entity.split('.')[1]\n key_path = os.path.join(\n path + \"/\" + entity_split + \"/\" + cluster + \".keyring\")\n else:\n return None\n\n return key_path\n\n\ndef run_module():\n module_args = dict(\n cluster=dict(type='str', required=False, default='ceph'),\n name=dict(type='str', required=False),\n state=dict(type='str', required=True),\n containerized=dict(type='str', required=False, default=None),\n caps=dict(type='dict', required=False, default=None),\n secret=dict(type='str', required=False, default=None),\n import_key=dict(type='bool', required=False, default=True),\n auid=dict(type='str', required=False, default=None),\n dest=dict(type='str', required=False, default='/etc/ceph'),\n )\n\n module = AnsibleModule(\n argument_spec=module_args,\n supports_check_mode=True,\n add_file_common_args=True,\n )\n\n # Gather module parameters in variables\n state = module.params['state']\n name = module.params.get('name')\n cluster = module.params.get('cluster')\n containerized = module.params.get('containerized')\n caps = module.params.get('caps')\n secret = module.params.get('secret')\n import_key = module.params.get('import_key')\n auid = module.params.get('auid')\n dest = module.params.get('dest')\n\n result = dict(\n changed=False,\n stdout='',\n stderr='',\n rc='',\n start='',\n end='',\n delta='',\n )\n\n if module.check_mode:\n return result\n\n startd = datetime.datetime.now()\n\n # Test if the key exists, if it does we skip its creation\n # We only want to run this check when a key needs to be added\n # There is no guarantee that any cluster is running and we don't need one\n if import_key:\n user = \"client.admin\"\n user_key = os.path.join(\n \"/etc/ceph/\" + cluster + \".client.admin.keyring\")\n output_format = \"json\"\n rc, cmd, out, err = exec_commands(\n module, info_key(cluster, name, user, user_key, output_format, containerized)) # noqa E501\n\n if state == \"present\":\n if not caps:\n fatal(\"Capabilities must be provided when state is 'present'\", module) # noqa E501\n\n # Build a different path for bootstrap keys as there are stored as\n # /var/lib/ceph/bootstrap-rbd/ceph.keyring\n if 'bootstrap' in dest:\n file_path = os.path.join(dest + \"/\" + cluster + \".keyring\")\n else:\n file_path = os.path.join(dest + \"/\" + cluster +\n \".\" + name + \".keyring\")\n\n # We allow 'present' to override any existing key\n # ONLY if a secret is provided\n # if not we skip the creation\n if import_key:\n if rc == 0 and not secret:\n # If the key exists in Ceph we must fetch it on the system\n # because nothing tells us it exists on the fs or not\n rc, cmd, out, err = exec_commands(module, get_key(cluster, name, file_path, containerized)) # noqa E501\n result[\"stdout\"] = \"skipped, since {0} already exists, we only fetched the key at {1}. 
If you want to update a key use 'state: update'\".format( # noqa E501\n name, file_path)\n result['rc'] = rc\n module.exit_json(**result)\n\n rc, cmd, out, err = exec_commands(module, create_key(\n module, result, cluster, name, secret, caps, import_key, auid, file_path, containerized)) # noqa E501\n\n file_args = module.load_file_common_arguments(module.params)\n file_args['path'] = file_path\n module.set_fs_attributes_if_different(file_args, False)\n elif state == \"update\":\n if not caps:\n fatal(\"Capabilities must be provided when state is 'update'\", module) # noqa E501\n\n if rc != 0:\n result[\"stdout\"] = \"skipped, since {0} does not exist\".format(name)\n result['rc'] = 0\n module.exit_json(**result)\n\n rc, cmd, out, err = exec_commands(\n module, update_key(cluster, name, caps, containerized))\n # After the update we don't need to overwrite the key on the filesystem\n # since the secret has not changed\n\n elif state == \"absent\":\n rc, cmd, out, err = exec_commands(\n module, delete_key(cluster, name, containerized))\n\n elif state == \"info\":\n if rc != 0:\n result[\"stdout\"] = \"skipped, since {0} does not exist\".format(name)\n result['rc'] = 0\n module.exit_json(**result)\n\n user = \"client.admin\"\n user_key = os.path.join(\n \"/etc/ceph/\" + cluster + \".client.admin.keyring\")\n output_format = \"json\"\n rc, cmd, out, err = exec_commands(\n module, info_key(cluster, name, user, user_key, output_format, containerized)) # noqa E501\n\n elif state == \"list\":\n user = \"client.admin\"\n user_key = os.path.join(\n \"/etc/ceph/\" + cluster + \".client.admin.keyring\")\n rc, cmd, out, err = exec_commands(\n module, list_keys(cluster, user, user_key, containerized))\n\n elif state == \"fetch_initial_keys\":\n hostname = socket.gethostname().split('.', 1)[0]\n user = \"mon.\"\n user_key = os.path.join(\n \"/var/lib/ceph/mon/\" + cluster + \"-\" + hostname + \"/keyring\")\n rc, cmd, out, err = exec_commands(\n module, list_keys(cluster, user, user_key, containerized))\n if rc != 0:\n result[\"stdout\"] = \"failed to retrieve ceph keys\".format(name)\n result['rc'] = 0\n module.exit_json(**result)\n\n entities = lookup_ceph_initial_entities(module, out)\n if entities is None:\n fatal(\"Failed to find some of the initial entities\", module)\n\n # get ceph's group and user id\n ceph_uid = pwd.getpwnam('ceph').pw_uid\n ceph_grp = grp.getgrnam('ceph').gr_gid\n\n output_format = \"plain\"\n for entity in entities:\n key_path = build_key_path(cluster, entity)\n if key_path is None:\n fatal(\"Failed to build key path, no entity yet?\", module)\n elif os.path.isfile(key_path):\n # if the key is already on the filesystem\n # there is no need to fetch it again\n continue\n\n extra_args = [\n '-o',\n key_path,\n ]\n\n info_cmd = info_key(cluster, entity, user,\n user_key, output_format, containerized)\n # we use info_cmd[0] because info_cmd is an array made of an array\n info_cmd[0].extend(extra_args)\n rc, cmd, out, err = exec_commands(\n module, info_cmd) # noqa E501\n\n # apply ceph:ceph ownership and mode 0400 on keys\n # FIXME by using\n # file_args = module.load_file_common_arguments(module.params)\n # file_args['path'] = dest\n # module.set_fs_attributes_if_different(file_args, False)\n try:\n os.chown(key_path, ceph_uid, ceph_grp)\n os.chmod(key_path, stat.S_IRUSR)\n except OSError as e:\n fatal(\"Failed to set owner/group/permissions of %s: %s\" % (\n key_path, str(e)), module)\n\n else:\n module.fail_json(\n msg='State must either be \"present\" or \"absent\" or \"update\" 
or \"list\" or \"info\" or \"fetch_initial_keys\".', changed=False, rc=1) # noqa E501\n\n endd = datetime.datetime.now()\n delta = endd - startd\n\n result = dict(\n cmd=cmd,\n start=str(startd),\n end=str(endd),\n delta=str(delta),\n rc=rc,\n stdout=out.rstrip(\"\\r\\n\"),\n stderr=err.rstrip(\"\\r\\n\"),\n changed=True,\n )\n\n if rc != 0:\n module.fail_json(msg='non-zero return code', **result)\n\n module.exit_json(**result)\n\n\ndef main():\n run_module()\n\n\nif __name__ == '__main__':\n main()\n",
"path": "library/ceph_key.py"
}
] | diff --git a/library/ceph_key.py b/library/ceph_key.py
index a5fd517c91..34a3b79b25 100644
--- a/library/ceph_key.py
+++ b/library/ceph_key.py
@@ -678,8 +678,8 @@ def run_module():
end=str(endd),
delta=str(delta),
rc=rc,
- stdout=out.rstrip(b"\r\n"),
- stderr=err.rstrip(b"\r\n"),
+ stdout=out.rstrip("\r\n"),
+ stderr=err.rstrip("\r\n"),
changed=True,
)
|
pallets__click-2175 | `click.secho` is improperly typed
The `file` argument for `click.secho` is missing part of its type hint, which causes the entire `secho` function to be treated as untyped.
This is not flagged by mypy in strict mode, but it is flagged by pyright in strict mode.
---
Install pyright and click
```bash
python -m venv .venv && source .venv/bin/activate
pip install click pyright
```
Create a Python file
```py
# main.py
import click
click.secho("hello")
```
Set pyright to strict mode
```toml
# pyproject.toml
[tool.pyright]
typeCheckingMode = "strict"
```
Run pyright
```bash
pyright main.py
```
Result:
```bash
error: Type of "secho" is partially unknown
Type of "secho" is "(message: Any | None = None, file: IO[Unknown] | None = None, nl: bool = True, err: bool = False, color: bool | None = None, **styles: Any) -> None" (reportUnknownMemberType)
```
---
The function should not produce a typing error. I will PR a fix for this momentarily.
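
For reference, a minimal sketch of the kind of change that resolves this: parameterize the bare `t.IO` generic so the annotation is fully typed. The exact signature is whatever the maintainers adopt; this mirrors the patch later in this record.

```py
import typing as t

def secho(
    message: t.Optional[t.Any] = None,
    file: t.Optional[t.IO[t.AnyStr]] = None,  # parameterized instead of bare t.IO
    nl: bool = True,
    err: bool = False,
    color: t.Optional[bool] = None,
    **styles: t.Any,
) -> None:
    ...  # body unchanged; only the annotation matters to pyright
```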
---
Environment:
- Python version: 3.10.1
- Click version: 8.0.3
| [
{
"content": "import inspect\nimport io\nimport itertools\nimport os\nimport sys\nimport typing as t\nfrom gettext import gettext as _\n\nfrom ._compat import isatty\nfrom ._compat import strip_ansi\nfrom ._compat import WIN\nfrom .exceptions import Abort\nfrom .exceptions import UsageError\nfrom .globals import resolve_color_default\nfrom .types import Choice\nfrom .types import convert_type\nfrom .types import ParamType\nfrom .utils import echo\nfrom .utils import LazyFile\n\nif t.TYPE_CHECKING:\n from ._termui_impl import ProgressBar\n\nV = t.TypeVar(\"V\")\n\n# The prompt functions to use. The doc tools currently override these\n# functions to customize how they work.\nvisible_prompt_func: t.Callable[[str], str] = input\n\n_ansi_colors = {\n \"black\": 30,\n \"red\": 31,\n \"green\": 32,\n \"yellow\": 33,\n \"blue\": 34,\n \"magenta\": 35,\n \"cyan\": 36,\n \"white\": 37,\n \"reset\": 39,\n \"bright_black\": 90,\n \"bright_red\": 91,\n \"bright_green\": 92,\n \"bright_yellow\": 93,\n \"bright_blue\": 94,\n \"bright_magenta\": 95,\n \"bright_cyan\": 96,\n \"bright_white\": 97,\n}\n_ansi_reset_all = \"\\033[0m\"\n\n\ndef hidden_prompt_func(prompt: str) -> str:\n import getpass\n\n return getpass.getpass(prompt)\n\n\ndef _build_prompt(\n text: str,\n suffix: str,\n show_default: bool = False,\n default: t.Optional[t.Any] = None,\n show_choices: bool = True,\n type: t.Optional[ParamType] = None,\n) -> str:\n prompt = text\n if type is not None and show_choices and isinstance(type, Choice):\n prompt += f\" ({', '.join(map(str, type.choices))})\"\n if default is not None and show_default:\n prompt = f\"{prompt} [{_format_default(default)}]\"\n return f\"{prompt}{suffix}\"\n\n\ndef _format_default(default: t.Any) -> t.Any:\n if isinstance(default, (io.IOBase, LazyFile)) and hasattr(default, \"name\"):\n return default.name # type: ignore\n\n return default\n\n\ndef prompt(\n text: str,\n default: t.Optional[t.Any] = None,\n hide_input: bool = False,\n confirmation_prompt: t.Union[bool, str] = False,\n type: t.Optional[t.Union[ParamType, t.Any]] = None,\n value_proc: t.Optional[t.Callable[[str], t.Any]] = None,\n prompt_suffix: str = \": \",\n show_default: bool = True,\n err: bool = False,\n show_choices: bool = True,\n) -> t.Any:\n \"\"\"Prompts a user for input. This is a convenience function that can\n be used to prompt a user for input later.\n\n If the user aborts the input by sending an interrupt signal, this\n function will catch it and raise a :exc:`Abort` exception.\n\n :param text: the text to show for the prompt.\n :param default: the default value to use if no input happens. If this\n is not given it will prompt until it's aborted.\n :param hide_input: if this is set to true then the input value will\n be hidden.\n :param confirmation_prompt: Prompt a second time to confirm the\n value. 
Can be set to a string instead of ``True`` to customize\n the message.\n :param type: the type to use to check the value against.\n :param value_proc: if this parameter is provided it's a function that\n is invoked instead of the type conversion to\n convert a value.\n :param prompt_suffix: a suffix that should be added to the prompt.\n :param show_default: shows or hides the default value in the prompt.\n :param err: if set to true the file defaults to ``stderr`` instead of\n ``stdout``, the same as with echo.\n :param show_choices: Show or hide choices if the passed type is a Choice.\n For example if type is a Choice of either day or week,\n show_choices is true and text is \"Group by\" then the\n prompt will be \"Group by (day, week): \".\n\n .. versionadded:: 8.0\n ``confirmation_prompt`` can be a custom string.\n\n .. versionadded:: 7.0\n Added the ``show_choices`` parameter.\n\n .. versionadded:: 6.0\n Added unicode support for cmd.exe on Windows.\n\n .. versionadded:: 4.0\n Added the `err` parameter.\n\n \"\"\"\n\n def prompt_func(text: str) -> str:\n f = hidden_prompt_func if hide_input else visible_prompt_func\n try:\n # Write the prompt separately so that we get nice\n # coloring through colorama on Windows\n echo(text.rstrip(\" \"), nl=False, err=err)\n # Echo a space to stdout to work around an issue where\n # readline causes backspace to clear the whole line.\n return f(\" \")\n except (KeyboardInterrupt, EOFError):\n # getpass doesn't print a newline if the user aborts input with ^C.\n # Allegedly this behavior is inherited from getpass(3).\n # A doc bug has been filed at https://bugs.python.org/issue24711\n if hide_input:\n echo(None, err=err)\n raise Abort() from None\n\n if value_proc is None:\n value_proc = convert_type(type, default)\n\n prompt = _build_prompt(\n text, prompt_suffix, show_default, default, show_choices, type\n )\n\n if confirmation_prompt:\n if confirmation_prompt is True:\n confirmation_prompt = _(\"Repeat for confirmation\")\n\n confirmation_prompt = _build_prompt(confirmation_prompt, prompt_suffix)\n\n while True:\n while True:\n value = prompt_func(prompt)\n if value:\n break\n elif default is not None:\n value = default\n break\n try:\n result = value_proc(value)\n except UsageError as e:\n if hide_input:\n echo(_(\"Error: The value you entered was invalid.\"), err=err)\n else:\n echo(_(\"Error: {e.message}\").format(e=e), err=err) # noqa: B306\n continue\n if not confirmation_prompt:\n return result\n while True:\n value2 = prompt_func(confirmation_prompt)\n if value2:\n break\n if value == value2:\n return result\n echo(_(\"Error: The two entered values do not match.\"), err=err)\n\n\ndef confirm(\n text: str,\n default: t.Optional[bool] = False,\n abort: bool = False,\n prompt_suffix: str = \": \",\n show_default: bool = True,\n err: bool = False,\n) -> bool:\n \"\"\"Prompts for confirmation (yes/no question).\n\n If the user aborts the input by sending a interrupt signal this\n function will catch it and raise a :exc:`Abort` exception.\n\n :param text: the question to ask.\n :param default: The default value to use when no input is given. If\n ``None``, repeat until input is given.\n :param abort: if this is set to `True` a negative answer aborts the\n exception by raising :exc:`Abort`.\n :param prompt_suffix: a suffix that should be added to the prompt.\n :param show_default: shows or hides the default value in the prompt.\n :param err: if set to true the file defaults to ``stderr`` instead of\n ``stdout``, the same as with echo.\n\n .. 
versionchanged:: 8.0\n Repeat until input is given if ``default`` is ``None``.\n\n .. versionadded:: 4.0\n Added the ``err`` parameter.\n \"\"\"\n prompt = _build_prompt(\n text,\n prompt_suffix,\n show_default,\n \"y/n\" if default is None else (\"Y/n\" if default else \"y/N\"),\n )\n\n while True:\n try:\n # Write the prompt separately so that we get nice\n # coloring through colorama on Windows\n echo(prompt.rstrip(\" \"), nl=False, err=err)\n # Echo a space to stdout to work around an issue where\n # readline causes backspace to clear the whole line.\n value = visible_prompt_func(\" \").lower().strip()\n except (KeyboardInterrupt, EOFError):\n raise Abort() from None\n if value in (\"y\", \"yes\"):\n rv = True\n elif value in (\"n\", \"no\"):\n rv = False\n elif default is not None and value == \"\":\n rv = default\n else:\n echo(_(\"Error: invalid input\"), err=err)\n continue\n break\n if abort and not rv:\n raise Abort()\n return rv\n\n\ndef get_terminal_size() -> os.terminal_size:\n \"\"\"Returns the current size of the terminal as tuple in the form\n ``(width, height)`` in columns and rows.\n\n .. deprecated:: 8.0\n Will be removed in Click 8.1. Use\n :func:`shutil.get_terminal_size` instead.\n \"\"\"\n import shutil\n import warnings\n\n warnings.warn(\n \"'click.get_terminal_size()' is deprecated and will be removed\"\n \" in Click 8.1. Use 'shutil.get_terminal_size()' instead.\",\n DeprecationWarning,\n stacklevel=2,\n )\n return shutil.get_terminal_size()\n\n\ndef echo_via_pager(\n text_or_generator: t.Union[t.Iterable[str], t.Callable[[], t.Iterable[str]], str],\n color: t.Optional[bool] = None,\n) -> None:\n \"\"\"This function takes a text and shows it via an environment specific\n pager on stdout.\n\n .. versionchanged:: 3.0\n Added the `color` flag.\n\n :param text_or_generator: the text to page, or alternatively, a\n generator emitting the text to page.\n :param color: controls if the pager supports ANSI colors or not. The\n default is autodetection.\n \"\"\"\n color = resolve_color_default(color)\n\n if inspect.isgeneratorfunction(text_or_generator):\n i = t.cast(t.Callable[[], t.Iterable[str]], text_or_generator)()\n elif isinstance(text_or_generator, str):\n i = [text_or_generator]\n else:\n i = iter(t.cast(t.Iterable[str], text_or_generator))\n\n # convert every element of i to a text type if necessary\n text_generator = (el if isinstance(el, str) else str(el) for el in i)\n\n from ._termui_impl import pager\n\n return pager(itertools.chain(text_generator, \"\\n\"), color)\n\n\ndef progressbar(\n iterable: t.Optional[t.Iterable[V]] = None,\n length: t.Optional[int] = None,\n label: t.Optional[str] = None,\n show_eta: bool = True,\n show_percent: t.Optional[bool] = None,\n show_pos: bool = False,\n item_show_func: t.Optional[t.Callable[[t.Optional[V]], t.Optional[str]]] = None,\n fill_char: str = \"#\",\n empty_char: str = \"-\",\n bar_template: str = \"%(label)s [%(bar)s] %(info)s\",\n info_sep: str = \" \",\n width: int = 36,\n file: t.Optional[t.TextIO] = None,\n color: t.Optional[bool] = None,\n update_min_steps: int = 1,\n) -> \"ProgressBar[V]\":\n \"\"\"This function creates an iterable context manager that can be used\n to iterate over something while showing a progress bar. It will\n either iterate over the `iterable` or `length` items (that are counted\n up). While iteration happens, this function will print a rendered\n progress bar to the given `file` (defaults to stdout) and will attempt\n to calculate remaining time and more. 
By default, this progress bar\n will not be rendered if the file is not a terminal.\n\n The context manager creates the progress bar. When the context\n manager is entered the progress bar is already created. With every\n iteration over the progress bar, the iterable passed to the bar is\n advanced and the bar is updated. When the context manager exits,\n a newline is printed and the progress bar is finalized on screen.\n\n Note: The progress bar is currently designed for use cases where the\n total progress can be expected to take at least several seconds.\n Because of this, the ProgressBar class object won't display\n progress that is considered too fast, and progress where the time\n between steps is less than a second.\n\n No printing must happen or the progress bar will be unintentionally\n destroyed.\n\n Example usage::\n\n with progressbar(items) as bar:\n for item in bar:\n do_something_with(item)\n\n Alternatively, if no iterable is specified, one can manually update the\n progress bar through the `update()` method instead of directly\n iterating over the progress bar. The update method accepts the number\n of steps to increment the bar with::\n\n with progressbar(length=chunks.total_bytes) as bar:\n for chunk in chunks:\n process_chunk(chunk)\n bar.update(chunks.bytes)\n\n The ``update()`` method also takes an optional value specifying the\n ``current_item`` at the new position. This is useful when used\n together with ``item_show_func`` to customize the output for each\n manual step::\n\n with click.progressbar(\n length=total_size,\n label='Unzipping archive',\n item_show_func=lambda a: a.filename\n ) as bar:\n for archive in zip_file:\n archive.extract()\n bar.update(archive.size, archive)\n\n :param iterable: an iterable to iterate over. If not provided the length\n is required.\n :param length: the number of items to iterate over. By default the\n progressbar will attempt to ask the iterator about its\n length, which might or might not work. If an iterable is\n also provided this parameter can be used to override the\n length. If an iterable is not provided the progress bar\n will iterate over a range of that length.\n :param label: the label to show next to the progress bar.\n :param show_eta: enables or disables the estimated time display. This is\n automatically disabled if the length cannot be\n determined.\n :param show_percent: enables or disables the percentage display. The\n default is `True` if the iterable has a length or\n `False` if not.\n :param show_pos: enables or disables the absolute position display. The\n default is `False`.\n :param item_show_func: A function called with the current item which\n can return a string to show next to the progress bar. If the\n function returns ``None`` nothing is shown. The current item can\n be ``None``, such as when entering and exiting the bar.\n :param fill_char: the character to use to show the filled part of the\n progress bar.\n :param empty_char: the character to use to show the non-filled part of\n the progress bar.\n :param bar_template: the format string to use as template for the bar.\n The parameters in it are ``label`` for the label,\n ``bar`` for the progress bar and ``info`` for the\n info section.\n :param info_sep: the separator between multiple info items (eta etc.)\n :param width: the width of the progress bar in characters, 0 means full\n terminal width\n :param file: The file to write to. 
If this is not a terminal then\n only the label is printed.\n :param color: controls if the terminal supports ANSI colors or not. The\n default is autodetection. This is only needed if ANSI\n codes are included anywhere in the progress bar output\n which is not the case by default.\n :param update_min_steps: Render only when this many updates have\n completed. This allows tuning for very fast iterators.\n\n .. versionchanged:: 8.0\n Output is shown even if execution time is less than 0.5 seconds.\n\n .. versionchanged:: 8.0\n ``item_show_func`` shows the current item, not the previous one.\n\n .. versionchanged:: 8.0\n Labels are echoed if the output is not a TTY. Reverts a change\n in 7.0 that removed all output.\n\n .. versionadded:: 8.0\n Added the ``update_min_steps`` parameter.\n\n .. versionchanged:: 4.0\n Added the ``color`` parameter. Added the ``update`` method to\n the object.\n\n .. versionadded:: 2.0\n \"\"\"\n from ._termui_impl import ProgressBar\n\n color = resolve_color_default(color)\n return ProgressBar(\n iterable=iterable,\n length=length,\n show_eta=show_eta,\n show_percent=show_percent,\n show_pos=show_pos,\n item_show_func=item_show_func,\n fill_char=fill_char,\n empty_char=empty_char,\n bar_template=bar_template,\n info_sep=info_sep,\n file=file,\n label=label,\n width=width,\n color=color,\n update_min_steps=update_min_steps,\n )\n\n\ndef clear() -> None:\n \"\"\"Clears the terminal screen. This will have the effect of clearing\n the whole visible space of the terminal and moving the cursor to the\n top left. This does not do anything if not connected to a terminal.\n\n .. versionadded:: 2.0\n \"\"\"\n if not isatty(sys.stdout):\n return\n if WIN:\n os.system(\"cls\")\n else:\n sys.stdout.write(\"\\033[2J\\033[1;1H\")\n\n\ndef _interpret_color(\n color: t.Union[int, t.Tuple[int, int, int], str], offset: int = 0\n) -> str:\n if isinstance(color, int):\n return f\"{38 + offset};5;{color:d}\"\n\n if isinstance(color, (tuple, list)):\n r, g, b = color\n return f\"{38 + offset};2;{r:d};{g:d};{b:d}\"\n\n return str(_ansi_colors[color] + offset)\n\n\ndef style(\n text: t.Any,\n fg: t.Optional[t.Union[int, t.Tuple[int, int, int], str]] = None,\n bg: t.Optional[t.Union[int, t.Tuple[int, int, int], str]] = None,\n bold: t.Optional[bool] = None,\n dim: t.Optional[bool] = None,\n underline: t.Optional[bool] = None,\n overline: t.Optional[bool] = None,\n italic: t.Optional[bool] = None,\n blink: t.Optional[bool] = None,\n reverse: t.Optional[bool] = None,\n strikethrough: t.Optional[bool] = None,\n reset: bool = True,\n) -> str:\n \"\"\"Styles a text with ANSI styles and returns the new string. By\n default the styling is self contained which means that at the end\n of the string a reset code is issued. 
This can be prevented by\n passing ``reset=False``.\n\n Examples::\n\n click.echo(click.style('Hello World!', fg='green'))\n click.echo(click.style('ATTENTION!', blink=True))\n click.echo(click.style('Some things', reverse=True, fg='cyan'))\n click.echo(click.style('More colors', fg=(255, 12, 128), bg=117))\n\n Supported color names:\n\n * ``black`` (might be a gray)\n * ``red``\n * ``green``\n * ``yellow`` (might be an orange)\n * ``blue``\n * ``magenta``\n * ``cyan``\n * ``white`` (might be light gray)\n * ``bright_black``\n * ``bright_red``\n * ``bright_green``\n * ``bright_yellow``\n * ``bright_blue``\n * ``bright_magenta``\n * ``bright_cyan``\n * ``bright_white``\n * ``reset`` (reset the color code only)\n\n If the terminal supports it, color may also be specified as:\n\n - An integer in the interval [0, 255]. The terminal must support\n 8-bit/256-color mode.\n - An RGB tuple of three integers in [0, 255]. The terminal must\n support 24-bit/true-color mode.\n\n See https://en.wikipedia.org/wiki/ANSI_color and\n https://gist.github.com/XVilka/8346728 for more information.\n\n :param text: the string to style with ansi codes.\n :param fg: if provided this will become the foreground color.\n :param bg: if provided this will become the background color.\n :param bold: if provided this will enable or disable bold mode.\n :param dim: if provided this will enable or disable dim mode. This is\n badly supported.\n :param underline: if provided this will enable or disable underline.\n :param overline: if provided this will enable or disable overline.\n :param italic: if provided this will enable or disable italic.\n :param blink: if provided this will enable or disable blinking.\n :param reverse: if provided this will enable or disable inverse\n rendering (foreground becomes background and the\n other way round).\n :param strikethrough: if provided this will enable or disable\n striking through text.\n :param reset: by default a reset-all code is added at the end of the\n string which means that styles do not carry over. This\n can be disabled to compose styles.\n\n .. versionchanged:: 8.0\n A non-string ``message`` is converted to a string.\n\n .. versionchanged:: 8.0\n Added support for 256 and RGB color codes.\n\n .. versionchanged:: 8.0\n Added the ``strikethrough``, ``italic``, and ``overline``\n parameters.\n\n .. versionchanged:: 7.0\n Added support for bright colors.\n\n .. 
versionadded:: 2.0\n \"\"\"\n if not isinstance(text, str):\n text = str(text)\n\n bits = []\n\n if fg:\n try:\n bits.append(f\"\\033[{_interpret_color(fg)}m\")\n except KeyError:\n raise TypeError(f\"Unknown color {fg!r}\") from None\n\n if bg:\n try:\n bits.append(f\"\\033[{_interpret_color(bg, 10)}m\")\n except KeyError:\n raise TypeError(f\"Unknown color {bg!r}\") from None\n\n if bold is not None:\n bits.append(f\"\\033[{1 if bold else 22}m\")\n if dim is not None:\n bits.append(f\"\\033[{2 if dim else 22}m\")\n if underline is not None:\n bits.append(f\"\\033[{4 if underline else 24}m\")\n if overline is not None:\n bits.append(f\"\\033[{53 if overline else 55}m\")\n if italic is not None:\n bits.append(f\"\\033[{3 if italic else 23}m\")\n if blink is not None:\n bits.append(f\"\\033[{5 if blink else 25}m\")\n if reverse is not None:\n bits.append(f\"\\033[{7 if reverse else 27}m\")\n if strikethrough is not None:\n bits.append(f\"\\033[{9 if strikethrough else 29}m\")\n bits.append(text)\n if reset:\n bits.append(_ansi_reset_all)\n return \"\".join(bits)\n\n\ndef unstyle(text: str) -> str:\n \"\"\"Removes ANSI styling information from a string. Usually it's not\n necessary to use this function as Click's echo function will\n automatically remove styling if necessary.\n\n .. versionadded:: 2.0\n\n :param text: the text to remove style information from.\n \"\"\"\n return strip_ansi(text)\n\n\ndef secho(\n message: t.Optional[t.Any] = None,\n file: t.Optional[t.IO] = None,\n nl: bool = True,\n err: bool = False,\n color: t.Optional[bool] = None,\n **styles: t.Any,\n) -> None:\n \"\"\"This function combines :func:`echo` and :func:`style` into one\n call. As such the following two calls are the same::\n\n click.secho('Hello World!', fg='green')\n click.echo(click.style('Hello World!', fg='green'))\n\n All keyword arguments are forwarded to the underlying functions\n depending on which one they go with.\n\n Non-string types will be converted to :class:`str`. However,\n :class:`bytes` are passed directly to :meth:`echo` without applying\n style. If you want to style bytes that represent text, call\n :meth:`bytes.decode` first.\n\n .. versionchanged:: 8.0\n A non-string ``message`` is converted to a string. Bytes are\n passed through without style applied.\n\n .. versionadded:: 2.0\n \"\"\"\n if message is not None and not isinstance(message, (bytes, bytearray)):\n message = style(message, **styles)\n\n return echo(message, file=file, nl=nl, err=err, color=color)\n\n\ndef edit(\n text: t.Optional[t.AnyStr] = None,\n editor: t.Optional[str] = None,\n env: t.Optional[t.Mapping[str, str]] = None,\n require_save: bool = True,\n extension: str = \".txt\",\n filename: t.Optional[str] = None,\n) -> t.Optional[t.AnyStr]:\n r\"\"\"Edits the given text in the defined editor. If an editor is given\n (should be the full path to the executable but the regular operating\n system search path is used for finding the executable) it overrides\n the detected editor. Optionally, some environment variables can be\n used. If the editor is closed without changes, `None` is returned. In\n case a file is edited directly the return value is always `None` and\n `require_save` and `extension` are ignored.\n\n If the editor cannot be opened a :exc:`UsageError` is raised.\n\n Note for Windows: to simplify cross-platform usage, the newlines are\n automatically converted from POSIX to Windows and vice versa. 
As such,\n the message here will have ``\\n`` as newline markers.\n\n :param text: the text to edit.\n :param editor: optionally the editor to use. Defaults to automatic\n detection.\n :param env: environment variables to forward to the editor.\n :param require_save: if this is true, then not saving in the editor\n will make the return value become `None`.\n :param extension: the extension to tell the editor about. This defaults\n to `.txt` but changing this might change syntax\n highlighting.\n :param filename: if provided it will edit this file instead of the\n provided text contents. It will not use a temporary\n file as an indirection in that case.\n \"\"\"\n from ._termui_impl import Editor\n\n ed = Editor(editor=editor, env=env, require_save=require_save, extension=extension)\n\n if filename is None:\n return ed.edit(text)\n\n ed.edit_file(filename)\n return None\n\n\ndef launch(url: str, wait: bool = False, locate: bool = False) -> int:\n \"\"\"This function launches the given URL (or filename) in the default\n viewer application for this file type. If this is an executable, it\n might launch the executable in a new session. The return value is\n the exit code of the launched application. Usually, ``0`` indicates\n success.\n\n Examples::\n\n click.launch('https://click.palletsprojects.com/')\n click.launch('/my/downloaded/file', locate=True)\n\n .. versionadded:: 2.0\n\n :param url: URL or filename of the thing to launch.\n :param wait: Wait for the program to exit before returning. This\n only works if the launched program blocks. In particular,\n ``xdg-open`` on Linux does not block.\n :param locate: if this is set to `True` then instead of launching the\n application associated with the URL it will attempt to\n launch a file manager with the file located. This\n might have weird effects if the URL does not point to\n the filesystem.\n \"\"\"\n from ._termui_impl import open_url\n\n return open_url(url, wait=wait, locate=locate)\n\n\n# If this is provided, getchar() calls into this instead. This is used\n# for unittesting purposes.\n_getchar: t.Optional[t.Callable[[bool], str]] = None\n\n\ndef getchar(echo: bool = False) -> str:\n \"\"\"Fetches a single character from the terminal and returns it. This\n will always return a unicode character and under certain rare\n circumstances this might return more than one character. The\n situations which more than one character is returned is when for\n whatever reason multiple characters end up in the terminal buffer or\n standard input was not actually a terminal.\n\n Note that this will always read from the terminal, even if something\n is piped into the standard input.\n\n Note for Windows: in rare cases when typing non-ASCII characters, this\n function might wait for a second character and then return both at once.\n This is because certain Unicode characters look like special-key markers.\n\n .. versionadded:: 2.0\n\n :param echo: if set to `True`, the character read will also show up on\n the terminal. The default is to not show it.\n \"\"\"\n global _getchar\n\n if _getchar is None:\n from ._termui_impl import getchar as f\n\n _getchar = f\n\n return _getchar(echo)\n\n\ndef raw_terminal() -> t.ContextManager[int]:\n from ._termui_impl import raw_terminal as f\n\n return f()\n\n\ndef pause(info: t.Optional[str] = None, err: bool = False) -> None:\n \"\"\"This command stops execution and waits for the user to press any\n key to continue. This is similar to the Windows batch \"pause\"\n command. 
If the program is not run through a terminal, this command\n will instead do nothing.\n\n .. versionadded:: 2.0\n\n .. versionadded:: 4.0\n Added the `err` parameter.\n\n :param info: The message to print before pausing. Defaults to\n ``\"Press any key to continue...\"``.\n :param err: if set to message goes to ``stderr`` instead of\n ``stdout``, the same as with echo.\n \"\"\"\n if not isatty(sys.stdin) or not isatty(sys.stdout):\n return\n\n if info is None:\n info = _(\"Press any key to continue...\")\n\n try:\n if info:\n echo(info, nl=False, err=err)\n try:\n getchar()\n except (KeyboardInterrupt, EOFError):\n pass\n finally:\n if info:\n echo(err=err)\n",
"path": "src/click/termui.py"
}
] | [
{
"content": "import inspect\nimport io\nimport itertools\nimport os\nimport sys\nimport typing as t\nfrom gettext import gettext as _\n\nfrom ._compat import isatty\nfrom ._compat import strip_ansi\nfrom ._compat import WIN\nfrom .exceptions import Abort\nfrom .exceptions import UsageError\nfrom .globals import resolve_color_default\nfrom .types import Choice\nfrom .types import convert_type\nfrom .types import ParamType\nfrom .utils import echo\nfrom .utils import LazyFile\n\nif t.TYPE_CHECKING:\n from ._termui_impl import ProgressBar\n\nV = t.TypeVar(\"V\")\n\n# The prompt functions to use. The doc tools currently override these\n# functions to customize how they work.\nvisible_prompt_func: t.Callable[[str], str] = input\n\n_ansi_colors = {\n \"black\": 30,\n \"red\": 31,\n \"green\": 32,\n \"yellow\": 33,\n \"blue\": 34,\n \"magenta\": 35,\n \"cyan\": 36,\n \"white\": 37,\n \"reset\": 39,\n \"bright_black\": 90,\n \"bright_red\": 91,\n \"bright_green\": 92,\n \"bright_yellow\": 93,\n \"bright_blue\": 94,\n \"bright_magenta\": 95,\n \"bright_cyan\": 96,\n \"bright_white\": 97,\n}\n_ansi_reset_all = \"\\033[0m\"\n\n\ndef hidden_prompt_func(prompt: str) -> str:\n import getpass\n\n return getpass.getpass(prompt)\n\n\ndef _build_prompt(\n text: str,\n suffix: str,\n show_default: bool = False,\n default: t.Optional[t.Any] = None,\n show_choices: bool = True,\n type: t.Optional[ParamType] = None,\n) -> str:\n prompt = text\n if type is not None and show_choices and isinstance(type, Choice):\n prompt += f\" ({', '.join(map(str, type.choices))})\"\n if default is not None and show_default:\n prompt = f\"{prompt} [{_format_default(default)}]\"\n return f\"{prompt}{suffix}\"\n\n\ndef _format_default(default: t.Any) -> t.Any:\n if isinstance(default, (io.IOBase, LazyFile)) and hasattr(default, \"name\"):\n return default.name # type: ignore\n\n return default\n\n\ndef prompt(\n text: str,\n default: t.Optional[t.Any] = None,\n hide_input: bool = False,\n confirmation_prompt: t.Union[bool, str] = False,\n type: t.Optional[t.Union[ParamType, t.Any]] = None,\n value_proc: t.Optional[t.Callable[[str], t.Any]] = None,\n prompt_suffix: str = \": \",\n show_default: bool = True,\n err: bool = False,\n show_choices: bool = True,\n) -> t.Any:\n \"\"\"Prompts a user for input. This is a convenience function that can\n be used to prompt a user for input later.\n\n If the user aborts the input by sending an interrupt signal, this\n function will catch it and raise a :exc:`Abort` exception.\n\n :param text: the text to show for the prompt.\n :param default: the default value to use if no input happens. If this\n is not given it will prompt until it's aborted.\n :param hide_input: if this is set to true then the input value will\n be hidden.\n :param confirmation_prompt: Prompt a second time to confirm the\n value. 
Can be set to a string instead of ``True`` to customize\n the message.\n :param type: the type to use to check the value against.\n :param value_proc: if this parameter is provided it's a function that\n is invoked instead of the type conversion to\n convert a value.\n :param prompt_suffix: a suffix that should be added to the prompt.\n :param show_default: shows or hides the default value in the prompt.\n :param err: if set to true the file defaults to ``stderr`` instead of\n ``stdout``, the same as with echo.\n :param show_choices: Show or hide choices if the passed type is a Choice.\n For example if type is a Choice of either day or week,\n show_choices is true and text is \"Group by\" then the\n prompt will be \"Group by (day, week): \".\n\n .. versionadded:: 8.0\n ``confirmation_prompt`` can be a custom string.\n\n .. versionadded:: 7.0\n Added the ``show_choices`` parameter.\n\n .. versionadded:: 6.0\n Added unicode support for cmd.exe on Windows.\n\n .. versionadded:: 4.0\n Added the `err` parameter.\n\n \"\"\"\n\n def prompt_func(text: str) -> str:\n f = hidden_prompt_func if hide_input else visible_prompt_func\n try:\n # Write the prompt separately so that we get nice\n # coloring through colorama on Windows\n echo(text.rstrip(\" \"), nl=False, err=err)\n # Echo a space to stdout to work around an issue where\n # readline causes backspace to clear the whole line.\n return f(\" \")\n except (KeyboardInterrupt, EOFError):\n # getpass doesn't print a newline if the user aborts input with ^C.\n # Allegedly this behavior is inherited from getpass(3).\n # A doc bug has been filed at https://bugs.python.org/issue24711\n if hide_input:\n echo(None, err=err)\n raise Abort() from None\n\n if value_proc is None:\n value_proc = convert_type(type, default)\n\n prompt = _build_prompt(\n text, prompt_suffix, show_default, default, show_choices, type\n )\n\n if confirmation_prompt:\n if confirmation_prompt is True:\n confirmation_prompt = _(\"Repeat for confirmation\")\n\n confirmation_prompt = _build_prompt(confirmation_prompt, prompt_suffix)\n\n while True:\n while True:\n value = prompt_func(prompt)\n if value:\n break\n elif default is not None:\n value = default\n break\n try:\n result = value_proc(value)\n except UsageError as e:\n if hide_input:\n echo(_(\"Error: The value you entered was invalid.\"), err=err)\n else:\n echo(_(\"Error: {e.message}\").format(e=e), err=err) # noqa: B306\n continue\n if not confirmation_prompt:\n return result\n while True:\n value2 = prompt_func(confirmation_prompt)\n if value2:\n break\n if value == value2:\n return result\n echo(_(\"Error: The two entered values do not match.\"), err=err)\n\n\ndef confirm(\n text: str,\n default: t.Optional[bool] = False,\n abort: bool = False,\n prompt_suffix: str = \": \",\n show_default: bool = True,\n err: bool = False,\n) -> bool:\n \"\"\"Prompts for confirmation (yes/no question).\n\n If the user aborts the input by sending a interrupt signal this\n function will catch it and raise a :exc:`Abort` exception.\n\n :param text: the question to ask.\n :param default: The default value to use when no input is given. If\n ``None``, repeat until input is given.\n :param abort: if this is set to `True` a negative answer aborts the\n exception by raising :exc:`Abort`.\n :param prompt_suffix: a suffix that should be added to the prompt.\n :param show_default: shows or hides the default value in the prompt.\n :param err: if set to true the file defaults to ``stderr`` instead of\n ``stdout``, the same as with echo.\n\n .. 
versionchanged:: 8.0\n Repeat until input is given if ``default`` is ``None``.\n\n .. versionadded:: 4.0\n Added the ``err`` parameter.\n \"\"\"\n prompt = _build_prompt(\n text,\n prompt_suffix,\n show_default,\n \"y/n\" if default is None else (\"Y/n\" if default else \"y/N\"),\n )\n\n while True:\n try:\n # Write the prompt separately so that we get nice\n # coloring through colorama on Windows\n echo(prompt.rstrip(\" \"), nl=False, err=err)\n # Echo a space to stdout to work around an issue where\n # readline causes backspace to clear the whole line.\n value = visible_prompt_func(\" \").lower().strip()\n except (KeyboardInterrupt, EOFError):\n raise Abort() from None\n if value in (\"y\", \"yes\"):\n rv = True\n elif value in (\"n\", \"no\"):\n rv = False\n elif default is not None and value == \"\":\n rv = default\n else:\n echo(_(\"Error: invalid input\"), err=err)\n continue\n break\n if abort and not rv:\n raise Abort()\n return rv\n\n\ndef get_terminal_size() -> os.terminal_size:\n \"\"\"Returns the current size of the terminal as tuple in the form\n ``(width, height)`` in columns and rows.\n\n .. deprecated:: 8.0\n Will be removed in Click 8.1. Use\n :func:`shutil.get_terminal_size` instead.\n \"\"\"\n import shutil\n import warnings\n\n warnings.warn(\n \"'click.get_terminal_size()' is deprecated and will be removed\"\n \" in Click 8.1. Use 'shutil.get_terminal_size()' instead.\",\n DeprecationWarning,\n stacklevel=2,\n )\n return shutil.get_terminal_size()\n\n\ndef echo_via_pager(\n text_or_generator: t.Union[t.Iterable[str], t.Callable[[], t.Iterable[str]], str],\n color: t.Optional[bool] = None,\n) -> None:\n \"\"\"This function takes a text and shows it via an environment specific\n pager on stdout.\n\n .. versionchanged:: 3.0\n Added the `color` flag.\n\n :param text_or_generator: the text to page, or alternatively, a\n generator emitting the text to page.\n :param color: controls if the pager supports ANSI colors or not. The\n default is autodetection.\n \"\"\"\n color = resolve_color_default(color)\n\n if inspect.isgeneratorfunction(text_or_generator):\n i = t.cast(t.Callable[[], t.Iterable[str]], text_or_generator)()\n elif isinstance(text_or_generator, str):\n i = [text_or_generator]\n else:\n i = iter(t.cast(t.Iterable[str], text_or_generator))\n\n # convert every element of i to a text type if necessary\n text_generator = (el if isinstance(el, str) else str(el) for el in i)\n\n from ._termui_impl import pager\n\n return pager(itertools.chain(text_generator, \"\\n\"), color)\n\n\ndef progressbar(\n iterable: t.Optional[t.Iterable[V]] = None,\n length: t.Optional[int] = None,\n label: t.Optional[str] = None,\n show_eta: bool = True,\n show_percent: t.Optional[bool] = None,\n show_pos: bool = False,\n item_show_func: t.Optional[t.Callable[[t.Optional[V]], t.Optional[str]]] = None,\n fill_char: str = \"#\",\n empty_char: str = \"-\",\n bar_template: str = \"%(label)s [%(bar)s] %(info)s\",\n info_sep: str = \" \",\n width: int = 36,\n file: t.Optional[t.TextIO] = None,\n color: t.Optional[bool] = None,\n update_min_steps: int = 1,\n) -> \"ProgressBar[V]\":\n \"\"\"This function creates an iterable context manager that can be used\n to iterate over something while showing a progress bar. It will\n either iterate over the `iterable` or `length` items (that are counted\n up). While iteration happens, this function will print a rendered\n progress bar to the given `file` (defaults to stdout) and will attempt\n to calculate remaining time and more. 
By default, this progress bar\n will not be rendered if the file is not a terminal.\n\n The context manager creates the progress bar. When the context\n manager is entered the progress bar is already created. With every\n iteration over the progress bar, the iterable passed to the bar is\n advanced and the bar is updated. When the context manager exits,\n a newline is printed and the progress bar is finalized on screen.\n\n Note: The progress bar is currently designed for use cases where the\n total progress can be expected to take at least several seconds.\n Because of this, the ProgressBar class object won't display\n progress that is considered too fast, and progress where the time\n between steps is less than a second.\n\n No printing must happen or the progress bar will be unintentionally\n destroyed.\n\n Example usage::\n\n with progressbar(items) as bar:\n for item in bar:\n do_something_with(item)\n\n Alternatively, if no iterable is specified, one can manually update the\n progress bar through the `update()` method instead of directly\n iterating over the progress bar. The update method accepts the number\n of steps to increment the bar with::\n\n with progressbar(length=chunks.total_bytes) as bar:\n for chunk in chunks:\n process_chunk(chunk)\n bar.update(chunks.bytes)\n\n The ``update()`` method also takes an optional value specifying the\n ``current_item`` at the new position. This is useful when used\n together with ``item_show_func`` to customize the output for each\n manual step::\n\n with click.progressbar(\n length=total_size,\n label='Unzipping archive',\n item_show_func=lambda a: a.filename\n ) as bar:\n for archive in zip_file:\n archive.extract()\n bar.update(archive.size, archive)\n\n :param iterable: an iterable to iterate over. If not provided the length\n is required.\n :param length: the number of items to iterate over. By default the\n progressbar will attempt to ask the iterator about its\n length, which might or might not work. If an iterable is\n also provided this parameter can be used to override the\n length. If an iterable is not provided the progress bar\n will iterate over a range of that length.\n :param label: the label to show next to the progress bar.\n :param show_eta: enables or disables the estimated time display. This is\n automatically disabled if the length cannot be\n determined.\n :param show_percent: enables or disables the percentage display. The\n default is `True` if the iterable has a length or\n `False` if not.\n :param show_pos: enables or disables the absolute position display. The\n default is `False`.\n :param item_show_func: A function called with the current item which\n can return a string to show next to the progress bar. If the\n function returns ``None`` nothing is shown. The current item can\n be ``None``, such as when entering and exiting the bar.\n :param fill_char: the character to use to show the filled part of the\n progress bar.\n :param empty_char: the character to use to show the non-filled part of\n the progress bar.\n :param bar_template: the format string to use as template for the bar.\n The parameters in it are ``label`` for the label,\n ``bar`` for the progress bar and ``info`` for the\n info section.\n :param info_sep: the separator between multiple info items (eta etc.)\n :param width: the width of the progress bar in characters, 0 means full\n terminal width\n :param file: The file to write to. 
If this is not a terminal then\n only the label is printed.\n :param color: controls if the terminal supports ANSI colors or not. The\n default is autodetection. This is only needed if ANSI\n codes are included anywhere in the progress bar output\n which is not the case by default.\n :param update_min_steps: Render only when this many updates have\n completed. This allows tuning for very fast iterators.\n\n .. versionchanged:: 8.0\n Output is shown even if execution time is less than 0.5 seconds.\n\n .. versionchanged:: 8.0\n ``item_show_func`` shows the current item, not the previous one.\n\n .. versionchanged:: 8.0\n Labels are echoed if the output is not a TTY. Reverts a change\n in 7.0 that removed all output.\n\n .. versionadded:: 8.0\n Added the ``update_min_steps`` parameter.\n\n .. versionchanged:: 4.0\n Added the ``color`` parameter. Added the ``update`` method to\n the object.\n\n .. versionadded:: 2.0\n \"\"\"\n from ._termui_impl import ProgressBar\n\n color = resolve_color_default(color)\n return ProgressBar(\n iterable=iterable,\n length=length,\n show_eta=show_eta,\n show_percent=show_percent,\n show_pos=show_pos,\n item_show_func=item_show_func,\n fill_char=fill_char,\n empty_char=empty_char,\n bar_template=bar_template,\n info_sep=info_sep,\n file=file,\n label=label,\n width=width,\n color=color,\n update_min_steps=update_min_steps,\n )\n\n\ndef clear() -> None:\n \"\"\"Clears the terminal screen. This will have the effect of clearing\n the whole visible space of the terminal and moving the cursor to the\n top left. This does not do anything if not connected to a terminal.\n\n .. versionadded:: 2.0\n \"\"\"\n if not isatty(sys.stdout):\n return\n if WIN:\n os.system(\"cls\")\n else:\n sys.stdout.write(\"\\033[2J\\033[1;1H\")\n\n\ndef _interpret_color(\n color: t.Union[int, t.Tuple[int, int, int], str], offset: int = 0\n) -> str:\n if isinstance(color, int):\n return f\"{38 + offset};5;{color:d}\"\n\n if isinstance(color, (tuple, list)):\n r, g, b = color\n return f\"{38 + offset};2;{r:d};{g:d};{b:d}\"\n\n return str(_ansi_colors[color] + offset)\n\n\ndef style(\n text: t.Any,\n fg: t.Optional[t.Union[int, t.Tuple[int, int, int], str]] = None,\n bg: t.Optional[t.Union[int, t.Tuple[int, int, int], str]] = None,\n bold: t.Optional[bool] = None,\n dim: t.Optional[bool] = None,\n underline: t.Optional[bool] = None,\n overline: t.Optional[bool] = None,\n italic: t.Optional[bool] = None,\n blink: t.Optional[bool] = None,\n reverse: t.Optional[bool] = None,\n strikethrough: t.Optional[bool] = None,\n reset: bool = True,\n) -> str:\n \"\"\"Styles a text with ANSI styles and returns the new string. By\n default the styling is self contained which means that at the end\n of the string a reset code is issued. 
This can be prevented by\n passing ``reset=False``.\n\n Examples::\n\n click.echo(click.style('Hello World!', fg='green'))\n click.echo(click.style('ATTENTION!', blink=True))\n click.echo(click.style('Some things', reverse=True, fg='cyan'))\n click.echo(click.style('More colors', fg=(255, 12, 128), bg=117))\n\n Supported color names:\n\n * ``black`` (might be a gray)\n * ``red``\n * ``green``\n * ``yellow`` (might be an orange)\n * ``blue``\n * ``magenta``\n * ``cyan``\n * ``white`` (might be light gray)\n * ``bright_black``\n * ``bright_red``\n * ``bright_green``\n * ``bright_yellow``\n * ``bright_blue``\n * ``bright_magenta``\n * ``bright_cyan``\n * ``bright_white``\n * ``reset`` (reset the color code only)\n\n If the terminal supports it, color may also be specified as:\n\n - An integer in the interval [0, 255]. The terminal must support\n 8-bit/256-color mode.\n - An RGB tuple of three integers in [0, 255]. The terminal must\n support 24-bit/true-color mode.\n\n See https://en.wikipedia.org/wiki/ANSI_color and\n https://gist.github.com/XVilka/8346728 for more information.\n\n :param text: the string to style with ansi codes.\n :param fg: if provided this will become the foreground color.\n :param bg: if provided this will become the background color.\n :param bold: if provided this will enable or disable bold mode.\n :param dim: if provided this will enable or disable dim mode. This is\n badly supported.\n :param underline: if provided this will enable or disable underline.\n :param overline: if provided this will enable or disable overline.\n :param italic: if provided this will enable or disable italic.\n :param blink: if provided this will enable or disable blinking.\n :param reverse: if provided this will enable or disable inverse\n rendering (foreground becomes background and the\n other way round).\n :param strikethrough: if provided this will enable or disable\n striking through text.\n :param reset: by default a reset-all code is added at the end of the\n string which means that styles do not carry over. This\n can be disabled to compose styles.\n\n .. versionchanged:: 8.0\n A non-string ``message`` is converted to a string.\n\n .. versionchanged:: 8.0\n Added support for 256 and RGB color codes.\n\n .. versionchanged:: 8.0\n Added the ``strikethrough``, ``italic``, and ``overline``\n parameters.\n\n .. versionchanged:: 7.0\n Added support for bright colors.\n\n .. 
versionadded:: 2.0\n \"\"\"\n if not isinstance(text, str):\n text = str(text)\n\n bits = []\n\n if fg:\n try:\n bits.append(f\"\\033[{_interpret_color(fg)}m\")\n except KeyError:\n raise TypeError(f\"Unknown color {fg!r}\") from None\n\n if bg:\n try:\n bits.append(f\"\\033[{_interpret_color(bg, 10)}m\")\n except KeyError:\n raise TypeError(f\"Unknown color {bg!r}\") from None\n\n if bold is not None:\n bits.append(f\"\\033[{1 if bold else 22}m\")\n if dim is not None:\n bits.append(f\"\\033[{2 if dim else 22}m\")\n if underline is not None:\n bits.append(f\"\\033[{4 if underline else 24}m\")\n if overline is not None:\n bits.append(f\"\\033[{53 if overline else 55}m\")\n if italic is not None:\n bits.append(f\"\\033[{3 if italic else 23}m\")\n if blink is not None:\n bits.append(f\"\\033[{5 if blink else 25}m\")\n if reverse is not None:\n bits.append(f\"\\033[{7 if reverse else 27}m\")\n if strikethrough is not None:\n bits.append(f\"\\033[{9 if strikethrough else 29}m\")\n bits.append(text)\n if reset:\n bits.append(_ansi_reset_all)\n return \"\".join(bits)\n\n\ndef unstyle(text: str) -> str:\n \"\"\"Removes ANSI styling information from a string. Usually it's not\n necessary to use this function as Click's echo function will\n automatically remove styling if necessary.\n\n .. versionadded:: 2.0\n\n :param text: the text to remove style information from.\n \"\"\"\n return strip_ansi(text)\n\n\ndef secho(\n message: t.Optional[t.Any] = None,\n file: t.Optional[t.IO[t.AnyStr]] = None,\n nl: bool = True,\n err: bool = False,\n color: t.Optional[bool] = None,\n **styles: t.Any,\n) -> None:\n \"\"\"This function combines :func:`echo` and :func:`style` into one\n call. As such the following two calls are the same::\n\n click.secho('Hello World!', fg='green')\n click.echo(click.style('Hello World!', fg='green'))\n\n All keyword arguments are forwarded to the underlying functions\n depending on which one they go with.\n\n Non-string types will be converted to :class:`str`. However,\n :class:`bytes` are passed directly to :meth:`echo` without applying\n style. If you want to style bytes that represent text, call\n :meth:`bytes.decode` first.\n\n .. versionchanged:: 8.0\n A non-string ``message`` is converted to a string. Bytes are\n passed through without style applied.\n\n .. versionadded:: 2.0\n \"\"\"\n if message is not None and not isinstance(message, (bytes, bytearray)):\n message = style(message, **styles)\n\n return echo(message, file=file, nl=nl, err=err, color=color)\n\n\ndef edit(\n text: t.Optional[t.AnyStr] = None,\n editor: t.Optional[str] = None,\n env: t.Optional[t.Mapping[str, str]] = None,\n require_save: bool = True,\n extension: str = \".txt\",\n filename: t.Optional[str] = None,\n) -> t.Optional[t.AnyStr]:\n r\"\"\"Edits the given text in the defined editor. If an editor is given\n (should be the full path to the executable but the regular operating\n system search path is used for finding the executable) it overrides\n the detected editor. Optionally, some environment variables can be\n used. If the editor is closed without changes, `None` is returned. In\n case a file is edited directly the return value is always `None` and\n `require_save` and `extension` are ignored.\n\n If the editor cannot be opened a :exc:`UsageError` is raised.\n\n Note for Windows: to simplify cross-platform usage, the newlines are\n automatically converted from POSIX to Windows and vice versa. 
As such,\n the message here will have ``\\n`` as newline markers.\n\n :param text: the text to edit.\n :param editor: optionally the editor to use. Defaults to automatic\n detection.\n :param env: environment variables to forward to the editor.\n :param require_save: if this is true, then not saving in the editor\n will make the return value become `None`.\n :param extension: the extension to tell the editor about. This defaults\n to `.txt` but changing this might change syntax\n highlighting.\n :param filename: if provided it will edit this file instead of the\n provided text contents. It will not use a temporary\n file as an indirection in that case.\n \"\"\"\n from ._termui_impl import Editor\n\n ed = Editor(editor=editor, env=env, require_save=require_save, extension=extension)\n\n if filename is None:\n return ed.edit(text)\n\n ed.edit_file(filename)\n return None\n\n\ndef launch(url: str, wait: bool = False, locate: bool = False) -> int:\n \"\"\"This function launches the given URL (or filename) in the default\n viewer application for this file type. If this is an executable, it\n might launch the executable in a new session. The return value is\n the exit code of the launched application. Usually, ``0`` indicates\n success.\n\n Examples::\n\n click.launch('https://click.palletsprojects.com/')\n click.launch('/my/downloaded/file', locate=True)\n\n .. versionadded:: 2.0\n\n :param url: URL or filename of the thing to launch.\n :param wait: Wait for the program to exit before returning. This\n only works if the launched program blocks. In particular,\n ``xdg-open`` on Linux does not block.\n :param locate: if this is set to `True` then instead of launching the\n application associated with the URL it will attempt to\n launch a file manager with the file located. This\n might have weird effects if the URL does not point to\n the filesystem.\n \"\"\"\n from ._termui_impl import open_url\n\n return open_url(url, wait=wait, locate=locate)\n\n\n# If this is provided, getchar() calls into this instead. This is used\n# for unittesting purposes.\n_getchar: t.Optional[t.Callable[[bool], str]] = None\n\n\ndef getchar(echo: bool = False) -> str:\n \"\"\"Fetches a single character from the terminal and returns it. This\n will always return a unicode character and under certain rare\n circumstances this might return more than one character. The\n situations which more than one character is returned is when for\n whatever reason multiple characters end up in the terminal buffer or\n standard input was not actually a terminal.\n\n Note that this will always read from the terminal, even if something\n is piped into the standard input.\n\n Note for Windows: in rare cases when typing non-ASCII characters, this\n function might wait for a second character and then return both at once.\n This is because certain Unicode characters look like special-key markers.\n\n .. versionadded:: 2.0\n\n :param echo: if set to `True`, the character read will also show up on\n the terminal. The default is to not show it.\n \"\"\"\n global _getchar\n\n if _getchar is None:\n from ._termui_impl import getchar as f\n\n _getchar = f\n\n return _getchar(echo)\n\n\ndef raw_terminal() -> t.ContextManager[int]:\n from ._termui_impl import raw_terminal as f\n\n return f()\n\n\ndef pause(info: t.Optional[str] = None, err: bool = False) -> None:\n \"\"\"This command stops execution and waits for the user to press any\n key to continue. This is similar to the Windows batch \"pause\"\n command. 
If the program is not run through a terminal, this command\n will instead do nothing.\n\n .. versionadded:: 2.0\n\n .. versionadded:: 4.0\n Added the `err` parameter.\n\n :param info: The message to print before pausing. Defaults to\n ``\"Press any key to continue...\"``.\n :param err: if set to message goes to ``stderr`` instead of\n ``stdout``, the same as with echo.\n \"\"\"\n if not isatty(sys.stdin) or not isatty(sys.stdout):\n return\n\n if info is None:\n info = _(\"Press any key to continue...\")\n\n try:\n if info:\n echo(info, nl=False, err=err)\n try:\n getchar()\n except (KeyboardInterrupt, EOFError):\n pass\n finally:\n if info:\n echo(err=err)\n",
"path": "src/click/termui.py"
}
] | diff --git a/CHANGES.rst b/CHANGES.rst
index 6348d6f25..6c9a79327 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -14,6 +14,7 @@ Unreleased
- Fix a typo in the Bash completion script that affected file and
directory completion. If this script was generated by a previous
version, it should be regenerated. :issue:`2163`
+- Fix typing for ``secho`` file argument. :issue:`2174`
Version 8.0.3
diff --git a/src/click/termui.py b/src/click/termui.py
index a7a8d03cb..07b5257cc 100644
--- a/src/click/termui.py
+++ b/src/click/termui.py
@@ -624,7 +624,7 @@ def unstyle(text: str) -> str:
def secho(
message: t.Optional[t.Any] = None,
- file: t.Optional[t.IO] = None,
+ file: t.Optional[t.IO[t.AnyStr]] = None,
nl: bool = True,
err: bool = False,
color: t.Optional[bool] = None,
|
jupyterhub__zero-to-jupyterhub-k8s-403 | Allow making JupyterLab default thing to launch
Is there a way to make JupyterLab come up by default when new users connect?
Is there a way to get the JupyterHub control panel from JupyterLab?
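For context, the change merged below wires this up through the Spawner's `default_url`: a new `singleuser.defaultUrl` chart option is forwarded to `c.Spawner.default_url` in the hub's `jupyterhub_config.py`. A minimal sketch of that behaviour is shown here; the `/lab` value is an assumed example, and the config object `c` is provided by JupyterHub when it loads the file rather than defined in this fragment.

```python
# Sketch of a jupyterhub_config.py fragment; `c` is supplied by JupyterHub/traitlets
# when the config file is evaluated.
# Equivalent of setting `singleuser.defaultUrl: "/lab"` in the chart's config.yaml
# (the option added by the patch below): users land in JupyterLab after their
# server starts instead of the classic notebook tree view.
c.Spawner.default_url = '/lab'
```

Users can still reach the classic interface by swapping `/lab` for `/tree` in the URL after the server starts, as the documentation change below notes.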
| [
{
"content": "import os\nimport glob\nfrom tornado.httpclient import AsyncHTTPClient\n\nfrom z2jh import get_config, get_secret\n\n# Configure JupyterHub to use the curl backend for making HTTP requests,\n# rather than the pure-python implementations. The default one starts\n# being too slow to make a large number of requests to the proxy API\n# at the rate required.\nAsyncHTTPClient.configure(\"tornado.curl_httpclient.CurlAsyncHTTPClient\")\n\nc.JupyterHub.spawner_class = 'kubespawner.KubeSpawner'\n\n# Connect to a proxy running in a different pod\nc.ConfigurableHTTPProxy.api_url = 'http://{}:{}'.format(os.environ['PROXY_API_SERVICE_HOST'], int(os.environ['PROXY_API_SERVICE_PORT']))\nc.ConfigurableHTTPProxy.should_start = False\n\n# Do not shut down user pods when hub is restarted\nc.JupyterHub.cleanup_servers = False\n\n# Check that the proxy has routes appropriately setup\n# This isn't the best named setting :D\nc.JupyterHub.last_activity_interval = 60\n\n# Max number of servers that can be spawning at any one time\nc.JupyterHub.concurrent_spawn_limit = get_config('hub.concurrent-spawn-limit')\n\nactive_server_limit = get_config('hub.active-server-limit', None)\n\nif active_server_limit is not None:\n c.JupyterHub.active_server_limit = int(active_server_limit)\n\nc.JupyterHub.ip = os.environ['PROXY_PUBLIC_SERVICE_HOST']\nc.JupyterHub.port = int(os.environ['PROXY_PUBLIC_SERVICE_PORT'])\n\n# the hub should listen on all interfaces, so the proxy can access it\nc.JupyterHub.hub_ip = '0.0.0.0'\n\nc.KubeSpawner.namespace = os.environ.get('POD_NAMESPACE', 'default')\n\nc.KubeSpawner.start_timeout = get_config('singleuser.start-timeout')\n\n# Use env var for this, since we want hub to restart when this changes\nc.KubeSpawner.singleuser_image_spec = os.environ['SINGLEUSER_IMAGE']\n\nc.KubeSpawner.singleuser_extra_labels = get_config('singleuser.extra-labels', {})\n\nc.KubeSpawner.singleuser_uid = get_config('singleuser.uid')\nc.KubeSpawner.singleuser_fs_gid = get_config('singleuser.fs-gid')\n\nservice_account_name = get_config('singleuser.service-account-name', None)\nif service_account_name:\n c.KubeSpawner.singleuser_service_account = service_account_name\n\nc.KubeSpawner.singleuser_node_selector = get_config('singleuser.node-selector')\n# Configure dynamically provisioning pvc\nstorage_type = get_config('singleuser.storage.type')\nif storage_type == 'dynamic':\n c.KubeSpawner.pvc_name_template = 'claim-{username}{servername}'\n c.KubeSpawner.user_storage_pvc_ensure = True\n storage_class = get_config('singleuser.storage.dynamic.storage-class', None)\n if storage_class:\n c.KubeSpawner.user_storage_class = storage_class\n c.KubeSpawner.user_storage_access_modes = ['ReadWriteOnce']\n c.KubeSpawner.user_storage_capacity = get_config('singleuser.storage.capacity')\n\n # Add volumes to singleuser pods\n c.KubeSpawner.volumes = [\n {\n 'name': 'volume-{username}{servername}',\n 'persistentVolumeClaim': {\n 'claimName': 'claim-{username}{servername}'\n }\n }\n ]\n c.KubeSpawner.volume_mounts = [\n {\n 'mountPath': get_config('singleuser.storage.home_mount_path'),\n 'name': 'volume-{username}{servername}'\n }\n ]\nelif storage_type == 'static':\n pvc_claim_name = get_config('singleuser.storage.static.pvc-name')\n c.KubeSpawner.volumes = [{\n 'name': 'home',\n 'persistentVolumeClaim': {\n 'claimName': pvc_claim_name\n }\n }]\n\n c.KubeSpawner.volume_mounts = [{\n 'mountPath': get_config('singleuser.storage.home_mount_path'),\n 'name': 'home',\n 'subPath': 
get_config('singleuser.storage.static.sub-path')\n }]\n\nc.KubeSpawner.volumes.extend(get_config('singleuser.storage.extra-volumes', []))\nc.KubeSpawner.volume_mounts.extend(get_config('singleuser.storage.extra-volume-mounts', []))\n\nlifecycle_hooks = get_config('singleuser.lifecycle-hooks')\nif lifecycle_hooks:\n c.KubeSpawner.singleuser_lifecycle_hooks = lifecycle_hooks\n\ninit_containers = get_config('singleuser.init-containers')\nif init_containers:\n c.KubeSpawner.singleuser_init_containers = init_containers\n\n# Gives spawned containers access to the API of the hub\nc.KubeSpawner.hub_connect_ip = os.environ['HUB_SERVICE_HOST']\nc.KubeSpawner.hub_connect_port = int(os.environ['HUB_SERVICE_PORT'])\n\nc.JupyterHub.hub_connect_ip = os.environ['HUB_SERVICE_HOST']\nc.JupyterHub.hub_connect_port = int(os.environ['HUB_SERVICE_PORT'])\n\nc.KubeSpawner.mem_limit = get_config('singleuser.memory.limit')\nc.KubeSpawner.mem_guarantee = get_config('singleuser.memory.guarantee')\nc.KubeSpawner.cpu_limit = get_config('singleuser.cpu.limit')\nc.KubeSpawner.cpu_guarantee = get_config('singleuser.cpu.guarantee')\n\n# Allow switching authenticators easily\nauth_type = get_config('auth.type')\nemail_domain = 'local'\n\nif auth_type == 'google':\n c.JupyterHub.authenticator_class = 'oauthenticator.GoogleOAuthenticator'\n c.GoogleOAuthenticator.client_id = get_config('auth.google.client-id')\n c.GoogleOAuthenticator.client_secret = get_config('auth.google.client-secret')\n c.GoogleOAuthenticator.oauth_callback_url = get_config('auth.google.callback-url')\n c.GoogleOAuthenticator.hosted_domain = get_config('auth.google.hosted-domain')\n c.GoogleOAuthenticator.login_service = get_config('auth.google.login-service')\n email_domain = get_config('auth.google.hosted-domain')\nelif auth_type == 'github':\n c.JupyterHub.authenticator_class = 'oauthenticator.GitHubOAuthenticator'\n c.GitHubOAuthenticator.oauth_callback_url = get_config('auth.github.callback-url')\n c.GitHubOAuthenticator.client_id = get_config('auth.github.client-id')\n c.GitHubOAuthenticator.client_secret = get_config('auth.github.client-secret')\nelif auth_type == 'cilogon':\n c.JupyterHub.authenticator_class = 'oauthenticator.CILogonOAuthenticator'\n c.CILogonOAuthenticator.oauth_callback_url = get_config('auth.cilogon.callback-url')\n c.CILogonOAuthenticator.client_id = get_config('auth.cilogon.client-id')\n c.CILogonOAuthenticator.client_secret = get_config('auth.cilogon.client-secret')\nelif auth_type == 'gitlab':\n c.JupyterHub.authenticator_class = 'oauthenticator.gitlab.GitLabOAuthenticator'\n c.GitLabOAuthenticator.oauth_callback_url = get_config('auth.gitlab.callback-url')\n c.GitLabOAuthenticator.client_id = get_config('auth.gitlab.client-id')\n c.GitLabOAuthenticator.client_secret = get_config('auth.gitlab.client-secret')\nelif auth_type == 'mediawiki':\n c.JupyterHub.authenticator_class = 'oauthenticator.mediawiki.MWOAuthenticator'\n c.MWOAuthenticator.client_id = get_config('auth.mediawiki.client-id')\n c.MWOAuthenticator.client_secret = get_config('auth.mediawiki.client-secret')\n c.MWOAuthenticator.index_url = get_config('auth.mediawiki.index-url')\nelif auth_type == 'globus':\n c.JupyterHub.authenticator_class = 'oauthenticator.globus.GlobusOAuthenticator'\n c.GlobusOAuthenticator.oauth_callback_url = get_config('auth.globus.callback-url')\n c.GlobusOAuthenticator.client_id = get_config('auth.globus.client-id')\n c.GlobusOAuthenticator.client_secret = get_config('auth.globus.client-secret')\n 
c.GlobusOAuthenticator.identity_provider = get_config('auth.globus.identity-provider', '')\nelif auth_type == 'hmac':\n c.JupyterHub.authenticator_class = 'hmacauthenticator.HMACAuthenticator'\n c.HMACAuthenticator.secret_key = bytes.fromhex(get_config('auth.hmac.secret-key'))\nelif auth_type == 'dummy':\n c.JupyterHub.authenticator_class = 'dummyauthenticator.DummyAuthenticator'\n c.DummyAuthenticator.password = get_config('auth.dummy.password', None)\nelif auth_type == 'tmp':\n c.JupyterHub.authenticator_class = 'tmpauthenticator.TmpAuthenticator'\nelif auth_type == 'lti':\n c.JupyterHub.authenticator_class = 'ltiauthenticator.LTIAuthenticator'\n c.LTIAuthenticator.consumers = get_config('auth.lti.consumers')\nelif auth_type == 'custom':\n # full_class_name looks like \"myauthenticator.MyAuthenticator\".\n # To create a docker image with this class availabe, you can just have the\n # following Dockerifle:\n # FROM jupyterhub/k8s-hub:v0.4\n # RUN pip3 install myauthenticator\n full_class_name = get_config('auth.custom.class-name')\n c.JupyterHub.authenticator_class = full_class_name\n auth_class_name = full_class_name.rsplit('.', 1)[-1]\n auth_config = c[auth_class_name]\n auth_config.update(get_config('auth.custom.config') or {})\nelse:\n raise ValueError(\"Unhandled auth type: %r\" % auth_type)\n\nc.Authenticator.enable_auth_state = get_config('auth.state.enabled', False)\n\ndef generate_user_email(spawner):\n \"\"\"\n Used as the EMAIL environment variable\n \"\"\"\n return '{username}@{domain}'.format(\n username=spawner.user.name, domain=email_domain\n )\n\ndef generate_user_name(spawner):\n \"\"\"\n Used as GIT_AUTHOR_NAME and GIT_COMMITTER_NAME environment variables\n \"\"\"\n return spawner.user.name\n\nc.KubeSpawner.environment = {\n 'EMAIL': generate_user_email,\n # git requires these committer attributes\n 'GIT_AUTHOR_NAME': generate_user_name,\n 'GIT_COMMITTER_NAME': generate_user_name\n}\n\nc.KubeSpawner.environment.update(get_config('singleuser.extra-env', {}))\n\n# Enable admins to access user servers\nc.JupyterHub.admin_access = get_config('auth.admin.access')\nc.Authenticator.admin_users = get_config('auth.admin.users', [])\nc.Authenticator.whitelist = get_config('auth.whitelist.users', [])\n\nc.JupyterHub.base_url = get_config('hub.base_url')\n\nc.JupyterHub.services = []\n\nif get_config('cull.enabled', False):\n cull_timeout = get_config('cull.timeout')\n cull_every = get_config('cull.every')\n cull_cmd = [\n '/usr/local/bin/cull_idle_servers.py',\n '--timeout=%s' % cull_timeout,\n '--cull-every=%s' % cull_every,\n '--url=http://127.0.0.1:8081' + c.JupyterHub.base_url + 'hub/api'\n ]\n if get_config('cull.users'):\n cull_cmd.append('--cull-users')\n c.JupyterHub.services.append({\n 'name': 'cull-idle',\n 'admin': True,\n 'command': cull_cmd,\n })\n\nfor name, service in get_config('hub.services', {}).items():\n api_token = get_secret('services.token.%s' % name)\n # jupyterhub.services is a list of dicts, but\n # in the helm chart it is a dict of dicts for easier merged-config\n service.setdefault('name', name)\n if api_token:\n service['api_token'] = api_token\n c.JupyterHub.services.append(service)\n\n\nc.JupyterHub.db_url = get_config('hub.db_url')\n\ncmd = get_config('singleuser.cmd', None)\nif cmd:\n c.Spawner.cmd = cmd\n\n\nscheduler_strategy = get_config('singleuser.scheduler-strategy', 'spread')\n\nif scheduler_strategy == 'pack':\n # FIXME: Support setting affinity directly in KubeSpawner\n c.KubeSpawner.singleuser_extra_pod_config = {\n 'affinity': {\n 
'podAffinity': {\n 'preferredDuringSchedulingIgnoredDuringExecution': [{\n 'weight': 100,\n 'podAffinityTerm': {\n 'labelSelector': {\n 'matchExpressions': [{\n 'key': 'component',\n 'operator': 'In',\n 'values': ['singleuser-server']\n }]\n },\n 'topologyKey': 'kubernetes.io/hostname'\n }\n }],\n }\n }\n }\nelse:\n # Set default to {} so subconfigs can easily update it\n c.KubeSpawner.singleuser_extra_pod_config = {}\n\nextra_configs = sorted(glob.glob('/etc/jupyterhub/config/hub.extra-config.*.py'))\nfor ec in extra_configs:\n load_subconfig(ec)\n",
"path": "images/hub/jupyterhub_config.py"
}
] | [
{
"content": "import os\nimport glob\nfrom tornado.httpclient import AsyncHTTPClient\n\nfrom z2jh import get_config, get_secret\n\n# Configure JupyterHub to use the curl backend for making HTTP requests,\n# rather than the pure-python implementations. The default one starts\n# being too slow to make a large number of requests to the proxy API\n# at the rate required.\nAsyncHTTPClient.configure(\"tornado.curl_httpclient.CurlAsyncHTTPClient\")\n\nc.JupyterHub.spawner_class = 'kubespawner.KubeSpawner'\n\n# Connect to a proxy running in a different pod\nc.ConfigurableHTTPProxy.api_url = 'http://{}:{}'.format(os.environ['PROXY_API_SERVICE_HOST'], int(os.environ['PROXY_API_SERVICE_PORT']))\nc.ConfigurableHTTPProxy.should_start = False\n\n# Do not shut down user pods when hub is restarted\nc.JupyterHub.cleanup_servers = False\n\n# Check that the proxy has routes appropriately setup\n# This isn't the best named setting :D\nc.JupyterHub.last_activity_interval = 60\n\n# Max number of servers that can be spawning at any one time\nc.JupyterHub.concurrent_spawn_limit = get_config('hub.concurrent-spawn-limit')\n\nactive_server_limit = get_config('hub.active-server-limit', None)\n\nif active_server_limit is not None:\n c.JupyterHub.active_server_limit = int(active_server_limit)\n\nc.JupyterHub.ip = os.environ['PROXY_PUBLIC_SERVICE_HOST']\nc.JupyterHub.port = int(os.environ['PROXY_PUBLIC_SERVICE_PORT'])\n\n# the hub should listen on all interfaces, so the proxy can access it\nc.JupyterHub.hub_ip = '0.0.0.0'\n\nc.KubeSpawner.namespace = os.environ.get('POD_NAMESPACE', 'default')\n\nc.KubeSpawner.start_timeout = get_config('singleuser.start-timeout')\n\n# Use env var for this, since we want hub to restart when this changes\nc.KubeSpawner.singleuser_image_spec = os.environ['SINGLEUSER_IMAGE']\n\nc.KubeSpawner.singleuser_extra_labels = get_config('singleuser.extra-labels', {})\n\nc.KubeSpawner.singleuser_uid = get_config('singleuser.uid')\nc.KubeSpawner.singleuser_fs_gid = get_config('singleuser.fs-gid')\n\nservice_account_name = get_config('singleuser.service-account-name', None)\nif service_account_name:\n c.KubeSpawner.singleuser_service_account = service_account_name\n\nc.KubeSpawner.singleuser_node_selector = get_config('singleuser.node-selector')\n# Configure dynamically provisioning pvc\nstorage_type = get_config('singleuser.storage.type')\nif storage_type == 'dynamic':\n c.KubeSpawner.pvc_name_template = 'claim-{username}{servername}'\n c.KubeSpawner.user_storage_pvc_ensure = True\n storage_class = get_config('singleuser.storage.dynamic.storage-class', None)\n if storage_class:\n c.KubeSpawner.user_storage_class = storage_class\n c.KubeSpawner.user_storage_access_modes = ['ReadWriteOnce']\n c.KubeSpawner.user_storage_capacity = get_config('singleuser.storage.capacity')\n\n # Add volumes to singleuser pods\n c.KubeSpawner.volumes = [\n {\n 'name': 'volume-{username}{servername}',\n 'persistentVolumeClaim': {\n 'claimName': 'claim-{username}{servername}'\n }\n }\n ]\n c.KubeSpawner.volume_mounts = [\n {\n 'mountPath': get_config('singleuser.storage.home_mount_path'),\n 'name': 'volume-{username}{servername}'\n }\n ]\nelif storage_type == 'static':\n pvc_claim_name = get_config('singleuser.storage.static.pvc-name')\n c.KubeSpawner.volumes = [{\n 'name': 'home',\n 'persistentVolumeClaim': {\n 'claimName': pvc_claim_name\n }\n }]\n\n c.KubeSpawner.volume_mounts = [{\n 'mountPath': get_config('singleuser.storage.home_mount_path'),\n 'name': 'home',\n 'subPath': 
get_config('singleuser.storage.static.sub-path')\n }]\n\nc.KubeSpawner.volumes.extend(get_config('singleuser.storage.extra-volumes', []))\nc.KubeSpawner.volume_mounts.extend(get_config('singleuser.storage.extra-volume-mounts', []))\n\nlifecycle_hooks = get_config('singleuser.lifecycle-hooks')\nif lifecycle_hooks:\n c.KubeSpawner.singleuser_lifecycle_hooks = lifecycle_hooks\n\ninit_containers = get_config('singleuser.init-containers')\nif init_containers:\n c.KubeSpawner.singleuser_init_containers = init_containers\n\n# Gives spawned containers access to the API of the hub\nc.KubeSpawner.hub_connect_ip = os.environ['HUB_SERVICE_HOST']\nc.KubeSpawner.hub_connect_port = int(os.environ['HUB_SERVICE_PORT'])\n\nc.JupyterHub.hub_connect_ip = os.environ['HUB_SERVICE_HOST']\nc.JupyterHub.hub_connect_port = int(os.environ['HUB_SERVICE_PORT'])\n\nc.KubeSpawner.mem_limit = get_config('singleuser.memory.limit')\nc.KubeSpawner.mem_guarantee = get_config('singleuser.memory.guarantee')\nc.KubeSpawner.cpu_limit = get_config('singleuser.cpu.limit')\nc.KubeSpawner.cpu_guarantee = get_config('singleuser.cpu.guarantee')\n\n# Allow switching authenticators easily\nauth_type = get_config('auth.type')\nemail_domain = 'local'\n\nif auth_type == 'google':\n c.JupyterHub.authenticator_class = 'oauthenticator.GoogleOAuthenticator'\n c.GoogleOAuthenticator.client_id = get_config('auth.google.client-id')\n c.GoogleOAuthenticator.client_secret = get_config('auth.google.client-secret')\n c.GoogleOAuthenticator.oauth_callback_url = get_config('auth.google.callback-url')\n c.GoogleOAuthenticator.hosted_domain = get_config('auth.google.hosted-domain')\n c.GoogleOAuthenticator.login_service = get_config('auth.google.login-service')\n email_domain = get_config('auth.google.hosted-domain')\nelif auth_type == 'github':\n c.JupyterHub.authenticator_class = 'oauthenticator.GitHubOAuthenticator'\n c.GitHubOAuthenticator.oauth_callback_url = get_config('auth.github.callback-url')\n c.GitHubOAuthenticator.client_id = get_config('auth.github.client-id')\n c.GitHubOAuthenticator.client_secret = get_config('auth.github.client-secret')\nelif auth_type == 'cilogon':\n c.JupyterHub.authenticator_class = 'oauthenticator.CILogonOAuthenticator'\n c.CILogonOAuthenticator.oauth_callback_url = get_config('auth.cilogon.callback-url')\n c.CILogonOAuthenticator.client_id = get_config('auth.cilogon.client-id')\n c.CILogonOAuthenticator.client_secret = get_config('auth.cilogon.client-secret')\nelif auth_type == 'gitlab':\n c.JupyterHub.authenticator_class = 'oauthenticator.gitlab.GitLabOAuthenticator'\n c.GitLabOAuthenticator.oauth_callback_url = get_config('auth.gitlab.callback-url')\n c.GitLabOAuthenticator.client_id = get_config('auth.gitlab.client-id')\n c.GitLabOAuthenticator.client_secret = get_config('auth.gitlab.client-secret')\nelif auth_type == 'mediawiki':\n c.JupyterHub.authenticator_class = 'oauthenticator.mediawiki.MWOAuthenticator'\n c.MWOAuthenticator.client_id = get_config('auth.mediawiki.client-id')\n c.MWOAuthenticator.client_secret = get_config('auth.mediawiki.client-secret')\n c.MWOAuthenticator.index_url = get_config('auth.mediawiki.index-url')\nelif auth_type == 'globus':\n c.JupyterHub.authenticator_class = 'oauthenticator.globus.GlobusOAuthenticator'\n c.GlobusOAuthenticator.oauth_callback_url = get_config('auth.globus.callback-url')\n c.GlobusOAuthenticator.client_id = get_config('auth.globus.client-id')\n c.GlobusOAuthenticator.client_secret = get_config('auth.globus.client-secret')\n 
c.GlobusOAuthenticator.identity_provider = get_config('auth.globus.identity-provider', '')\nelif auth_type == 'hmac':\n c.JupyterHub.authenticator_class = 'hmacauthenticator.HMACAuthenticator'\n c.HMACAuthenticator.secret_key = bytes.fromhex(get_config('auth.hmac.secret-key'))\nelif auth_type == 'dummy':\n c.JupyterHub.authenticator_class = 'dummyauthenticator.DummyAuthenticator'\n c.DummyAuthenticator.password = get_config('auth.dummy.password', None)\nelif auth_type == 'tmp':\n c.JupyterHub.authenticator_class = 'tmpauthenticator.TmpAuthenticator'\nelif auth_type == 'lti':\n c.JupyterHub.authenticator_class = 'ltiauthenticator.LTIAuthenticator'\n c.LTIAuthenticator.consumers = get_config('auth.lti.consumers')\nelif auth_type == 'custom':\n # full_class_name looks like \"myauthenticator.MyAuthenticator\".\n # To create a docker image with this class availabe, you can just have the\n # following Dockerifle:\n # FROM jupyterhub/k8s-hub:v0.4\n # RUN pip3 install myauthenticator\n full_class_name = get_config('auth.custom.class-name')\n c.JupyterHub.authenticator_class = full_class_name\n auth_class_name = full_class_name.rsplit('.', 1)[-1]\n auth_config = c[auth_class_name]\n auth_config.update(get_config('auth.custom.config') or {})\nelse:\n raise ValueError(\"Unhandled auth type: %r\" % auth_type)\n\nc.Authenticator.enable_auth_state = get_config('auth.state.enabled', False)\n\ndef generate_user_email(spawner):\n \"\"\"\n Used as the EMAIL environment variable\n \"\"\"\n return '{username}@{domain}'.format(\n username=spawner.user.name, domain=email_domain\n )\n\ndef generate_user_name(spawner):\n \"\"\"\n Used as GIT_AUTHOR_NAME and GIT_COMMITTER_NAME environment variables\n \"\"\"\n return spawner.user.name\n\nc.KubeSpawner.environment = {\n 'EMAIL': generate_user_email,\n # git requires these committer attributes\n 'GIT_AUTHOR_NAME': generate_user_name,\n 'GIT_COMMITTER_NAME': generate_user_name\n}\n\nc.KubeSpawner.environment.update(get_config('singleuser.extra-env', {}))\n\n# Enable admins to access user servers\nc.JupyterHub.admin_access = get_config('auth.admin.access')\nc.Authenticator.admin_users = get_config('auth.admin.users', [])\nc.Authenticator.whitelist = get_config('auth.whitelist.users', [])\n\nc.JupyterHub.base_url = get_config('hub.base_url')\n\nc.JupyterHub.services = []\n\nif get_config('cull.enabled', False):\n cull_timeout = get_config('cull.timeout')\n cull_every = get_config('cull.every')\n cull_cmd = [\n '/usr/local/bin/cull_idle_servers.py',\n '--timeout=%s' % cull_timeout,\n '--cull-every=%s' % cull_every,\n '--url=http://127.0.0.1:8081' + c.JupyterHub.base_url + 'hub/api'\n ]\n if get_config('cull.users'):\n cull_cmd.append('--cull-users')\n c.JupyterHub.services.append({\n 'name': 'cull-idle',\n 'admin': True,\n 'command': cull_cmd,\n })\n\nfor name, service in get_config('hub.services', {}).items():\n api_token = get_secret('services.token.%s' % name)\n # jupyterhub.services is a list of dicts, but\n # in the helm chart it is a dict of dicts for easier merged-config\n service.setdefault('name', name)\n if api_token:\n service['api_token'] = api_token\n c.JupyterHub.services.append(service)\n\n\nc.JupyterHub.db_url = get_config('hub.db_url')\n\ncmd = get_config('singleuser.cmd', None)\nif cmd:\n c.Spawner.cmd = cmd\n\ndefault_url = get_config('singleuser.default-url', None)\nif default_url:\n c.Spawner.default_url = default_url\n\nscheduler_strategy = get_config('singleuser.scheduler-strategy', 'spread')\n\nif scheduler_strategy == 'pack':\n # FIXME: Support 
setting affinity directly in KubeSpawner\n c.KubeSpawner.singleuser_extra_pod_config = {\n 'affinity': {\n 'podAffinity': {\n 'preferredDuringSchedulingIgnoredDuringExecution': [{\n 'weight': 100,\n 'podAffinityTerm': {\n 'labelSelector': {\n 'matchExpressions': [{\n 'key': 'component',\n 'operator': 'In',\n 'values': ['singleuser-server']\n }]\n },\n 'topologyKey': 'kubernetes.io/hostname'\n }\n }],\n }\n }\n }\nelse:\n # Set default to {} so subconfigs can easily update it\n c.KubeSpawner.singleuser_extra_pod_config = {}\n\nextra_configs = sorted(glob.glob('/etc/jupyterhub/config/hub.extra-config.*.py'))\nfor ec in extra_configs:\n load_subconfig(ec)\n",
"path": "images/hub/jupyterhub_config.py"
}
] | diff --git a/doc/source/user-environment.rst b/doc/source/user-environment.rst
index 42cb34a6a2..fdf4b829e4 100644
--- a/doc/source/user-environment.rst
+++ b/doc/source/user-environment.rst
@@ -185,6 +185,35 @@ how to configure JupyterHub to build off of this image:
9. **Enjoy your new computing environment!** You should now have a live
computing environment built off of the Docker image we’ve created.
+Use JupyterLab by default
+-------------------------
+
+`JupyterLab <https://github.com/jupyterlab/jupyterlab>`_ is the next generation
+user interface for Project Jupyter. It can be used with JupyterHub, both as an
+optional interface and as a default.
+
+1. `Install JupyterLab <https://github.com/jupyterlab/jupyterlab#installation`_
+ in your user image.
+2. `Install JupyterLab Hub extension
+ <https://github.com/jupyterhub/jupyterlab-hub#installation>`_ in your user
+ image. This provides a nice UI for accessing JupyterHub control panel from
+ JupyterLab. You only need the `jupyter labextension` command.
+3. If you want users to launch automatically into JupyterLab instead of classic
+ notebook, use the following in your ``config.yaml``
+
+ .. code-block:: yaml
+ singleuser:
+ defaultUrl: "/lab"
+
+ This will put users into JupyterLab when they launch.
+4. Users can always classic Jupyter Notebook by replacing the ``/lab`` in the URL
+ after their server starts with ``/tree``. Similarly, you can access
+ JupyterLab even if it is not the default by replacing ``/tree`` in the URL
+ with ``/lab``
+
+.. note::
+ JupyterLab is just about to go into beta, so use with caution!
+
Set environment variables
-------------------------
diff --git a/images/hub/jupyterhub_config.py b/images/hub/jupyterhub_config.py
index deef3e1529..9b8d675859 100644
--- a/images/hub/jupyterhub_config.py
+++ b/images/hub/jupyterhub_config.py
@@ -248,6 +248,9 @@ def generate_user_name(spawner):
if cmd:
c.Spawner.cmd = cmd
+default_url = get_config('singleuser.default-url', None)
+if default_url:
+ c.Spawner.default_url = default_url
scheduler_strategy = get_config('singleuser.scheduler-strategy', 'spread')
diff --git a/jupyterhub/templates/hub/configmap.yaml b/jupyterhub/templates/hub/configmap.yaml
index 8f15f498f8..398fae2c3e 100644
--- a/jupyterhub/templates/hub/configmap.yaml
+++ b/jupyterhub/templates/hub/configmap.yaml
@@ -113,6 +113,9 @@ data:
{{ if .Values.singleuser.cmd -}}
singleuser.cmd: {{ .Values.singleuser.cmd | quote }}
{{- end }}
+ {{ if .Values.singleuser.defaultUrl }}
+ singleuser.default-url: {{ .Values.singleuser.defaultUrl | quote }}
+ {{- end }}
singleuser.uid: {{ .Values.singleuser.uid | quote }}
singleuser.fs-gid: {{ .Values.singleuser.fsGid | quote }}
diff --git a/jupyterhub/values.yaml b/jupyterhub/values.yaml
index efb5e8c80e..11b6dca487 100644
--- a/jupyterhub/values.yaml
+++ b/jupyterhub/values.yaml
@@ -143,6 +143,7 @@ singleuser:
limit:
guarantee: 1G
cmd: jupyterhub-singleuser
+ defaultUrl:
prePuller:
hook:
|
sql-machine-learning__elasticdl-368 | Better check for codec names
Currently, the codec name argument is not checked. A typo would result in the worker misinterpreting the encoded data.
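One standard way to reject unknown codec names up front is argparse's `choices=`, which is the route the patch below takes for the master's `--codec-type` flag. A minimal standalone sketch follows; the option and accepted value mirror the files below, while the example invocations are illustrative only.

```python
# Standalone sketch: validate the codec name at argument-parsing time with
# argparse `choices`, mirroring the --codec-type option in elasticdl/master/main.py.
import argparse

parser = argparse.ArgumentParser(description="ElasticDL Master")
parser.add_argument(
    "--codec-type",
    default=None,
    choices=["tf_example"],  # any other supplied value makes argparse exit with a clear error
    help="Type of codec (tf_example or None)",
)

args = parser.parse_args(["--codec-type", "tf_example"])  # accepted
print(args.codec_type)
# parser.parse_args(["--codec-type", "tf_exmaple"])  # typo -> fails fast at startup
```

With this in place, a misspelled codec name is rejected when the master starts instead of being passed through to the workers.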
| [
{
"content": "import logging\nimport time\nimport argparse\nimport os\n\nimport grpc\nimport tensorflow as tf\n\ntf.enable_eager_execution()\n\nfrom concurrent import futures\nfrom recordio import File\nfrom elasticdl.proto import master_pb2_grpc\nfrom elasticdl.master.servicer import MasterServicer\nfrom elasticdl.master.task_queue import _TaskQueue\nfrom elasticdl.master.k8s_worker_manager import WorkerManager\nfrom elasticdl.common.model_helper import load_user_model, build_model\n\n\ndef _make_task_queue(data_dir, record_per_task, num_epoch):\n f_records = {}\n for f in os.listdir(data_dir):\n p = os.path.join(data_dir, f)\n with File(p, \"r\") as rio:\n f_records[p] = rio.count()\n return _TaskQueue(f_records, record_per_task, num_epoch)\n\n\ndef _parse_args():\n parser = argparse.ArgumentParser(description=\"ElasticDL Master\")\n parser.add_argument(\n \"--model_file\",\n help=\"Full file path of user defined neural model\",\n required=True,\n )\n parser.add_argument(\n \"--train_data_dir\",\n help=\"Training data directory. Files should be in RecordIO format\",\n required=True,\n )\n parser.add_argument(\"--record_per_task\", type=int, required=True)\n parser.add_argument(\"--num_epoch\", type=int, required=True)\n parser.add_argument(\n \"--grads_to_wait\",\n type=int,\n help=\"Number of gradients to wait before updating model\",\n required=True,\n )\n parser.add_argument(\n \"--minibatch_size\",\n type=int,\n help=\"Minibatch size used by workers to compute gradients\",\n required=True,\n )\n parser.add_argument(\n \"--num_worker\",\n type=int,\n help=\"the number of workers used in training\",\n default=0,\n )\n parser.add_argument(\n \"--worker_image\", help=\"docker image for worker\", default=None\n )\n parser.add_argument(\"--job_name\", help=\"job name\", required=True)\n parser.add_argument(\n \"--codec-type\",\n default=None,\n help=\"Type of codec(tf_example or None)\",\n )\n return parser.parse_args()\n\n\ndef main():\n # TODO: pass port via flags.\n PORT = 50001\n logger = logging.getLogger(\"master\")\n args = _parse_args()\n task_q = _make_task_queue(\n args.train_data_dir, args.record_per_task, args.num_epoch\n )\n model_module = load_user_model(args.model_file)\n model_inst = model_module.model\n build_model(model_inst, model_module.feature_columns())\n optimizer = model_module.optimizer()\n\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=64))\n master_pb2_grpc.add_MasterServicer_to_server(\n MasterServicer(\n logger,\n args.grads_to_wait,\n args.minibatch_size,\n optimizer,\n task_q,\n init_var=model_inst.trainable_variables,\n ),\n server,\n )\n server.add_insecure_port(\"[::]:{}\".format(PORT))\n server.start()\n logger.warning(\"Server started at port: %d\", PORT)\n\n if args.num_worker:\n master_addr = \"%s:%d\" % (os.getenv(\"MY_POD_IP\", \"localhost\"), PORT)\n worker_command = [\"python\"]\n worker_args = [\n \"-m\",\n \"elasticdl.worker.main\",\n \"--model_file\",\n args.model_file,\n \"--master_addr\",\n master_addr,\n \"--codec-type\",\n args.codec_type\n ]\n\n worker_manager = WorkerManager(\n job_name=args.job_name,\n worker_image=args.worker_image,\n command=worker_command,\n args=worker_args,\n namespace=\"default\",\n num_worker=args.num_worker,\n )\n worker_manager.start_workers(restart_policy=\"Never\")\n\n try:\n while True:\n if task_q.finished():\n break\n time.sleep(30)\n except KeyboardInterrupt:\n logger.warning(\"Server stopping\")\n\n if args.num_worker:\n # TODO: worker_manager.remove_workers supports synchronized call\n 
worker_manager.remove_workers()\n # wait for worker pod to be deleted\n max_check_num = 10\n for _ in range(max_check_num):\n time.sleep(3)\n counters = worker_manager.get_counters()\n if not counters:\n break\n server.stop(0)\n\n\nif __name__ == \"__main__\":\n logging.basicConfig()\n main()\n",
"path": "elasticdl/master/main.py"
}
] | [
{
"content": "import logging\nimport time\nimport argparse\nimport os\n\nimport grpc\nimport tensorflow as tf\n\ntf.enable_eager_execution()\n\nfrom concurrent import futures\nfrom recordio import File\nfrom elasticdl.proto import master_pb2_grpc\nfrom elasticdl.master.servicer import MasterServicer\nfrom elasticdl.master.task_queue import _TaskQueue\nfrom elasticdl.master.k8s_worker_manager import WorkerManager\nfrom elasticdl.common.model_helper import load_user_model, build_model\n\n\ndef _make_task_queue(data_dir, record_per_task, num_epoch):\n f_records = {}\n for f in os.listdir(data_dir):\n p = os.path.join(data_dir, f)\n with File(p, \"r\") as rio:\n f_records[p] = rio.count()\n return _TaskQueue(f_records, record_per_task, num_epoch)\n\n\ndef _parse_args():\n parser = argparse.ArgumentParser(description=\"ElasticDL Master\")\n parser.add_argument(\n \"--model_file\",\n help=\"Full file path of user defined neural model\",\n required=True,\n )\n parser.add_argument(\n \"--train_data_dir\",\n help=\"Training data directory. Files should be in RecordIO format\",\n required=True,\n )\n parser.add_argument(\"--record_per_task\", type=int, required=True)\n parser.add_argument(\"--num_epoch\", type=int, required=True)\n parser.add_argument(\n \"--grads_to_wait\",\n type=int,\n help=\"Number of gradients to wait before updating model\",\n required=True,\n )\n parser.add_argument(\n \"--minibatch_size\",\n type=int,\n help=\"Minibatch size used by workers to compute gradients\",\n required=True,\n )\n parser.add_argument(\n \"--num_worker\",\n type=int,\n help=\"the number of workers used in training\",\n default=0,\n )\n parser.add_argument(\n \"--worker_image\", help=\"docker image for worker\", default=None\n )\n parser.add_argument(\"--job_name\", help=\"job name\", required=True)\n parser.add_argument(\n \"--codec-type\",\n default=None,\n choices=[\"tf_example\"],\n help=\"Type of codec(tf_example or None)\",\n )\n return parser.parse_args()\n\n\ndef main():\n # TODO: pass port via flags.\n PORT = 50001\n logger = logging.getLogger(\"master\")\n args = _parse_args()\n task_q = _make_task_queue(\n args.train_data_dir, args.record_per_task, args.num_epoch\n )\n model_module = load_user_model(args.model_file)\n model_inst = model_module.model\n build_model(model_inst, model_module.feature_columns())\n optimizer = model_module.optimizer()\n\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=64))\n master_pb2_grpc.add_MasterServicer_to_server(\n MasterServicer(\n logger,\n args.grads_to_wait,\n args.minibatch_size,\n optimizer,\n task_q,\n init_var=model_inst.trainable_variables,\n ),\n server,\n )\n server.add_insecure_port(\"[::]:{}\".format(PORT))\n server.start()\n logger.warning(\"Server started at port: %d\", PORT)\n\n if args.num_worker:\n master_addr = \"%s:%d\" % (os.getenv(\"MY_POD_IP\", \"localhost\"), PORT)\n worker_command = [\"python\"]\n worker_args = [\n \"-m\",\n \"elasticdl.worker.main\",\n \"--model_file\",\n args.model_file,\n \"--master_addr\",\n master_addr,\n \"--codec-type\",\n args.codec_type\n ]\n\n worker_manager = WorkerManager(\n job_name=args.job_name,\n worker_image=args.worker_image,\n command=worker_command,\n args=worker_args,\n namespace=\"default\",\n num_worker=args.num_worker,\n )\n worker_manager.start_workers(restart_policy=\"Never\")\n\n try:\n while True:\n if task_q.finished():\n break\n time.sleep(30)\n except KeyboardInterrupt:\n logger.warning(\"Server stopping\")\n\n if args.num_worker:\n # TODO: worker_manager.remove_workers 
supports synchronized call\n worker_manager.remove_workers()\n # wait for worker pod to be deleted\n max_check_num = 10\n for _ in range(max_check_num):\n time.sleep(3)\n counters = worker_manager.get_counters()\n if not counters:\n break\n server.stop(0)\n\n\nif __name__ == \"__main__\":\n logging.basicConfig()\n main()\n",
"path": "elasticdl/master/main.py"
}
] | diff --git a/elasticdl/master/main.py b/elasticdl/master/main.py
index d06686bbc..0f2b0b4c7 100644
--- a/elasticdl/master/main.py
+++ b/elasticdl/master/main.py
@@ -65,6 +65,7 @@ def _parse_args():
parser.add_argument(
"--codec-type",
default=None,
+ choices=["tf_example"],
help="Type of codec(tf_example or None)",
)
return parser.parse_args()
|
vega__altair-1844 | Fix simple typo: packge -> package
There is a small typo in setup.py.
It should read package rather than packge.
| [
{
"content": "import io\nimport os\nimport re\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\n#==============================================================================\n# Utilities\n#==============================================================================\n\ndef read(path, encoding='utf-8'):\n path = os.path.join(os.path.dirname(__file__), path)\n with io.open(path, encoding=encoding) as fp:\n return fp.read()\n\n\ndef get_install_requirements(path):\n content = read(path)\n return [\n req\n for req in content.split(\"\\n\")\n if req != '' and not req.startswith('#')\n ]\n\n\ndef version(path):\n \"\"\"Obtain the packge version from a python file e.g. pkg/__init__.py\n\n See <https://packaging.python.org/en/latest/single_source_version.html>.\n \"\"\"\n version_file = read(path)\n version_match = re.search(r\"\"\"^__version__ = ['\"]([^'\"]*)['\"]\"\"\",\n version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\nHERE = os.path.abspath(os.path.dirname(__file__))\n\n# From https://github.com/jupyterlab/jupyterlab/blob/master/setupbase.py, BSD licensed\ndef find_packages(top=HERE):\n \"\"\"\n Find all of the packages.\n \"\"\"\n packages = []\n for d, dirs, _ in os.walk(top, followlinks=True):\n if os.path.exists(os.path.join(d, '__init__.py')):\n packages.append(os.path.relpath(d, top).replace(os.path.sep, '.'))\n elif d != top:\n # Do not look for packages in subfolders if current is not a package\n dirs[:] = []\n return packages\n\n#==============================================================================\n# Variables\n#==============================================================================\n\nDESCRIPTION = \"Altair: A declarative statistical visualization library for Python.\"\nLONG_DESCRIPTION = read(\"README.md\")\nLONG_DESCRIPTION_CONTENT_TYPE = 'text/markdown'\nNAME = \"altair\"\nPACKAGES = find_packages()\nAUTHOR = \"Brian E. Granger / Jake VanderPlas\"\nAUTHOR_EMAIL = \"[email protected]\"\nURL = 'http://altair-viz.github.io'\nDOWNLOAD_URL = 'http://github.com/altair-viz/altair/'\nLICENSE = 'BSD 3-clause'\nINSTALL_REQUIRES = get_install_requirements(\"requirements.txt\")\nPYTHON_REQUIRES = \">=3.5\"\nDEV_REQUIRES = get_install_requirements(\"requirements_dev.txt\")\nVERSION = version('altair/__init__.py')\n\n\nsetup(name=NAME,\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=LONG_DESCRIPTION_CONTENT_TYPE,\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n url=URL,\n download_url=DOWNLOAD_URL,\n license=LICENSE,\n packages=PACKAGES,\n include_package_data=True,\n install_requires=INSTALL_REQUIRES,\n python_requires=PYTHON_REQUIRES,\n extras_require={\n 'dev': DEV_REQUIRES\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ],\n )\n",
"path": "setup.py"
}
] | [
{
"content": "import io\nimport os\nimport re\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\n#==============================================================================\n# Utilities\n#==============================================================================\n\ndef read(path, encoding='utf-8'):\n path = os.path.join(os.path.dirname(__file__), path)\n with io.open(path, encoding=encoding) as fp:\n return fp.read()\n\n\ndef get_install_requirements(path):\n content = read(path)\n return [\n req\n for req in content.split(\"\\n\")\n if req != '' and not req.startswith('#')\n ]\n\n\ndef version(path):\n \"\"\"Obtain the package version from a python file e.g. pkg/__init__.py\n\n See <https://packaging.python.org/en/latest/single_source_version.html>.\n \"\"\"\n version_file = read(path)\n version_match = re.search(r\"\"\"^__version__ = ['\"]([^'\"]*)['\"]\"\"\",\n version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\nHERE = os.path.abspath(os.path.dirname(__file__))\n\n# From https://github.com/jupyterlab/jupyterlab/blob/master/setupbase.py, BSD licensed\ndef find_packages(top=HERE):\n \"\"\"\n Find all of the packages.\n \"\"\"\n packages = []\n for d, dirs, _ in os.walk(top, followlinks=True):\n if os.path.exists(os.path.join(d, '__init__.py')):\n packages.append(os.path.relpath(d, top).replace(os.path.sep, '.'))\n elif d != top:\n # Do not look for packages in subfolders if current is not a package\n dirs[:] = []\n return packages\n\n#==============================================================================\n# Variables\n#==============================================================================\n\nDESCRIPTION = \"Altair: A declarative statistical visualization library for Python.\"\nLONG_DESCRIPTION = read(\"README.md\")\nLONG_DESCRIPTION_CONTENT_TYPE = 'text/markdown'\nNAME = \"altair\"\nPACKAGES = find_packages()\nAUTHOR = \"Brian E. Granger / Jake VanderPlas\"\nAUTHOR_EMAIL = \"[email protected]\"\nURL = 'http://altair-viz.github.io'\nDOWNLOAD_URL = 'http://github.com/altair-viz/altair/'\nLICENSE = 'BSD 3-clause'\nINSTALL_REQUIRES = get_install_requirements(\"requirements.txt\")\nPYTHON_REQUIRES = \">=3.5\"\nDEV_REQUIRES = get_install_requirements(\"requirements_dev.txt\")\nVERSION = version('altair/__init__.py')\n\n\nsetup(name=NAME,\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=LONG_DESCRIPTION_CONTENT_TYPE,\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n url=URL,\n download_url=DOWNLOAD_URL,\n license=LICENSE,\n packages=PACKAGES,\n include_package_data=True,\n install_requires=INSTALL_REQUIRES,\n python_requires=PYTHON_REQUIRES,\n extras_require={\n 'dev': DEV_REQUIRES\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ],\n )\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index 8c2e4e73b..da9afb201 100644
--- a/setup.py
+++ b/setup.py
@@ -27,7 +27,7 @@ def get_install_requirements(path):
def version(path):
- """Obtain the packge version from a python file e.g. pkg/__init__.py
+ """Obtain the package version from a python file e.g. pkg/__init__.py
See <https://packaging.python.org/en/latest/single_source_version.html>.
"""
|
pyca__cryptography-3803 | Signer/Verifier deprecation warning has wrong stacklevel
Seeing this with Cryptography 2.0:
```
.../python3.5/site-packages/cryptography/hazmat/backends/openssl/rsa.py:477: DeprecationWarning: signer and verifier have been deprecated. Please use sign and verify instead.
_warn_sign_verify_deprecated()
.../python3.5/site-packages/cryptography/hazmat/backends/openssl/rsa.py:382: DeprecationWarning: signer and verifier have been deprecated. Please use sign and verify instead.
_warn_sign_verify_deprecated()
```
I see a few open issues related to deprecations (e.g. #3794), but I'm not sure if any of them cover this particular message.
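For reference, `stacklevel` selects which stack frame a warning is attributed to. Because the message is emitted from the `_warn_sign_verify_deprecated()` helper (one frame below the deprecated `signer()`/`verifier()` methods, see the files below), `stacklevel=2` points at the backend internals shown above, and `stacklevel=3` is needed for the warning to point at the caller's own code. A minimal standalone illustration; the function names here are stand-ins, not the real library internals.

```python
import warnings

warnings.simplefilter("always")  # make DeprecationWarning visible for this demo

def _warn_deprecated():
    # stacklevel counts frames up from this warn() call:
    #   1 -> this line, 2 -> signer(), 3 -> the code that called signer()
    warnings.warn("signer is deprecated", DeprecationWarning, stacklevel=3)

def signer():
    _warn_deprecated()

signer()  # with stacklevel=3 the warning is reported against this line
```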
| [
{
"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport warnings\n\nfrom cryptography import utils\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.asymmetric.utils import Prehashed\n\n\ndef _calculate_digest_and_algorithm(backend, data, algorithm):\n if not isinstance(algorithm, Prehashed):\n hash_ctx = hashes.Hash(algorithm, backend)\n hash_ctx.update(data)\n data = hash_ctx.finalize()\n else:\n algorithm = algorithm._algorithm\n\n if len(data) != algorithm.digest_size:\n raise ValueError(\n \"The provided data must be the same length as the hash \"\n \"algorithm's digest size.\"\n )\n\n return (data, algorithm)\n\n\ndef _check_not_prehashed(signature_algorithm):\n if isinstance(signature_algorithm, Prehashed):\n raise TypeError(\n \"Prehashed is only supported in the sign and verify methods. \"\n \"It cannot be used with signer or verifier.\"\n )\n\n\ndef _warn_sign_verify_deprecated():\n warnings.warn(\n \"signer and verifier have been deprecated. Please use sign \"\n \"and verify instead.\",\n utils.PersistentlyDeprecated,\n stacklevel=2\n )\n",
"path": "src/cryptography/hazmat/backends/openssl/utils.py"
}
] | [
{
"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport warnings\n\nfrom cryptography import utils\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.asymmetric.utils import Prehashed\n\n\ndef _calculate_digest_and_algorithm(backend, data, algorithm):\n if not isinstance(algorithm, Prehashed):\n hash_ctx = hashes.Hash(algorithm, backend)\n hash_ctx.update(data)\n data = hash_ctx.finalize()\n else:\n algorithm = algorithm._algorithm\n\n if len(data) != algorithm.digest_size:\n raise ValueError(\n \"The provided data must be the same length as the hash \"\n \"algorithm's digest size.\"\n )\n\n return (data, algorithm)\n\n\ndef _check_not_prehashed(signature_algorithm):\n if isinstance(signature_algorithm, Prehashed):\n raise TypeError(\n \"Prehashed is only supported in the sign and verify methods. \"\n \"It cannot be used with signer or verifier.\"\n )\n\n\ndef _warn_sign_verify_deprecated():\n warnings.warn(\n \"signer and verifier have been deprecated. Please use sign \"\n \"and verify instead.\",\n utils.PersistentlyDeprecated,\n stacklevel=3\n )\n",
"path": "src/cryptography/hazmat/backends/openssl/utils.py"
}
] | diff --git a/src/cryptography/hazmat/backends/openssl/utils.py b/src/cryptography/hazmat/backends/openssl/utils.py
index ff1b97458735..05d0fe589158 100644
--- a/src/cryptography/hazmat/backends/openssl/utils.py
+++ b/src/cryptography/hazmat/backends/openssl/utils.py
@@ -41,5 +41,5 @@ def _warn_sign_verify_deprecated():
"signer and verifier have been deprecated. Please use sign "
"and verify instead.",
utils.PersistentlyDeprecated,
- stacklevel=2
+ stacklevel=3
)
|
fossasia__open-event-server-9132 | Add the unique ticket code to the downloaded CSV file
The CSV download of the attendee list does not include the number encoded in the QR code. Please add this field as "Ticket-ID".
The ticket ID has the following format: 135ccbd7-9b23-4a52-a7fd-326fec1b2c1c
Whereas the order has a format like this: #O1691408152-34896

Expected: The exported CSV should have a table column "Ticket ID" with the ticket ID number that is encoded in the QR code as well.

| [
{
"content": "import base64\nfrom dataclasses import dataclass\nfrom datetime import datetime\nfrom io import BytesIO\n\nimport qrcode\nfrom citext import CIText\n\nfrom app.api.helpers.storage import UPLOAD_PATHS, generate_hash\nfrom app.models import db\nfrom app.models.base import SoftDeletionModel\n\n\n@dataclass(init=False, unsafe_hash=True)\nclass TicketHolder(SoftDeletionModel):\n __tablename__ = \"ticket_holders\"\n\n id: int = db.Column(db.Integer, primary_key=True)\n firstname: str = db.Column(db.String)\n lastname: str = db.Column(db.String)\n email: str = db.Column(CIText)\n address: str = db.Column(db.String)\n city: str = db.Column(db.String)\n state: str = db.Column(db.String)\n country: str = db.Column(db.String)\n job_title: str = db.Column(db.String)\n phone: str = db.Column(db.String)\n tax_business_info: str = db.Column(db.String)\n billing_address: str = db.Column(db.String)\n home_address: str = db.Column(db.String)\n shipping_address: str = db.Column(db.String)\n company: str = db.Column(db.String)\n work_address: str = db.Column(db.String)\n work_phone: str = db.Column(db.String)\n website: str = db.Column(db.String)\n blog: str = db.Column(db.String)\n twitter: str = db.Column(db.String)\n facebook: str = db.Column(db.String)\n instagram: str = db.Column(db.String)\n linkedin: str = db.Column(db.String)\n github: str = db.Column(db.String)\n gender: str = db.Column(db.String)\n accept_video_recording: bool = db.Column(db.Boolean)\n accept_share_details: bool = db.Column(db.Boolean)\n accept_receive_emails: bool = db.Column(db.Boolean)\n age_group: str = db.Column(db.String)\n home_wiki: str = db.Column(db.String)\n wiki_scholarship: str = db.Column(db.String)\n birth_date: datetime = db.Column(db.DateTime(timezone=True))\n pdf_url: str = db.Column(db.String)\n ticket_id: int = db.Column(\n db.Integer, db.ForeignKey('tickets.id', ondelete='CASCADE'), nullable=False\n )\n order_id: int = db.Column(db.Integer, db.ForeignKey('orders.id', ondelete='CASCADE'))\n is_checked_in: bool = db.Column(db.Boolean, default=False)\n is_checked_out: bool = db.Column(db.Boolean, default=False)\n is_registered: bool = db.Column(db.Boolean, default=False)\n device_name_checkin: str = db.Column(db.String)\n checkin_times: str = db.Column(db.String)\n checkout_times: str = db.Column(db.String)\n register_times: str = db.Column(db.String)\n attendee_notes: str = db.Column(db.String)\n event_id: int = db.Column(\n db.Integer, db.ForeignKey('events.id', ondelete='CASCADE'), nullable=False\n )\n created_at: datetime = db.Column(db.DateTime(timezone=True), default=datetime.utcnow)\n modified_at: datetime = db.Column(\n db.DateTime(timezone=True), default=datetime.utcnow, onupdate=datetime.utcnow\n )\n complex_field_values: str = db.Column(db.JSON)\n is_consent_of_refund_policy: bool = db.Column(db.Boolean, default=False)\n native_language: str = db.Column(db.JSON)\n fluent_language: str = db.Column(db.JSON)\n user = db.relationship(\n 'User',\n foreign_keys=[email],\n primaryjoin='User.email == TicketHolder.email',\n viewonly=True,\n backref='attendees',\n sync_backref=False,\n )\n order = db.relationship('Order', backref='ticket_holders')\n ticket = db.relationship('Ticket', backref='ticket_holders')\n is_consent_form_field: bool = db.Column(db.Boolean, default=False)\n is_consent_form_field_photo: bool = db.Column(db.Boolean, default=False)\n is_consent_form_field_email: bool = db.Column(db.Boolean, default=False)\n is_badge_printed: bool = db.Column(db.Boolean, default=False)\n 
badge_printed_at: datetime = db.Column(db.DateTime(timezone=True))\n is_discount_applied: bool = db.Column(db.Boolean, default=False)\n is_access_code_applied: bool = db.Column(db.Boolean, default=False)\n tag_id: int = db.Column(db.Integer, db.ForeignKey('tags.id', ondelete='CASCADE'))\n tag = db.relationship('Tag', backref='ticket_holders')\n\n @property\n def name(self):\n firstname = self.firstname if self.firstname else ''\n lastname = self.lastname if self.lastname else ''\n if firstname and lastname:\n return f'{firstname} {lastname}'\n else:\n return ''\n\n @property\n def qr_code(self):\n qr = qrcode.QRCode(\n version=1,\n error_correction=qrcode.constants.ERROR_CORRECT_L,\n box_size=10,\n border=0,\n )\n qr.add_data(self.order.identifier + \"-\" + str(self.id))\n qr.make(fit=True)\n img = qr.make_image()\n\n buffer = BytesIO()\n img.save(buffer, format=\"JPEG\")\n img_str = str(base64.b64encode(buffer.getvalue()), 'utf-8')\n return img_str\n\n @property\n def serialize(self):\n \"\"\"Return object data in easily serializable format\"\"\"\n return {\n 'id': self.id,\n 'firstname': self.firstname,\n 'lastname': self.lastname,\n 'email': self.email,\n 'city': self.city,\n 'address': self.address,\n 'state': self.state,\n 'country': self.country,\n 'company': self.company,\n 'taxBusinessInfo': self.tax_business_info,\n }\n\n @property\n def pdf_url_path(self) -> str:\n key = UPLOAD_PATHS['pdf']['tickets_all'].format(\n identifier=self.order.identifier, extra_identifier=self.id\n )\n return (\n f'generated/tickets/{key}/{generate_hash(key)}/'\n + self.order.identifier\n + '.pdf'\n )\n",
"path": "app/models/ticket_holder.py"
}
] | [
{
"content": "import base64\nfrom dataclasses import dataclass\nfrom datetime import datetime\nfrom io import BytesIO\n\nimport qrcode\nfrom citext import CIText\n\nfrom app.api.helpers.storage import UPLOAD_PATHS, generate_hash\nfrom app.models import db\nfrom app.models.base import SoftDeletionModel\n\n\n@dataclass(init=False, unsafe_hash=True)\nclass TicketHolder(SoftDeletionModel):\n __tablename__ = \"ticket_holders\"\n\n id: int = db.Column(db.Integer, primary_key=True)\n firstname: str = db.Column(db.String)\n lastname: str = db.Column(db.String)\n email: str = db.Column(CIText)\n address: str = db.Column(db.String)\n city: str = db.Column(db.String)\n state: str = db.Column(db.String)\n country: str = db.Column(db.String)\n job_title: str = db.Column(db.String)\n phone: str = db.Column(db.String)\n tax_business_info: str = db.Column(db.String)\n billing_address: str = db.Column(db.String)\n home_address: str = db.Column(db.String)\n shipping_address: str = db.Column(db.String)\n company: str = db.Column(db.String)\n work_address: str = db.Column(db.String)\n work_phone: str = db.Column(db.String)\n website: str = db.Column(db.String)\n blog: str = db.Column(db.String)\n twitter: str = db.Column(db.String)\n facebook: str = db.Column(db.String)\n instagram: str = db.Column(db.String)\n linkedin: str = db.Column(db.String)\n github: str = db.Column(db.String)\n gender: str = db.Column(db.String)\n accept_video_recording: bool = db.Column(db.Boolean)\n accept_share_details: bool = db.Column(db.Boolean)\n accept_receive_emails: bool = db.Column(db.Boolean)\n age_group: str = db.Column(db.String)\n home_wiki: str = db.Column(db.String)\n wiki_scholarship: str = db.Column(db.String)\n birth_date: datetime = db.Column(db.DateTime(timezone=True))\n pdf_url: str = db.Column(db.String)\n ticket_id: int = db.Column(\n db.Integer, db.ForeignKey('tickets.id', ondelete='CASCADE'), nullable=False\n )\n order_id: int = db.Column(db.Integer, db.ForeignKey('orders.id', ondelete='CASCADE'))\n is_checked_in: bool = db.Column(db.Boolean, default=False)\n is_checked_out: bool = db.Column(db.Boolean, default=False)\n is_registered: bool = db.Column(db.Boolean, default=False)\n device_name_checkin: str = db.Column(db.String)\n checkin_times: str = db.Column(db.String)\n checkout_times: str = db.Column(db.String)\n register_times: str = db.Column(db.String)\n attendee_notes: str = db.Column(db.String)\n event_id: int = db.Column(\n db.Integer, db.ForeignKey('events.id', ondelete='CASCADE'), nullable=False\n )\n created_at: datetime = db.Column(db.DateTime(timezone=True), default=datetime.utcnow)\n modified_at: datetime = db.Column(\n db.DateTime(timezone=True), default=datetime.utcnow, onupdate=datetime.utcnow\n )\n complex_field_values: str = db.Column(db.JSON)\n is_consent_of_refund_policy: bool = db.Column(db.Boolean, default=False)\n native_language: str = db.Column(db.JSON)\n fluent_language: str = db.Column(db.JSON)\n user = db.relationship(\n 'User',\n foreign_keys=[email],\n primaryjoin='User.email == TicketHolder.email',\n viewonly=True,\n backref='attendees',\n sync_backref=False,\n )\n order = db.relationship('Order', backref='ticket_holders')\n ticket = db.relationship('Ticket', backref='ticket_holders')\n is_consent_form_field: bool = db.Column(db.Boolean, default=False)\n is_consent_form_field_photo: bool = db.Column(db.Boolean, default=False)\n is_consent_form_field_email: bool = db.Column(db.Boolean, default=False)\n is_badge_printed: bool = db.Column(db.Boolean, default=False)\n 
badge_printed_at: datetime = db.Column(db.DateTime(timezone=True))\n is_discount_applied: bool = db.Column(db.Boolean, default=False)\n is_access_code_applied: bool = db.Column(db.Boolean, default=False)\n tag_id: int = db.Column(db.Integer, db.ForeignKey('tags.id', ondelete='CASCADE'))\n tag = db.relationship('Tag', backref='ticket_holders')\n\n @property\n def name(self):\n firstname = self.firstname if self.firstname else ''\n lastname = self.lastname if self.lastname else ''\n if firstname and lastname:\n return f'{firstname} {lastname}'\n else:\n return ''\n\n @property\n def qr_code(self):\n qr = qrcode.QRCode(\n version=1,\n error_correction=qrcode.constants.ERROR_CORRECT_L,\n box_size=10,\n border=0,\n )\n qr.add_data(self.order.identifier)\n qr.make(fit=True)\n img = qr.make_image()\n\n buffer = BytesIO()\n img.save(buffer, format=\"JPEG\")\n img_str = str(base64.b64encode(buffer.getvalue()), 'utf-8')\n return img_str\n\n @property\n def serialize(self):\n \"\"\"Return object data in easily serializable format\"\"\"\n return {\n 'id': self.id,\n 'firstname': self.firstname,\n 'lastname': self.lastname,\n 'email': self.email,\n 'city': self.city,\n 'address': self.address,\n 'state': self.state,\n 'country': self.country,\n 'company': self.company,\n 'taxBusinessInfo': self.tax_business_info,\n }\n\n @property\n def pdf_url_path(self) -> str:\n key = UPLOAD_PATHS['pdf']['tickets_all'].format(\n identifier=self.order.identifier, extra_identifier=self.id\n )\n return (\n f'generated/tickets/{key}/{generate_hash(key)}/'\n + self.order.identifier\n + '.pdf'\n )\n",
"path": "app/models/ticket_holder.py"
}
] | diff --git a/app/models/ticket_holder.py b/app/models/ticket_holder.py
index 3501b54e7a..1d8f1371a3 100644
--- a/app/models/ticket_holder.py
+++ b/app/models/ticket_holder.py
@@ -108,7 +108,7 @@ def qr_code(self):
box_size=10,
border=0,
)
- qr.add_data(self.order.identifier + "-" + str(self.id))
+ qr.add_data(self.order.identifier)
qr.make(fit=True)
img = qr.make_image()
|
ansible-collections__community.vmware-1280 | community.vmware.vmware_guest_powerstate not finding VM by name
##### SUMMARY
When trying to control the power state of a VM by name, the module is unable to find the VM. This is despite the fact that the exact same parameters find the VM in other modules (such as vmware_guest_snapshot).
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
vmware_guest_powerstate
##### ANSIBLE VERSION
<!--- Paste verbatim output from "ansible --version" between quotes -->
```
ansible [core 2.12.2]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/root/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.8/site-packages/ansible
ansible collection location = /root/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/bin/ansible
python version = 3.8.12 (default, Sep 21 2021, 00:10:52) [GCC 8.5.0 20210514 (Red Hat 8.5.0-3)]
jinja version = 2.10.3
```
##### COLLECTION VERSION
<!--- Paste verbatim output from "ansible-galaxy collection list <namespace>.<collection>" between the quotes
for example: ansible-galaxy collection list community.general
-->
```
# /root/.ansible/collections/ansible_collections
Collection Version
---------------- -------
community.vmware 2.1.0
[root@jumpserver snaprevert_test]# ansible-galaxy collection list community.vmware
```
##### CONFIGURATION
<!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes -->
```
[root@jumpserver snaprevert_test]# ansible-config dump --only-changed
[root@jumpserver snaprevert_test]#
```
##### OS / ENVIRONMENT
```NAME="CentOS Stream"
VERSION="8"
ID="centos"
ID_LIKE="rhel fedora"
VERSION_ID="8"
PLATFORM_ID="platform:el8"
PRETTY_NAME="CentOS Stream 8"
ANSI_COLOR="0;31"
CPE_NAME="cpe:/o:centos:centos:8"
HOME_URL="https://centos.org/"
BUG_REPORT_URL="https://bugzilla.redhat.com/"
REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 8"
REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream"
```
##### STEPS TO REPRODUCE
Running the playbook below, you'll find that the vmware_guest_snapshot task finds the VM and performs the action, while the vmware_guest_powerstate task fails with "Unable to set power state for non-existing virtual machine" despite all parameters being identical.
```
---
- name: Test of snapshot revert
hosts: localhost
gather_facts: no
vars:
vcenter_hostname: 1.2.3.4
vcenter_username: [email protected]
vcenter_password: FOO
datacenter_name: BAR
tasks:
- name: Revert to initial snapshot
community.vmware.vmware_guest_snapshot:
validate_certs: no
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
datacenter: "{{ datacenter_name }}"
folder: "/{{ datacenter_name }}/vm/Jumpserver_VMs/"
name: "jump_7216"
state: revert
snapshot_name: "Initial_Setup"
delegate_to: localhost
- name: Power on machine
community.vmware.vmware_guest_powerstate:
validate_certs: no
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
datacenter: "{{ datacenter_name }}"
folder: "/{{ datacenter_name }}/vm/Jumpserver_VMs/"
name: "jump_7216"
state: powered-on
delegate_to: localhost
```
##### EXPECTED RESULTS
I would expect vmware_guest_powerstate to find the VM just like vmware_guest_snapshot does.
##### ACTUAL RESULTS
The task fails with a "non-existing virtual machine" error even though the VM exists.
<!--- Paste verbatim command output between quotes -->
```
PLAY [Test of snapshot revert] ********************************************************************************************************************************************************************************************************************************************************************************************************************************************
TASK [Revert to a snapshot] ***********************************************************************************************************************************************************************************************************************************************************************************************************************************************
changed: [localhost]
TASK [Power on machine] ****************************************************************************************************************************************************************************************************************************************************************************************************************************************************
fatal: [localhost]: FAILED! => {"changed": false, "msg": "Unable to set power state for non-existing virtual machine : 'jump_7216'"}
PLAY RECAP ****************************************************************************************************************************************************************************************************************************************************************************************************************************************************************
localhost : ok=1 changed=1 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0
```
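For context, the likely culprit is the trailing `/` in `folder`: other guest modules tolerate it, and the attached diff makes vmware_guest_powerstate do the same. A minimal sketch of that normalization, reusing the names from the module code in this record (`module`, `PyVmomi`, `get_vm`):
```
# Normalize the folder path before looking the VM up, so that
# "/DC/vm/Jumpserver_VMs/" resolves to the same folder as "/DC/vm/Jumpserver_VMs".
if module.params['folder']:
    module.params['folder'] = module.params['folder'].rstrip('/')

pyv = PyVmomi(module)
vm = pyv.get_vm()
```
Until a release with that change is available, dropping the trailing slash in the playbook (`folder: "/{{ datacenter_name }}/vm/Jumpserver_VMs"`) should avoid the error.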
| [
{
"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# Copyright: (c) 2017, Abhijeet Kasurde <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = r\"\"\"\n---\nmodule: vmware_guest_powerstate\nshort_description: Manages power states of virtual machines in vCenter\ndescription:\n- Power on / Power off / Restart a virtual machine.\nauthor:\n- Abhijeet Kasurde (@Akasurde) <[email protected]>\nrequirements:\n- python >= 2.6\n- PyVmomi\noptions:\n datacenter:\n description:\n - The I(datacenter) where the VM you'd like to operate the power.\n - This parameter is case sensitive.\n default: ha-datacenter\n type: str\n version_added: '1.13.0'\n state:\n description:\n - Set the state of the virtual machine.\n choices: [ powered-off, powered-on, reboot-guest, restarted, shutdown-guest, suspended, present]\n default: present\n type: str\n name:\n description:\n - Name of the virtual machine to work with.\n - Virtual machine names in vCenter are not necessarily unique, which may be problematic, see C(name_match).\n type: str\n name_match:\n description:\n - If multiple virtual machines matching the name, use the first or last found.\n default: first\n choices: [ first, last ]\n type: str\n uuid:\n description:\n - UUID of the instance to manage if known, this is VMware's unique identifier.\n - This is required if C(name) or C(moid) is not supplied.\n type: str\n moid:\n description:\n - Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.\n - This is required if C(name) or C(uuid) is not supplied.\n type: str\n use_instance_uuid:\n description:\n - Whether to use the VMware instance UUID rather than the BIOS UUID.\n default: false\n type: bool\n folder:\n description:\n - Destination folder, absolute or relative path to find an existing guest.\n - The folder should include the datacenter. ESX's datacenter is ha-datacenter\n - 'Examples:'\n - ' folder: /ha-datacenter/vm'\n - ' folder: ha-datacenter/vm'\n - ' folder: /datacenter1/vm'\n - ' folder: datacenter1/vm'\n - ' folder: /datacenter1/vm/folder1'\n - ' folder: datacenter1/vm/folder1'\n - ' folder: /folder1/datacenter1/vm'\n - ' folder: folder1/datacenter1/vm'\n - ' folder: /folder1/datacenter1/vm/folder2'\n type: str\n scheduled_at:\n description:\n - Date and time in string format at which specified task needs to be performed.\n - \"The required format for date and time - 'dd/mm/yyyy hh:mm'.\"\n - Scheduling task requires vCenter server. 
A standalone ESXi server does not support this option.\n type: str\n schedule_task_name:\n description:\n - Name of schedule task.\n - Valid only if C(scheduled_at) is specified.\n type: str\n required: False\n schedule_task_description:\n description:\n - Description of schedule task.\n - Valid only if C(scheduled_at) is specified.\n type: str\n required: False\n schedule_task_enabled:\n description:\n - Flag to indicate whether the scheduled task is enabled or disabled.\n type: bool\n required: False\n default: True\n force:\n description:\n - Ignore warnings and complete the actions.\n - This parameter is useful while forcing virtual machine state.\n default: False\n type: bool\n state_change_timeout:\n description:\n - If the C(state) is set to C(shutdown-guest), by default the module will return immediately after sending the shutdown signal.\n - If this argument is set to a positive integer, the module will instead wait for the VM to reach the poweredoff state.\n - The value sets a timeout in seconds for the module to wait for the state change.\n default: 0\n type: int\n answer:\n description:\n - A list of questions to answer, should one or more arise while waiting for the task to complete.\n - Some common uses are to allow a cdrom to be changed even if locked, or to answer the question as to whether a VM was copied or moved.\n - The I(answer) can be used if I(state) is C(powered-on).\n suboptions:\n question:\n description:\n - The message id, for example C(msg.uuid.altered).\n type: str\n required: True\n response:\n description:\n - The choice key, for example C(button.uuid.copiedTheVM).\n type: str\n required: True\n type: list\n elements: dict\n version_added: '1.11.0'\nextends_documentation_fragment:\n- community.vmware.vmware.documentation\n\"\"\"\n\nEXAMPLES = r\"\"\"\n- name: Set the state of a virtual machine to poweroff\n community.vmware.vmware_guest_powerstate:\n hostname: \"{{ vcenter_hostname }}\"\n username: \"{{ vcenter_username }}\"\n password: \"{{ vcenter_password }}\"\n folder: \"/{{ datacenter_name }}/vm/my_folder\"\n name: \"{{ guest_name }}\"\n state: powered-off\n delegate_to: localhost\n register: deploy\n\n- name: Set the state of a virtual machine to poweron using MoID\n community.vmware.vmware_guest_powerstate:\n hostname: \"{{ vcenter_hostname }}\"\n username: \"{{ vcenter_username }}\"\n password: \"{{ vcenter_password }}\"\n folder: \"/{{ datacenter_name }}/vm/my_folder\"\n moid: vm-42\n state: powered-on\n delegate_to: localhost\n register: deploy\n\n- name: Set the state of a virtual machine to poweroff at given scheduled time\n community.vmware.vmware_guest_powerstate:\n hostname: \"{{ vcenter_hostname }}\"\n username: \"{{ vcenter_username }}\"\n password: \"{{ vcenter_password }}\"\n folder: \"/{{ datacenter_name }}/vm/my_folder\"\n name: \"{{ guest_name }}\"\n state: powered-off\n scheduled_at: \"09/01/2018 10:18\"\n schedule_task_name: \"task_00001\"\n schedule_task_description: \"Sample task to poweroff VM\"\n schedule_task_enabled: True\n delegate_to: localhost\n register: deploy_at_schedule_datetime\n\n- name: Wait for the virtual machine to shutdown\n community.vmware.vmware_guest_powerstate:\n hostname: \"{{ vcenter_hostname }}\"\n username: \"{{ vcenter_username }}\"\n password: \"{{ vcenter_password }}\"\n name: \"{{ guest_name }}\"\n state: shutdown-guest\n state_change_timeout: 200\n delegate_to: localhost\n register: deploy\n\n- name: Automatically answer if a question locked a virtual machine\n block:\n - name: Power on a virtual 
machine without the answer param\n community.vmware.vmware_guest_powerstate:\n hostname: \"{{ esxi_hostname }}\"\n username: \"{{ esxi_username }}\"\n password: \"{{ esxi_password }}\"\n validate_certs: false\n folder: \"{{ f1 }}\"\n name: \"{{ vm_name }}\"\n state: powered-on\n rescue:\n - name: Power on a virtual machine with the answer param\n community.vmware.vmware_guest_powerstate:\n hostname: \"{{ esxi_hostname }}\"\n username: \"{{ esxi_username }}\"\n password: \"{{ esxi_password }}\"\n validate_certs: false\n folder: \"{{ f1 }}\"\n name: \"{{ vm_name }}\"\n answer:\n - question: \"msg.uuid.altered\"\n response: \"button.uuid.copiedTheVM\"\n state: powered-on\n\"\"\"\n\nRETURN = r\"\"\" # \"\"\"\n\ntry:\n from pyVmomi import vim, vmodl\nexcept ImportError:\n pass\n\nfrom random import randint\nfrom datetime import datetime\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, set_vm_power_state, vmware_argument_spec, \\\n check_answer_question_status, make_answer_response, answer_question, gather_vm_facts\nfrom ansible.module_utils._text import to_native\n\n\ndef main():\n argument_spec = vmware_argument_spec()\n argument_spec.update(\n datacenter=dict(type='str', default='ha-datacenter'),\n state=dict(type='str', default='present',\n choices=['present', 'powered-off', 'powered-on', 'reboot-guest', 'restarted', 'shutdown-guest', 'suspended']),\n name=dict(type='str'),\n name_match=dict(type='str', choices=['first', 'last'], default='first'),\n uuid=dict(type='str'),\n moid=dict(type='str'),\n use_instance_uuid=dict(type='bool', default=False),\n folder=dict(type='str'),\n force=dict(type='bool', default=False),\n scheduled_at=dict(type='str'),\n schedule_task_name=dict(),\n schedule_task_description=dict(),\n schedule_task_enabled=dict(type='bool', default=True),\n state_change_timeout=dict(type='int', default=0),\n answer=dict(type='list',\n elements='dict',\n options=dict(\n question=dict(type='str', required=True),\n response=dict(type='str', required=True)\n ))\n )\n\n module = AnsibleModule(\n argument_spec=argument_spec,\n supports_check_mode=False,\n mutually_exclusive=[\n ['name', 'uuid', 'moid'],\n ['scheduled_at', 'answer']\n ],\n )\n\n result = dict(changed=False,)\n\n pyv = PyVmomi(module)\n\n # Check if the VM exists before continuing\n vm = pyv.get_vm()\n\n if vm:\n # VM already exists, so set power state\n scheduled_at = module.params.get('scheduled_at', None)\n if scheduled_at:\n if not pyv.is_vcenter():\n module.fail_json(msg=\"Scheduling task requires vCenter, hostname %s \"\n \"is an ESXi server.\" % module.params.get('hostname'))\n powerstate = {\n 'present': vim.VirtualMachine.PowerOn,\n 'powered-off': vim.VirtualMachine.PowerOff,\n 'powered-on': vim.VirtualMachine.PowerOn,\n 'reboot-guest': vim.VirtualMachine.RebootGuest,\n 'restarted': vim.VirtualMachine.Reset,\n 'shutdown-guest': vim.VirtualMachine.ShutdownGuest,\n 'suspended': vim.VirtualMachine.Suspend,\n }\n dt = ''\n try:\n dt = datetime.strptime(scheduled_at, '%d/%m/%Y %H:%M')\n except ValueError as e:\n module.fail_json(msg=\"Failed to convert given date and time string to Python datetime object,\"\n \"please specify string in 'dd/mm/yyyy hh:mm' format: %s\" % to_native(e))\n schedule_task_spec = vim.scheduler.ScheduledTaskSpec()\n schedule_task_name = module.params['schedule_task_name'] or 'task_%s' % str(randint(10000, 99999))\n schedule_task_desc = module.params['schedule_task_description']\n if 
schedule_task_desc is None:\n schedule_task_desc = 'Schedule task for vm %s for ' \\\n 'operation %s at %s' % (vm.name, module.params['state'], scheduled_at)\n schedule_task_spec.name = schedule_task_name\n schedule_task_spec.description = schedule_task_desc\n schedule_task_spec.scheduler = vim.scheduler.OnceTaskScheduler()\n schedule_task_spec.scheduler.runAt = dt\n schedule_task_spec.action = vim.action.MethodAction()\n schedule_task_spec.action.name = powerstate[module.params['state']]\n schedule_task_spec.enabled = module.params['schedule_task_enabled']\n\n try:\n pyv.content.scheduledTaskManager.CreateScheduledTask(vm, schedule_task_spec)\n # As this is async task, we create scheduled task and mark state to changed.\n module.exit_json(changed=True)\n except vim.fault.InvalidName as e:\n module.fail_json(msg=\"Failed to create scheduled task %s for %s : %s\" % (module.params.get('state'),\n vm.name,\n to_native(e.msg)))\n except vim.fault.DuplicateName as e:\n module.exit_json(changed=False, details=to_native(e.msg))\n except vmodl.fault.InvalidArgument as e:\n module.fail_json(msg=\"Failed to create scheduled task %s as specifications \"\n \"given are invalid: %s\" % (module.params.get('state'),\n to_native(e.msg)))\n else:\n # Check if a virtual machine is locked by a question\n if check_answer_question_status(vm) and module.params['answer']:\n try:\n responses = make_answer_response(vm, module.params['answer'])\n answer_question(vm, responses)\n except Exception as e:\n module.fail_json(msg=\"%s\" % e)\n\n # Wait until a virtual machine is unlocked\n while True:\n if check_answer_question_status(vm) is False:\n break\n\n result['changed'] = True\n result['instance'] = gather_vm_facts(pyv.content, vm)\n else:\n result = set_vm_power_state(pyv.content, vm, module.params['state'], module.params['force'], module.params['state_change_timeout'],\n module.params['answer'])\n result['answer'] = module.params['answer']\n else:\n id = module.params.get('uuid') or module.params.get('moid') or module.params.get('name')\n module.fail_json(msg=\"Unable to set power state for non-existing virtual machine : '%s'\" % id)\n\n if result.get('failed') is True:\n module.fail_json(**result)\n\n module.exit_json(**result)\n\n\nif __name__ == '__main__':\n main()\n",
"path": "plugins/modules/vmware_guest_powerstate.py"
}
] | [
{
"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# Copyright: (c) 2017, Abhijeet Kasurde <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = r\"\"\"\n---\nmodule: vmware_guest_powerstate\nshort_description: Manages power states of virtual machines in vCenter\ndescription:\n- Power on / Power off / Restart a virtual machine.\nauthor:\n- Abhijeet Kasurde (@Akasurde) <[email protected]>\nrequirements:\n- python >= 2.6\n- PyVmomi\noptions:\n datacenter:\n description:\n - The I(datacenter) where the VM you'd like to operate the power.\n - This parameter is case sensitive.\n default: ha-datacenter\n type: str\n version_added: '1.13.0'\n state:\n description:\n - Set the state of the virtual machine.\n choices: [ powered-off, powered-on, reboot-guest, restarted, shutdown-guest, suspended, present]\n default: present\n type: str\n name:\n description:\n - Name of the virtual machine to work with.\n - Virtual machine names in vCenter are not necessarily unique, which may be problematic, see C(name_match).\n type: str\n name_match:\n description:\n - If multiple virtual machines matching the name, use the first or last found.\n default: first\n choices: [ first, last ]\n type: str\n uuid:\n description:\n - UUID of the instance to manage if known, this is VMware's unique identifier.\n - This is required if C(name) or C(moid) is not supplied.\n type: str\n moid:\n description:\n - Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.\n - This is required if C(name) or C(uuid) is not supplied.\n type: str\n use_instance_uuid:\n description:\n - Whether to use the VMware instance UUID rather than the BIOS UUID.\n default: false\n type: bool\n folder:\n description:\n - Destination folder, absolute or relative path to find an existing guest.\n - The folder should include the datacenter. ESX's datacenter is ha-datacenter\n - 'Examples:'\n - ' folder: /ha-datacenter/vm'\n - ' folder: ha-datacenter/vm'\n - ' folder: /datacenter1/vm'\n - ' folder: datacenter1/vm'\n - ' folder: /datacenter1/vm/folder1'\n - ' folder: datacenter1/vm/folder1'\n - ' folder: /folder1/datacenter1/vm'\n - ' folder: folder1/datacenter1/vm'\n - ' folder: /folder1/datacenter1/vm/folder2'\n type: str\n scheduled_at:\n description:\n - Date and time in string format at which specified task needs to be performed.\n - \"The required format for date and time - 'dd/mm/yyyy hh:mm'.\"\n - Scheduling task requires vCenter server. 
A standalone ESXi server does not support this option.\n type: str\n schedule_task_name:\n description:\n - Name of schedule task.\n - Valid only if C(scheduled_at) is specified.\n type: str\n required: False\n schedule_task_description:\n description:\n - Description of schedule task.\n - Valid only if C(scheduled_at) is specified.\n type: str\n required: False\n schedule_task_enabled:\n description:\n - Flag to indicate whether the scheduled task is enabled or disabled.\n type: bool\n required: False\n default: True\n force:\n description:\n - Ignore warnings and complete the actions.\n - This parameter is useful while forcing virtual machine state.\n default: False\n type: bool\n state_change_timeout:\n description:\n - If the C(state) is set to C(shutdown-guest), by default the module will return immediately after sending the shutdown signal.\n - If this argument is set to a positive integer, the module will instead wait for the VM to reach the poweredoff state.\n - The value sets a timeout in seconds for the module to wait for the state change.\n default: 0\n type: int\n answer:\n description:\n - A list of questions to answer, should one or more arise while waiting for the task to complete.\n - Some common uses are to allow a cdrom to be changed even if locked, or to answer the question as to whether a VM was copied or moved.\n - The I(answer) can be used if I(state) is C(powered-on).\n suboptions:\n question:\n description:\n - The message id, for example C(msg.uuid.altered).\n type: str\n required: True\n response:\n description:\n - The choice key, for example C(button.uuid.copiedTheVM).\n type: str\n required: True\n type: list\n elements: dict\n version_added: '1.11.0'\nextends_documentation_fragment:\n- community.vmware.vmware.documentation\n\"\"\"\n\nEXAMPLES = r\"\"\"\n- name: Set the state of a virtual machine to poweroff\n community.vmware.vmware_guest_powerstate:\n hostname: \"{{ vcenter_hostname }}\"\n username: \"{{ vcenter_username }}\"\n password: \"{{ vcenter_password }}\"\n folder: \"/{{ datacenter_name }}/vm/my_folder\"\n name: \"{{ guest_name }}\"\n state: powered-off\n delegate_to: localhost\n register: deploy\n\n- name: Set the state of a virtual machine to poweron using MoID\n community.vmware.vmware_guest_powerstate:\n hostname: \"{{ vcenter_hostname }}\"\n username: \"{{ vcenter_username }}\"\n password: \"{{ vcenter_password }}\"\n folder: \"/{{ datacenter_name }}/vm/my_folder\"\n moid: vm-42\n state: powered-on\n delegate_to: localhost\n register: deploy\n\n- name: Set the state of a virtual machine to poweroff at given scheduled time\n community.vmware.vmware_guest_powerstate:\n hostname: \"{{ vcenter_hostname }}\"\n username: \"{{ vcenter_username }}\"\n password: \"{{ vcenter_password }}\"\n folder: \"/{{ datacenter_name }}/vm/my_folder\"\n name: \"{{ guest_name }}\"\n state: powered-off\n scheduled_at: \"09/01/2018 10:18\"\n schedule_task_name: \"task_00001\"\n schedule_task_description: \"Sample task to poweroff VM\"\n schedule_task_enabled: True\n delegate_to: localhost\n register: deploy_at_schedule_datetime\n\n- name: Wait for the virtual machine to shutdown\n community.vmware.vmware_guest_powerstate:\n hostname: \"{{ vcenter_hostname }}\"\n username: \"{{ vcenter_username }}\"\n password: \"{{ vcenter_password }}\"\n name: \"{{ guest_name }}\"\n state: shutdown-guest\n state_change_timeout: 200\n delegate_to: localhost\n register: deploy\n\n- name: Automatically answer if a question locked a virtual machine\n block:\n - name: Power on a virtual 
machine without the answer param\n community.vmware.vmware_guest_powerstate:\n hostname: \"{{ esxi_hostname }}\"\n username: \"{{ esxi_username }}\"\n password: \"{{ esxi_password }}\"\n validate_certs: false\n folder: \"{{ f1 }}\"\n name: \"{{ vm_name }}\"\n state: powered-on\n rescue:\n - name: Power on a virtual machine with the answer param\n community.vmware.vmware_guest_powerstate:\n hostname: \"{{ esxi_hostname }}\"\n username: \"{{ esxi_username }}\"\n password: \"{{ esxi_password }}\"\n validate_certs: false\n folder: \"{{ f1 }}\"\n name: \"{{ vm_name }}\"\n answer:\n - question: \"msg.uuid.altered\"\n response: \"button.uuid.copiedTheVM\"\n state: powered-on\n\"\"\"\n\nRETURN = r\"\"\" # \"\"\"\n\ntry:\n from pyVmomi import vim, vmodl\nexcept ImportError:\n pass\n\nfrom random import randint\nfrom datetime import datetime\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, set_vm_power_state, vmware_argument_spec, \\\n check_answer_question_status, make_answer_response, answer_question, gather_vm_facts\nfrom ansible.module_utils._text import to_native\n\n\ndef main():\n argument_spec = vmware_argument_spec()\n argument_spec.update(\n datacenter=dict(type='str', default='ha-datacenter'),\n state=dict(type='str', default='present',\n choices=['present', 'powered-off', 'powered-on', 'reboot-guest', 'restarted', 'shutdown-guest', 'suspended']),\n name=dict(type='str'),\n name_match=dict(type='str', choices=['first', 'last'], default='first'),\n uuid=dict(type='str'),\n moid=dict(type='str'),\n use_instance_uuid=dict(type='bool', default=False),\n folder=dict(type='str'),\n force=dict(type='bool', default=False),\n scheduled_at=dict(type='str'),\n schedule_task_name=dict(),\n schedule_task_description=dict(),\n schedule_task_enabled=dict(type='bool', default=True),\n state_change_timeout=dict(type='int', default=0),\n answer=dict(type='list',\n elements='dict',\n options=dict(\n question=dict(type='str', required=True),\n response=dict(type='str', required=True)\n ))\n )\n\n module = AnsibleModule(\n argument_spec=argument_spec,\n supports_check_mode=False,\n mutually_exclusive=[\n ['name', 'uuid', 'moid'],\n ['scheduled_at', 'answer']\n ],\n )\n\n result = dict(changed=False,)\n\n if module.params['folder']:\n module.params['folder'] = module.params['folder'].rstrip('/')\n\n pyv = PyVmomi(module)\n\n # Check if the VM exists before continuing\n vm = pyv.get_vm()\n\n if vm:\n # VM already exists, so set power state\n scheduled_at = module.params.get('scheduled_at', None)\n if scheduled_at:\n if not pyv.is_vcenter():\n module.fail_json(msg=\"Scheduling task requires vCenter, hostname %s \"\n \"is an ESXi server.\" % module.params.get('hostname'))\n powerstate = {\n 'present': vim.VirtualMachine.PowerOn,\n 'powered-off': vim.VirtualMachine.PowerOff,\n 'powered-on': vim.VirtualMachine.PowerOn,\n 'reboot-guest': vim.VirtualMachine.RebootGuest,\n 'restarted': vim.VirtualMachine.Reset,\n 'shutdown-guest': vim.VirtualMachine.ShutdownGuest,\n 'suspended': vim.VirtualMachine.Suspend,\n }\n dt = ''\n try:\n dt = datetime.strptime(scheduled_at, '%d/%m/%Y %H:%M')\n except ValueError as e:\n module.fail_json(msg=\"Failed to convert given date and time string to Python datetime object,\"\n \"please specify string in 'dd/mm/yyyy hh:mm' format: %s\" % to_native(e))\n schedule_task_spec = vim.scheduler.ScheduledTaskSpec()\n schedule_task_name = module.params['schedule_task_name'] or 'task_%s' % str(randint(10000, 
99999))\n schedule_task_desc = module.params['schedule_task_description']\n if schedule_task_desc is None:\n schedule_task_desc = 'Schedule task for vm %s for ' \\\n 'operation %s at %s' % (vm.name, module.params['state'], scheduled_at)\n schedule_task_spec.name = schedule_task_name\n schedule_task_spec.description = schedule_task_desc\n schedule_task_spec.scheduler = vim.scheduler.OnceTaskScheduler()\n schedule_task_spec.scheduler.runAt = dt\n schedule_task_spec.action = vim.action.MethodAction()\n schedule_task_spec.action.name = powerstate[module.params['state']]\n schedule_task_spec.enabled = module.params['schedule_task_enabled']\n\n try:\n pyv.content.scheduledTaskManager.CreateScheduledTask(vm, schedule_task_spec)\n # As this is async task, we create scheduled task and mark state to changed.\n module.exit_json(changed=True)\n except vim.fault.InvalidName as e:\n module.fail_json(msg=\"Failed to create scheduled task %s for %s : %s\" % (module.params.get('state'),\n vm.name,\n to_native(e.msg)))\n except vim.fault.DuplicateName as e:\n module.exit_json(changed=False, details=to_native(e.msg))\n except vmodl.fault.InvalidArgument as e:\n module.fail_json(msg=\"Failed to create scheduled task %s as specifications \"\n \"given are invalid: %s\" % (module.params.get('state'),\n to_native(e.msg)))\n else:\n # Check if a virtual machine is locked by a question\n if check_answer_question_status(vm) and module.params['answer']:\n try:\n responses = make_answer_response(vm, module.params['answer'])\n answer_question(vm, responses)\n except Exception as e:\n module.fail_json(msg=\"%s\" % e)\n\n # Wait until a virtual machine is unlocked\n while True:\n if check_answer_question_status(vm) is False:\n break\n\n result['changed'] = True\n result['instance'] = gather_vm_facts(pyv.content, vm)\n else:\n result = set_vm_power_state(pyv.content, vm, module.params['state'], module.params['force'], module.params['state_change_timeout'],\n module.params['answer'])\n result['answer'] = module.params['answer']\n else:\n id = module.params.get('uuid') or module.params.get('moid') or module.params.get('name')\n module.fail_json(msg=\"Unable to set power state for non-existing virtual machine : '%s'\" % id)\n\n if result.get('failed') is True:\n module.fail_json(**result)\n\n module.exit_json(**result)\n\n\nif __name__ == '__main__':\n main()\n",
"path": "plugins/modules/vmware_guest_powerstate.py"
}
] | diff --git a/changelogs/fragments/1238-vmware_guest_powerstate-ignore_trailing_slash_in_folder.yml b/changelogs/fragments/1238-vmware_guest_powerstate-ignore_trailing_slash_in_folder.yml
new file mode 100644
index 0000000000..f73c29011e
--- /dev/null
+++ b/changelogs/fragments/1238-vmware_guest_powerstate-ignore_trailing_slash_in_folder.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - vmware_guest_powerstate - Ignore trailing `/` in `folder` parameter like other guest modules do (https://github.com/ansible-collections/community.vmware/issues/1238).
diff --git a/plugins/modules/vmware_guest_powerstate.py b/plugins/modules/vmware_guest_powerstate.py
index 4aafbec5de..712debf4e2 100644
--- a/plugins/modules/vmware_guest_powerstate.py
+++ b/plugins/modules/vmware_guest_powerstate.py
@@ -261,6 +261,9 @@ def main():
result = dict(changed=False,)
+ if module.params['folder']:
+ module.params['folder'] = module.params['folder'].rstrip('/')
+
pyv = PyVmomi(module)
# Check if the VM exists before continuing
diff --git a/tests/integration/targets/vmware_guest_powerstate/tasks/main.yml b/tests/integration/targets/vmware_guest_powerstate/tasks/main.yml
index 8fb3e8ad31..9c48b74e83 100644
--- a/tests/integration/targets/vmware_guest_powerstate/tasks/main.yml
+++ b/tests/integration/targets/vmware_guest_powerstate/tasks/main.yml
@@ -40,7 +40,7 @@
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
name: test_vm1
- folder: '{{ f0 }}'
+ folder: '{{ f0 }}/' # Test with a trailing / because of issue 1238
state: powered-off
register: poweroff_d1_c1_f0
|
oppia__oppia-7459 | Upgrade @typescript-eslint/eslint-plugin
`eslint-utils` is currently out of date. https://github.com/oppia/oppia/pull/7451 provides a temporary fix, but we need to upgrade the main package that requires `eslint-utils` to ensure that we have a long-term fix.
When fixing this, please make sure that the lint tests run successfully.
| [
{
"content": "# Copyright 2019 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"One-off jobs for feedback models.\"\"\"\n\nfrom core import jobs\nfrom core.platform import models\n\n(feedback_models,) = models.Registry.import_models([models.NAMES.feedback])\n\n\nclass GeneralFeedbackThreadUserOneOffJob(jobs.BaseMapReduceOneOffJobManager):\n \"\"\"One-off job for setting user_id and thread_id for all\n GeneralFeedbackThreadUserModels.\n \"\"\"\n @classmethod\n def entity_classes_to_map_over(cls):\n \"\"\"Return a list of datastore class references to map over.\"\"\"\n return [feedback_models.GeneralFeedbackThreadUserModel]\n\n @staticmethod\n def map(model_instance):\n \"\"\"Implements the map function for this job.\"\"\"\n user_id, thread_id = model_instance.id.split('.', 1)\n if model_instance.user_id is None:\n model_instance.user_id = user_id\n if model_instance.thread_id is None:\n model_instance.thread_id = thread_id\n model_instance.put(update_last_updated_time=False)\n yield ('SUCCESS', model_instance.id)\n\n @staticmethod\n def reduce(key, values):\n yield (key, len(values))\n",
"path": "core/domain/feedback_jobs_one_off.py"
}
] | [
{
"content": "# Copyright 2019 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"One-off jobs for feedback models.\"\"\"\nfrom __future__ import absolute_import # pylint: disable=import-only-modules\n\nfrom core import jobs\nfrom core.platform import models\n\n(feedback_models,) = models.Registry.import_models([models.NAMES.feedback])\n\n\nclass GeneralFeedbackThreadUserOneOffJob(jobs.BaseMapReduceOneOffJobManager):\n \"\"\"One-off job for setting user_id and thread_id for all\n GeneralFeedbackThreadUserModels.\n \"\"\"\n @classmethod\n def entity_classes_to_map_over(cls):\n \"\"\"Return a list of datastore class references to map over.\"\"\"\n return [feedback_models.GeneralFeedbackThreadUserModel]\n\n @staticmethod\n def map(model_instance):\n \"\"\"Implements the map function for this job.\"\"\"\n user_id, thread_id = model_instance.id.split('.', 1)\n if model_instance.user_id is None:\n model_instance.user_id = user_id\n if model_instance.thread_id is None:\n model_instance.thread_id = thread_id\n model_instance.put(update_last_updated_time=False)\n yield ('SUCCESS', model_instance.id)\n\n @staticmethod\n def reduce(key, values):\n yield (key, len(values))\n",
"path": "core/domain/feedback_jobs_one_off.py"
}
] | diff --git a/.eslintrc b/.eslintrc
index 2a39be6f84aae..ab5cdb333ac38 100644
--- a/.eslintrc
+++ b/.eslintrc
@@ -149,6 +149,7 @@
"no-multi-str": [
"error"
],
+ "no-prototype-builtins": "off",
"no-redeclare": [
"off"
],
diff --git a/core/domain/feedback_jobs_one_off.py b/core/domain/feedback_jobs_one_off.py
index f92e661a17fd5..99788c22302f9 100644
--- a/core/domain/feedback_jobs_one_off.py
+++ b/core/domain/feedback_jobs_one_off.py
@@ -13,6 +13,7 @@
# limitations under the License.
"""One-off jobs for feedback models."""
+from __future__ import absolute_import # pylint: disable=import-only-modules
from core import jobs
from core.platform import models
diff --git a/core/domain/feedback_jobs_one_off_test.py b/core/domain/feedback_jobs_one_off_test.py
index f324fd80c1114..cdb625cf4d684 100644
--- a/core/domain/feedback_jobs_one_off_test.py
+++ b/core/domain/feedback_jobs_one_off_test.py
@@ -13,6 +13,7 @@
# limitations under the License.
"""Tests for Feedback-related jobs."""
+from __future__ import absolute_import # pylint: disable=import-only-modules
import ast
diff --git a/extensions/classifiers/SVMPredictionService.ts b/extensions/classifiers/SVMPredictionService.ts
index 35cc33bd3bcad..ca2bab119cfea 100644
--- a/extensions/classifiers/SVMPredictionService.ts
+++ b/extensions/classifiers/SVMPredictionService.ts
@@ -154,7 +154,7 @@ export class SVMPredictionService {
}
if (iter >= maxIter) {
- console.info('Exceeds maxIter in calculateMulticlassProbabilities');
+ console.warn('Exceeds maxIter in calculateMulticlassProbabilities');
}
return P;
diff --git a/package-lock.json b/package-lock.json
index f6550ace26216..29434cca09d4b 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -975,83 +975,55 @@
}
},
"@typescript-eslint/eslint-plugin": {
- "version": "1.13.0",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-1.13.0.tgz",
- "integrity": "sha512-WQHCozMnuNADiqMtsNzp96FNox5sOVpU8Xt4meaT4em8lOG1SrOv92/mUbEHQVh90sldKSfcOc/I0FOb/14G1g==",
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-2.0.0.tgz",
+ "integrity": "sha512-Mo45nxTTELODdl7CgpZKJISvLb+Fu64OOO2ZFc2x8sYSnUpFrBUW3H+H/ZGYmEkfnL6VkdtOSxgdt+Av79j0sA==",
"dev": true,
"requires": {
- "@typescript-eslint/experimental-utils": "1.13.0",
- "eslint-utils": "^1.3.1",
+ "@typescript-eslint/experimental-utils": "2.0.0",
+ "eslint-utils": "^1.4.0",
"functional-red-black-tree": "^1.0.1",
"regexpp": "^2.0.1",
- "tsutils": "^3.7.0"
- },
- "dependencies": {
- "@typescript-eslint/experimental-utils": {
- "version": "1.13.0",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/experimental-utils/-/experimental-utils-1.13.0.tgz",
- "integrity": "sha512-zmpS6SyqG4ZF64ffaJ6uah6tWWWgZ8m+c54XXgwFtUv0jNz8aJAVx8chMCvnk7yl6xwn8d+d96+tWp7fXzTuDg==",
- "dev": true,
- "requires": {
- "@types/json-schema": "^7.0.3",
- "@typescript-eslint/typescript-estree": "1.13.0",
- "eslint-scope": "^4.0.0"
- }
- },
- "@typescript-eslint/typescript-estree": {
- "version": "1.13.0",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-1.13.0.tgz",
- "integrity": "sha512-b5rCmd2e6DCC6tCTN9GSUAuxdYwCM/k/2wdjHGrIRGPSJotWMCe/dGpi66u42bhuh8q3QBzqM4TMA1GUUCJvdw==",
- "dev": true,
- "requires": {
- "lodash.unescape": "4.0.1",
- "semver": "5.5.0"
- }
- },
- "semver": {
- "version": "5.5.0",
- "resolved": "https://registry.npmjs.org/semver/-/semver-5.5.0.tgz",
- "integrity": "sha512-4SJ3dm0WAwWy/NVeioZh5AntkdJoWKxHxcmyP622fOkgHa4z3R0TdBJICINyaSDE6uNwVc8gZr+ZinwZAH4xIA==",
- "dev": true
- }
+ "tsutils": "^3.14.0"
}
},
"@typescript-eslint/experimental-utils": {
- "version": "1.12.0",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/experimental-utils/-/experimental-utils-1.12.0.tgz",
- "integrity": "sha512-s0soOTMJloytr9GbPteMLNiO2HvJ+qgQkRNplABXiVw6vq7uQRvidkby64Gqt/nA7pys74HksHwRULaB/QRVyw==",
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/experimental-utils/-/experimental-utils-2.0.0.tgz",
+ "integrity": "sha512-XGJG6GNBXIEx/mN4eTRypN/EUmsd0VhVGQ1AG+WTgdvjHl0G8vHhVBHrd/5oI6RRYBRnedNymSYWW1HAdivtmg==",
"dev": true,
"requires": {
- "@typescript-eslint/typescript-estree": "1.12.0",
+ "@types/json-schema": "^7.0.3",
+ "@typescript-eslint/typescript-estree": "2.0.0",
"eslint-scope": "^4.0.0"
}
},
"@typescript-eslint/parser": {
- "version": "1.12.0",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-1.12.0.tgz",
- "integrity": "sha512-0uzbaa9ZLCA5yMWJywnJJ7YVENKGWVUhJDV5UrMoldC5HoI54W5kkdPhTfmtFKpPFp93MIwmJj0/61ztvmz5Dw==",
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-2.0.0.tgz",
+ "integrity": "sha512-ibyMBMr0383ZKserIsp67+WnNVoM402HKkxqXGlxEZsXtnGGurbnY90pBO3e0nBUM7chEEOcxUhgw9aPq7fEBA==",
"dev": true,
"requires": {
"@types/eslint-visitor-keys": "^1.0.0",
- "@typescript-eslint/experimental-utils": "1.12.0",
- "@typescript-eslint/typescript-estree": "1.12.0",
+ "@typescript-eslint/experimental-utils": "2.0.0",
+ "@typescript-eslint/typescript-estree": "2.0.0",
"eslint-visitor-keys": "^1.0.0"
}
},
"@typescript-eslint/typescript-estree": {
- "version": "1.12.0",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-1.12.0.tgz",
- "integrity": "sha512-nwN6yy//XcVhFs0ZyU+teJHB8tbCm7AIA8mu6E2r5hu6MajwYBY3Uwop7+rPZWUN/IUOHpL8C+iUPMDVYUU3og==",
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-2.0.0.tgz",
+ "integrity": "sha512-NXbmzA3vWrSgavymlzMWNecgNOuiMMp62MO3kI7awZRLRcsA1QrYWo6q08m++uuAGVbXH/prZi2y1AWuhSu63w==",
"dev": true,
"requires": {
"lodash.unescape": "4.0.1",
- "semver": "5.5.0"
+ "semver": "^6.2.0"
},
"dependencies": {
"semver": {
- "version": "5.5.0",
- "resolved": "https://registry.npmjs.org/semver/-/semver-5.5.0.tgz",
- "integrity": "sha512-4SJ3dm0WAwWy/NVeioZh5AntkdJoWKxHxcmyP622fOkgHa4z3R0TdBJICINyaSDE6uNwVc8gZr+ZinwZAH4xIA==",
+ "version": "6.3.0",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz",
+ "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==",
"dev": true
}
}
@@ -1267,9 +1239,9 @@
"dev": true
},
"acorn-jsx": {
- "version": "5.0.1",
- "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.0.1.tgz",
- "integrity": "sha512-HJ7CfNHrfJLlNTzIEUTj43LNWGkqpRLxm3YjAlcD0ACydk9XynzYsCBHxut+iqt+1aBXkx9UP/w/ZqMr13XIzg==",
+ "version": "5.0.2",
+ "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.0.2.tgz",
+ "integrity": "sha512-tiNTrP1MP0QrChmD2DdupCr6HWSFeKVw5d/dHTu4Y7rkAkRhU/Dt7dphAfIUyxtHpl/eBVip5uTNSpQJHylpAw==",
"dev": true
},
"adm-zip": {
@@ -4378,47 +4350,48 @@
}
},
"eslint": {
- "version": "5.16.0",
- "resolved": "https://registry.npmjs.org/eslint/-/eslint-5.16.0.tgz",
- "integrity": "sha512-S3Rz11i7c8AA5JPv7xAH+dOyq/Cu/VXHiHXBPOU1k/JAM5dXqQPt3qcrhpHSorXmrpu2g0gkIBVXAqCpzfoZIg==",
+ "version": "6.2.2",
+ "resolved": "https://registry.npmjs.org/eslint/-/eslint-6.2.2.tgz",
+ "integrity": "sha512-mf0elOkxHbdyGX1IJEUsNBzCDdyoUgljF3rRlgfyYh0pwGnreLc0jjD6ZuleOibjmnUWZLY2eXwSooeOgGJ2jw==",
"dev": true,
"requires": {
"@babel/code-frame": "^7.0.0",
- "ajv": "^6.9.1",
+ "ajv": "^6.10.0",
"chalk": "^2.1.0",
"cross-spawn": "^6.0.5",
"debug": "^4.0.1",
"doctrine": "^3.0.0",
- "eslint-scope": "^4.0.3",
- "eslint-utils": "^1.3.1",
- "eslint-visitor-keys": "^1.0.0",
- "espree": "^5.0.1",
+ "eslint-scope": "^5.0.0",
+ "eslint-utils": "^1.4.2",
+ "eslint-visitor-keys": "^1.1.0",
+ "espree": "^6.1.1",
"esquery": "^1.0.1",
"esutils": "^2.0.2",
"file-entry-cache": "^5.0.1",
"functional-red-black-tree": "^1.0.1",
- "glob": "^7.1.2",
+ "glob-parent": "^5.0.0",
"globals": "^11.7.0",
"ignore": "^4.0.6",
"import-fresh": "^3.0.0",
"imurmurhash": "^0.1.4",
- "inquirer": "^6.2.2",
- "js-yaml": "^3.13.0",
+ "inquirer": "^6.4.1",
+ "is-glob": "^4.0.0",
+ "js-yaml": "^3.13.1",
"json-stable-stringify-without-jsonify": "^1.0.1",
"levn": "^0.3.0",
- "lodash": "^4.17.11",
+ "lodash": "^4.17.14",
"minimatch": "^3.0.4",
"mkdirp": "^0.5.1",
"natural-compare": "^1.4.0",
"optionator": "^0.8.2",
- "path-is-inside": "^1.0.2",
"progress": "^2.0.0",
"regexpp": "^2.0.1",
- "semver": "^5.5.1",
- "strip-ansi": "^4.0.0",
- "strip-json-comments": "^2.0.1",
+ "semver": "^6.1.2",
+ "strip-ansi": "^5.2.0",
+ "strip-json-comments": "^3.0.1",
"table": "^5.2.3",
- "text-table": "^0.2.0"
+ "text-table": "^0.2.0",
+ "v8-compile-cache": "^2.0.3"
},
"dependencies": {
"debug": {
@@ -4430,6 +4403,22 @@
"ms": "^2.1.1"
}
},
+ "eslint-scope": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.0.0.tgz",
+ "integrity": "sha512-oYrhJW7S0bxAFDvWqzvMPRm6pcgcnWc4QnofCAqRTRfQC0JcwenzGglTtsLyIuuWFfkqDG9vz67cnttSd53djw==",
+ "dev": true,
+ "requires": {
+ "esrecurse": "^4.1.0",
+ "estraverse": "^4.1.1"
+ }
+ },
+ "eslint-visitor-keys": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.1.0.tgz",
+ "integrity": "sha512-8y9YjtM1JBJU/A9Kc+SbaOV4y29sSWckBwMHa+FGtVj5gN/sbnKDf6xJUl+8g7FAij9LVaP8C24DUiH/f/2Z9A==",
+ "dev": true
+ },
"ignore": {
"version": "4.0.6",
"resolved": "https://registry.npmjs.org/ignore/-/ignore-4.0.6.tgz",
@@ -4457,6 +4446,12 @@
"resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz",
"integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==",
"dev": true
+ },
+ "semver": {
+ "version": "6.3.0",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz",
+ "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==",
+ "dev": true
}
}
},
@@ -4467,12 +4462,12 @@
"dev": true
},
"eslint-plugin-html": {
- "version": "5.0.5",
- "resolved": "https://registry.npmjs.org/eslint-plugin-html/-/eslint-plugin-html-5.0.5.tgz",
- "integrity": "sha512-v/33i3OD0fuXcRXexVyXXBOe4mLBLBQoF1UO1Uy9D+XLq4MC8K45GcQKfqjC/FnHAHp3pYUjpHHktYNCtShGmg==",
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-html/-/eslint-plugin-html-6.0.0.tgz",
+ "integrity": "sha512-PQcGippOHS+HTbQCStmH5MY1BF2MaU8qW/+Mvo/8xTa/ioeMXdSP+IiaBw2+nh0KEMfYQKuTz1Zo+vHynjwhbg==",
"dev": true,
"requires": {
- "htmlparser2": "^3.10.0"
+ "htmlparser2": "^3.10.1"
}
},
"eslint-scope": {
@@ -4501,20 +4496,26 @@
"dev": true
},
"espree": {
- "version": "5.0.1",
- "resolved": "https://registry.npmjs.org/espree/-/espree-5.0.1.tgz",
- "integrity": "sha512-qWAZcWh4XE/RwzLJejfcofscgMc9CamR6Tn1+XRXNzrvUSSbiAjGOI/fggztjIi7y9VLPqnICMIPiGyr8JaZ0A==",
+ "version": "6.1.1",
+ "resolved": "https://registry.npmjs.org/espree/-/espree-6.1.1.tgz",
+ "integrity": "sha512-EYbr8XZUhWbYCqQRW0duU5LxzL5bETN6AjKBGy1302qqzPaCH10QbRg3Wvco79Z8x9WbiE8HYB4e75xl6qUYvQ==",
"dev": true,
"requires": {
- "acorn": "^6.0.7",
- "acorn-jsx": "^5.0.0",
- "eslint-visitor-keys": "^1.0.0"
+ "acorn": "^7.0.0",
+ "acorn-jsx": "^5.0.2",
+ "eslint-visitor-keys": "^1.1.0"
},
"dependencies": {
"acorn": {
- "version": "6.2.1",
- "resolved": "https://registry.npmjs.org/acorn/-/acorn-6.2.1.tgz",
- "integrity": "sha512-JD0xT5FCRDNyjDda3Lrg/IxFscp9q4tiYtxE1/nOzlKCk7hIRuYjhq1kCNkbPjMRMZuFq20HNQn1I9k8Oj0E+Q==",
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/acorn/-/acorn-7.0.0.tgz",
+ "integrity": "sha512-PaF/MduxijYYt7unVGRuds1vBC9bFxbNf+VWqhOClfdgy7RlVkQqt610ig1/yxTgsDIfW1cWDel5EBbOy3jdtQ==",
+ "dev": true
+ },
+ "eslint-visitor-keys": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.1.0.tgz",
+ "integrity": "sha512-8y9YjtM1JBJU/A9Kc+SbaOV4y29sSWckBwMHa+FGtVj5gN/sbnKDf6xJUl+8g7FAij9LVaP8C24DUiH/f/2Z9A==",
"dev": true
}
}
@@ -7011,9 +7012,9 @@
"dev": true
},
"inquirer": {
- "version": "6.5.0",
- "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-6.5.0.tgz",
- "integrity": "sha512-scfHejeG/lVZSpvCXpsB4j/wQNPM5JC8kiElOI0OUTwmc1RTpXr4H32/HOlQHcZiYl2z2VElwuCVDRG8vFmbnA==",
+ "version": "6.5.2",
+ "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-6.5.2.tgz",
+ "integrity": "sha512-cntlB5ghuB0iuO65Ovoi8ogLHiWGs/5yNrtUcKjFhSSiVeAIVpD7koaSU9RM8mpXw5YDi9RdYXGQMaOURB7ycQ==",
"dev": true,
"requires": {
"ansi-escapes": "^3.2.0",
@@ -7029,23 +7030,6 @@
"string-width": "^2.1.0",
"strip-ansi": "^5.1.0",
"through": "^2.3.6"
- },
- "dependencies": {
- "ansi-regex": {
- "version": "4.1.0",
- "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz",
- "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==",
- "dev": true
- },
- "strip-ansi": {
- "version": "5.2.0",
- "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz",
- "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==",
- "dev": true,
- "requires": {
- "ansi-regex": "^4.1.0"
- }
- }
}
},
"interpret": {
@@ -12565,6 +12549,17 @@
"requires": {
"is-fullwidth-code-point": "^2.0.0",
"strip-ansi": "^4.0.0"
+ },
+ "dependencies": {
+ "strip-ansi": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz",
+ "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=",
+ "dev": true,
+ "requires": {
+ "ansi-regex": "^3.0.0"
+ }
+ }
}
},
"string.prototype.startswith": {
@@ -12595,12 +12590,20 @@
}
},
"strip-ansi": {
- "version": "4.0.0",
- "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz",
- "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=",
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz",
+ "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==",
"dev": true,
"requires": {
- "ansi-regex": "^3.0.0"
+ "ansi-regex": "^4.1.0"
+ },
+ "dependencies": {
+ "ansi-regex": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz",
+ "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==",
+ "dev": true
+ }
}
},
"strip-bom": {
@@ -12628,9 +12631,9 @@
}
},
"strip-json-comments": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz",
- "integrity": "sha1-PFMZQukIwml8DsNEhYwobHygpgo=",
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.0.1.tgz",
+ "integrity": "sha512-VTyMAUfdm047mwKl+u79WIdrZxtFtn+nBxHeb844XBQ9uMNTuTHdx2hc5RiAJYqwTj3wc/xe5HLSdJSkJ+WfZw==",
"dev": true
},
"style-loader": {
diff --git a/package.json b/package.json
index 8dbb95f19114b..25cd376d3a773 100644
--- a/package.json
+++ b/package.json
@@ -56,8 +56,8 @@
"@types/q": "^1.5.1",
"@types/select2": "^4.0.48",
"@types/selenium-webdriver": "^4.0.0",
- "@typescript-eslint/eslint-plugin": "^1.13.0",
- "@typescript-eslint/parser": "^1.7.0",
+ "@typescript-eslint/eslint-plugin": "^2.0.0",
+ "@typescript-eslint/parser": "^2.0.0",
"ajv": "^6.10.0",
"angular": "1.6.6",
"angular-route": "1.6.6",
@@ -70,9 +70,9 @@
"css-loader": "^3.1.0",
"dotenv": "^7.0.0",
"enhanced-resolve": "^4.1.0",
- "eslint": "^5.16.0",
- "eslint-plugin-angular": "^4.0.0",
- "eslint-plugin-html": "^5.0.3",
+ "eslint": "^6.0.0",
+ "eslint-plugin-angular": "^4.0.1",
+ "eslint-plugin-html": "^6.0.0",
"fork-ts-checker-webpack-plugin": "^1.3.3",
"gulp": "^4.0.1",
"gulp-concat": "^2.6.1",
diff --git a/typings/custom-scope-defs.d.ts b/typings/custom-scope-defs.d.ts
index 627f535edcf10..04d5a683b0b64 100644
--- a/typings/custom-scope-defs.d.ts
+++ b/typings/custom-scope-defs.d.ts
@@ -15,7 +15,8 @@ interface ICustomScope extends ng.IScope {
onFileCleared?: (() => void);
droppedFile?: any;
- // custom-forms-directives/audio-file-uploader.directive.ts, image-uploader.directive.ts
+ // custom-forms-directives/audio-file-uploader.directive.ts,
+ // image-uploader.directive.ts
errorMessage?: string;
onFileChanged?: ((file: any, fileName?: string) => void);
@@ -36,7 +37,8 @@ interface ICustomScope extends ng.IScope {
getAlwaysEditable?: (() => boolean);
getIsEditable?: (() => boolean);
- // value-generator-editor.directive.ts, CopierDirective.ts, RandomSelectorDirective.ts
+ // value-generator-editor.directive.ts, CopierDirective.ts,
+ // RandomSelectorDirective.ts
generatorId?: string;
// value-generator-editor.directive.ts
diff --git a/typings/globals.d.ts b/typings/globals.d.ts
index 9fd2595152ac6..8ab9e1e7996ad 100644
--- a/typings/globals.d.ts
+++ b/typings/globals.d.ts
@@ -1,4 +1,4 @@
-// Using angular without declaration gives the following error:
+// Using angular without declaration gives the following error:
// 'angular' refers to a UMD global, but the current file is a module.
// Consider adding an import instead. To fix this, we need to mark
// angular as a global. Ref: https://stackoverflow.com/a/42035067
|
PyGithub__PyGithub-486 | GistFile.content is None if the Gist hasn't been completed
If a gist object hasn't been completed (i.e. it was lazily loaded), the files in that gist have no content.
I created a pull request for this using the Just4test account.
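To make the symptom concrete, here is a minimal sketch (the token is a placeholder; the behaviour noted in the comments is the one reported above). Gists returned by the list endpoint are summary objects whose `GistFile.content` is `None`, while fetching the gist by id returns the full file bodies:
```
from github import Github

g = Github("<token>")  # placeholder credentials

for gist in g.get_user().get_gists():        # summary objects from GET /gists
    gist_file = list(gist.files.values())[0]
    print(gist_file.content)                 # None, as reported in this issue

    full = g.get_gist(gist.id)               # GET /gists/:id returns full file data
    gist_file = list(full.files.values())[0]
    print(gist_file.content is not None)     # True once the gist is fetched directly
    break
```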
| [
{
"content": "# -*- coding: utf-8 -*-\n\n# ########################## Copyrights and license ############################\n# #\n# Copyright 2012 Steve English <[email protected]> #\n# Copyright 2012 Vincent Jacques <[email protected]> #\n# Copyright 2012 Zearin <[email protected]> #\n# Copyright 2013 AKFish <[email protected]> #\n# Copyright 2013 Vincent Jacques <[email protected]> #\n# #\n# This file is part of PyGithub. #\n# http://pygithub.github.io/PyGithub/v1/index.html #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #\n# #\n# ##############################################################################\n\nimport github.GithubObject\nimport github.PaginatedList\n\nimport github.GistComment\nimport github.NamedUser\nimport github.GistFile\nimport github.GistHistoryState\n\n\nclass Gist(github.GithubObject.CompletableGithubObject):\n \"\"\"\n This class represents Gists as returned for example by http://developer.github.com/v3/todo\n \"\"\"\n\n def __repr__(self):\n return self.get__repr__({\"id\": self._id.value})\n\n @property\n def comments(self):\n \"\"\"\n :type: integer\n \"\"\"\n self._completeIfNotSet(self._comments)\n return self._comments.value\n\n @property\n def comments_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._comments_url)\n return self._comments_url.value\n\n @property\n def commits_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._commits_url)\n return self._commits_url.value\n\n @property\n def created_at(self):\n \"\"\"\n :type: datetime.datetime\n \"\"\"\n self._completeIfNotSet(self._created_at)\n return self._created_at.value\n\n @property\n def description(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._description)\n return self._description.value\n\n @property\n def files(self):\n \"\"\"\n :type: dict of string to :class:`github.GistFile.GistFile`\n \"\"\"\n self._completeIfNotSet(self._files)\n return self._files.value\n\n @property\n def fork_of(self):\n \"\"\"\n :type: :class:`github.Gist.Gist`\n \"\"\"\n self._completeIfNotSet(self._fork_of)\n return self._fork_of.value\n\n @property\n def forks(self):\n \"\"\"\n :type: list of :class:`github.Gist.Gist`\n \"\"\"\n self._completeIfNotSet(self._forks)\n return self._forks.value\n\n @property\n def forks_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._forks_url)\n return self._forks_url.value\n\n @property\n def git_pull_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._git_pull_url)\n return self._git_pull_url.value\n\n @property\n def git_push_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._git_push_url)\n return self._git_push_url.value\n\n @property\n def history(self):\n \"\"\"\n :type: list of :class:`github.GistHistoryState.GistHistoryState`\n \"\"\"\n self._completeIfNotSet(self._history)\n return 
self._history.value\n\n @property\n def html_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._html_url)\n return self._html_url.value\n\n @property\n def id(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._id)\n return self._id.value\n\n @property\n def owner(self):\n \"\"\"\n :type: :class:`github.NamedUser.NamedUser`\n \"\"\"\n self._completeIfNotSet(self._owner)\n return self._owner.value\n\n @property\n def public(self):\n \"\"\"\n :type: bool\n \"\"\"\n self._completeIfNotSet(self._public)\n return self._public.value\n\n @property\n def updated_at(self):\n \"\"\"\n :type: datetime.datetime\n \"\"\"\n self._completeIfNotSet(self._updated_at)\n return self._updated_at.value\n\n @property\n def url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._url)\n return self._url.value\n\n @property\n def user(self):\n \"\"\"\n :type: :class:`github.NamedUser.NamedUser`\n \"\"\"\n self._completeIfNotSet(self._user)\n return self._user.value\n\n def create_comment(self, body):\n \"\"\"\n :calls: `POST /gists/:gist_id/comments <http://developer.github.com/v3/gists/comments>`_\n :param body: string\n :rtype: :class:`github.GistComment.GistComment`\n \"\"\"\n assert isinstance(body, (str, unicode)), body\n post_parameters = {\n \"body\": body,\n }\n headers, data = self._requester.requestJsonAndCheck(\n \"POST\",\n self.url + \"/comments\",\n input=post_parameters\n )\n return github.GistComment.GistComment(self._requester, headers, data, completed=True)\n\n def create_fork(self):\n \"\"\"\n :calls: `POST /gists/:id/forks <http://developer.github.com/v3/gists>`_\n :rtype: :class:`github.Gist.Gist`\n \"\"\"\n headers, data = self._requester.requestJsonAndCheck(\n \"POST\",\n self.url + \"/forks\"\n )\n return Gist(self._requester, headers, data, completed=True)\n\n def delete(self):\n \"\"\"\n :calls: `DELETE /gists/:id <http://developer.github.com/v3/gists>`_\n :rtype: None\n \"\"\"\n headers, data = self._requester.requestJsonAndCheck(\n \"DELETE\",\n self.url\n )\n\n def edit(self, description=github.GithubObject.NotSet, files=github.GithubObject.NotSet):\n \"\"\"\n :calls: `PATCH /gists/:id <http://developer.github.com/v3/gists>`_\n :param description: string\n :param files: dict of string to :class:`github.InputFileContent.InputFileContent`\n :rtype: None\n \"\"\"\n assert description is github.GithubObject.NotSet or isinstance(description, (str, unicode)), description\n assert files is github.GithubObject.NotSet or all(element is None or isinstance(element, github.InputFileContent) for element in files.itervalues()), files\n post_parameters = dict()\n if description is not github.GithubObject.NotSet:\n post_parameters[\"description\"] = description\n if files is not github.GithubObject.NotSet:\n post_parameters[\"files\"] = dict((key, None if value is None else value._identity) for key, value in files.iteritems())\n headers, data = self._requester.requestJsonAndCheck(\n \"PATCH\",\n self.url,\n input=post_parameters\n )\n self._useAttributes(data)\n\n def get_comment(self, id):\n \"\"\"\n :calls: `GET /gists/:gist_id/comments/:id <http://developer.github.com/v3/gists/comments>`_\n :param id: integer\n :rtype: :class:`github.GistComment.GistComment`\n \"\"\"\n assert isinstance(id, (int, long)), id\n headers, data = self._requester.requestJsonAndCheck(\n \"GET\",\n self.url + \"/comments/\" + str(id)\n )\n return github.GistComment.GistComment(self._requester, headers, data, completed=True)\n\n def get_comments(self):\n 
\"\"\"\n :calls: `GET /gists/:gist_id/comments <http://developer.github.com/v3/gists/comments>`_\n :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.GistComment.GistComment`\n \"\"\"\n return github.PaginatedList.PaginatedList(\n github.GistComment.GistComment,\n self._requester,\n self.url + \"/comments\",\n None\n )\n\n def is_starred(self):\n \"\"\"\n :calls: `GET /gists/:id/star <http://developer.github.com/v3/gists>`_\n :rtype: bool\n \"\"\"\n status, headers, data = self._requester.requestJson(\n \"GET\",\n self.url + \"/star\"\n )\n return status == 204\n\n def reset_starred(self):\n \"\"\"\n :calls: `DELETE /gists/:id/star <http://developer.github.com/v3/gists>`_\n :rtype: None\n \"\"\"\n headers, data = self._requester.requestJsonAndCheck(\n \"DELETE\",\n self.url + \"/star\"\n )\n\n def set_starred(self):\n \"\"\"\n :calls: `PUT /gists/:id/star <http://developer.github.com/v3/gists>`_\n :rtype: None\n \"\"\"\n headers, data = self._requester.requestJsonAndCheck(\n \"PUT\",\n self.url + \"/star\"\n )\n\n def _initAttributes(self):\n self._comments = github.GithubObject.NotSet\n self._comments_url = github.GithubObject.NotSet\n self._commits_url = github.GithubObject.NotSet\n self._created_at = github.GithubObject.NotSet\n self._description = github.GithubObject.NotSet\n self._files = github.GithubObject.NotSet\n self._fork_of = github.GithubObject.NotSet\n self._forks = github.GithubObject.NotSet\n self._forks_url = github.GithubObject.NotSet\n self._git_pull_url = github.GithubObject.NotSet\n self._git_push_url = github.GithubObject.NotSet\n self._history = github.GithubObject.NotSet\n self._html_url = github.GithubObject.NotSet\n self._id = github.GithubObject.NotSet\n self._owner = github.GithubObject.NotSet\n self._public = github.GithubObject.NotSet\n self._updated_at = github.GithubObject.NotSet\n self._url = github.GithubObject.NotSet\n self._user = github.GithubObject.NotSet\n\n def _useAttributes(self, attributes):\n if \"comments\" in attributes: # pragma no branch\n self._comments = self._makeIntAttribute(attributes[\"comments\"])\n if \"comments_url\" in attributes: # pragma no branch\n self._comments_url = self._makeStringAttribute(attributes[\"comments_url\"])\n if \"commits_url\" in attributes: # pragma no branch\n self._commits_url = self._makeStringAttribute(attributes[\"commits_url\"])\n if \"created_at\" in attributes: # pragma no branch\n self._created_at = self._makeDatetimeAttribute(attributes[\"created_at\"])\n if \"description\" in attributes: # pragma no branch\n self._description = self._makeStringAttribute(attributes[\"description\"])\n if \"files\" in attributes: # pragma no branch\n self._files = self._makeDictOfStringsToClassesAttribute(github.GistFile.GistFile, attributes[\"files\"])\n if \"fork_of\" in attributes: # pragma no branch\n self._fork_of = self._makeClassAttribute(Gist, attributes[\"fork_of\"])\n if \"forks\" in attributes: # pragma no branch\n self._forks = self._makeListOfClassesAttribute(Gist, attributes[\"forks\"])\n if \"forks_url\" in attributes: # pragma no branch\n self._forks_url = self._makeStringAttribute(attributes[\"forks_url\"])\n if \"git_pull_url\" in attributes: # pragma no branch\n self._git_pull_url = self._makeStringAttribute(attributes[\"git_pull_url\"])\n if \"git_push_url\" in attributes: # pragma no branch\n self._git_push_url = self._makeStringAttribute(attributes[\"git_push_url\"])\n if \"history\" in attributes: # pragma no branch\n self._history = 
self._makeListOfClassesAttribute(github.GistHistoryState.GistHistoryState, attributes[\"history\"])\n if \"html_url\" in attributes: # pragma no branch\n self._html_url = self._makeStringAttribute(attributes[\"html_url\"])\n if \"id\" in attributes: # pragma no branch\n self._id = self._makeStringAttribute(attributes[\"id\"])\n if \"owner\" in attributes: # pragma no branch\n self._owner = self._makeClassAttribute(github.NamedUser.NamedUser, attributes[\"owner\"])\n if \"public\" in attributes: # pragma no branch\n self._public = self._makeBoolAttribute(attributes[\"public\"])\n if \"updated_at\" in attributes: # pragma no branch\n self._updated_at = self._makeDatetimeAttribute(attributes[\"updated_at\"])\n if \"url\" in attributes: # pragma no branch\n self._url = self._makeStringAttribute(attributes[\"url\"])\n if \"user\" in attributes: # pragma no branch\n self._user = self._makeClassAttribute(github.NamedUser.NamedUser, attributes[\"user\"])\n",
"path": "github/Gist.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n\n# ########################## Copyrights and license ############################\n# #\n# Copyright 2012 Steve English <[email protected]> #\n# Copyright 2012 Vincent Jacques <[email protected]> #\n# Copyright 2012 Zearin <[email protected]> #\n# Copyright 2013 AKFish <[email protected]> #\n# Copyright 2013 Vincent Jacques <[email protected]> #\n# #\n# This file is part of PyGithub. #\n# http://pygithub.github.io/PyGithub/v1/index.html #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #\n# #\n# ##############################################################################\n\nimport github.GithubObject\nimport github.PaginatedList\n\nimport github.GistComment\nimport github.NamedUser\nimport github.GistFile\nimport github.GistHistoryState\n\n\nclass Gist(github.GithubObject.CompletableGithubObject):\n \"\"\"\n This class represents Gists as returned for example by http://developer.github.com/v3/todo\n \"\"\"\n\n def __repr__(self):\n return self.get__repr__({\"id\": self._id.value})\n\n @property\n def comments(self):\n \"\"\"\n :type: integer\n \"\"\"\n self._completeIfNotSet(self._comments)\n return self._comments.value\n\n @property\n def comments_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._comments_url)\n return self._comments_url.value\n\n @property\n def commits_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._commits_url)\n return self._commits_url.value\n\n @property\n def created_at(self):\n \"\"\"\n :type: datetime.datetime\n \"\"\"\n self._completeIfNotSet(self._created_at)\n return self._created_at.value\n\n @property\n def description(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._description)\n return self._description.value\n\n @property\n def files(self):\n \"\"\"\n :type: dict of string to :class:`github.GistFile.GistFile`\n \"\"\"\n self._completeIfNeeded()\n return self._files.value\n\n @property\n def fork_of(self):\n \"\"\"\n :type: :class:`github.Gist.Gist`\n \"\"\"\n self._completeIfNotSet(self._fork_of)\n return self._fork_of.value\n\n @property\n def forks(self):\n \"\"\"\n :type: list of :class:`github.Gist.Gist`\n \"\"\"\n self._completeIfNotSet(self._forks)\n return self._forks.value\n\n @property\n def forks_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._forks_url)\n return self._forks_url.value\n\n @property\n def git_pull_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._git_pull_url)\n return self._git_pull_url.value\n\n @property\n def git_push_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._git_push_url)\n return self._git_push_url.value\n\n @property\n def history(self):\n \"\"\"\n :type: list of :class:`github.GistHistoryState.GistHistoryState`\n \"\"\"\n self._completeIfNotSet(self._history)\n return self._history.value\n\n 
@property\n def html_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._html_url)\n return self._html_url.value\n\n @property\n def id(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._id)\n return self._id.value\n\n @property\n def owner(self):\n \"\"\"\n :type: :class:`github.NamedUser.NamedUser`\n \"\"\"\n self._completeIfNotSet(self._owner)\n return self._owner.value\n\n @property\n def public(self):\n \"\"\"\n :type: bool\n \"\"\"\n self._completeIfNotSet(self._public)\n return self._public.value\n\n @property\n def updated_at(self):\n \"\"\"\n :type: datetime.datetime\n \"\"\"\n self._completeIfNotSet(self._updated_at)\n return self._updated_at.value\n\n @property\n def url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._url)\n return self._url.value\n\n @property\n def user(self):\n \"\"\"\n :type: :class:`github.NamedUser.NamedUser`\n \"\"\"\n self._completeIfNotSet(self._user)\n return self._user.value\n\n def create_comment(self, body):\n \"\"\"\n :calls: `POST /gists/:gist_id/comments <http://developer.github.com/v3/gists/comments>`_\n :param body: string\n :rtype: :class:`github.GistComment.GistComment`\n \"\"\"\n assert isinstance(body, (str, unicode)), body\n post_parameters = {\n \"body\": body,\n }\n headers, data = self._requester.requestJsonAndCheck(\n \"POST\",\n self.url + \"/comments\",\n input=post_parameters\n )\n return github.GistComment.GistComment(self._requester, headers, data, completed=True)\n\n def create_fork(self):\n \"\"\"\n :calls: `POST /gists/:id/forks <http://developer.github.com/v3/gists>`_\n :rtype: :class:`github.Gist.Gist`\n \"\"\"\n headers, data = self._requester.requestJsonAndCheck(\n \"POST\",\n self.url + \"/forks\"\n )\n return Gist(self._requester, headers, data, completed=True)\n\n def delete(self):\n \"\"\"\n :calls: `DELETE /gists/:id <http://developer.github.com/v3/gists>`_\n :rtype: None\n \"\"\"\n headers, data = self._requester.requestJsonAndCheck(\n \"DELETE\",\n self.url\n )\n\n def edit(self, description=github.GithubObject.NotSet, files=github.GithubObject.NotSet):\n \"\"\"\n :calls: `PATCH /gists/:id <http://developer.github.com/v3/gists>`_\n :param description: string\n :param files: dict of string to :class:`github.InputFileContent.InputFileContent`\n :rtype: None\n \"\"\"\n assert description is github.GithubObject.NotSet or isinstance(description, (str, unicode)), description\n assert files is github.GithubObject.NotSet or all(element is None or isinstance(element, github.InputFileContent) for element in files.itervalues()), files\n post_parameters = dict()\n if description is not github.GithubObject.NotSet:\n post_parameters[\"description\"] = description\n if files is not github.GithubObject.NotSet:\n post_parameters[\"files\"] = dict((key, None if value is None else value._identity) for key, value in files.iteritems())\n headers, data = self._requester.requestJsonAndCheck(\n \"PATCH\",\n self.url,\n input=post_parameters\n )\n self._useAttributes(data)\n\n def get_comment(self, id):\n \"\"\"\n :calls: `GET /gists/:gist_id/comments/:id <http://developer.github.com/v3/gists/comments>`_\n :param id: integer\n :rtype: :class:`github.GistComment.GistComment`\n \"\"\"\n assert isinstance(id, (int, long)), id\n headers, data = self._requester.requestJsonAndCheck(\n \"GET\",\n self.url + \"/comments/\" + str(id)\n )\n return github.GistComment.GistComment(self._requester, headers, data, completed=True)\n\n def get_comments(self):\n \"\"\"\n :calls: `GET 
/gists/:gist_id/comments <http://developer.github.com/v3/gists/comments>`_\n :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.GistComment.GistComment`\n \"\"\"\n return github.PaginatedList.PaginatedList(\n github.GistComment.GistComment,\n self._requester,\n self.url + \"/comments\",\n None\n )\n\n def is_starred(self):\n \"\"\"\n :calls: `GET /gists/:id/star <http://developer.github.com/v3/gists>`_\n :rtype: bool\n \"\"\"\n status, headers, data = self._requester.requestJson(\n \"GET\",\n self.url + \"/star\"\n )\n return status == 204\n\n def reset_starred(self):\n \"\"\"\n :calls: `DELETE /gists/:id/star <http://developer.github.com/v3/gists>`_\n :rtype: None\n \"\"\"\n headers, data = self._requester.requestJsonAndCheck(\n \"DELETE\",\n self.url + \"/star\"\n )\n\n def set_starred(self):\n \"\"\"\n :calls: `PUT /gists/:id/star <http://developer.github.com/v3/gists>`_\n :rtype: None\n \"\"\"\n headers, data = self._requester.requestJsonAndCheck(\n \"PUT\",\n self.url + \"/star\"\n )\n\n def _initAttributes(self):\n self._comments = github.GithubObject.NotSet\n self._comments_url = github.GithubObject.NotSet\n self._commits_url = github.GithubObject.NotSet\n self._created_at = github.GithubObject.NotSet\n self._description = github.GithubObject.NotSet\n self._files = github.GithubObject.NotSet\n self._fork_of = github.GithubObject.NotSet\n self._forks = github.GithubObject.NotSet\n self._forks_url = github.GithubObject.NotSet\n self._git_pull_url = github.GithubObject.NotSet\n self._git_push_url = github.GithubObject.NotSet\n self._history = github.GithubObject.NotSet\n self._html_url = github.GithubObject.NotSet\n self._id = github.GithubObject.NotSet\n self._owner = github.GithubObject.NotSet\n self._public = github.GithubObject.NotSet\n self._updated_at = github.GithubObject.NotSet\n self._url = github.GithubObject.NotSet\n self._user = github.GithubObject.NotSet\n\n def _useAttributes(self, attributes):\n if \"comments\" in attributes: # pragma no branch\n self._comments = self._makeIntAttribute(attributes[\"comments\"])\n if \"comments_url\" in attributes: # pragma no branch\n self._comments_url = self._makeStringAttribute(attributes[\"comments_url\"])\n if \"commits_url\" in attributes: # pragma no branch\n self._commits_url = self._makeStringAttribute(attributes[\"commits_url\"])\n if \"created_at\" in attributes: # pragma no branch\n self._created_at = self._makeDatetimeAttribute(attributes[\"created_at\"])\n if \"description\" in attributes: # pragma no branch\n self._description = self._makeStringAttribute(attributes[\"description\"])\n if \"files\" in attributes: # pragma no branch\n self._files = self._makeDictOfStringsToClassesAttribute(github.GistFile.GistFile, attributes[\"files\"])\n if \"fork_of\" in attributes: # pragma no branch\n self._fork_of = self._makeClassAttribute(Gist, attributes[\"fork_of\"])\n if \"forks\" in attributes: # pragma no branch\n self._forks = self._makeListOfClassesAttribute(Gist, attributes[\"forks\"])\n if \"forks_url\" in attributes: # pragma no branch\n self._forks_url = self._makeStringAttribute(attributes[\"forks_url\"])\n if \"git_pull_url\" in attributes: # pragma no branch\n self._git_pull_url = self._makeStringAttribute(attributes[\"git_pull_url\"])\n if \"git_push_url\" in attributes: # pragma no branch\n self._git_push_url = self._makeStringAttribute(attributes[\"git_push_url\"])\n if \"history\" in attributes: # pragma no branch\n self._history = 
self._makeListOfClassesAttribute(github.GistHistoryState.GistHistoryState, attributes[\"history\"])\n if \"html_url\" in attributes: # pragma no branch\n self._html_url = self._makeStringAttribute(attributes[\"html_url\"])\n if \"id\" in attributes: # pragma no branch\n self._id = self._makeStringAttribute(attributes[\"id\"])\n if \"owner\" in attributes: # pragma no branch\n self._owner = self._makeClassAttribute(github.NamedUser.NamedUser, attributes[\"owner\"])\n if \"public\" in attributes: # pragma no branch\n self._public = self._makeBoolAttribute(attributes[\"public\"])\n if \"updated_at\" in attributes: # pragma no branch\n self._updated_at = self._makeDatetimeAttribute(attributes[\"updated_at\"])\n if \"url\" in attributes: # pragma no branch\n self._url = self._makeStringAttribute(attributes[\"url\"])\n if \"user\" in attributes: # pragma no branch\n self._user = self._makeClassAttribute(github.NamedUser.NamedUser, attributes[\"user\"])\n",
"path": "github/Gist.py"
}
] | diff --git a/github/Gist.py b/github/Gist.py
index 8d75e1e10e..12e83c9547 100644
--- a/github/Gist.py
+++ b/github/Gist.py
@@ -88,7 +88,7 @@ def files(self):
"""
:type: dict of string to :class:`github.GistFile.GistFile`
"""
- self._completeIfNotSet(self._files)
+ self._completeIfNeeded()
return self._files.value
@property
|
projectmesa__mesa-1432 | `OSError: Int or String expected` when running boid_flockers example
**Describe the bug**
Running the `boid_flockers` example results in `OSError: Int or String expected`.
**Expected behavior**
Examples should be able to run without errors.
**Additional context**
This is likely due to a breaking change introduced in https://github.com/projectmesa/mesa/pull/1403: a new parameter `port` was added before `model_params` in `ModularServer.__init__()`, i.e.,
```diff
def __init__(
- self, model_cls, visualization_elements, name="Mesa Model", model_params=None
+ self,
+ model_cls,
+ visualization_elements,
+ name="Mesa Model",
+ port=None,
+ model_params=None,
):
```
As a result, in the `boid_flockers` example, `model_params` gets passed into `__init__()` as `port`:
```python
server = mesa.visualization.ModularServer(
BoidFlockers, [boid_canvas], "Boids", model_params
)
```
Examples such as `bank_reserves` are not affected:
```python
server = mesa.visualization.ModularServer(
BankReserves,
[canvas_element, chart_element],
"Bank Reserves Model",
model_params=model_params,
)
```
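
Until the signature is fixed, a workaround in the affected example is to pass `model_params` by keyword, the same way `bank_reserves` already does. A minimal sketch, reusing the names (`BoidFlockers`, `boid_canvas`, `model_params`) from the snippets above:
```python
import mesa

# Workaround sketch: pass model_params by keyword so the new positional
# `port` parameter cannot swallow the params dict.
# BoidFlockers, boid_canvas and model_params are the names used in the
# example's server.py quoted above.
server = mesa.visualization.ModularServer(
    BoidFlockers,
    [boid_canvas],
    "Boids",
    model_params=model_params,
)
```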
| [
{
"content": "\"\"\"\nModularServer\n=============\n\nA visualization server which renders a model via one or more elements.\n\nThe concept for the modular visualization server as follows:\nA visualization is composed of VisualizationElements, each of which defines how\nto generate some visualization from a model instance and render it on the\nclient. VisualizationElements may be anything from a simple text display to\na multilayered HTML5 canvas.\n\nThe actual server is launched with one or more VisualizationElements;\nit runs the model object through each of them, generating data to be sent to\nthe client. The client page is also generated based on the JavaScript code\nprovided by each element.\n\nThis file consists of the following classes:\n\nVisualizationElement: Parent class for all other visualization elements, with\n the minimal necessary options.\nPageHandler: The handler for the visualization page, generated from a template\n and built from the various visualization elements.\nSocketHandler: Handles the websocket connection between the client page and\n the server.\nModularServer: The overall visualization application class which stores and\n controls the model and visualization instance.\n\n\nModularServer should *not* need to be subclassed on a model-by-model basis; it\nshould be primarily a pass-through for VisualizationElement subclasses, which\ndefine the actual visualization specifics.\n\nFor example, suppose we have created two visualization elements for our model,\ncalled canvasvis and graphvis; we would launch a server with:\n\n server = ModularServer(MyModel, [canvasvis, graphvis], name=\"My Model\")\n server.launch()\n\nThe client keeps track of what step it is showing. Clicking the Step button in\nthe browser sends a message requesting the viz_state corresponding to the next\nstep position, which is then sent back to the client via the websocket.\n\nThe websocket protocol is as follows:\nEach message is a JSON object, with a \"type\" property which defines the rest of\nthe structure.\n\nServer -> Client:\n Send over the model state to visualize.\n Model state is a list, with each element corresponding to a div; each div\n is expected to have a render function associated with it, which knows how\n to render that particular data. The example below includes two elements:\n the first is data for a CanvasGrid, the second for a raw text display.\n\n {\n \"type\": \"viz_state\",\n \"data\": [{0:[ {\"Shape\": \"circle\", \"x\": 0, \"y\": 0, \"r\": 0.5,\n \"Color\": \"#AAAAAA\", \"Filled\": \"true\", \"Layer\": 0,\n \"text\": 'A', \"text_color\": \"white\" }]},\n \"Shape Count: 1\"]\n }\n\n Informs the client that the model is over.\n {\"type\": \"end\"}\n\n Informs the client of the current model's parameters\n {\n \"type\": \"model_params\",\n \"params\": 'dict' of model params, (i.e. 
{arg_1: val_1, ...})\n }\n\nClient -> Server:\n Reset the model.\n TODO: Allow this to come with parameters\n {\n \"type\": \"reset\"\n }\n\n Get a given state.\n {\n \"type\": \"get_step\",\n \"step:\" index of the step to get.\n }\n\n Submit model parameter updates\n {\n \"type\": \"submit_params\",\n \"param\": name of model parameter\n \"value\": new value for 'param'\n }\n\n Get the model's parameters\n {\n \"type\": \"get_params\"\n }\n\n\"\"\"\nimport asyncio\nimport os\nimport platform\nimport tornado.autoreload\nimport tornado.ioloop\nimport tornado.web\nimport tornado.websocket\nimport tornado.escape\nimport tornado.gen\nimport webbrowser\n\nfrom mesa.visualization.UserParam import UserSettableParameter, UserParam\n\n# Suppress several pylint warnings for this file.\n# Attributes being defined outside of init is a Tornado feature.\n# pylint: disable=attribute-defined-outside-init\n\n# Change the event loop policy for windows\nif platform.system() == \"Windows\" and platform.python_version_tuple() >= (\"3\", \"7\"):\n asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())\n\nD3_JS_FILE = \"external/d3-7.4.3.min.js\"\nCHART_JS_FILE = \"external/chart-3.6.1.min.js\"\n\n\ndef is_user_param(val):\n return isinstance(val, UserSettableParameter) or issubclass(\n val.__class__, UserParam\n )\n\n\nclass VisualizationElement:\n \"\"\"\n Defines an element of the visualization.\n\n Attributes:\n package_includes: A list of external JavaScript and CSS files to\n include that are part of the Mesa packages.\n local_includes: A list of JavaScript and CSS files that are local to\n the directory that the server is being run in.\n js_code: A JavaScript code string to instantiate the element.\n\n Methods:\n render: Takes a model object, and produces JSON data which can be sent\n to the client.\n\n \"\"\"\n\n package_includes = []\n local_includes = []\n js_code = \"\"\n render_args = {}\n\n def __init__(self):\n pass\n\n def render(self, model):\n \"\"\"Build visualization data from a model object.\n\n Args:\n model: A model object\n\n Returns:\n A JSON-ready object.\n\n \"\"\"\n return \"<b>VisualizationElement goes here</b>.\"\n\n\nclass TextElement(VisualizationElement):\n \"\"\"\n Module for drawing live-updating text.\n \"\"\"\n\n package_includes = [\"TextModule.js\"]\n js_code = \"elements.push(new TextModule());\"\n\n\n# =============================================================================\n# Actual Tornado code starts here:\n\n\nclass PageHandler(tornado.web.RequestHandler):\n \"\"\"Handler for the HTML template which holds the visualization.\"\"\"\n\n def get(self):\n elements = self.application.visualization_elements\n for i, element in enumerate(elements):\n element.index = i\n self.render(\n \"modular_template.html\",\n port=self.application.port,\n model_name=self.application.model_name,\n description=self.application.description,\n package_js_includes=self.application.package_js_includes,\n package_css_includes=self.application.package_css_includes,\n local_js_includes=self.application.local_js_includes,\n local_css_includes=self.application.local_css_includes,\n scripts=self.application.js_code,\n )\n\n\nclass SocketHandler(tornado.websocket.WebSocketHandler):\n \"\"\"Handler for websocket.\"\"\"\n\n def open(self):\n if self.application.verbose:\n print(\"Socket opened!\")\n self.write_message(\n {\"type\": \"model_params\", \"params\": self.application.user_params}\n )\n\n def check_origin(self, origin):\n return True\n\n @property\n def 
viz_state_message(self):\n return {\"type\": \"viz_state\", \"data\": self.application.render_model()}\n\n def on_message(self, message):\n \"\"\"Receiving a message from the websocket, parse, and act accordingly.\"\"\"\n if self.application.verbose:\n print(message)\n msg = tornado.escape.json_decode(message)\n\n if msg[\"type\"] == \"get_step\":\n if not self.application.model.running:\n self.write_message({\"type\": \"end\"})\n else:\n self.application.model.step()\n self.write_message(self.viz_state_message)\n\n elif msg[\"type\"] == \"reset\":\n self.application.reset_model()\n self.write_message(self.viz_state_message)\n\n elif msg[\"type\"] == \"submit_params\":\n param = msg[\"param\"]\n value = msg[\"value\"]\n\n # Is the param editable?\n if param in self.application.user_params:\n if is_user_param(self.application.model_kwargs[param]):\n self.application.model_kwargs[param].value = value\n else:\n self.application.model_kwargs[param] = value\n\n else:\n if self.application.verbose:\n print(\"Unexpected message!\")\n\n\nclass ModularServer(tornado.web.Application):\n \"\"\"Main visualization application.\"\"\"\n\n EXCLUDE_LIST = (\"width\", \"height\")\n\n def __init__(\n self,\n model_cls,\n visualization_elements,\n name=\"Mesa Model\",\n port=None,\n model_params=None,\n ):\n \"\"\"\n Args:\n model_cls: Mesa model class\n visualization_elements: visualisation elements\n name: A String for the model name\n port: Port the webserver listens to (int)\n Order of configuration:\n 1. Parameter to ModularServer.launch\n 2. Parameter to ModularServer()\n 3. Environment var PORT\n 4. Default value (8521)\n model_params: A dict of model parameters\n \"\"\"\n\n self.verbose = True\n self.max_steps = 100000\n\n if port is not None:\n self.port = port\n else:\n # Default port to listen on\n self.port = int(os.getenv(\"PORT\", 8521))\n\n # Handlers and other globals:\n page_handler = (r\"/\", PageHandler)\n socket_handler = (r\"/ws\", SocketHandler)\n static_handler = (\n r\"/static/(.*)\",\n tornado.web.StaticFileHandler,\n {\"path\": os.path.dirname(__file__) + \"/templates\"},\n )\n local_handler = (\n r\"/local/(.*)\",\n tornado.web.StaticFileHandler,\n {\"path\": \"\"},\n )\n\n self.handlers = [page_handler, socket_handler, static_handler, local_handler]\n\n self.settings = {\n \"debug\": True,\n \"autoreload\": False,\n \"template_path\": os.path.dirname(__file__) + \"/templates\",\n }\n\n \"\"\"Create a new visualization server with the given elements.\"\"\"\n if model_params is None:\n model_params = {}\n # Prep visualization elements:\n self.visualization_elements = self._auto_convert_functions_to_TextElements(\n visualization_elements\n )\n self.package_js_includes = set()\n self.package_css_includes = set()\n self.local_js_includes = set()\n self.local_css_includes = set()\n self.js_code = []\n for element in self.visualization_elements:\n for include_file in element.package_includes:\n if self._is_stylesheet(include_file):\n self.package_css_includes.add(include_file)\n else:\n self.package_js_includes.add(include_file)\n for include_file in element.local_includes:\n if self._is_stylesheet(include_file):\n self.local_css_includes.add(include_file)\n else:\n self.local_js_includes.add(include_file)\n self.js_code.append(element.js_code)\n\n # Initializing the model\n self.model_name = name\n self.model_cls = model_cls\n self.description = \"No description available\"\n if hasattr(model_cls, \"description\"):\n self.description = model_cls.description\n elif model_cls.__doc__ is 
not None:\n self.description = model_cls.__doc__\n\n self.model_kwargs = model_params\n self.reset_model()\n\n # Initializing the application itself:\n super().__init__(self.handlers, **self.settings)\n\n @property\n def user_params(self):\n result = {}\n for param, val in self.model_kwargs.items():\n if is_user_param(val):\n result[param] = val.json\n\n return result\n\n def reset_model(self):\n \"\"\"Reinstantiate the model object, using the current parameters.\"\"\"\n\n model_params = {}\n for key, val in self.model_kwargs.items():\n if is_user_param(val):\n if val.param_type == \"static_text\":\n # static_text is never used for setting params\n continue\n model_params[key] = val.value\n else:\n model_params[key] = val\n\n self.model = self.model_cls(**model_params)\n # We specify the `running` attribute here so that the user doesn't have\n # to define it explicitly in their model's __init__.\n self.model.running = True\n\n def render_model(self):\n \"\"\"Turn the current state of the model into a dictionary of\n visualizations\n\n \"\"\"\n visualization_state = []\n for element in self.visualization_elements:\n element_state = element.render(self.model)\n visualization_state.append(element_state)\n return visualization_state\n\n def launch(self, port=None, open_browser=True):\n \"\"\"Run the app.\"\"\"\n if port is not None:\n self.port = port\n url = f\"http://127.0.0.1:{self.port}\"\n print(f\"Interface starting at {url}\")\n self.listen(self.port)\n if open_browser:\n webbrowser.open(url)\n tornado.autoreload.start()\n tornado.ioloop.IOLoop.current().start()\n\n @staticmethod\n def _is_stylesheet(filename):\n return filename.lower().endswith(\".css\")\n\n def _auto_convert_fn_to_TextElement(self, x):\n \"\"\"\n Automatically convert a function to a TextElement object.\n See https://github.com/projectmesa/mesa/issues/1233.\n \"\"\"\n\n # Note: a class constructor is also a callable.\n if not callable(x):\n # i.e. not a function\n return x\n\n class MyTextElement(TextElement):\n def render(self, model):\n return x(model)\n\n return MyTextElement()\n\n def _auto_convert_functions_to_TextElements(self, visualization_elements):\n out_elements = [\n self._auto_convert_fn_to_TextElement(e) for e in visualization_elements\n ]\n return out_elements\n",
"path": "mesa/visualization/ModularVisualization.py"
}
] | [
{
"content": "\"\"\"\nModularServer\n=============\n\nA visualization server which renders a model via one or more elements.\n\nThe concept for the modular visualization server as follows:\nA visualization is composed of VisualizationElements, each of which defines how\nto generate some visualization from a model instance and render it on the\nclient. VisualizationElements may be anything from a simple text display to\na multilayered HTML5 canvas.\n\nThe actual server is launched with one or more VisualizationElements;\nit runs the model object through each of them, generating data to be sent to\nthe client. The client page is also generated based on the JavaScript code\nprovided by each element.\n\nThis file consists of the following classes:\n\nVisualizationElement: Parent class for all other visualization elements, with\n the minimal necessary options.\nPageHandler: The handler for the visualization page, generated from a template\n and built from the various visualization elements.\nSocketHandler: Handles the websocket connection between the client page and\n the server.\nModularServer: The overall visualization application class which stores and\n controls the model and visualization instance.\n\n\nModularServer should *not* need to be subclassed on a model-by-model basis; it\nshould be primarily a pass-through for VisualizationElement subclasses, which\ndefine the actual visualization specifics.\n\nFor example, suppose we have created two visualization elements for our model,\ncalled canvasvis and graphvis; we would launch a server with:\n\n server = ModularServer(MyModel, [canvasvis, graphvis], name=\"My Model\")\n server.launch()\n\nThe client keeps track of what step it is showing. Clicking the Step button in\nthe browser sends a message requesting the viz_state corresponding to the next\nstep position, which is then sent back to the client via the websocket.\n\nThe websocket protocol is as follows:\nEach message is a JSON object, with a \"type\" property which defines the rest of\nthe structure.\n\nServer -> Client:\n Send over the model state to visualize.\n Model state is a list, with each element corresponding to a div; each div\n is expected to have a render function associated with it, which knows how\n to render that particular data. The example below includes two elements:\n the first is data for a CanvasGrid, the second for a raw text display.\n\n {\n \"type\": \"viz_state\",\n \"data\": [{0:[ {\"Shape\": \"circle\", \"x\": 0, \"y\": 0, \"r\": 0.5,\n \"Color\": \"#AAAAAA\", \"Filled\": \"true\", \"Layer\": 0,\n \"text\": 'A', \"text_color\": \"white\" }]},\n \"Shape Count: 1\"]\n }\n\n Informs the client that the model is over.\n {\"type\": \"end\"}\n\n Informs the client of the current model's parameters\n {\n \"type\": \"model_params\",\n \"params\": 'dict' of model params, (i.e. 
{arg_1: val_1, ...})\n }\n\nClient -> Server:\n Reset the model.\n TODO: Allow this to come with parameters\n {\n \"type\": \"reset\"\n }\n\n Get a given state.\n {\n \"type\": \"get_step\",\n \"step:\" index of the step to get.\n }\n\n Submit model parameter updates\n {\n \"type\": \"submit_params\",\n \"param\": name of model parameter\n \"value\": new value for 'param'\n }\n\n Get the model's parameters\n {\n \"type\": \"get_params\"\n }\n\n\"\"\"\nimport asyncio\nimport os\nimport platform\nimport tornado.autoreload\nimport tornado.ioloop\nimport tornado.web\nimport tornado.websocket\nimport tornado.escape\nimport tornado.gen\nimport webbrowser\n\nfrom mesa.visualization.UserParam import UserSettableParameter, UserParam\n\n# Suppress several pylint warnings for this file.\n# Attributes being defined outside of init is a Tornado feature.\n# pylint: disable=attribute-defined-outside-init\n\n# Change the event loop policy for windows\nif platform.system() == \"Windows\" and platform.python_version_tuple() >= (\"3\", \"7\"):\n asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())\n\nD3_JS_FILE = \"external/d3-7.4.3.min.js\"\nCHART_JS_FILE = \"external/chart-3.6.1.min.js\"\n\n\ndef is_user_param(val):\n return isinstance(val, UserSettableParameter) or issubclass(\n val.__class__, UserParam\n )\n\n\nclass VisualizationElement:\n \"\"\"\n Defines an element of the visualization.\n\n Attributes:\n package_includes: A list of external JavaScript and CSS files to\n include that are part of the Mesa packages.\n local_includes: A list of JavaScript and CSS files that are local to\n the directory that the server is being run in.\n js_code: A JavaScript code string to instantiate the element.\n\n Methods:\n render: Takes a model object, and produces JSON data which can be sent\n to the client.\n\n \"\"\"\n\n package_includes = []\n local_includes = []\n js_code = \"\"\n render_args = {}\n\n def __init__(self):\n pass\n\n def render(self, model):\n \"\"\"Build visualization data from a model object.\n\n Args:\n model: A model object\n\n Returns:\n A JSON-ready object.\n\n \"\"\"\n return \"<b>VisualizationElement goes here</b>.\"\n\n\nclass TextElement(VisualizationElement):\n \"\"\"\n Module for drawing live-updating text.\n \"\"\"\n\n package_includes = [\"TextModule.js\"]\n js_code = \"elements.push(new TextModule());\"\n\n\n# =============================================================================\n# Actual Tornado code starts here:\n\n\nclass PageHandler(tornado.web.RequestHandler):\n \"\"\"Handler for the HTML template which holds the visualization.\"\"\"\n\n def get(self):\n elements = self.application.visualization_elements\n for i, element in enumerate(elements):\n element.index = i\n self.render(\n \"modular_template.html\",\n port=self.application.port,\n model_name=self.application.model_name,\n description=self.application.description,\n package_js_includes=self.application.package_js_includes,\n package_css_includes=self.application.package_css_includes,\n local_js_includes=self.application.local_js_includes,\n local_css_includes=self.application.local_css_includes,\n scripts=self.application.js_code,\n )\n\n\nclass SocketHandler(tornado.websocket.WebSocketHandler):\n \"\"\"Handler for websocket.\"\"\"\n\n def open(self):\n if self.application.verbose:\n print(\"Socket opened!\")\n self.write_message(\n {\"type\": \"model_params\", \"params\": self.application.user_params}\n )\n\n def check_origin(self, origin):\n return True\n\n @property\n def 
viz_state_message(self):\n return {\"type\": \"viz_state\", \"data\": self.application.render_model()}\n\n def on_message(self, message):\n \"\"\"Receiving a message from the websocket, parse, and act accordingly.\"\"\"\n if self.application.verbose:\n print(message)\n msg = tornado.escape.json_decode(message)\n\n if msg[\"type\"] == \"get_step\":\n if not self.application.model.running:\n self.write_message({\"type\": \"end\"})\n else:\n self.application.model.step()\n self.write_message(self.viz_state_message)\n\n elif msg[\"type\"] == \"reset\":\n self.application.reset_model()\n self.write_message(self.viz_state_message)\n\n elif msg[\"type\"] == \"submit_params\":\n param = msg[\"param\"]\n value = msg[\"value\"]\n\n # Is the param editable?\n if param in self.application.user_params:\n if is_user_param(self.application.model_kwargs[param]):\n self.application.model_kwargs[param].value = value\n else:\n self.application.model_kwargs[param] = value\n\n else:\n if self.application.verbose:\n print(\"Unexpected message!\")\n\n\nclass ModularServer(tornado.web.Application):\n \"\"\"Main visualization application.\"\"\"\n\n EXCLUDE_LIST = (\"width\", \"height\")\n\n def __init__(\n self,\n model_cls,\n visualization_elements,\n name=\"Mesa Model\",\n model_params=None,\n port=None,\n ):\n \"\"\"\n Args:\n model_cls: Mesa model class\n visualization_elements: visualisation elements\n name: A String for the model name\n port: Port the webserver listens to (int)\n Order of configuration:\n 1. Parameter to ModularServer.launch\n 2. Parameter to ModularServer()\n 3. Environment var PORT\n 4. Default value (8521)\n model_params: A dict of model parameters\n \"\"\"\n\n self.verbose = True\n self.max_steps = 100000\n\n if port is not None:\n self.port = port\n else:\n # Default port to listen on\n self.port = int(os.getenv(\"PORT\", 8521))\n\n # Handlers and other globals:\n page_handler = (r\"/\", PageHandler)\n socket_handler = (r\"/ws\", SocketHandler)\n static_handler = (\n r\"/static/(.*)\",\n tornado.web.StaticFileHandler,\n {\"path\": os.path.dirname(__file__) + \"/templates\"},\n )\n local_handler = (\n r\"/local/(.*)\",\n tornado.web.StaticFileHandler,\n {\"path\": \"\"},\n )\n\n self.handlers = [page_handler, socket_handler, static_handler, local_handler]\n\n self.settings = {\n \"debug\": True,\n \"autoreload\": False,\n \"template_path\": os.path.dirname(__file__) + \"/templates\",\n }\n\n \"\"\"Create a new visualization server with the given elements.\"\"\"\n if model_params is None:\n model_params = {}\n # Prep visualization elements:\n self.visualization_elements = self._auto_convert_functions_to_TextElements(\n visualization_elements\n )\n self.package_js_includes = set()\n self.package_css_includes = set()\n self.local_js_includes = set()\n self.local_css_includes = set()\n self.js_code = []\n for element in self.visualization_elements:\n for include_file in element.package_includes:\n if self._is_stylesheet(include_file):\n self.package_css_includes.add(include_file)\n else:\n self.package_js_includes.add(include_file)\n for include_file in element.local_includes:\n if self._is_stylesheet(include_file):\n self.local_css_includes.add(include_file)\n else:\n self.local_js_includes.add(include_file)\n self.js_code.append(element.js_code)\n\n # Initializing the model\n self.model_name = name\n self.model_cls = model_cls\n self.description = \"No description available\"\n if hasattr(model_cls, \"description\"):\n self.description = model_cls.description\n elif model_cls.__doc__ is 
not None:\n self.description = model_cls.__doc__\n\n self.model_kwargs = model_params\n self.reset_model()\n\n # Initializing the application itself:\n super().__init__(self.handlers, **self.settings)\n\n @property\n def user_params(self):\n result = {}\n for param, val in self.model_kwargs.items():\n if is_user_param(val):\n result[param] = val.json\n\n return result\n\n def reset_model(self):\n \"\"\"Reinstantiate the model object, using the current parameters.\"\"\"\n\n model_params = {}\n for key, val in self.model_kwargs.items():\n if is_user_param(val):\n if val.param_type == \"static_text\":\n # static_text is never used for setting params\n continue\n model_params[key] = val.value\n else:\n model_params[key] = val\n\n self.model = self.model_cls(**model_params)\n # We specify the `running` attribute here so that the user doesn't have\n # to define it explicitly in their model's __init__.\n self.model.running = True\n\n def render_model(self):\n \"\"\"Turn the current state of the model into a dictionary of\n visualizations\n\n \"\"\"\n visualization_state = []\n for element in self.visualization_elements:\n element_state = element.render(self.model)\n visualization_state.append(element_state)\n return visualization_state\n\n def launch(self, port=None, open_browser=True):\n \"\"\"Run the app.\"\"\"\n if port is not None:\n self.port = port\n url = f\"http://127.0.0.1:{self.port}\"\n print(f\"Interface starting at {url}\")\n self.listen(self.port)\n if open_browser:\n webbrowser.open(url)\n tornado.autoreload.start()\n tornado.ioloop.IOLoop.current().start()\n\n @staticmethod\n def _is_stylesheet(filename):\n return filename.lower().endswith(\".css\")\n\n def _auto_convert_fn_to_TextElement(self, x):\n \"\"\"\n Automatically convert a function to a TextElement object.\n See https://github.com/projectmesa/mesa/issues/1233.\n \"\"\"\n\n # Note: a class constructor is also a callable.\n if not callable(x):\n # i.e. not a function\n return x\n\n class MyTextElement(TextElement):\n def render(self, model):\n return x(model)\n\n return MyTextElement()\n\n def _auto_convert_functions_to_TextElements(self, visualization_elements):\n out_elements = [\n self._auto_convert_fn_to_TextElement(e) for e in visualization_elements\n ]\n return out_elements\n",
"path": "mesa/visualization/ModularVisualization.py"
}
] | diff --git a/mesa/visualization/ModularVisualization.py b/mesa/visualization/ModularVisualization.py
index cc6d727bb97..9aa2c4b53dd 100644
--- a/mesa/visualization/ModularVisualization.py
+++ b/mesa/visualization/ModularVisualization.py
@@ -257,8 +257,8 @@ def __init__(
model_cls,
visualization_elements,
name="Mesa Model",
- port=None,
model_params=None,
+ port=None,
):
"""
Args:
|
google__jax-1096 | jaxlib build w/ cuda: File not found during compilation
I'm compiling `jaxlib` with CUDA 10.0 on Ubuntu 18.04. The build fails with the following error:
```
$ python3 build/build.py --enable_cuda --cuda_path /usr/local/cuda-10.0/ --cudnn_path /usr/local/cuda-10.0/ --enable_march_native
[...]
ERROR: /home/clem/.cache/bazel/_bazel_clem/ffaac3f7c6ad1cb26f04f1933452eef6/external/nccl_archive/BUILD.bazel:53:1: error while parsing .d file: /h
ome/clem/.cache/bazel/_bazel_clem/ffaac3f7c6ad1cb26f04f1933452eef6/execroot/__main__/bazel-out/k8-opt/bin/external/nccl_archive/_objs/device_lib/pr
od_i32_reduce_scatter.cu.d (No such file or directory)
nvcc fatal : Could not open input file /tmp/tmpxft_00000004_00000000-6_prod_i32_reduce_scatter.cu.compute_35.cpp1.ii
Target //build:install_xla_in_source_tree failed to build
INFO: Elapsed time: 278.116s, Critical Path: 69.60s
INFO: 1281 processes: 1281 linux-sandbox.
FAILED: Build did NOT complete successfully
FAILED: Build did NOT complete successfully
Traceback (most recent call last):
File "build/build.py", line 331, in <module>
main()
File "build/build.py", line 326, in main
[":install_xla_in_source_tree", os.getcwd()])
File "build/build.py", line 50, in shell
output = subprocess.check_output(cmd)
File "/usr/lib/python3.6/subprocess.py", line 356, in check_output
**kwargs).stdout
File "/usr/lib/python3.6/subprocess.py", line 438, in run
output=stdout, stderr=stderr)
subprocess.CalledProcessError: Command '['./bazel-0.24.1-linux-x86_64', 'run', '--verbose_failures=true', '--config=opt', '--config=mkl_open_source
_only', '--config=cuda', ':install_xla_in_source_tree', '/home/clem/git/jax/build']' returned non-zero exit status 1.
```
Above this error message there are only compiler warnings, and no errors that would explain why the file was not created. Am I missing something? Or might there be a file-name bug? Thanks a lot for your help!
---
I'm on a fresh Ubuntu 18.04.2 install with CUDA 10.0, cudnn and driver version 410.48.
[Full log](http://paste.ubuntu.com/p/tvXBHbr5gw/)
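
(For reference, a common workaround for this kind of nvcc "Could not open input file /tmp/tmpxft_..." failure is to run the build without Bazel's sandbox, i.e. with the `standalone` spawn strategy. A minimal sketch of appending the relevant flags to the `.bazelrc` that `build/build.py` generates; the `../.bazelrc` path mirrors `write_bazelrc`, and this is an illustration rather than an official fix:)

```python
# Sketch: append non-sandboxed spawn-strategy flags to the generated .bazelrc.
# Running nvcc outside the sandbox keeps its /tmp intermediates visible to
# the later compile steps.
with open("../.bazelrc", "a") as f:
    f.write("build --spawn_strategy=standalone\n")
    f.write("build --strategy=Genrule=standalone\n")
```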
| [
{
"content": "#!/usr/bin/python\n#\n# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Helper script for building JAX's libjax easily.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport collections\nimport hashlib\nimport os\nimport platform\nimport re\nimport shutil\nimport stat\nimport subprocess\nimport sys\nimport urllib\n\n# pylint: disable=g-import-not-at-top\nif hasattr(urllib, \"urlretrieve\"):\n urlretrieve = urllib.urlretrieve\nelse:\n import urllib.request\n urlretrieve = urllib.request.urlretrieve\n\nif hasattr(shutil, \"which\"):\n which = shutil.which\nelse:\n from distutils.spawn import find_executable as which\n# pylint: enable=g-import-not-at-top\n\n\ndef shell(cmd):\n output = subprocess.check_output(cmd)\n return output.decode(\"UTF-8\").strip()\n\n\n# Python\n\ndef get_python_bin_path(python_bin_path_flag):\n \"\"\"Returns the path to the Python interpreter to use.\"\"\"\n return python_bin_path_flag or sys.executable\n\n\n# Bazel\n\nBAZEL_BASE_URI = \"https://github.com/bazelbuild/bazel/releases/download/0.24.1/\"\nBazelPackage = collections.namedtuple(\"BazelPackage\", [\"file\", \"sha256\"])\nbazel_packages = {\n \"Linux\":\n BazelPackage(\n file=\"bazel-0.24.1-linux-x86_64\",\n sha256=\n \"e18e2877e18a447eb5d94f5efbec375366d82af6443c6a83a93c62657a7b1c32\"),\n \"Darwin\":\n BazelPackage(\n file=\"bazel-0.24.1-darwin-x86_64\",\n sha256=\n \"cf763752550050d117e03659aaa6ccd6f97da1f983a6029300a497fdaeaaec46\"),\n}\n\n\ndef download_and_verify_bazel():\n \"\"\"Downloads a bazel binary from Github, verifying its SHA256 hash.\"\"\"\n package = bazel_packages.get(platform.system())\n if package is None:\n return None\n\n if not os.access(package.file, os.X_OK):\n uri = BAZEL_BASE_URI + package.file\n sys.stdout.write(\"Downloading bazel from: {}\\n\".format(uri))\n\n def progress(block_count, block_size, total_size):\n if total_size <= 0:\n total_size = 170**6\n progress = (block_count * block_size) / total_size\n num_chars = 40\n progress_chars = int(num_chars * progress)\n sys.stdout.write(\"{} [{}{}] {}%\\r\".format(\n package.file, \"#\" * progress_chars,\n \".\" * (num_chars - progress_chars), int(progress * 100.0)))\n\n tmp_path, _ = urlretrieve(uri, None, progress)\n sys.stdout.write(\"\\n\")\n\n # Verify that the downloaded Bazel binary has the expected SHA256.\n downloaded_file = open(tmp_path, \"rb\")\n contents = downloaded_file.read()\n downloaded_file.close()\n digest = hashlib.sha256(contents).hexdigest()\n if digest != package.sha256:\n print(\n \"Checksum mismatch for downloaded bazel binary (expected {}; got {}).\"\n .format(package.sha256, digest))\n sys.exit(-1)\n\n # Write the file as the bazel file name.\n out_file = open(package.file, \"wb\")\n out_file.write(contents)\n out_file.close()\n\n # Mark the file as executable.\n st = os.stat(package.file)\n os.chmod(package.file,\n st.st_mode | stat.S_IXUSR | stat.S_IXGRP | 
stat.S_IXOTH)\n\n return \"./\" + package.file\n\n\ndef get_bazel_path(bazel_path_flag):\n \"\"\"Returns the path to a Bazel binary, downloading Bazel if not found.\"\"\"\n if bazel_path_flag:\n return bazel_path_flag\n\n bazel = which(\"bazel\")\n if bazel:\n return bazel\n\n bazel = download_and_verify_bazel()\n if bazel:\n return bazel\n\n print(\"Cannot find or download bazel. Please install bazel.\")\n sys.exit(-1)\n\n\ndef check_bazel_version(bazel_path, min_version, max_version):\n \"\"\"Checks Bazel's version is in the range [`min_version`, `max_version`).\"\"\"\n version_output = shell([bazel_path, \"--bazelrc=/dev/null\", \"version\"])\n match = re.search(\"Build label: *([0-9\\\\.]+)[^0-9\\\\.]\", version_output)\n if match is None:\n print(\"Warning: bazel installation is not a release version. Make sure \"\n \"bazel is at least {}\".format(min_version))\n return\n version = match.group(1)\n min_ints = [int(x) for x in min_version.split(\".\")]\n actual_ints = [int(x) for x in match.group(1).split(\".\")]\n if min_ints > actual_ints:\n print(\"Outdated bazel revision (>= {} required, found {})\".format(\n min_version, version))\n sys.exit(0)\n if max_version is not None:\n max_ints = [int(x) for x in max_version.split(\".\")]\n if actual_ints >= max_ints:\n print(\"Please downgrade your bazel revision to build JAX (>= {} and < {}\"\n \" required, found {})\".format(min_version, max_version, version))\n sys.exit(0)\n\n\nBAZELRC_TEMPLATE = \"\"\"\nbuild --action_env PYTHON_BIN_PATH=\"{python_bin_path}\"\nbuild --python_path=\"{python_bin_path}\"\nbuild --action_env TF_NEED_CUDA=\"{tf_need_cuda}\"\nbuild --distinct_host_configuration=false\nbuild --copt=-Wno-sign-compare\nbuild -c opt\nbuild:opt --copt=-march=native\nbuild:opt --host_copt=-march=native\nbuild:mkl_open_source_only --define=tensorflow_mkldnn_contraction_kernel=1\n\n# Sets the default Apple platform to macOS.\nbuild --apple_platform_type=macos\n\n# Disable enabled-by-default TensorFlow features that we don't care about.\nbuild --define=no_aws_support=true\nbuild --define=no_gcp_support=true\nbuild --define=no_hdfs_support=true\nbuild --define=no_kafka_support=true\nbuild --define=no_ignite_support=true\nbuild --define=grpc_no_ares=true\n\nbuild:cuda --crosstool_top=@local_config_cuda//crosstool:toolchain\nbuild:cuda --define=using_cuda=true --define=using_cuda_nvcc=true\n\"\"\"\n\n\n\ndef write_bazelrc(cuda_toolkit_path=None, cudnn_install_path=None, **kwargs):\n f = open(\"../.bazelrc\", \"w\")\n f.write(BAZELRC_TEMPLATE.format(**kwargs))\n if cuda_toolkit_path:\n f.write(\"build --action_env CUDA_TOOLKIT_PATH=\\\"{cuda_toolkit_path}\\\"\\n\"\n .format(cuda_toolkit_path=cuda_toolkit_path))\n if cudnn_install_path:\n f.write(\"build --action_env CUDNN_INSTALL_PATH=\\\"{cudnn_install_path}\\\"\\n\"\n .format(cudnn_install_path=cudnn_install_path))\n f.close()\n\n\nBANNER = r\"\"\"\n _ _ __ __\n | | / \\ \\ \\/ /\n _ | |/ _ \\ \\ /\n| |_| / ___ \\/ \\\n \\___/_/ \\/_/\\_\\\n\n\"\"\"\n\nEPILOG = \"\"\"\n\nFrom the 'build' directory in the JAX repository, run\n python build.py\nor\n python3 build.py\nto download and build JAX's XLA (jaxlib) dependency.\n\"\"\"\n\n\ndef _parse_string_as_bool(s):\n \"\"\"Parses a string as a boolean argument.\"\"\"\n lower = s.lower()\n if lower == \"true\":\n return True\n elif lower == \"false\":\n return False\n else:\n raise ValueError(\"Expected either 'true' or 'false'; got {}\".format(s))\n\n\ndef add_boolean_argument(parser, name, default=False, help_str=None):\n \"\"\"Creates a 
boolean flag.\"\"\"\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\n \"--\" + name,\n nargs=\"?\",\n default=default,\n const=True,\n type=_parse_string_as_bool,\n help=help_str)\n group.add_argument(\"--no\" + name, dest=name, action=\"store_false\")\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"Builds libjax from source.\", epilog=EPILOG)\n parser.add_argument(\n \"--bazel_path\",\n help=\"Path to the Bazel binary to use. The default is to find bazel via \"\n \"the PATH; if none is found, downloads a fresh copy of bazel from \"\n \"GitHub.\")\n parser.add_argument(\n \"--python_bin_path\",\n help=\"Path to Python binary to use. The default is the Python \"\n \"interpreter used to run the build script.\")\n add_boolean_argument(\n parser,\n \"enable_march_native\",\n default=False,\n help_str=\"Generate code targeted to the current machine? This may \"\n \"increase performance, but may generate code that does not run on \"\n \"older machines.\")\n add_boolean_argument(\n parser,\n \"enable_mkl_dnn\",\n default=True,\n help_str=\"Should we build with MKL-DNN enabled?\")\n add_boolean_argument(\n parser,\n \"enable_cuda\",\n help_str=\"Should we build with CUDA enabled? Requires CUDA and CuDNN.\")\n parser.add_argument(\n \"--cuda_path\",\n default=None,\n help=\"Path to the CUDA toolkit.\")\n parser.add_argument(\n \"--cudnn_path\",\n default=None,\n help=\"Path to CUDNN libraries.\")\n args = parser.parse_args()\n\n print(BANNER)\n os.chdir(os.path.dirname(__file__ or args.prog) or '.')\n\n # Find a working Bazel.\n bazel_path = get_bazel_path(args.bazel_path)\n check_bazel_version(bazel_path, min_version=\"0.24.0\", max_version=None)\n print(\"Bazel binary path: {}\".format(bazel_path))\n\n python_bin_path = get_python_bin_path(args.python_bin_path)\n print(\"Python binary path: {}\".format(python_bin_path))\n\n print(\"MKL-DNN enabled: {}\".format(\"yes\" if args.enable_mkl_dnn else \"no\"))\n print(\"-march=native: {}\".format(\"yes\" if args.enable_march_native else \"no\"))\n\n cuda_toolkit_path = args.cuda_path\n cudnn_install_path = args.cudnn_path\n print(\"CUDA enabled: {}\".format(\"yes\" if args.enable_cuda else \"no\"))\n if args.enable_cuda:\n if cuda_toolkit_path:\n print(\"CUDA toolkit path: {}\".format(cuda_toolkit_path))\n if cudnn_install_path:\n print(\"CUDNN library path: {}\".format(cudnn_install_path))\n write_bazelrc(\n python_bin_path=python_bin_path,\n tf_need_cuda=1 if args.enable_cuda else 0,\n cuda_toolkit_path=cuda_toolkit_path,\n cudnn_install_path=cudnn_install_path)\n\n print(\"\\nBuilding XLA and installing it in the jaxlib source tree...\")\n config_args = []\n if args.enable_march_native:\n config_args += [\"--config=opt\"]\n if args.enable_mkl_dnn:\n config_args += [\"--config=mkl_open_source_only\"]\n if args.enable_cuda:\n config_args += [\"--config=cuda\"]\n shell(\n [bazel_path, \"run\", \"--verbose_failures=true\"] +\n config_args +\n [\":install_xla_in_source_tree\", os.getcwd()])\n shell([bazel_path, \"shutdown\"])\n\n\nif __name__ == \"__main__\":\n main()\n",
"path": "build/build.py"
}
] | [
{
"content": "#!/usr/bin/python\n#\n# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Helper script for building JAX's libjax easily.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport collections\nimport hashlib\nimport os\nimport platform\nimport re\nimport shutil\nimport stat\nimport subprocess\nimport sys\nimport urllib\n\n# pylint: disable=g-import-not-at-top\nif hasattr(urllib, \"urlretrieve\"):\n urlretrieve = urllib.urlretrieve\nelse:\n import urllib.request\n urlretrieve = urllib.request.urlretrieve\n\nif hasattr(shutil, \"which\"):\n which = shutil.which\nelse:\n from distutils.spawn import find_executable as which\n# pylint: enable=g-import-not-at-top\n\n\ndef shell(cmd):\n output = subprocess.check_output(cmd)\n return output.decode(\"UTF-8\").strip()\n\n\n# Python\n\ndef get_python_bin_path(python_bin_path_flag):\n \"\"\"Returns the path to the Python interpreter to use.\"\"\"\n return python_bin_path_flag or sys.executable\n\n\n# Bazel\n\nBAZEL_BASE_URI = \"https://github.com/bazelbuild/bazel/releases/download/0.24.1/\"\nBazelPackage = collections.namedtuple(\"BazelPackage\", [\"file\", \"sha256\"])\nbazel_packages = {\n \"Linux\":\n BazelPackage(\n file=\"bazel-0.24.1-linux-x86_64\",\n sha256=\n \"e18e2877e18a447eb5d94f5efbec375366d82af6443c6a83a93c62657a7b1c32\"),\n \"Darwin\":\n BazelPackage(\n file=\"bazel-0.24.1-darwin-x86_64\",\n sha256=\n \"cf763752550050d117e03659aaa6ccd6f97da1f983a6029300a497fdaeaaec46\"),\n}\n\n\ndef download_and_verify_bazel():\n \"\"\"Downloads a bazel binary from Github, verifying its SHA256 hash.\"\"\"\n package = bazel_packages.get(platform.system())\n if package is None:\n return None\n\n if not os.access(package.file, os.X_OK):\n uri = BAZEL_BASE_URI + package.file\n sys.stdout.write(\"Downloading bazel from: {}\\n\".format(uri))\n\n def progress(block_count, block_size, total_size):\n if total_size <= 0:\n total_size = 170**6\n progress = (block_count * block_size) / total_size\n num_chars = 40\n progress_chars = int(num_chars * progress)\n sys.stdout.write(\"{} [{}{}] {}%\\r\".format(\n package.file, \"#\" * progress_chars,\n \".\" * (num_chars - progress_chars), int(progress * 100.0)))\n\n tmp_path, _ = urlretrieve(uri, None, progress)\n sys.stdout.write(\"\\n\")\n\n # Verify that the downloaded Bazel binary has the expected SHA256.\n downloaded_file = open(tmp_path, \"rb\")\n contents = downloaded_file.read()\n downloaded_file.close()\n digest = hashlib.sha256(contents).hexdigest()\n if digest != package.sha256:\n print(\n \"Checksum mismatch for downloaded bazel binary (expected {}; got {}).\"\n .format(package.sha256, digest))\n sys.exit(-1)\n\n # Write the file as the bazel file name.\n out_file = open(package.file, \"wb\")\n out_file.write(contents)\n out_file.close()\n\n # Mark the file as executable.\n st = os.stat(package.file)\n os.chmod(package.file,\n st.st_mode | stat.S_IXUSR | stat.S_IXGRP | 
stat.S_IXOTH)\n\n return \"./\" + package.file\n\n\ndef get_bazel_path(bazel_path_flag):\n \"\"\"Returns the path to a Bazel binary, downloading Bazel if not found.\"\"\"\n if bazel_path_flag:\n return bazel_path_flag\n\n bazel = which(\"bazel\")\n if bazel:\n return bazel\n\n bazel = download_and_verify_bazel()\n if bazel:\n return bazel\n\n print(\"Cannot find or download bazel. Please install bazel.\")\n sys.exit(-1)\n\n\ndef check_bazel_version(bazel_path, min_version, max_version):\n \"\"\"Checks Bazel's version is in the range [`min_version`, `max_version`).\"\"\"\n version_output = shell([bazel_path, \"--bazelrc=/dev/null\", \"version\"])\n match = re.search(\"Build label: *([0-9\\\\.]+)[^0-9\\\\.]\", version_output)\n if match is None:\n print(\"Warning: bazel installation is not a release version. Make sure \"\n \"bazel is at least {}\".format(min_version))\n return\n version = match.group(1)\n min_ints = [int(x) for x in min_version.split(\".\")]\n actual_ints = [int(x) for x in match.group(1).split(\".\")]\n if min_ints > actual_ints:\n print(\"Outdated bazel revision (>= {} required, found {})\".format(\n min_version, version))\n sys.exit(0)\n if max_version is not None:\n max_ints = [int(x) for x in max_version.split(\".\")]\n if actual_ints >= max_ints:\n print(\"Please downgrade your bazel revision to build JAX (>= {} and < {}\"\n \" required, found {})\".format(min_version, max_version, version))\n sys.exit(0)\n\n\nBAZELRC_TEMPLATE = \"\"\"\nbuild --action_env PYTHON_BIN_PATH=\"{python_bin_path}\"\nbuild --python_path=\"{python_bin_path}\"\nbuild --action_env TF_NEED_CUDA=\"{tf_need_cuda}\"\nbuild --distinct_host_configuration=false\nbuild --copt=-Wno-sign-compare\nbuild -c opt\nbuild:opt --copt=-march=native\nbuild:opt --host_copt=-march=native\nbuild:mkl_open_source_only --define=tensorflow_mkldnn_contraction_kernel=1\n\n# Sets the default Apple platform to macOS.\nbuild --apple_platform_type=macos\n\n# Disable enabled-by-default TensorFlow features that we don't care about.\nbuild --define=no_aws_support=true\nbuild --define=no_gcp_support=true\nbuild --define=no_hdfs_support=true\nbuild --define=no_kafka_support=true\nbuild --define=no_ignite_support=true\nbuild --define=grpc_no_ares=true\n\nbuild:cuda --crosstool_top=@local_config_cuda//crosstool:toolchain\nbuild:cuda --define=using_cuda=true --define=using_cuda_nvcc=true\n\nbuild --spawn_strategy=standalone\nbuild --strategy=Genrule=standalone\n\"\"\"\n\n\n\ndef write_bazelrc(cuda_toolkit_path=None, cudnn_install_path=None, **kwargs):\n f = open(\"../.bazelrc\", \"w\")\n f.write(BAZELRC_TEMPLATE.format(**kwargs))\n if cuda_toolkit_path:\n f.write(\"build --action_env CUDA_TOOLKIT_PATH=\\\"{cuda_toolkit_path}\\\"\\n\"\n .format(cuda_toolkit_path=cuda_toolkit_path))\n if cudnn_install_path:\n f.write(\"build --action_env CUDNN_INSTALL_PATH=\\\"{cudnn_install_path}\\\"\\n\"\n .format(cudnn_install_path=cudnn_install_path))\n f.close()\n\n\nBANNER = r\"\"\"\n _ _ __ __\n | | / \\ \\ \\/ /\n _ | |/ _ \\ \\ /\n| |_| / ___ \\/ \\\n \\___/_/ \\/_/\\_\\\n\n\"\"\"\n\nEPILOG = \"\"\"\n\nFrom the 'build' directory in the JAX repository, run\n python build.py\nor\n python3 build.py\nto download and build JAX's XLA (jaxlib) dependency.\n\"\"\"\n\n\ndef _parse_string_as_bool(s):\n \"\"\"Parses a string as a boolean argument.\"\"\"\n lower = s.lower()\n if lower == \"true\":\n return True\n elif lower == \"false\":\n return False\n else:\n raise ValueError(\"Expected either 'true' or 'false'; got {}\".format(s))\n\n\ndef 
add_boolean_argument(parser, name, default=False, help_str=None):\n \"\"\"Creates a boolean flag.\"\"\"\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\n \"--\" + name,\n nargs=\"?\",\n default=default,\n const=True,\n type=_parse_string_as_bool,\n help=help_str)\n group.add_argument(\"--no\" + name, dest=name, action=\"store_false\")\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"Builds libjax from source.\", epilog=EPILOG)\n parser.add_argument(\n \"--bazel_path\",\n help=\"Path to the Bazel binary to use. The default is to find bazel via \"\n \"the PATH; if none is found, downloads a fresh copy of bazel from \"\n \"GitHub.\")\n parser.add_argument(\n \"--python_bin_path\",\n help=\"Path to Python binary to use. The default is the Python \"\n \"interpreter used to run the build script.\")\n add_boolean_argument(\n parser,\n \"enable_march_native\",\n default=False,\n help_str=\"Generate code targeted to the current machine? This may \"\n \"increase performance, but may generate code that does not run on \"\n \"older machines.\")\n add_boolean_argument(\n parser,\n \"enable_mkl_dnn\",\n default=True,\n help_str=\"Should we build with MKL-DNN enabled?\")\n add_boolean_argument(\n parser,\n \"enable_cuda\",\n help_str=\"Should we build with CUDA enabled? Requires CUDA and CuDNN.\")\n parser.add_argument(\n \"--cuda_path\",\n default=None,\n help=\"Path to the CUDA toolkit.\")\n parser.add_argument(\n \"--cudnn_path\",\n default=None,\n help=\"Path to CUDNN libraries.\")\n args = parser.parse_args()\n\n print(BANNER)\n os.chdir(os.path.dirname(__file__ or args.prog) or '.')\n\n # Find a working Bazel.\n bazel_path = get_bazel_path(args.bazel_path)\n check_bazel_version(bazel_path, min_version=\"0.24.0\", max_version=None)\n print(\"Bazel binary path: {}\".format(bazel_path))\n\n python_bin_path = get_python_bin_path(args.python_bin_path)\n print(\"Python binary path: {}\".format(python_bin_path))\n\n print(\"MKL-DNN enabled: {}\".format(\"yes\" if args.enable_mkl_dnn else \"no\"))\n print(\"-march=native: {}\".format(\"yes\" if args.enable_march_native else \"no\"))\n\n cuda_toolkit_path = args.cuda_path\n cudnn_install_path = args.cudnn_path\n print(\"CUDA enabled: {}\".format(\"yes\" if args.enable_cuda else \"no\"))\n if args.enable_cuda:\n if cuda_toolkit_path:\n print(\"CUDA toolkit path: {}\".format(cuda_toolkit_path))\n if cudnn_install_path:\n print(\"CUDNN library path: {}\".format(cudnn_install_path))\n write_bazelrc(\n python_bin_path=python_bin_path,\n tf_need_cuda=1 if args.enable_cuda else 0,\n cuda_toolkit_path=cuda_toolkit_path,\n cudnn_install_path=cudnn_install_path)\n\n print(\"\\nBuilding XLA and installing it in the jaxlib source tree...\")\n config_args = []\n if args.enable_march_native:\n config_args += [\"--config=opt\"]\n if args.enable_mkl_dnn:\n config_args += [\"--config=mkl_open_source_only\"]\n if args.enable_cuda:\n config_args += [\"--config=cuda\"]\n shell(\n [bazel_path, \"run\", \"--verbose_failures=true\"] +\n config_args +\n [\":install_xla_in_source_tree\", os.getcwd()])\n shell([bazel_path, \"shutdown\"])\n\n\nif __name__ == \"__main__\":\n main()\n",
"path": "build/build.py"
}
] | diff --git a/build/build.py b/build/build.py
index f2428415c590..32fb7b38d180 100755
--- a/build/build.py
+++ b/build/build.py
@@ -187,6 +187,9 @@ def check_bazel_version(bazel_path, min_version, max_version):
build:cuda --crosstool_top=@local_config_cuda//crosstool:toolchain
build:cuda --define=using_cuda=true --define=using_cuda_nvcc=true
+
+build --spawn_strategy=standalone
+build --strategy=Genrule=standalone
"""
|
Parsl__parsl-534 | Fix import error
```
ImportError: cannot import name 'BashApp' from 'parsl.app.python' (/home/annawoodard/parsl/parsl/app/python.py)
```
It looks like I introduced this bug in 3d0e2d1e69ad27a133b0c40a42472ae43876d5f2.
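The traceback points at the wrong module: `BashApp` is defined in `parsl.app.bash`, not in `parsl.app.python` (which is where `PythonApp` lives), as the fix below confirms. A minimal sketch of the working imports:

```
# PythonApp and BashApp live in different modules; importing BashApp
# from parsl.app.python raises exactly the ImportError quoted above.
from parsl.app.python import PythonApp
from parsl.app.bash import BashApp
```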
| [
{
"content": "\"\"\"Definitions for the @App decorator and the App classes.\n\nThe App class encapsulates a generic leaf task that can be executed asynchronously.\n\"\"\"\nimport logging\nfrom inspect import getsource\nfrom hashlib import md5\nfrom inspect import signature\n\nfrom parsl.app.errors import InvalidAppTypeError\n\nlogger = logging.getLogger(__name__)\n\n\nclass AppBase(object):\n \"\"\"This is the base class that defines the two external facing functions that an App must define.\n\n The __init__ () which is called when the interpreter sees the definition of the decorated\n function, and the __call__ () which is invoked when a decorated function is called by the user.\n\n \"\"\"\n\n def __init__(self, func, data_flow_kernel=None, walltime=60, executors='all', cache=False):\n \"\"\"Construct the App object.\n\n Args:\n - func (function): Takes the function to be made into an App\n\n Kwargs:\n - data_flow_kernel (DataFlowKernel): The :class:`~parsl.dataflow.dflow.DataFlowKernel` responsible for\n managing this app. This can be omitted only\n after calling :meth:`parsl.dataflow.dflow.DataFlowKernelLoader.load`.\n - walltime (int) : Walltime in seconds for the app execution.\n - executors (str|list) : Labels of the executors that this app can execute over. Default is 'all'.\n - cache (Bool) : Enable caching of this app ?\n\n Returns:\n - App object.\n\n \"\"\"\n self.__name__ = func.__name__\n self.func = func\n self.data_flow_kernel = data_flow_kernel\n self.status = 'created'\n self.executors = executors\n self.cache = cache\n if not (isinstance(executors, list) or isinstance(executors, str)):\n logger.error(\"App {} specifies invalid executor option, expects string or list\".format(\n func.__name__))\n\n if cache is True:\n try:\n self.fn_source = getsource(func)\n except OSError:\n logger.debug(\"Unable to get source code for AppCaching. Recommend creating module\")\n self.fn_source = func.__name__\n\n self.func_hash = md5(self.fn_source.encode('utf-8')).hexdigest()\n else:\n self.func_hash = func.__name__\n\n params = signature(func).parameters\n\n self.kwargs = {}\n if 'stdout' in params:\n self.kwargs['stdout'] = params['stdout'].default\n if 'stderr' in params:\n self.kwargs['stderr'] = params['stderr'].default\n self.outputs = params['outputs'].default if 'outputs' in params else []\n self.inputs = params['inputs'].default if 'inputs' in params else []\n\n def __call__(self, *args, **kwargs):\n \"\"\"The __call__ function must be implemented in the subclasses.\"\"\"\n raise NotImplementedError\n\n\ndef app_wrapper(func):\n\n def wrapper(*args, **kwargs):\n logger.debug(\"App wrapper begins\")\n x = func(*args, **kwargs)\n logger.debug(\"App wrapper ends\")\n return x\n\n return wrapper\n\n\ndef App(apptype, data_flow_kernel=None, walltime=60, cache=False, executors='all'):\n \"\"\"The App decorator function.\n\n Args:\n - apptype (string) : Apptype can be bash|python\n\n Kwargs:\n - data_flow_kernel (DataFlowKernel): The :class:`~parsl.dataflow.dflow.DataFlowKernel` responsible for\n managing this app. This can be omitted only\n after calling :meth:`parsl.dataflow.dflow.DataFlowKernelLoader.load`.\n - walltime (int) : Walltime for app in seconds,\n default=60\n - executors (str|list) : Labels of the executors that this app can execute over. 
Default is 'all'.\n - cache (Bool) : Enable caching of the app call\n default=False\n\n Returns:\n A PythonApp or BashApp object, which when called runs the apps through the executor.\n \"\"\"\n\n from parsl.app.python import PythonApp\n from parsl.app.bash import BashApp\n\n logger.warning(\"The 'App' decorator will be depreciated in Parsl 0.8. Please use 'python_app' or 'bash_app' instead.\")\n\n if apptype is 'python':\n app_class = PythonApp\n elif apptype is 'bash':\n app_class = BashApp\n else:\n raise InvalidAppTypeError(\"Invalid apptype requested {}; must be 'python' or 'bash'\".format(apptype))\n\n def wrapper(f):\n return app_class(f,\n data_flow_kernel=data_flow_kernel,\n walltime=walltime,\n cache=cache,\n executors=executors)\n return wrapper\n\n\ndef python_app(function=None, data_flow_kernel=None, walltime=60, cache=False, executors='all'):\n \"\"\"Decorator function for making python apps.\n\n Parameters\n ----------\n function : function\n Do not pass this keyword argument directly. This is needed in order to allow for omitted parenthesis,\n for example, `@python_app` if using all defaults or `@python_app(walltime=120)`. If the\n decorator is used alone, function will be the actual function being decorated, whereas if it\n is called with arguments, function will be None. Default is None.\n data_flow_kernel : DataFlowKernel\n The :class:`~parsl.dataflow.dflow.DataFlowKernel` responsible for managing this app. This can\n be omitted only after calling :meth:`parsl.dataflow.dflow.DataFlowKernelLoader.load`. Default is None.\n walltime : int\n Walltime for app in seconds. Default is 60.\n executors : string or list\n Labels of the executors that this app can execute over. Default is 'all'.\n cache : bool\n Enable caching of the app call. Default is False.\n \"\"\"\n from parsl.app.python import PythonApp\n\n def decorator(func):\n def wrapper(f):\n return PythonApp(f,\n data_flow_kernel=data_flow_kernel,\n walltime=walltime,\n cache=cache,\n executors=executors)\n return wrapper(func)\n if function is not None:\n return decorator(function)\n return decorator\n\n\ndef bash_app(function=None, data_flow_kernel=None, walltime=60, cache=False, executors='all'):\n \"\"\"Decorator function for making bash apps.\n\n Parameters\n ----------\n function : function\n Do not pass this keyword argument directly. This is needed in order to allow for omitted parenthesis,\n for example, `@bash_app` if using all defaults or `@bash_app(walltime=120)`. If the\n decorator is used alone, function will be the actual function being decorated, whereas if it\n is called with arguments, function will be None. Default is None.\n data_flow_kernel : DataFlowKernel\n The :class:`~parsl.dataflow.dflow.DataFlowKernel` responsible for managing this app. This can\n be omitted only after calling :meth:`parsl.dataflow.dflow.DataFlowKernelLoader.load`. Default is None.\n walltime : int\n Walltime for app in seconds. Default is 60.\n executors : string or list\n Labels of the executors that this app can execute over. Default is 'all'.\n cache : bool\n Enable caching of the app call. Default is False.\n \"\"\"\n from parsl.app.python import BashApp\n\n def decorator(func):\n def wrapper(f):\n return BashApp(f,\n data_flow_kernel=data_flow_kernel,\n walltime=walltime,\n cache=cache,\n executors=executors)\n return wrapper(func)\n if function is not None:\n return decorator(function)\n return decorator\n",
"path": "parsl/app/app.py"
}
] | [
{
"content": "\"\"\"Definitions for the @App decorator and the App classes.\n\nThe App class encapsulates a generic leaf task that can be executed asynchronously.\n\"\"\"\nimport logging\nfrom inspect import getsource\nfrom hashlib import md5\nfrom inspect import signature\n\nfrom parsl.app.errors import InvalidAppTypeError\n\nlogger = logging.getLogger(__name__)\n\n\nclass AppBase(object):\n \"\"\"This is the base class that defines the two external facing functions that an App must define.\n\n The __init__ () which is called when the interpreter sees the definition of the decorated\n function, and the __call__ () which is invoked when a decorated function is called by the user.\n\n \"\"\"\n\n def __init__(self, func, data_flow_kernel=None, walltime=60, executors='all', cache=False):\n \"\"\"Construct the App object.\n\n Args:\n - func (function): Takes the function to be made into an App\n\n Kwargs:\n - data_flow_kernel (DataFlowKernel): The :class:`~parsl.dataflow.dflow.DataFlowKernel` responsible for\n managing this app. This can be omitted only\n after calling :meth:`parsl.dataflow.dflow.DataFlowKernelLoader.load`.\n - walltime (int) : Walltime in seconds for the app execution.\n - executors (str|list) : Labels of the executors that this app can execute over. Default is 'all'.\n - cache (Bool) : Enable caching of this app ?\n\n Returns:\n - App object.\n\n \"\"\"\n self.__name__ = func.__name__\n self.func = func\n self.data_flow_kernel = data_flow_kernel\n self.status = 'created'\n self.executors = executors\n self.cache = cache\n if not (isinstance(executors, list) or isinstance(executors, str)):\n logger.error(\"App {} specifies invalid executor option, expects string or list\".format(\n func.__name__))\n\n if cache is True:\n try:\n self.fn_source = getsource(func)\n except OSError:\n logger.debug(\"Unable to get source code for AppCaching. Recommend creating module\")\n self.fn_source = func.__name__\n\n self.func_hash = md5(self.fn_source.encode('utf-8')).hexdigest()\n else:\n self.func_hash = func.__name__\n\n params = signature(func).parameters\n\n self.kwargs = {}\n if 'stdout' in params:\n self.kwargs['stdout'] = params['stdout'].default\n if 'stderr' in params:\n self.kwargs['stderr'] = params['stderr'].default\n self.outputs = params['outputs'].default if 'outputs' in params else []\n self.inputs = params['inputs'].default if 'inputs' in params else []\n\n def __call__(self, *args, **kwargs):\n \"\"\"The __call__ function must be implemented in the subclasses.\"\"\"\n raise NotImplementedError\n\n\ndef app_wrapper(func):\n\n def wrapper(*args, **kwargs):\n logger.debug(\"App wrapper begins\")\n x = func(*args, **kwargs)\n logger.debug(\"App wrapper ends\")\n return x\n\n return wrapper\n\n\ndef App(apptype, data_flow_kernel=None, walltime=60, cache=False, executors='all'):\n \"\"\"The App decorator function.\n\n Args:\n - apptype (string) : Apptype can be bash|python\n\n Kwargs:\n - data_flow_kernel (DataFlowKernel): The :class:`~parsl.dataflow.dflow.DataFlowKernel` responsible for\n managing this app. This can be omitted only\n after calling :meth:`parsl.dataflow.dflow.DataFlowKernelLoader.load`.\n - walltime (int) : Walltime for app in seconds,\n default=60\n - executors (str|list) : Labels of the executors that this app can execute over. 
Default is 'all'.\n - cache (Bool) : Enable caching of the app call\n default=False\n\n Returns:\n A PythonApp or BashApp object, which when called runs the apps through the executor.\n \"\"\"\n\n from parsl.app.python import PythonApp\n from parsl.app.bash import BashApp\n\n logger.warning(\"The 'App' decorator will be depreciated in Parsl 0.8. Please use 'python_app' or 'bash_app' instead.\")\n\n if apptype is 'python':\n app_class = PythonApp\n elif apptype is 'bash':\n app_class = BashApp\n else:\n raise InvalidAppTypeError(\"Invalid apptype requested {}; must be 'python' or 'bash'\".format(apptype))\n\n def wrapper(f):\n return app_class(f,\n data_flow_kernel=data_flow_kernel,\n walltime=walltime,\n cache=cache,\n executors=executors)\n return wrapper\n\n\ndef python_app(function=None, data_flow_kernel=None, walltime=60, cache=False, executors='all'):\n \"\"\"Decorator function for making python apps.\n\n Parameters\n ----------\n function : function\n Do not pass this keyword argument directly. This is needed in order to allow for omitted parenthesis,\n for example, `@python_app` if using all defaults or `@python_app(walltime=120)`. If the\n decorator is used alone, function will be the actual function being decorated, whereas if it\n is called with arguments, function will be None. Default is None.\n data_flow_kernel : DataFlowKernel\n The :class:`~parsl.dataflow.dflow.DataFlowKernel` responsible for managing this app. This can\n be omitted only after calling :meth:`parsl.dataflow.dflow.DataFlowKernelLoader.load`. Default is None.\n walltime : int\n Walltime for app in seconds. Default is 60.\n executors : string or list\n Labels of the executors that this app can execute over. Default is 'all'.\n cache : bool\n Enable caching of the app call. Default is False.\n \"\"\"\n from parsl.app.python import PythonApp\n\n def decorator(func):\n def wrapper(f):\n return PythonApp(f,\n data_flow_kernel=data_flow_kernel,\n walltime=walltime,\n cache=cache,\n executors=executors)\n return wrapper(func)\n if function is not None:\n return decorator(function)\n return decorator\n\n\ndef bash_app(function=None, data_flow_kernel=None, walltime=60, cache=False, executors='all'):\n \"\"\"Decorator function for making bash apps.\n\n Parameters\n ----------\n function : function\n Do not pass this keyword argument directly. This is needed in order to allow for omitted parenthesis,\n for example, `@bash_app` if using all defaults or `@bash_app(walltime=120)`. If the\n decorator is used alone, function will be the actual function being decorated, whereas if it\n is called with arguments, function will be None. Default is None.\n data_flow_kernel : DataFlowKernel\n The :class:`~parsl.dataflow.dflow.DataFlowKernel` responsible for managing this app. This can\n be omitted only after calling :meth:`parsl.dataflow.dflow.DataFlowKernelLoader.load`. Default is None.\n walltime : int\n Walltime for app in seconds. Default is 60.\n executors : string or list\n Labels of the executors that this app can execute over. Default is 'all'.\n cache : bool\n Enable caching of the app call. Default is False.\n \"\"\"\n from parsl.app.bash import BashApp\n\n def decorator(func):\n def wrapper(f):\n return BashApp(f,\n data_flow_kernel=data_flow_kernel,\n walltime=walltime,\n cache=cache,\n executors=executors)\n return wrapper(func)\n if function is not None:\n return decorator(function)\n return decorator\n",
"path": "parsl/app/app.py"
}
] | diff --git a/parsl/app/app.py b/parsl/app/app.py
index 6c7523f2b6..9b75a8ee88 100644
--- a/parsl/app/app.py
+++ b/parsl/app/app.py
@@ -181,7 +181,7 @@ def bash_app(function=None, data_flow_kernel=None, walltime=60, cache=False, exe
cache : bool
Enable caching of the app call. Default is False.
"""
- from parsl.app.python import BashApp
+ from parsl.app.bash import BashApp
def decorator(func):
def wrapper(f):
|
hylang__hy-161 | LIST-COMP breaks with certain variable names
Try compiling:
```
(list-comp (, i j) (i [-1 0 1] j [-1 0 1]))
```
with Hy, and you'll get some strange errors. If you replace "i" and "j" with "x" and "y" respectively, the same piece of code works as expected.
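Judging from the fix below, the root cause is in the lexer's atom resolution: Python's `complex()` accepts a bare `"j"` as the imaginary unit `1j`, so the integer-then-float-then-complex fallback swallows a symbol named `j` before it can become a `HySymbol`. A minimal plain-Python sketch of that fallback (`resolve` here is an illustrative stand-in, not the real `_resolve_atom`):

```
def resolve(token):
    # Mimic the lexer's fallback order: integer, then float, then complex.
    for conv in (int, float, complex):
        try:
            return conv(token)
        except ValueError:
            pass
    return token  # anything left over would become a symbol

print(resolve("x"))  # 'x'  -- an ordinary name survives as a symbol
print(resolve("j"))  # 1j   -- complex("j") succeeds, so j is misread as a number
```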
| [
{
"content": "# Copyright (c) 2013 Paul Tagliamonte <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nfrom hy.models.expression import HyExpression\nfrom hy.models.integer import HyInteger\nfrom hy.models.lambdalist import HyLambdaListKeyword\nfrom hy.models.float import HyFloat\nfrom hy.models.complex import HyComplex\nfrom hy.models.symbol import HySymbol\nfrom hy.models.string import HyString\nfrom hy.models.keyword import HyKeyword\nfrom hy.models.dict import HyDict\nfrom hy.models.list import HyList\n\nfrom hy.errors import HyError\n\nfrom abc import ABCMeta, abstractmethod\n\n\nWHITESPACE = [\" \", \"\\t\", \"\\n\", \"\\r\"]\n\n\nclass LexException(HyError):\n \"\"\"\n Error during the Lexing of a Hython expression.\n \"\"\"\n pass\n\n\ndef _resolve_atom(obj):\n \"\"\"\n Resolve a bare atom into one of the following (in order):\n\n - Integer\n - LambdaListKeyword\n - Float\n - Complex\n - Symbol\n \"\"\"\n try:\n return HyInteger(obj)\n except ValueError:\n pass\n\n if obj.startswith(\"&\"):\n return HyLambdaListKeyword(obj)\n\n try:\n return HyFloat(obj)\n except ValueError:\n pass\n\n try:\n return HyComplex(obj)\n except ValueError:\n pass\n\n table = {\n \"true\": \"True\",\n \"false\": \"False\",\n \"null\": \"None\",\n }\n\n if obj in table:\n return HySymbol(table[obj])\n\n if obj.startswith(\":\"):\n return HyKeyword(obj)\n\n if obj.startswith(\"*\") and obj.endswith(\"*\") and obj not in (\"*\", \"**\"):\n obj = obj[1:-1].upper()\n\n if \"-\" in obj and obj != \"-\":\n obj = obj.replace(\"-\", \"_\")\n\n return HySymbol(obj)\n\n\nclass State(object):\n \"\"\"\n Generic State model.\n \"\"\"\n\n __slots__ = (\"nodes\", \"machine\")\n __metaclass__ = ABCMeta\n\n def __init__(self, machine):\n self.machine = machine\n\n def _enter(self):\n \"\"\" Internal shim for running global ``enter`` code \"\"\"\n self.result = None\n self.nodes = []\n self.enter()\n\n def _exit(self):\n \"\"\" Internal shim for running global ``exit`` code \"\"\"\n self.exit()\n\n def enter(self):\n \"\"\"\n Overridable ``enter`` routines. Subclasses may implement this.\n \"\"\"\n pass\n\n def exit(self):\n \"\"\"\n Overridable ``exit`` routines. Subclasses may implement this.\n \"\"\"\n pass\n\n @abstractmethod\n def process(self, char):\n \"\"\"\n Overridable ``process`` routines. 
Subclasses must implement this to be\n useful.\n \"\"\"\n pass # ABC\n\n\nclass ListeyThing(State):\n\n def enter(self):\n self.buf = \"\"\n\n def commit(self):\n if self.buf != \"\":\n ret = _resolve_atom(self.buf)\n ret.start_line = self._start_line\n ret.start_column = self._start_column\n ret.end_line = self.machine.line\n ret.end_column = (self.machine.column - 1)\n\n self.nodes.append(ret)\n self.buf = \"\"\n\n def exit(self):\n self.commit()\n self.result = self.result_type(self.nodes)\n\n def process(self, char):\n if char == \"(\":\n self.commit()\n self.machine.sub(Expression)\n return\n\n if char == \"{\":\n self.commit()\n self.machine.sub(Dict)\n return\n\n if char == \"[\":\n self.commit()\n self.machine.sub(List)\n return\n\n if char == \"\\\"\":\n self.commit()\n self.machine.sub(String)\n return\n\n if char == \";\":\n self.commit()\n self.machine.sub(Comment)\n return\n\n if char == self.end_char:\n return Idle\n\n if char in \")]}\":\n raise LexException(\"Unexpected closing character: `%s'\" % (char))\n\n if char in WHITESPACE:\n self.commit()\n return\n\n if self.buf == \"\":\n self._start_line = self.machine.line\n self._start_column = self.machine.column\n\n self.buf += char\n\n\nclass List(ListeyThing):\n \"\"\"\n This state parses a Hy list (like a Clojure vector) for use in native\n Python interop.\n\n [foo 1 2 3 4] is a good example.\n \"\"\"\n\n result_type = HyList\n end_char = \"]\"\n\n\nclass Expression(ListeyThing):\n \"\"\"\n This state parses a Hy expression (statement, to be evaluated at runtime)\n for running things & stuff.\n \"\"\"\n\n result_type = HyExpression\n end_char = \")\"\n\n\nclass Dict(ListeyThing):\n \"\"\"\n This state parses a Hy dict for things.\n \"\"\"\n\n def exit(self):\n self.commit()\n it = iter(self.nodes)\n result = dict(zip(it, it))\n self.result = HyDict(result)\n\n end_char = \"}\"\n\n\nclass String(State):\n \"\"\"\n String state. This will handle stuff like:\n\n (println \"foobar\")\n ^^^^^^^^ -- String\n \"\"\"\n\n def enter(self):\n self.escaped = False\n\n def exit(self):\n self.result = HyString(\"\".join(self.nodes))\n\n def process(self, char):\n \"\"\"\n State transitions:\n\n - \" - Idle\n \"\"\"\n if self.escaped:\n self.escaped = False\n if char == \"n\":\n self.nodes.append(\"\\n\")\n return\n if char == \"\\\\\":\n self.nodes.append(\"\\\\\")\n return\n if char == \"\\\"\":\n self.nodes.append(\"\\\"\")\n return\n\n raise LexException(\"Unknown modifier: `%s'\" % (char))\n\n if char == \"\\\"\":\n return Idle\n\n if char == \"\\\\\":\n self.escaped = True\n return\n\n self.nodes.append(char)\n\n\nclass Atom(State):\n \"\"\"\n This state parses integer constants, boolean constants, and symbols\n \"\"\"\n\n def __init__(self, machine):\n State.__init__(self, machine)\n self.initial_buf = ''\n\n def enter(self):\n self.buf = self.initial_buf\n\n def exit(self):\n self.result = _resolve_atom(self.buf)\n\n def process(self, char):\n \"\"\"\n State transitions:\n\n - WHITESPACE - Idle\n - ; - Comment\n \"\"\"\n\n if char in WHITESPACE:\n return Idle\n\n if char == \";\":\n return Comment\n\n self.buf += char\n\n\ndef AtomStartingWith(initial_char):\n def AtomFactory(machine):\n state = Atom(machine)\n state.initial_buf = initial_char\n return state\n return AtomFactory\n\n\nclass Idle(State):\n \"\"\"\n Idle state. 
This is the first (and last) thing that we should\n be in.\n \"\"\"\n\n def process(self, char):\n \"\"\"\n State transitions:\n\n - ( - Expression\n - [ - List\n - { - Dict\n - \\\" - String\n - ; - Comment\n - # - Hash\n - (default) - Atom\n \"\"\"\n\n if char == \"(\":\n return Expression\n\n if char == \"[\":\n return List\n\n if char == \"{\":\n return Dict\n\n if char == \"\\\"\":\n return String\n\n if char == \";\":\n return Comment\n\n if char == \"#\":\n return Hash\n\n if char in WHITESPACE:\n return\n\n return AtomStartingWith(char)\n\n\nclass Comment(State):\n \"\"\"\n Comment state.\n \"\"\"\n\n def process(self, char):\n \"\"\"\n State transitions:\n\n - \\n - Idle\n - (default) - disregard.\n \"\"\"\n\n if char == \"\\n\":\n return Idle\n\n\nclass Hash(State):\n \"\"\"\n Hash state\n \"\"\"\n\n def process(self, char):\n \"\"\"\n State transitions:\n\n - ! - Comment\n \"\"\"\n\n if char == \"!\":\n return Comment\n\n raise LexException(\"Unknown char (Hash state): `%s'\" % (char))\n",
"path": "hy/lex/states.py"
}
] | [
{
"content": "# Copyright (c) 2013 Paul Tagliamonte <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nfrom hy.models.expression import HyExpression\nfrom hy.models.integer import HyInteger\nfrom hy.models.lambdalist import HyLambdaListKeyword\nfrom hy.models.float import HyFloat\nfrom hy.models.complex import HyComplex\nfrom hy.models.symbol import HySymbol\nfrom hy.models.string import HyString\nfrom hy.models.keyword import HyKeyword\nfrom hy.models.dict import HyDict\nfrom hy.models.list import HyList\n\nfrom hy.errors import HyError\n\nfrom abc import ABCMeta, abstractmethod\n\n\nWHITESPACE = [\" \", \"\\t\", \"\\n\", \"\\r\"]\n\n\nclass LexException(HyError):\n \"\"\"\n Error during the Lexing of a Hython expression.\n \"\"\"\n pass\n\n\ndef _resolve_atom(obj):\n \"\"\"\n Resolve a bare atom into one of the following (in order):\n\n - Integer\n - LambdaListKeyword\n - Float\n - Complex\n - Symbol\n \"\"\"\n try:\n return HyInteger(obj)\n except ValueError:\n pass\n\n if obj.startswith(\"&\"):\n return HyLambdaListKeyword(obj)\n\n try:\n return HyFloat(obj)\n except ValueError:\n pass\n\n if obj != \"j\":\n try:\n return HyComplex(obj)\n except ValueError:\n pass\n\n table = {\n \"true\": \"True\",\n \"false\": \"False\",\n \"null\": \"None\",\n }\n\n if obj in table:\n return HySymbol(table[obj])\n\n if obj.startswith(\":\"):\n return HyKeyword(obj)\n\n if obj.startswith(\"*\") and obj.endswith(\"*\") and obj not in (\"*\", \"**\"):\n obj = obj[1:-1].upper()\n\n if \"-\" in obj and obj != \"-\":\n obj = obj.replace(\"-\", \"_\")\n\n return HySymbol(obj)\n\n\nclass State(object):\n \"\"\"\n Generic State model.\n \"\"\"\n\n __slots__ = (\"nodes\", \"machine\")\n __metaclass__ = ABCMeta\n\n def __init__(self, machine):\n self.machine = machine\n\n def _enter(self):\n \"\"\" Internal shim for running global ``enter`` code \"\"\"\n self.result = None\n self.nodes = []\n self.enter()\n\n def _exit(self):\n \"\"\" Internal shim for running global ``exit`` code \"\"\"\n self.exit()\n\n def enter(self):\n \"\"\"\n Overridable ``enter`` routines. Subclasses may implement this.\n \"\"\"\n pass\n\n def exit(self):\n \"\"\"\n Overridable ``exit`` routines. Subclasses may implement this.\n \"\"\"\n pass\n\n @abstractmethod\n def process(self, char):\n \"\"\"\n Overridable ``process`` routines. 
Subclasses must implement this to be\n useful.\n \"\"\"\n pass # ABC\n\n\nclass ListeyThing(State):\n\n def enter(self):\n self.buf = \"\"\n\n def commit(self):\n if self.buf != \"\":\n ret = _resolve_atom(self.buf)\n ret.start_line = self._start_line\n ret.start_column = self._start_column\n ret.end_line = self.machine.line\n ret.end_column = (self.machine.column - 1)\n\n self.nodes.append(ret)\n self.buf = \"\"\n\n def exit(self):\n self.commit()\n self.result = self.result_type(self.nodes)\n\n def process(self, char):\n if char == \"(\":\n self.commit()\n self.machine.sub(Expression)\n return\n\n if char == \"{\":\n self.commit()\n self.machine.sub(Dict)\n return\n\n if char == \"[\":\n self.commit()\n self.machine.sub(List)\n return\n\n if char == \"\\\"\":\n self.commit()\n self.machine.sub(String)\n return\n\n if char == \";\":\n self.commit()\n self.machine.sub(Comment)\n return\n\n if char == self.end_char:\n return Idle\n\n if char in \")]}\":\n raise LexException(\"Unexpected closing character: `%s'\" % (char))\n\n if char in WHITESPACE:\n self.commit()\n return\n\n if self.buf == \"\":\n self._start_line = self.machine.line\n self._start_column = self.machine.column\n\n self.buf += char\n\n\nclass List(ListeyThing):\n \"\"\"\n This state parses a Hy list (like a Clojure vector) for use in native\n Python interop.\n\n [foo 1 2 3 4] is a good example.\n \"\"\"\n\n result_type = HyList\n end_char = \"]\"\n\n\nclass Expression(ListeyThing):\n \"\"\"\n This state parses a Hy expression (statement, to be evaluated at runtime)\n for running things & stuff.\n \"\"\"\n\n result_type = HyExpression\n end_char = \")\"\n\n\nclass Dict(ListeyThing):\n \"\"\"\n This state parses a Hy dict for things.\n \"\"\"\n\n def exit(self):\n self.commit()\n it = iter(self.nodes)\n result = dict(zip(it, it))\n self.result = HyDict(result)\n\n end_char = \"}\"\n\n\nclass String(State):\n \"\"\"\n String state. This will handle stuff like:\n\n (println \"foobar\")\n ^^^^^^^^ -- String\n \"\"\"\n\n def enter(self):\n self.escaped = False\n\n def exit(self):\n self.result = HyString(\"\".join(self.nodes))\n\n def process(self, char):\n \"\"\"\n State transitions:\n\n - \" - Idle\n \"\"\"\n if self.escaped:\n self.escaped = False\n if char == \"n\":\n self.nodes.append(\"\\n\")\n return\n if char == \"\\\\\":\n self.nodes.append(\"\\\\\")\n return\n if char == \"\\\"\":\n self.nodes.append(\"\\\"\")\n return\n\n raise LexException(\"Unknown modifier: `%s'\" % (char))\n\n if char == \"\\\"\":\n return Idle\n\n if char == \"\\\\\":\n self.escaped = True\n return\n\n self.nodes.append(char)\n\n\nclass Atom(State):\n \"\"\"\n This state parses integer constants, boolean constants, and symbols\n \"\"\"\n\n def __init__(self, machine):\n State.__init__(self, machine)\n self.initial_buf = ''\n\n def enter(self):\n self.buf = self.initial_buf\n\n def exit(self):\n self.result = _resolve_atom(self.buf)\n\n def process(self, char):\n \"\"\"\n State transitions:\n\n - WHITESPACE - Idle\n - ; - Comment\n \"\"\"\n\n if char in WHITESPACE:\n return Idle\n\n if char == \";\":\n return Comment\n\n self.buf += char\n\n\ndef AtomStartingWith(initial_char):\n def AtomFactory(machine):\n state = Atom(machine)\n state.initial_buf = initial_char\n return state\n return AtomFactory\n\n\nclass Idle(State):\n \"\"\"\n Idle state. 
This is the first (and last) thing that we should\n be in.\n \"\"\"\n\n def process(self, char):\n \"\"\"\n State transitions:\n\n - ( - Expression\n - [ - List\n - { - Dict\n - \\\" - String\n - ; - Comment\n - # - Hash\n - (default) - Atom\n \"\"\"\n\n if char == \"(\":\n return Expression\n\n if char == \"[\":\n return List\n\n if char == \"{\":\n return Dict\n\n if char == \"\\\"\":\n return String\n\n if char == \";\":\n return Comment\n\n if char == \"#\":\n return Hash\n\n if char in WHITESPACE:\n return\n\n return AtomStartingWith(char)\n\n\nclass Comment(State):\n \"\"\"\n Comment state.\n \"\"\"\n\n def process(self, char):\n \"\"\"\n State transitions:\n\n - \\n - Idle\n - (default) - disregard.\n \"\"\"\n\n if char == \"\\n\":\n return Idle\n\n\nclass Hash(State):\n \"\"\"\n Hash state\n \"\"\"\n\n def process(self, char):\n \"\"\"\n State transitions:\n\n - ! - Comment\n \"\"\"\n\n if char == \"!\":\n return Comment\n\n raise LexException(\"Unknown char (Hash state): `%s'\" % (char))\n",
"path": "hy/lex/states.py"
}
] | diff --git a/hy/lex/states.py b/hy/lex/states.py
index 8c8ffd4df..772f8c6a0 100644
--- a/hy/lex/states.py
+++ b/hy/lex/states.py
@@ -67,10 +67,11 @@ def _resolve_atom(obj):
except ValueError:
pass
- try:
- return HyComplex(obj)
- except ValueError:
- pass
+ if obj != "j":
+ try:
+ return HyComplex(obj)
+ except ValueError:
+ pass
table = {
"true": "True",
diff --git a/tests/lex/test_lex.py b/tests/lex/test_lex.py
index cc3a76072..590e51ee9 100644
--- a/tests/lex/test_lex.py
+++ b/tests/lex/test_lex.py
@@ -230,3 +230,12 @@ def test_hashbang():
""" Ensure we can escape things """
entry = tokenize("#!this is a comment\n")
assert entry == []
+
+
+def test_complex():
+ """Ensure we tokenize complex numbers properly"""
+ # This is a regression test for #143
+ entry = tokenize("(1j)")[0][0]
+ assert entry == HyComplex("1.0j")
+ entry = tokenize("(j)")[0][0]
+ assert entry == HySymbol("j")
diff --git a/tests/native_tests/language.hy b/tests/native_tests/language.hy
index 9722f5b9b..beb61c5ed 100644
--- a/tests/native_tests/language.hy
+++ b/tests/native_tests/language.hy
@@ -420,7 +420,8 @@
(assert (= (sorted (list-comp (* y 2) ((, x y) (.items {"1" 1 "2" 2}))))
[2 4]))
(assert (= (list-comp (, x y) (x (range 2) y (range 2)))
- [(, 0 0) (, 0 1) (, 1 0) (, 1 1)])))
+ [(, 0 0) (, 0 1) (, 1 0) (, 1 1)]))
+ (assert (= (list-comp j (j [1 2])) [1 2])))
(defn test-defn-order []
|
buildbot__buildbot-380 | Update flake8 to version 2.6.
This introduces a few new errors:
- `W503` line break before binary operator
I don't think this is a sensible choice and the codebase doesn't follow this convention.
- `E731` do not assign a lambda expression, use a def
This is used often in tests for functions that return canned values. I think turning them into `def`s obscures that.
- `E402` module level import not at top of file
I've fixed most of the occurrences of this, as they are fairly trivial.
| [
{
"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Portions Copyright Buildbot Team Members\n# Portions Copyright Canonical Ltd. 2009\n\nimport time\nfrom email.Message import Message\nfrom email.Utils import formatdate\nfrom zope.interface import implements\nfrom twisted.python import log, failure\nfrom twisted.internet import defer, reactor\nfrom twisted.application import service\nfrom twisted.spread import pb\nfrom twisted.python.reflect import namedModule\n\nfrom buildbot.status.slave import SlaveStatus\nfrom buildbot.status.mail import MailNotifier\nfrom buildbot.process import metrics, botmaster\nfrom buildbot.interfaces import IBuildSlave, ILatentBuildSlave\nfrom buildbot.process.properties import Properties\nfrom buildbot.locks import LockAccess\nfrom buildbot.util import subscription\nfrom buildbot import config\n\nclass AbstractBuildSlave(config.ReconfigurableServiceMixin, pb.Avatar,\n service.MultiService):\n \"\"\"This is the master-side representative for a remote buildbot slave.\n There is exactly one for each slave described in the config file (the\n c['slaves'] list). When buildbots connect in (.attach), they get a\n reference to this instance. The BotMaster object is stashed as the\n .botmaster attribute. The BotMaster is also our '.parent' Service.\n\n I represent a build slave -- a remote machine capable of\n running builds. 
I am instantiated by the configuration file, and can be\n subclassed to add extra functionality.\"\"\"\n\n implements(IBuildSlave)\n keepalive_timer = None\n keepalive_interval = None\n\n # reconfig slaves after builders\n reconfig_priority = 64\n\n def __init__(self, name, password, max_builds=None,\n notify_on_missing=[], missing_timeout=3600,\n properties={}, locks=None, keepalive_interval=3600):\n \"\"\"\n @param name: botname this machine will supply when it connects\n @param password: password this machine will supply when\n it connects\n @param max_builds: maximum number of simultaneous builds that will\n be run concurrently on this buildslave (the\n default is None for no limit)\n @param properties: properties that will be applied to builds run on\n this slave\n @type properties: dictionary\n @param locks: A list of locks that must be acquired before this slave\n can be used\n @type locks: dictionary\n \"\"\"\n service.MultiService.__init__(self)\n self.slavename = name\n self.password = password\n\n # PB registration\n self.registration = None\n self.registered_port = None\n\n # these are set when the service is started, and unset when it is\n # stopped\n self.botmaster = None\n self.master = None\n\n self.slave_status = SlaveStatus(name)\n self.slave = None # a RemoteReference to the Bot, when connected\n self.slave_commands = None\n self.slavebuilders = {}\n self.max_builds = max_builds\n self.access = []\n if locks:\n self.access = locks\n self.lock_subscriptions = []\n\n self.properties = Properties()\n self.properties.update(properties, \"BuildSlave\")\n self.properties.setProperty(\"slavename\", name, \"BuildSlave\")\n\n self.lastMessageReceived = 0\n if isinstance(notify_on_missing, str):\n notify_on_missing = [notify_on_missing]\n self.notify_on_missing = notify_on_missing\n for i in notify_on_missing:\n if not isinstance(i, str):\n config.error(\n 'notify_on_missing arg %r is not a string' % (i,))\n self.missing_timeout = missing_timeout\n self.missing_timer = None\n self.keepalive_interval = keepalive_interval\n\n self.detached_subs = None\n\n self._old_builder_list = None\n\n def __repr__(self):\n return \"<%s %r>\" % (self.__class__.__name__, self.slavename)\n\n def updateLocks(self):\n \"\"\"Convert the L{LockAccess} objects in C{self.locks} into real lock\n objects, while also maintaining the subscriptions to lock releases.\"\"\"\n # unsubscribe from any old locks\n for s in self.lock_subscriptions:\n s.unsubscribe()\n\n # convert locks into their real form\n locks = []\n for access in self.access:\n if not isinstance(access, LockAccess):\n access = access.defaultAccess()\n lock = self.botmaster.getLockByID(access.lockid)\n locks.append((lock, access))\n self.locks = [(l.getLock(self), la) for l, la in locks]\n self.lock_subscriptions = [ l.subscribeToReleases(self._lockReleased)\n for l, la in self.locks ]\n\n def locksAvailable(self):\n \"\"\"\n I am called to see if all the locks I depend on are available,\n in which I return True, otherwise I return False\n \"\"\"\n if not self.locks:\n return True\n for lock, access in self.locks:\n if not lock.isAvailable(access):\n return False\n return True\n\n def acquireLocks(self):\n \"\"\"\n I am called when a build is preparing to run. I try to claim all\n the locks that are needed for a build to happen. 
If I can't, then\n my caller should give up the build and try to get another slave\n to look at it.\n \"\"\"\n log.msg(\"acquireLocks(slave %s, locks %s)\" % (self, self.locks))\n if not self.locksAvailable():\n log.msg(\"slave %s can't lock, giving up\" % (self, ))\n return False\n # all locks are available, claim them all\n for lock, access in self.locks:\n lock.claim(self, access)\n return True\n\n def releaseLocks(self):\n \"\"\"\n I am called to release any locks after a build has finished\n \"\"\"\n log.msg(\"releaseLocks(%s): %s\" % (self, self.locks))\n for lock, access in self.locks:\n lock.release(self, access)\n\n def _lockReleased(self):\n \"\"\"One of the locks for this slave was released; try scheduling\n builds.\"\"\"\n if not self.botmaster:\n return # oh well..\n self.botmaster.maybeStartBuildsForSlave(self.slavename)\n\n def startService(self):\n self.updateLocks()\n self.startMissingTimer()\n return service.MultiService.startService(self)\n\n def reconfigService(self, new_config):\n # Given a new BuildSlave, configure this one identically. Because\n # BuildSlave objects are remotely referenced, we can't replace them\n # without disconnecting the slave, yet there's no reason to do that.\n new = self.findNewSlaveInstance(new_config)\n\n assert self.slavename == new.slavename\n\n # do we need to re-register?\n if (not self.registration or\n self.password != new.password or\n new_config.slavePortnum != self.registered_port):\n if self.registration:\n self.registration.unregister()\n self.password = new.password\n self.registered_port = new_config.slavePortnum\n self.registration = self.master.pbmanager.register(\n self.registered_port, self.slavename,\n self.password, self.getPerspective)\n\n # adopt new instance's configuration parameters\n self.max_builds = new.max_builds\n self.access = new.access\n self.notify_on_missing = new.notify_on_missing\n self.keepalive_interval = new.keepalive_interval\n\n if self.missing_timeout != new.missing_timeout:\n running_missing_timer = self.missing_timer\n self.stopMissingTimer()\n self.missing_timeout = new.missing_timeout\n if running_missing_timer:\n self.startMissingTimer()\n\n properties = Properties()\n properties.updateFromProperties(new.properties)\n self.properties = properties\n\n self.updateLocks()\n\n # update the attached slave's notion of which builders are attached.\n # This assumes that the relevant builders have already been configured,\n # which is why the reconfig_priority is set low in this class.\n d = self.updateSlave()\n\n # and chain up\n d.addCallback(lambda _ :\n config.ReconfigurableServiceMixin.reconfigService(self,\n new_config))\n\n return d\n\n def stopService(self):\n self.stopMissingTimer()\n return service.MultiService.stopService(self)\n\n def findNewSlaveInstance(self, new_config):\n # TODO: called multiple times per reconfig; use 1-element cache?\n for sl in new_config.slaves:\n if sl.slavename == self.slavename:\n return sl\n assert 0, \"no new slave named '%s'\" % self.slavename\n\n def startMissingTimer(self):\n if self.notify_on_missing and self.missing_timeout and self.parent:\n self.stopMissingTimer() # in case it's already running\n self.missing_timer = reactor.callLater(self.missing_timeout,\n self._missing_timer_fired)\n\n def stopMissingTimer(self):\n if self.missing_timer:\n self.missing_timer.cancel()\n self.missing_timer = None\n\n def getPerspective(self, mind, slavename):\n assert slavename == self.slavename\n metrics.MetricCountEvent.log(\"attached_slaves\", 1)\n\n # record when this 
connection attempt occurred\n if self.slave_status:\n self.slave_status.recordConnectTime()\n\n\n if self.isConnected():\n # duplicate slave - send it to arbitration\n arb = botmaster.DuplicateSlaveArbitrator(self)\n return arb.getPerspective(mind, slavename)\n else:\n log.msg(\"slave '%s' attaching from %s\" % (slavename, mind.broker.transport.getPeer()))\n return self\n\n def doKeepalive(self):\n self.keepalive_timer = reactor.callLater(self.keepalive_interval,\n self.doKeepalive)\n if not self.slave:\n return\n d = self.slave.callRemote(\"print\", \"Received keepalive from master\")\n d.addErrback(log.msg, \"Keepalive failed for '%s'\" % (self.slavename, ))\n\n def stopKeepaliveTimer(self):\n if self.keepalive_timer:\n self.keepalive_timer.cancel()\n\n def startKeepaliveTimer(self):\n assert self.keepalive_interval\n log.msg(\"Starting buildslave keepalive timer for '%s'\" % \\\n (self.slavename, ))\n self.doKeepalive()\n\n def isConnected(self):\n return self.slave\n\n def _missing_timer_fired(self):\n self.missing_timer = None\n # notify people, but only if we're still in the config\n if not self.parent:\n return\n\n buildmaster = self.botmaster.master\n status = buildmaster.getStatus()\n text = \"The Buildbot working for '%s'\\n\" % status.getTitle()\n text += (\"has noticed that the buildslave named %s went away\\n\" %\n self.slavename)\n text += \"\\n\"\n text += (\"It last disconnected at %s (buildmaster-local time)\\n\" %\n time.ctime(time.time() - self.missing_timeout)) # approx\n text += \"\\n\"\n text += \"The admin on record (as reported by BUILDSLAVE:info/admin)\\n\"\n text += \"was '%s'.\\n\" % self.slave_status.getAdmin()\n text += \"\\n\"\n text += \"Sincerely,\\n\"\n text += \" The Buildbot\\n\"\n text += \" %s\\n\" % status.getTitleURL()\n subject = \"Buildbot: buildslave %s was lost\" % self.slavename\n return self._mail_missing_message(subject, text)\n\n\n def updateSlave(self):\n \"\"\"Called to add or remove builders after the slave has connected.\n\n @return: a Deferred that indicates when an attached slave has\n accepted the new builders and/or released the old ones.\"\"\"\n if self.slave:\n return self.sendBuilderList()\n else:\n return defer.succeed(None)\n\n def updateSlaveStatus(self, buildStarted=None, buildFinished=None):\n if buildStarted:\n self.slave_status.buildStarted(buildStarted)\n if buildFinished:\n self.slave_status.buildFinished(buildFinished)\n\n @metrics.countMethod('AbstractBuildSlave.attached()')\n def attached(self, bot):\n \"\"\"This is called when the slave connects.\n\n @return: a Deferred that fires when the attachment is complete\n \"\"\"\n\n # the botmaster should ensure this.\n assert not self.isConnected()\n\n metrics.MetricCountEvent.log(\"AbstractBuildSlave.attached_slaves\", 1)\n\n # set up the subscription point for eventual detachment\n self.detached_subs = subscription.SubscriptionPoint(\"detached\")\n\n # now we go through a sequence of calls, gathering information, then\n # tell the Botmaster that it can finally give this slave to all the\n # Builders that care about it.\n\n # we accumulate slave information in this 'state' dictionary, then\n # set it atomically if we make it far enough through the process\n state = {}\n\n # Reset graceful shutdown status\n self.slave_status.setGraceful(False)\n # We want to know when the graceful shutdown flag changes\n self.slave_status.addGracefulWatcher(self._gracefulChanged)\n\n d = defer.succeed(None)\n def _log_attachment_on_slave(res):\n d1 = bot.callRemote(\"print\", \"attached\")\n 
d1.addErrback(lambda why: None)\n return d1\n d.addCallback(_log_attachment_on_slave)\n\n def _get_info(res):\n d1 = bot.callRemote(\"getSlaveInfo\")\n def _got_info(info):\n log.msg(\"Got slaveinfo from '%s'\" % self.slavename)\n # TODO: info{} might have other keys\n state[\"admin\"] = info.get(\"admin\")\n state[\"host\"] = info.get(\"host\")\n state[\"access_uri\"] = info.get(\"access_uri\", None)\n state[\"slave_environ\"] = info.get(\"environ\", {})\n state[\"slave_basedir\"] = info.get(\"basedir\", None)\n state[\"slave_system\"] = info.get(\"system\", None)\n def _info_unavailable(why):\n why.trap(pb.NoSuchMethod)\n # maybe an old slave, doesn't implement remote_getSlaveInfo\n log.msg(\"BuildSlave.info_unavailable\")\n log.err(why)\n d1.addCallbacks(_got_info, _info_unavailable)\n return d1\n d.addCallback(_get_info)\n self.startKeepaliveTimer()\n\n def _get_version(res):\n d = bot.callRemote(\"getVersion\")\n def _got_version(version):\n state[\"version\"] = version\n def _version_unavailable(why):\n why.trap(pb.NoSuchMethod)\n # probably an old slave\n state[\"version\"] = '(unknown)'\n d.addCallbacks(_got_version, _version_unavailable)\n return d\n d.addCallback(_get_version)\n\n def _get_commands(res):\n d1 = bot.callRemote(\"getCommands\")\n def _got_commands(commands):\n state[\"slave_commands\"] = commands\n def _commands_unavailable(why):\n # probably an old slave\n log.msg(\"BuildSlave._commands_unavailable\")\n if why.check(AttributeError):\n return\n log.err(why)\n d1.addCallbacks(_got_commands, _commands_unavailable)\n return d1\n d.addCallback(_get_commands)\n\n def _accept_slave(res):\n self.slave_status.setAdmin(state.get(\"admin\"))\n self.slave_status.setHost(state.get(\"host\"))\n self.slave_status.setAccessURI(state.get(\"access_uri\"))\n self.slave_status.setVersion(state.get(\"version\"))\n self.slave_status.setConnected(True)\n self.slave_commands = state.get(\"slave_commands\")\n self.slave_environ = state.get(\"slave_environ\")\n self.slave_basedir = state.get(\"slave_basedir\")\n self.slave_system = state.get(\"slave_system\")\n self.slave = bot\n if self.slave_system == \"win32\":\n self.path_module = namedModule(\"win32path\")\n else:\n # most eveything accepts / as separator, so posix should be a\n # reasonable fallback\n self.path_module = namedModule(\"posixpath\")\n log.msg(\"bot attached\")\n self.messageReceivedFromSlave()\n self.stopMissingTimer()\n self.botmaster.master.status.slaveConnected(self.slavename)\n\n return self.updateSlave()\n d.addCallback(_accept_slave)\n d.addCallback(lambda _:\n self.botmaster.maybeStartBuildsForSlave(self.slavename))\n\n # Finally, the slave gets a reference to this BuildSlave. 
They\n # receive this later, after we've started using them.\n d.addCallback(lambda _: self)\n return d\n\n def messageReceivedFromSlave(self):\n now = time.time()\n self.lastMessageReceived = now\n self.slave_status.setLastMessageReceived(now)\n\n def detached(self, mind):\n metrics.MetricCountEvent.log(\"AbstractBuildSlave.attached_slaves\", -1)\n self.slave = None\n self._old_builder_list = []\n self.slave_status.removeGracefulWatcher(self._gracefulChanged)\n self.slave_status.setConnected(False)\n log.msg(\"BuildSlave.detached(%s)\" % self.slavename)\n self.botmaster.master.status.slaveDisconnected(self.slavename)\n self.stopKeepaliveTimer()\n self.releaseLocks()\n\n # notify watchers, but do so in the next reactor iteration so that\n # any further detached() action by subclasses happens first\n def notif():\n subs = self.detached_subs\n self.detached_subs = None\n subs.deliver()\n reactor.callLater(0, notif)\n\n def subscribeToDetach(self, callback):\n \"\"\"\n Request that C{callable} be invoked with no arguments when the\n L{detached} method is invoked.\n\n @returns: L{Subscription}\n \"\"\"\n assert self.detached_subs, \"detached_subs is only set if attached\"\n return self.detached_subs.subscribe(callback)\n\n def disconnect(self):\n \"\"\"Forcibly disconnect the slave.\n\n This severs the TCP connection and returns a Deferred that will fire\n (with None) when the connection is probably gone.\n\n If the slave is still alive, they will probably try to reconnect\n again in a moment.\n\n This is called in two circumstances. The first is when a slave is\n removed from the config file. In this case, when they try to\n reconnect, they will be rejected as an unknown slave. The second is\n when we wind up with two connections for the same slave, in which\n case we disconnect the older connection.\n \"\"\"\n\n if not self.slave:\n return defer.succeed(None)\n log.msg(\"disconnecting old slave %s now\" % self.slavename)\n # When this Deferred fires, we'll be ready to accept the new slave\n return self._disconnect(self.slave)\n\n def _disconnect(self, slave):\n # all kinds of teardown will happen as a result of\n # loseConnection(), but it happens after a reactor iteration or\n # two. Hook the actual disconnect so we can know when it is safe\n # to connect the new slave. We have to wait one additional\n # iteration (with callLater(0)) to make sure the *other*\n # notifyOnDisconnect handlers have had a chance to run.\n d = defer.Deferred()\n\n # notifyOnDisconnect runs the callback with one argument, the\n # RemoteReference being disconnected.\n def _disconnected(rref):\n reactor.callLater(0, d.callback, None)\n slave.notifyOnDisconnect(_disconnected)\n tport = slave.broker.transport\n # this is the polite way to request that a socket be closed\n tport.loseConnection()\n try:\n # but really we don't want to wait for the transmit queue to\n # drain. The remote end is unlikely to ACK the data, so we'd\n # probably have to wait for a (20-minute) TCP timeout.\n #tport._closeSocket()\n # however, doing _closeSocket (whether before or after\n # loseConnection) somehow prevents the notifyOnDisconnect\n # handlers from being run. 
Bummer.\n tport.offset = 0\n tport.dataBuffer = \"\"\n except:\n # however, these hacks are pretty internal, so don't blow up if\n # they fail or are unavailable\n log.msg(\"failed to accelerate the shutdown process\")\n log.msg(\"waiting for slave to finish disconnecting\")\n\n return d\n\n def sendBuilderList(self):\n our_builders = self.botmaster.getBuildersForSlave(self.slavename)\n blist = [(b.name, b.config.slavebuilddir) for b in our_builders]\n if blist == self._old_builder_list:\n return defer.succeed(None)\n\n d = self.slave.callRemote(\"setBuilderList\", blist)\n def sentBuilderList(ign):\n self._old_builder_list = blist\n return ign\n d.addCallback(sentBuilderList)\n return d\n\n def perspective_keepalive(self):\n self.messageReceivedFromSlave()\n\n def perspective_shutdown(self):\n log.msg(\"slave %s wants to shut down\" % self.slavename)\n self.slave_status.setGraceful(True)\n\n def addSlaveBuilder(self, sb):\n self.slavebuilders[sb.builder_name] = sb\n\n def removeSlaveBuilder(self, sb):\n try:\n del self.slavebuilders[sb.builder_name]\n except KeyError:\n pass\n\n def buildFinished(self, sb):\n \"\"\"This is called when a build on this slave is finished.\"\"\"\n self.botmaster.maybeStartBuildsForSlave(self.slavename)\n\n def canStartBuild(self):\n \"\"\"\n I am called when a build is requested to see if this buildslave\n can start a build. This function can be used to limit overall\n concurrency on the buildslave.\n\n Note for subclassers: if a slave can become willing to start a build\n without any action on that slave (for example, by a resource in use on\n another slave becoming available), then you must arrange for\n L{maybeStartBuildsForSlave} to be called at that time, or builds on\n this slave will not start.\n \"\"\"\n # If we're waiting to shutdown gracefully, then we shouldn't\n # accept any new jobs.\n if self.slave_status.getGraceful():\n return False\n\n if self.max_builds:\n active_builders = [sb for sb in self.slavebuilders.values()\n if sb.isBusy()]\n if len(active_builders) >= self.max_builds:\n return False\n\n if not self.locksAvailable():\n return False\n\n return True\n\n def _mail_missing_message(self, subject, text):\n # first, see if we have a MailNotifier we can use. This gives us a\n # fromaddr and a relayhost.\n buildmaster = self.botmaster.master\n for st in buildmaster.statusTargets:\n if isinstance(st, MailNotifier):\n break\n else:\n # if not, they get a default MailNotifier, which always uses SMTP\n # to localhost and uses a dummy fromaddr of \"buildbot\".\n log.msg(\"buildslave-missing msg using default MailNotifier\")\n st = MailNotifier(\"buildbot\")\n # now construct the mail\n\n m = Message()\n m.set_payload(text)\n m['Date'] = formatdate(localtime=True)\n m['Subject'] = subject\n m['From'] = st.fromaddr\n recipients = self.notify_on_missing\n m['To'] = \", \".join(recipients)\n d = st.sendMessage(m, recipients)\n # return the Deferred for testing purposes\n return d\n\n def _gracefulChanged(self, graceful):\n \"\"\"This is called when our graceful shutdown setting changes\"\"\"\n self.maybeShutdown()\n\n @defer.deferredGenerator\n def shutdown(self):\n \"\"\"Shutdown the slave\"\"\"\n if not self.slave:\n log.msg(\"no remote; slave is already shut down\")\n return\n\n # First, try the \"new\" way - calling our own remote's shutdown\n # method. 
The method was only added in 0.8.3, so ignore NoSuchMethod\n # failures.\n def new_way():\n d = self.slave.callRemote('shutdown')\n d.addCallback(lambda _ : True) # successful shutdown request\n def check_nsm(f):\n f.trap(pb.NoSuchMethod)\n return False # fall through to the old way\n d.addErrback(check_nsm)\n def check_connlost(f):\n f.trap(pb.PBConnectionLost)\n return True # the slave is gone, so call it finished\n d.addErrback(check_connlost)\n return d\n\n wfd = defer.waitForDeferred(new_way())\n yield wfd\n if wfd.getResult():\n return # done!\n\n # Now, the old way. Look for a builder with a remote reference to the\n # client side slave. If we can find one, then call \"shutdown\" on the\n # remote builder, which will cause the slave buildbot process to exit.\n def old_way():\n d = None\n for b in self.slavebuilders.values():\n if b.remote:\n d = b.remote.callRemote(\"shutdown\")\n break\n\n if d:\n log.msg(\"Shutting down (old) slave: %s\" % self.slavename)\n # The remote shutdown call will not complete successfully since the\n # buildbot process exits almost immediately after getting the\n # shutdown request.\n # Here we look at the reason why the remote call failed, and if\n # it's because the connection was lost, that means the slave\n # shutdown as expected.\n def _errback(why):\n if why.check(pb.PBConnectionLost):\n log.msg(\"Lost connection to %s\" % self.slavename)\n else:\n log.err(\"Unexpected error when trying to shutdown %s\" % self.slavename)\n d.addErrback(_errback)\n return d\n log.err(\"Couldn't find remote builder to shut down slave\")\n return defer.succeed(None)\n wfd = defer.waitForDeferred(old_way())\n yield wfd\n wfd.getResult()\n\n def maybeShutdown(self):\n \"\"\"Shut down this slave if it has been asked to shut down gracefully,\n and has no active builders.\"\"\"\n if not self.slave_status.getGraceful():\n return\n active_builders = [sb for sb in self.slavebuilders.values()\n if sb.isBusy()]\n if active_builders:\n return\n d = self.shutdown()\n d.addErrback(log.err, 'error while shutting down slave')\n\nclass BuildSlave(AbstractBuildSlave):\n\n def sendBuilderList(self):\n d = AbstractBuildSlave.sendBuilderList(self)\n def _sent(slist):\n # Nothing has changed, so don't need to re-attach to everything\n if not slist:\n return\n dl = []\n for name, remote in slist.items():\n # use get() since we might have changed our mind since then\n b = self.botmaster.builders.get(name)\n if b:\n d1 = b.attached(self, remote, self.slave_commands)\n dl.append(d1)\n return defer.DeferredList(dl)\n def _set_failed(why):\n log.msg(\"BuildSlave.sendBuilderList (%s) failed\" % self)\n log.err(why)\n # TODO: hang up on them?, without setBuilderList we can't use\n # them\n d.addCallbacks(_sent, _set_failed)\n return d\n\n def detached(self, mind):\n AbstractBuildSlave.detached(self, mind)\n self.botmaster.slaveLost(self)\n self.startMissingTimer()\n\n def buildFinished(self, sb):\n \"\"\"This is called when a build on this slave is finished.\"\"\"\n AbstractBuildSlave.buildFinished(self, sb)\n\n # If we're gracefully shutting down, and we have no more active\n # builders, then it's safe to disconnect\n self.maybeShutdown()\n\nclass AbstractLatentBuildSlave(AbstractBuildSlave):\n \"\"\"A build slave that will start up a slave instance when needed.\n\n To use, subclass and implement start_instance and stop_instance.\n\n See ec2buildslave.py for a concrete example. 
Also see the stub example in\n test/test_slaves.py.\n \"\"\"\n\n implements(ILatentBuildSlave)\n\n substantiated = False\n substantiation_deferred = None\n substantiation_build = None\n build_wait_timer = None\n _shutdown_callback_handle = None\n\n def __init__(self, name, password, max_builds=None,\n notify_on_missing=[], missing_timeout=60*20,\n build_wait_timeout=60*10,\n properties={}, locks=None):\n AbstractBuildSlave.__init__(\n self, name, password, max_builds, notify_on_missing,\n missing_timeout, properties, locks)\n self.building = set()\n self.build_wait_timeout = build_wait_timeout\n\n def start_instance(self, build):\n # responsible for starting instance that will try to connect with this\n # master. Should return deferred with either True (instance started)\n # or False (instance not started, so don't run a build here). Problems\n # should use an errback.\n raise NotImplementedError\n\n def stop_instance(self, fast=False):\n # responsible for shutting down instance.\n raise NotImplementedError\n\n def substantiate(self, sb, build):\n if self.substantiated:\n self._clearBuildWaitTimer()\n self._setBuildWaitTimer()\n return defer.succeed(True)\n if self.substantiation_deferred is None:\n if self.parent and not self.missing_timer:\n # start timer. if timer times out, fail deferred\n self.missing_timer = reactor.callLater(\n self.missing_timeout,\n self._substantiation_failed, defer.TimeoutError())\n self.substantiation_deferred = defer.Deferred()\n self.substantiation_build = build\n if self.slave is None:\n d = self._substantiate(build) # start up instance\n d.addErrback(log.err, \"while substantiating\")\n # else: we're waiting for an old one to detach. the _substantiate\n # will be done in ``detached`` below.\n return self.substantiation_deferred\n\n def _substantiate(self, build):\n # register event trigger\n d = self.start_instance(build)\n self._shutdown_callback_handle = reactor.addSystemEventTrigger(\n 'before', 'shutdown', self._soft_disconnect, fast=True)\n def start_instance_result(result):\n # If we don't report success, then preparation failed.\n if not result:\n log.msg(\"Slave '%s' doesn not want to substantiate at this time\" % (self.slavename,))\n d = self.substantiation_deferred\n self.substantiation_deferred = None\n d.callback(False)\n return result\n def clean_up(failure):\n if self.missing_timer is not None:\n self.missing_timer.cancel()\n self._substantiation_failed(failure)\n if self._shutdown_callback_handle is not None:\n handle = self._shutdown_callback_handle\n del self._shutdown_callback_handle\n reactor.removeSystemEventTrigger(handle)\n return failure\n d.addCallbacks(start_instance_result, clean_up)\n return d\n\n def attached(self, bot):\n if self.substantiation_deferred is None:\n msg = 'Slave %s received connection while not trying to ' \\\n 'substantiate. Disconnecting.' 
% (self.slavename,)\n log.msg(msg)\n self._disconnect(bot)\n return defer.fail(RuntimeError(msg))\n return AbstractBuildSlave.attached(self, bot)\n\n def detached(self, mind):\n AbstractBuildSlave.detached(self, mind)\n if self.substantiation_deferred is not None:\n d = self._substantiate(self.substantiation_build)\n d.addErrback(log.err, 'while re-substantiating')\n\n def _substantiation_failed(self, failure):\n self.missing_timer = None\n if self.substantiation_deferred:\n d = self.substantiation_deferred\n self.substantiation_deferred = None\n self.substantiation_build = None\n d.errback(failure)\n self.insubstantiate()\n # notify people, but only if we're still in the config\n if not self.parent or not self.notify_on_missing:\n return\n\n buildmaster = self.botmaster.master\n status = buildmaster.getStatus()\n text = \"The Buildbot working for '%s'\\n\" % status.getTitle()\n text += (\"has noticed that the latent buildslave named %s \\n\" %\n self.slavename)\n text += \"never substantiated after a request\\n\"\n text += \"\\n\"\n text += (\"The request was made at %s (buildmaster-local time)\\n\" %\n time.ctime(time.time() - self.missing_timeout)) # approx\n text += \"\\n\"\n text += \"Sincerely,\\n\"\n text += \" The Buildbot\\n\"\n text += \" %s\\n\" % status.getTitleURL()\n subject = \"Buildbot: buildslave %s never substantiated\" % self.slavename\n return self._mail_missing_message(subject, text)\n\n def buildStarted(self, sb):\n assert self.substantiated\n self._clearBuildWaitTimer()\n self.building.add(sb.builder_name)\n\n def buildFinished(self, sb):\n AbstractBuildSlave.buildFinished(self, sb)\n\n self.building.remove(sb.builder_name)\n if not self.building:\n self._setBuildWaitTimer()\n\n def _clearBuildWaitTimer(self):\n if self.build_wait_timer is not None:\n if self.build_wait_timer.active():\n self.build_wait_timer.cancel()\n self.build_wait_timer = None\n\n def _setBuildWaitTimer(self):\n self._clearBuildWaitTimer()\n self.build_wait_timer = reactor.callLater(\n self.build_wait_timeout, self._soft_disconnect)\n\n def insubstantiate(self, fast=False):\n self._clearBuildWaitTimer()\n d = self.stop_instance(fast)\n if self._shutdown_callback_handle is not None:\n handle = self._shutdown_callback_handle\n del self._shutdown_callback_handle\n reactor.removeSystemEventTrigger(handle)\n self.substantiated = False\n self.building.clear() # just to be sure\n return d\n\n def _soft_disconnect(self, fast=False):\n d = AbstractBuildSlave.disconnect(self)\n if self.slave is not None:\n # this could be called when the slave needs to shut down, such as\n # in BotMaster.removeSlave, *or* when a new slave requests a\n # connection when we already have a slave. It's not clear what to\n # do in the second case: this shouldn't happen, and if it\n # does...if it's a latent slave, shutting down will probably kill\n # something we want...but we can't know what the status is. So,\n # here, we just do what should be appropriate for the first case,\n # and put our heads in the sand for the second, at least for now.\n # The best solution to the odd situation is removing it as a\n # possibilty: make the master in charge of connecting to the\n # slave, rather than vice versa. 
TODO.\n d = defer.DeferredList([d, self.insubstantiate(fast)])\n else:\n if self.substantiation_deferred is not None:\n # unlike the previous block, we don't expect this situation when\n # ``attached`` calls ``disconnect``, only when we get a simple\n # request to \"go away\".\n d = self.substantiation_deferred\n self.substantiation_deferred = None\n self.substantiation_build = None\n d.errback(failure.Failure(\n RuntimeError(\"soft disconnect aborted substantiation\")))\n if self.missing_timer:\n self.missing_timer.cancel()\n self.missing_timer = None\n self.stop_instance()\n return d\n\n def disconnect(self):\n # This returns a Deferred but we don't use it\n self._soft_disconnect() \n # this removes the slave from all builders. It won't come back\n # without a restart (or maybe a sighup)\n self.botmaster.slaveLost(self)\n\n def stopService(self):\n res = defer.maybeDeferred(AbstractBuildSlave.stopService, self)\n if self.slave is not None:\n d = self._soft_disconnect()\n res = defer.DeferredList([res, d])\n return res\n\n def updateSlave(self):\n \"\"\"Called to add or remove builders after the slave has connected.\n\n Also called after botmaster's builders are initially set.\n\n @return: a Deferred that indicates when an attached slave has\n accepted the new builders and/or released the old ones.\"\"\"\n for b in self.botmaster.getBuildersForSlave(self.slavename):\n if b.name not in self.slavebuilders:\n b.addLatentSlave(self)\n return AbstractBuildSlave.updateSlave(self)\n\n def sendBuilderList(self):\n d = AbstractBuildSlave.sendBuilderList(self)\n def _sent(slist):\n if not slist:\n return\n dl = []\n for name, remote in slist.items():\n # use get() since we might have changed our mind since then.\n # we're checking on the builder in addition to the\n # slavebuilders out of a bit of paranoia.\n b = self.botmaster.builders.get(name)\n sb = self.slavebuilders.get(name)\n if b and sb:\n d1 = sb.attached(self, remote, self.slave_commands)\n dl.append(d1)\n return defer.DeferredList(dl)\n def _set_failed(why):\n log.msg(\"BuildSlave.sendBuilderList (%s) failed\" % self)\n log.err(why)\n # TODO: hang up on them?, without setBuilderList we can't use\n # them\n if self.substantiation_deferred:\n d = self.substantiation_deferred\n self.substantiation_deferred = None\n self.substantiation_build = None\n d.errback(why)\n if self.missing_timer:\n self.missing_timer.cancel()\n self.missing_timer = None\n # TODO: maybe log? send an email?\n return why\n d.addCallbacks(_sent, _set_failed)\n def _substantiated(res):\n log.msg(\"Slave %s substantiated \\o/\" % self.slavename)\n self.substantiated = True\n if not self.substantiation_deferred:\n log.msg(\"No substantiation deferred for %s\" % self.slavename)\n if self.substantiation_deferred:\n log.msg(\"Firing %s substantiation deferred with success\" % self.slavename)\n d = self.substantiation_deferred\n self.substantiation_deferred = None\n self.substantiation_build = None\n d.callback(True)\n # note that the missing_timer is already handled within\n # ``attached``\n if not self.building:\n self._setBuildWaitTimer()\n d.addCallback(_substantiated)\n return d\n",
"path": "master/buildbot/buildslave.py"
}
] | [
{
"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Portions Copyright Buildbot Team Members\n# Portions Copyright Canonical Ltd. 2009\n\nimport time\nfrom email.Message import Message\nfrom email.Utils import formatdate\nfrom zope.interface import implements\nfrom twisted.python import log, failure\nfrom twisted.internet import defer, reactor\nfrom twisted.application import service\nfrom twisted.spread import pb\nfrom twisted.python.reflect import namedModule\n\nfrom buildbot.status.slave import SlaveStatus\nfrom buildbot.status.mail import MailNotifier\nfrom buildbot.process import metrics, botmaster\nfrom buildbot.interfaces import IBuildSlave, ILatentBuildSlave\nfrom buildbot.process.properties import Properties\nfrom buildbot.locks import LockAccess\nfrom buildbot.util import subscription\nfrom buildbot import config\n\nclass AbstractBuildSlave(config.ReconfigurableServiceMixin, pb.Avatar,\n service.MultiService):\n \"\"\"This is the master-side representative for a remote buildbot slave.\n There is exactly one for each slave described in the config file (the\n c['slaves'] list). When buildbots connect in (.attach), they get a\n reference to this instance. The BotMaster object is stashed as the\n .botmaster attribute. The BotMaster is also our '.parent' Service.\n\n I represent a build slave -- a remote machine capable of\n running builds. 
I am instantiated by the configuration file, and can be\n subclassed to add extra functionality.\"\"\"\n\n implements(IBuildSlave)\n keepalive_timer = None\n keepalive_interval = None\n\n # reconfig slaves after builders\n reconfig_priority = 64\n\n def __init__(self, name, password, max_builds=None,\n notify_on_missing=[], missing_timeout=3600,\n properties={}, locks=None, keepalive_interval=3600):\n \"\"\"\n @param name: botname this machine will supply when it connects\n @param password: password this machine will supply when\n it connects\n @param max_builds: maximum number of simultaneous builds that will\n be run concurrently on this buildslave (the\n default is None for no limit)\n @param properties: properties that will be applied to builds run on\n this slave\n @type properties: dictionary\n @param locks: A list of locks that must be acquired before this slave\n can be used\n @type locks: dictionary\n \"\"\"\n service.MultiService.__init__(self)\n self.slavename = name\n self.password = password\n\n # PB registration\n self.registration = None\n self.registered_port = None\n\n # these are set when the service is started, and unset when it is\n # stopped\n self.botmaster = None\n self.master = None\n\n self.slave_status = SlaveStatus(name)\n self.slave = None # a RemoteReference to the Bot, when connected\n self.slave_commands = None\n self.slavebuilders = {}\n self.max_builds = max_builds\n self.access = []\n if locks:\n self.access = locks\n self.lock_subscriptions = []\n\n self.properties = Properties()\n self.properties.update(properties, \"BuildSlave\")\n self.properties.setProperty(\"slavename\", name, \"BuildSlave\")\n\n self.lastMessageReceived = 0\n if isinstance(notify_on_missing, str):\n notify_on_missing = [notify_on_missing]\n self.notify_on_missing = notify_on_missing\n for i in notify_on_missing:\n if not isinstance(i, str):\n config.error(\n 'notify_on_missing arg %r is not a string' % (i,))\n self.missing_timeout = missing_timeout\n self.missing_timer = None\n self.keepalive_interval = keepalive_interval\n\n self.detached_subs = None\n\n self._old_builder_list = None\n\n def __repr__(self):\n return \"<%s %r>\" % (self.__class__.__name__, self.slavename)\n\n def updateLocks(self):\n \"\"\"Convert the L{LockAccess} objects in C{self.locks} into real lock\n objects, while also maintaining the subscriptions to lock releases.\"\"\"\n # unsubscribe from any old locks\n for s in self.lock_subscriptions:\n s.unsubscribe()\n\n # convert locks into their real form\n locks = []\n for access in self.access:\n if not isinstance(access, LockAccess):\n access = access.defaultAccess()\n lock = self.botmaster.getLockByID(access.lockid)\n locks.append((lock, access))\n self.locks = [(l.getLock(self), la) for l, la in locks]\n self.lock_subscriptions = [ l.subscribeToReleases(self._lockReleased)\n for l, la in self.locks ]\n\n def locksAvailable(self):\n \"\"\"\n I am called to see if all the locks I depend on are available,\n in which I return True, otherwise I return False\n \"\"\"\n if not self.locks:\n return True\n for lock, access in self.locks:\n if not lock.isAvailable(access):\n return False\n return True\n\n def acquireLocks(self):\n \"\"\"\n I am called when a build is preparing to run. I try to claim all\n the locks that are needed for a build to happen. 
If I can't, then\n my caller should give up the build and try to get another slave\n to look at it.\n \"\"\"\n log.msg(\"acquireLocks(slave %s, locks %s)\" % (self, self.locks))\n if not self.locksAvailable():\n log.msg(\"slave %s can't lock, giving up\" % (self, ))\n return False\n # all locks are available, claim them all\n for lock, access in self.locks:\n lock.claim(self, access)\n return True\n\n def releaseLocks(self):\n \"\"\"\n I am called to release any locks after a build has finished\n \"\"\"\n log.msg(\"releaseLocks(%s): %s\" % (self, self.locks))\n for lock, access in self.locks:\n lock.release(self, access)\n\n def _lockReleased(self):\n \"\"\"One of the locks for this slave was released; try scheduling\n builds.\"\"\"\n if not self.botmaster:\n return # oh well..\n self.botmaster.maybeStartBuildsForSlave(self.slavename)\n\n def startService(self):\n self.updateLocks()\n self.startMissingTimer()\n return service.MultiService.startService(self)\n\n def reconfigService(self, new_config):\n # Given a new BuildSlave, configure this one identically. Because\n # BuildSlave objects are remotely referenced, we can't replace them\n # without disconnecting the slave, yet there's no reason to do that.\n new = self.findNewSlaveInstance(new_config)\n\n assert self.slavename == new.slavename\n\n # do we need to re-register?\n if (not self.registration or\n self.password != new.password or\n new_config.slavePortnum != self.registered_port):\n if self.registration:\n self.registration.unregister()\n self.password = new.password\n self.registered_port = new_config.slavePortnum\n self.registration = self.master.pbmanager.register(\n self.registered_port, self.slavename,\n self.password, self.getPerspective)\n\n # adopt new instance's configuration parameters\n self.max_builds = new.max_builds\n self.access = new.access\n self.notify_on_missing = new.notify_on_missing\n self.keepalive_interval = new.keepalive_interval\n\n if self.missing_timeout != new.missing_timeout:\n running_missing_timer = self.missing_timer\n self.stopMissingTimer()\n self.missing_timeout = new.missing_timeout\n if running_missing_timer:\n self.startMissingTimer()\n\n properties = Properties()\n properties.updateFromProperties(new.properties)\n self.properties = properties\n\n self.updateLocks()\n\n # update the attached slave's notion of which builders are attached.\n # This assumes that the relevant builders have already been configured,\n # which is why the reconfig_priority is set low in this class.\n d = self.updateSlave()\n\n # and chain up\n d.addCallback(lambda _ :\n config.ReconfigurableServiceMixin.reconfigService(self,\n new_config))\n\n return d\n\n def stopService(self):\n if self.registration:\n self.registration.unregister()\n self.stopMissingTimer()\n return service.MultiService.stopService(self)\n\n def findNewSlaveInstance(self, new_config):\n # TODO: called multiple times per reconfig; use 1-element cache?\n for sl in new_config.slaves:\n if sl.slavename == self.slavename:\n return sl\n assert 0, \"no new slave named '%s'\" % self.slavename\n\n def startMissingTimer(self):\n if self.notify_on_missing and self.missing_timeout and self.parent:\n self.stopMissingTimer() # in case it's already running\n self.missing_timer = reactor.callLater(self.missing_timeout,\n self._missing_timer_fired)\n\n def stopMissingTimer(self):\n if self.missing_timer:\n self.missing_timer.cancel()\n self.missing_timer = None\n\n def getPerspective(self, mind, slavename):\n assert slavename == self.slavename\n 
metrics.MetricCountEvent.log(\"attached_slaves\", 1)\n\n # record when this connection attempt occurred\n if self.slave_status:\n self.slave_status.recordConnectTime()\n\n\n if self.isConnected():\n # duplicate slave - send it to arbitration\n arb = botmaster.DuplicateSlaveArbitrator(self)\n return arb.getPerspective(mind, slavename)\n else:\n log.msg(\"slave '%s' attaching from %s\" % (slavename, mind.broker.transport.getPeer()))\n return self\n\n def doKeepalive(self):\n self.keepalive_timer = reactor.callLater(self.keepalive_interval,\n self.doKeepalive)\n if not self.slave:\n return\n d = self.slave.callRemote(\"print\", \"Received keepalive from master\")\n d.addErrback(log.msg, \"Keepalive failed for '%s'\" % (self.slavename, ))\n\n def stopKeepaliveTimer(self):\n if self.keepalive_timer:\n self.keepalive_timer.cancel()\n\n def startKeepaliveTimer(self):\n assert self.keepalive_interval\n log.msg(\"Starting buildslave keepalive timer for '%s'\" % \\\n (self.slavename, ))\n self.doKeepalive()\n\n def isConnected(self):\n return self.slave\n\n def _missing_timer_fired(self):\n self.missing_timer = None\n # notify people, but only if we're still in the config\n if not self.parent:\n return\n\n buildmaster = self.botmaster.master\n status = buildmaster.getStatus()\n text = \"The Buildbot working for '%s'\\n\" % status.getTitle()\n text += (\"has noticed that the buildslave named %s went away\\n\" %\n self.slavename)\n text += \"\\n\"\n text += (\"It last disconnected at %s (buildmaster-local time)\\n\" %\n time.ctime(time.time() - self.missing_timeout)) # approx\n text += \"\\n\"\n text += \"The admin on record (as reported by BUILDSLAVE:info/admin)\\n\"\n text += \"was '%s'.\\n\" % self.slave_status.getAdmin()\n text += \"\\n\"\n text += \"Sincerely,\\n\"\n text += \" The Buildbot\\n\"\n text += \" %s\\n\" % status.getTitleURL()\n subject = \"Buildbot: buildslave %s was lost\" % self.slavename\n return self._mail_missing_message(subject, text)\n\n\n def updateSlave(self):\n \"\"\"Called to add or remove builders after the slave has connected.\n\n @return: a Deferred that indicates when an attached slave has\n accepted the new builders and/or released the old ones.\"\"\"\n if self.slave:\n return self.sendBuilderList()\n else:\n return defer.succeed(None)\n\n def updateSlaveStatus(self, buildStarted=None, buildFinished=None):\n if buildStarted:\n self.slave_status.buildStarted(buildStarted)\n if buildFinished:\n self.slave_status.buildFinished(buildFinished)\n\n @metrics.countMethod('AbstractBuildSlave.attached()')\n def attached(self, bot):\n \"\"\"This is called when the slave connects.\n\n @return: a Deferred that fires when the attachment is complete\n \"\"\"\n\n # the botmaster should ensure this.\n assert not self.isConnected()\n\n metrics.MetricCountEvent.log(\"AbstractBuildSlave.attached_slaves\", 1)\n\n # set up the subscription point for eventual detachment\n self.detached_subs = subscription.SubscriptionPoint(\"detached\")\n\n # now we go through a sequence of calls, gathering information, then\n # tell the Botmaster that it can finally give this slave to all the\n # Builders that care about it.\n\n # we accumulate slave information in this 'state' dictionary, then\n # set it atomically if we make it far enough through the process\n state = {}\n\n # Reset graceful shutdown status\n self.slave_status.setGraceful(False)\n # We want to know when the graceful shutdown flag changes\n self.slave_status.addGracefulWatcher(self._gracefulChanged)\n\n d = defer.succeed(None)\n def 
_log_attachment_on_slave(res):\n d1 = bot.callRemote(\"print\", \"attached\")\n d1.addErrback(lambda why: None)\n return d1\n d.addCallback(_log_attachment_on_slave)\n\n def _get_info(res):\n d1 = bot.callRemote(\"getSlaveInfo\")\n def _got_info(info):\n log.msg(\"Got slaveinfo from '%s'\" % self.slavename)\n # TODO: info{} might have other keys\n state[\"admin\"] = info.get(\"admin\")\n state[\"host\"] = info.get(\"host\")\n state[\"access_uri\"] = info.get(\"access_uri\", None)\n state[\"slave_environ\"] = info.get(\"environ\", {})\n state[\"slave_basedir\"] = info.get(\"basedir\", None)\n state[\"slave_system\"] = info.get(\"system\", None)\n def _info_unavailable(why):\n why.trap(pb.NoSuchMethod)\n # maybe an old slave, doesn't implement remote_getSlaveInfo\n log.msg(\"BuildSlave.info_unavailable\")\n log.err(why)\n d1.addCallbacks(_got_info, _info_unavailable)\n return d1\n d.addCallback(_get_info)\n self.startKeepaliveTimer()\n\n def _get_version(res):\n d = bot.callRemote(\"getVersion\")\n def _got_version(version):\n state[\"version\"] = version\n def _version_unavailable(why):\n why.trap(pb.NoSuchMethod)\n # probably an old slave\n state[\"version\"] = '(unknown)'\n d.addCallbacks(_got_version, _version_unavailable)\n return d\n d.addCallback(_get_version)\n\n def _get_commands(res):\n d1 = bot.callRemote(\"getCommands\")\n def _got_commands(commands):\n state[\"slave_commands\"] = commands\n def _commands_unavailable(why):\n # probably an old slave\n log.msg(\"BuildSlave._commands_unavailable\")\n if why.check(AttributeError):\n return\n log.err(why)\n d1.addCallbacks(_got_commands, _commands_unavailable)\n return d1\n d.addCallback(_get_commands)\n\n def _accept_slave(res):\n self.slave_status.setAdmin(state.get(\"admin\"))\n self.slave_status.setHost(state.get(\"host\"))\n self.slave_status.setAccessURI(state.get(\"access_uri\"))\n self.slave_status.setVersion(state.get(\"version\"))\n self.slave_status.setConnected(True)\n self.slave_commands = state.get(\"slave_commands\")\n self.slave_environ = state.get(\"slave_environ\")\n self.slave_basedir = state.get(\"slave_basedir\")\n self.slave_system = state.get(\"slave_system\")\n self.slave = bot\n if self.slave_system == \"win32\":\n self.path_module = namedModule(\"win32path\")\n else:\n # most eveything accepts / as separator, so posix should be a\n # reasonable fallback\n self.path_module = namedModule(\"posixpath\")\n log.msg(\"bot attached\")\n self.messageReceivedFromSlave()\n self.stopMissingTimer()\n self.botmaster.master.status.slaveConnected(self.slavename)\n\n return self.updateSlave()\n d.addCallback(_accept_slave)\n d.addCallback(lambda _:\n self.botmaster.maybeStartBuildsForSlave(self.slavename))\n\n # Finally, the slave gets a reference to this BuildSlave. 
They\n # receive this later, after we've started using them.\n d.addCallback(lambda _: self)\n return d\n\n def messageReceivedFromSlave(self):\n now = time.time()\n self.lastMessageReceived = now\n self.slave_status.setLastMessageReceived(now)\n\n def detached(self, mind):\n metrics.MetricCountEvent.log(\"AbstractBuildSlave.attached_slaves\", -1)\n self.slave = None\n self._old_builder_list = []\n self.slave_status.removeGracefulWatcher(self._gracefulChanged)\n self.slave_status.setConnected(False)\n log.msg(\"BuildSlave.detached(%s)\" % self.slavename)\n self.botmaster.master.status.slaveDisconnected(self.slavename)\n self.stopKeepaliveTimer()\n self.releaseLocks()\n\n # notify watchers, but do so in the next reactor iteration so that\n # any further detached() action by subclasses happens first\n def notif():\n subs = self.detached_subs\n self.detached_subs = None\n subs.deliver()\n reactor.callLater(0, notif)\n\n def subscribeToDetach(self, callback):\n \"\"\"\n Request that C{callable} be invoked with no arguments when the\n L{detached} method is invoked.\n\n @returns: L{Subscription}\n \"\"\"\n assert self.detached_subs, \"detached_subs is only set if attached\"\n return self.detached_subs.subscribe(callback)\n\n def disconnect(self):\n \"\"\"Forcibly disconnect the slave.\n\n This severs the TCP connection and returns a Deferred that will fire\n (with None) when the connection is probably gone.\n\n If the slave is still alive, they will probably try to reconnect\n again in a moment.\n\n This is called in two circumstances. The first is when a slave is\n removed from the config file. In this case, when they try to\n reconnect, they will be rejected as an unknown slave. The second is\n when we wind up with two connections for the same slave, in which\n case we disconnect the older connection.\n \"\"\"\n\n if not self.slave:\n return defer.succeed(None)\n log.msg(\"disconnecting old slave %s now\" % self.slavename)\n # When this Deferred fires, we'll be ready to accept the new slave\n return self._disconnect(self.slave)\n\n def _disconnect(self, slave):\n # all kinds of teardown will happen as a result of\n # loseConnection(), but it happens after a reactor iteration or\n # two. Hook the actual disconnect so we can know when it is safe\n # to connect the new slave. We have to wait one additional\n # iteration (with callLater(0)) to make sure the *other*\n # notifyOnDisconnect handlers have had a chance to run.\n d = defer.Deferred()\n\n # notifyOnDisconnect runs the callback with one argument, the\n # RemoteReference being disconnected.\n def _disconnected(rref):\n reactor.callLater(0, d.callback, None)\n slave.notifyOnDisconnect(_disconnected)\n tport = slave.broker.transport\n # this is the polite way to request that a socket be closed\n tport.loseConnection()\n try:\n # but really we don't want to wait for the transmit queue to\n # drain. The remote end is unlikely to ACK the data, so we'd\n # probably have to wait for a (20-minute) TCP timeout.\n #tport._closeSocket()\n # however, doing _closeSocket (whether before or after\n # loseConnection) somehow prevents the notifyOnDisconnect\n # handlers from being run. 
Bummer.\n tport.offset = 0\n tport.dataBuffer = \"\"\n except:\n # however, these hacks are pretty internal, so don't blow up if\n # they fail or are unavailable\n log.msg(\"failed to accelerate the shutdown process\")\n log.msg(\"waiting for slave to finish disconnecting\")\n\n return d\n\n def sendBuilderList(self):\n our_builders = self.botmaster.getBuildersForSlave(self.slavename)\n blist = [(b.name, b.config.slavebuilddir) for b in our_builders]\n if blist == self._old_builder_list:\n return defer.succeed(None)\n\n d = self.slave.callRemote(\"setBuilderList\", blist)\n def sentBuilderList(ign):\n self._old_builder_list = blist\n return ign\n d.addCallback(sentBuilderList)\n return d\n\n def perspective_keepalive(self):\n self.messageReceivedFromSlave()\n\n def perspective_shutdown(self):\n log.msg(\"slave %s wants to shut down\" % self.slavename)\n self.slave_status.setGraceful(True)\n\n def addSlaveBuilder(self, sb):\n self.slavebuilders[sb.builder_name] = sb\n\n def removeSlaveBuilder(self, sb):\n try:\n del self.slavebuilders[sb.builder_name]\n except KeyError:\n pass\n\n def buildFinished(self, sb):\n \"\"\"This is called when a build on this slave is finished.\"\"\"\n self.botmaster.maybeStartBuildsForSlave(self.slavename)\n\n def canStartBuild(self):\n \"\"\"\n I am called when a build is requested to see if this buildslave\n can start a build. This function can be used to limit overall\n concurrency on the buildslave.\n\n Note for subclassers: if a slave can become willing to start a build\n without any action on that slave (for example, by a resource in use on\n another slave becoming available), then you must arrange for\n L{maybeStartBuildsForSlave} to be called at that time, or builds on\n this slave will not start.\n \"\"\"\n # If we're waiting to shutdown gracefully, then we shouldn't\n # accept any new jobs.\n if self.slave_status.getGraceful():\n return False\n\n if self.max_builds:\n active_builders = [sb for sb in self.slavebuilders.values()\n if sb.isBusy()]\n if len(active_builders) >= self.max_builds:\n return False\n\n if not self.locksAvailable():\n return False\n\n return True\n\n def _mail_missing_message(self, subject, text):\n # first, see if we have a MailNotifier we can use. This gives us a\n # fromaddr and a relayhost.\n buildmaster = self.botmaster.master\n for st in buildmaster.statusTargets:\n if isinstance(st, MailNotifier):\n break\n else:\n # if not, they get a default MailNotifier, which always uses SMTP\n # to localhost and uses a dummy fromaddr of \"buildbot\".\n log.msg(\"buildslave-missing msg using default MailNotifier\")\n st = MailNotifier(\"buildbot\")\n # now construct the mail\n\n m = Message()\n m.set_payload(text)\n m['Date'] = formatdate(localtime=True)\n m['Subject'] = subject\n m['From'] = st.fromaddr\n recipients = self.notify_on_missing\n m['To'] = \", \".join(recipients)\n d = st.sendMessage(m, recipients)\n # return the Deferred for testing purposes\n return d\n\n def _gracefulChanged(self, graceful):\n \"\"\"This is called when our graceful shutdown setting changes\"\"\"\n self.maybeShutdown()\n\n @defer.deferredGenerator\n def shutdown(self):\n \"\"\"Shutdown the slave\"\"\"\n if not self.slave:\n log.msg(\"no remote; slave is already shut down\")\n return\n\n # First, try the \"new\" way - calling our own remote's shutdown\n # method. 
The method was only added in 0.8.3, so ignore NoSuchMethod\n # failures.\n def new_way():\n d = self.slave.callRemote('shutdown')\n d.addCallback(lambda _ : True) # successful shutdown request\n def check_nsm(f):\n f.trap(pb.NoSuchMethod)\n return False # fall through to the old way\n d.addErrback(check_nsm)\n def check_connlost(f):\n f.trap(pb.PBConnectionLost)\n return True # the slave is gone, so call it finished\n d.addErrback(check_connlost)\n return d\n\n wfd = defer.waitForDeferred(new_way())\n yield wfd\n if wfd.getResult():\n return # done!\n\n # Now, the old way. Look for a builder with a remote reference to the\n # client side slave. If we can find one, then call \"shutdown\" on the\n # remote builder, which will cause the slave buildbot process to exit.\n def old_way():\n d = None\n for b in self.slavebuilders.values():\n if b.remote:\n d = b.remote.callRemote(\"shutdown\")\n break\n\n if d:\n log.msg(\"Shutting down (old) slave: %s\" % self.slavename)\n # The remote shutdown call will not complete successfully since the\n # buildbot process exits almost immediately after getting the\n # shutdown request.\n # Here we look at the reason why the remote call failed, and if\n # it's because the connection was lost, that means the slave\n # shutdown as expected.\n def _errback(why):\n if why.check(pb.PBConnectionLost):\n log.msg(\"Lost connection to %s\" % self.slavename)\n else:\n log.err(\"Unexpected error when trying to shutdown %s\" % self.slavename)\n d.addErrback(_errback)\n return d\n log.err(\"Couldn't find remote builder to shut down slave\")\n return defer.succeed(None)\n wfd = defer.waitForDeferred(old_way())\n yield wfd\n wfd.getResult()\n\n def maybeShutdown(self):\n \"\"\"Shut down this slave if it has been asked to shut down gracefully,\n and has no active builders.\"\"\"\n if not self.slave_status.getGraceful():\n return\n active_builders = [sb for sb in self.slavebuilders.values()\n if sb.isBusy()]\n if active_builders:\n return\n d = self.shutdown()\n d.addErrback(log.err, 'error while shutting down slave')\n\nclass BuildSlave(AbstractBuildSlave):\n\n def sendBuilderList(self):\n d = AbstractBuildSlave.sendBuilderList(self)\n def _sent(slist):\n # Nothing has changed, so don't need to re-attach to everything\n if not slist:\n return\n dl = []\n for name, remote in slist.items():\n # use get() since we might have changed our mind since then\n b = self.botmaster.builders.get(name)\n if b:\n d1 = b.attached(self, remote, self.slave_commands)\n dl.append(d1)\n return defer.DeferredList(dl)\n def _set_failed(why):\n log.msg(\"BuildSlave.sendBuilderList (%s) failed\" % self)\n log.err(why)\n # TODO: hang up on them?, without setBuilderList we can't use\n # them\n d.addCallbacks(_sent, _set_failed)\n return d\n\n def detached(self, mind):\n AbstractBuildSlave.detached(self, mind)\n self.botmaster.slaveLost(self)\n self.startMissingTimer()\n\n def buildFinished(self, sb):\n \"\"\"This is called when a build on this slave is finished.\"\"\"\n AbstractBuildSlave.buildFinished(self, sb)\n\n # If we're gracefully shutting down, and we have no more active\n # builders, then it's safe to disconnect\n self.maybeShutdown()\n\nclass AbstractLatentBuildSlave(AbstractBuildSlave):\n \"\"\"A build slave that will start up a slave instance when needed.\n\n To use, subclass and implement start_instance and stop_instance.\n\n See ec2buildslave.py for a concrete example. 
Also see the stub example in\n test/test_slaves.py.\n \"\"\"\n\n implements(ILatentBuildSlave)\n\n substantiated = False\n substantiation_deferred = None\n substantiation_build = None\n build_wait_timer = None\n _shutdown_callback_handle = None\n\n def __init__(self, name, password, max_builds=None,\n notify_on_missing=[], missing_timeout=60*20,\n build_wait_timeout=60*10,\n properties={}, locks=None):\n AbstractBuildSlave.__init__(\n self, name, password, max_builds, notify_on_missing,\n missing_timeout, properties, locks)\n self.building = set()\n self.build_wait_timeout = build_wait_timeout\n\n def start_instance(self, build):\n # responsible for starting instance that will try to connect with this\n # master. Should return deferred with either True (instance started)\n # or False (instance not started, so don't run a build here). Problems\n # should use an errback.\n raise NotImplementedError\n\n def stop_instance(self, fast=False):\n # responsible for shutting down instance.\n raise NotImplementedError\n\n def substantiate(self, sb, build):\n if self.substantiated:\n self._clearBuildWaitTimer()\n self._setBuildWaitTimer()\n return defer.succeed(True)\n if self.substantiation_deferred is None:\n if self.parent and not self.missing_timer:\n # start timer. if timer times out, fail deferred\n self.missing_timer = reactor.callLater(\n self.missing_timeout,\n self._substantiation_failed, defer.TimeoutError())\n self.substantiation_deferred = defer.Deferred()\n self.substantiation_build = build\n if self.slave is None:\n d = self._substantiate(build) # start up instance\n d.addErrback(log.err, \"while substantiating\")\n # else: we're waiting for an old one to detach. the _substantiate\n # will be done in ``detached`` below.\n return self.substantiation_deferred\n\n def _substantiate(self, build):\n # register event trigger\n d = self.start_instance(build)\n self._shutdown_callback_handle = reactor.addSystemEventTrigger(\n 'before', 'shutdown', self._soft_disconnect, fast=True)\n def start_instance_result(result):\n # If we don't report success, then preparation failed.\n if not result:\n log.msg(\"Slave '%s' doesn not want to substantiate at this time\" % (self.slavename,))\n d = self.substantiation_deferred\n self.substantiation_deferred = None\n d.callback(False)\n return result\n def clean_up(failure):\n if self.missing_timer is not None:\n self.missing_timer.cancel()\n self._substantiation_failed(failure)\n if self._shutdown_callback_handle is not None:\n handle = self._shutdown_callback_handle\n del self._shutdown_callback_handle\n reactor.removeSystemEventTrigger(handle)\n return failure\n d.addCallbacks(start_instance_result, clean_up)\n return d\n\n def attached(self, bot):\n if self.substantiation_deferred is None:\n msg = 'Slave %s received connection while not trying to ' \\\n 'substantiate. Disconnecting.' 
% (self.slavename,)\n log.msg(msg)\n self._disconnect(bot)\n return defer.fail(RuntimeError(msg))\n return AbstractBuildSlave.attached(self, bot)\n\n def detached(self, mind):\n AbstractBuildSlave.detached(self, mind)\n if self.substantiation_deferred is not None:\n d = self._substantiate(self.substantiation_build)\n d.addErrback(log.err, 'while re-substantiating')\n\n def _substantiation_failed(self, failure):\n self.missing_timer = None\n if self.substantiation_deferred:\n d = self.substantiation_deferred\n self.substantiation_deferred = None\n self.substantiation_build = None\n d.errback(failure)\n self.insubstantiate()\n # notify people, but only if we're still in the config\n if not self.parent or not self.notify_on_missing:\n return\n\n buildmaster = self.botmaster.master\n status = buildmaster.getStatus()\n text = \"The Buildbot working for '%s'\\n\" % status.getTitle()\n text += (\"has noticed that the latent buildslave named %s \\n\" %\n self.slavename)\n text += \"never substantiated after a request\\n\"\n text += \"\\n\"\n text += (\"The request was made at %s (buildmaster-local time)\\n\" %\n time.ctime(time.time() - self.missing_timeout)) # approx\n text += \"\\n\"\n text += \"Sincerely,\\n\"\n text += \" The Buildbot\\n\"\n text += \" %s\\n\" % status.getTitleURL()\n subject = \"Buildbot: buildslave %s never substantiated\" % self.slavename\n return self._mail_missing_message(subject, text)\n\n def buildStarted(self, sb):\n assert self.substantiated\n self._clearBuildWaitTimer()\n self.building.add(sb.builder_name)\n\n def buildFinished(self, sb):\n AbstractBuildSlave.buildFinished(self, sb)\n\n self.building.remove(sb.builder_name)\n if not self.building:\n self._setBuildWaitTimer()\n\n def _clearBuildWaitTimer(self):\n if self.build_wait_timer is not None:\n if self.build_wait_timer.active():\n self.build_wait_timer.cancel()\n self.build_wait_timer = None\n\n def _setBuildWaitTimer(self):\n self._clearBuildWaitTimer()\n self.build_wait_timer = reactor.callLater(\n self.build_wait_timeout, self._soft_disconnect)\n\n def insubstantiate(self, fast=False):\n self._clearBuildWaitTimer()\n d = self.stop_instance(fast)\n if self._shutdown_callback_handle is not None:\n handle = self._shutdown_callback_handle\n del self._shutdown_callback_handle\n reactor.removeSystemEventTrigger(handle)\n self.substantiated = False\n self.building.clear() # just to be sure\n return d\n\n def _soft_disconnect(self, fast=False):\n d = AbstractBuildSlave.disconnect(self)\n if self.slave is not None:\n # this could be called when the slave needs to shut down, such as\n # in BotMaster.removeSlave, *or* when a new slave requests a\n # connection when we already have a slave. It's not clear what to\n # do in the second case: this shouldn't happen, and if it\n # does...if it's a latent slave, shutting down will probably kill\n # something we want...but we can't know what the status is. So,\n # here, we just do what should be appropriate for the first case,\n # and put our heads in the sand for the second, at least for now.\n # The best solution to the odd situation is removing it as a\n # possibilty: make the master in charge of connecting to the\n # slave, rather than vice versa. 
TODO.\n d = defer.DeferredList([d, self.insubstantiate(fast)])\n else:\n if self.substantiation_deferred is not None:\n # unlike the previous block, we don't expect this situation when\n # ``attached`` calls ``disconnect``, only when we get a simple\n # request to \"go away\".\n d = self.substantiation_deferred\n self.substantiation_deferred = None\n self.substantiation_build = None\n d.errback(failure.Failure(\n RuntimeError(\"soft disconnect aborted substantiation\")))\n if self.missing_timer:\n self.missing_timer.cancel()\n self.missing_timer = None\n self.stop_instance()\n return d\n\n def disconnect(self):\n # This returns a Deferred but we don't use it\n self._soft_disconnect() \n # this removes the slave from all builders. It won't come back\n # without a restart (or maybe a sighup)\n self.botmaster.slaveLost(self)\n\n def stopService(self):\n res = defer.maybeDeferred(AbstractBuildSlave.stopService, self)\n if self.slave is not None:\n d = self._soft_disconnect()\n res = defer.DeferredList([res, d])\n return res\n\n def updateSlave(self):\n \"\"\"Called to add or remove builders after the slave has connected.\n\n Also called after botmaster's builders are initially set.\n\n @return: a Deferred that indicates when an attached slave has\n accepted the new builders and/or released the old ones.\"\"\"\n for b in self.botmaster.getBuildersForSlave(self.slavename):\n if b.name not in self.slavebuilders:\n b.addLatentSlave(self)\n return AbstractBuildSlave.updateSlave(self)\n\n def sendBuilderList(self):\n d = AbstractBuildSlave.sendBuilderList(self)\n def _sent(slist):\n if not slist:\n return\n dl = []\n for name, remote in slist.items():\n # use get() since we might have changed our mind since then.\n # we're checking on the builder in addition to the\n # slavebuilders out of a bit of paranoia.\n b = self.botmaster.builders.get(name)\n sb = self.slavebuilders.get(name)\n if b and sb:\n d1 = sb.attached(self, remote, self.slave_commands)\n dl.append(d1)\n return defer.DeferredList(dl)\n def _set_failed(why):\n log.msg(\"BuildSlave.sendBuilderList (%s) failed\" % self)\n log.err(why)\n # TODO: hang up on them?, without setBuilderList we can't use\n # them\n if self.substantiation_deferred:\n d = self.substantiation_deferred\n self.substantiation_deferred = None\n self.substantiation_build = None\n d.errback(why)\n if self.missing_timer:\n self.missing_timer.cancel()\n self.missing_timer = None\n # TODO: maybe log? send an email?\n return why\n d.addCallbacks(_sent, _set_failed)\n def _substantiated(res):\n log.msg(\"Slave %s substantiated \\o/\" % self.slavename)\n self.substantiated = True\n if not self.substantiation_deferred:\n log.msg(\"No substantiation deferred for %s\" % self.slavename)\n if self.substantiation_deferred:\n log.msg(\"Firing %s substantiation deferred with success\" % self.slavename)\n d = self.substantiation_deferred\n self.substantiation_deferred = None\n self.substantiation_build = None\n d.callback(True)\n # note that the missing_timer is already handled within\n # ``attached``\n if not self.building:\n self._setBuildWaitTimer()\n d.addCallback(_substantiated)\n return d\n",
"path": "master/buildbot/buildslave.py"
}
] | diff --git a/master/buildbot/buildslave.py b/master/buildbot/buildslave.py
index 6fe79c1cd0bb..29f77df99f6c 100644
--- a/master/buildbot/buildslave.py
+++ b/master/buildbot/buildslave.py
@@ -233,6 +233,8 @@ def reconfigService(self, new_config):
return d
def stopService(self):
+ if self.registration:
+ self.registration.unregister()
self.stopMissingTimer()
return service.MultiService.stopService(self)
diff --git a/master/buildbot/test/fake/fakemaster.py b/master/buildbot/test/fake/fakemaster.py
index f6f10fa9755b..4494513912ca 100644
--- a/master/buildbot/test/fake/fakemaster.py
+++ b/master/buildbot/test/fake/fakemaster.py
@@ -16,6 +16,7 @@
import weakref
from twisted.internet import defer
from buildbot.test.fake import fakedb
+from buildbot.test.fake.pbmanager import FakePBManager
from buildbot import config
import mock
@@ -36,7 +37,7 @@ def mkref(x):
return d
-def make_master(master_id=fakedb.FakeBuildRequestsComponent.MASTER_ID):
+class FakeMaster(mock.Mock):
"""
Create a fake Master instance: a Mock with some convenience
implementations:
@@ -44,15 +45,19 @@ def make_master(master_id=fakedb.FakeBuildRequestsComponent.MASTER_ID):
- Non-caching implementation for C{self.caches}
"""
- fakemaster = mock.Mock(name="fakemaster")
+ def __init__(self, master_id=fakedb.FakeBuildRequestsComponent.MASTER_ID):
+ mock.Mock.__init__(self, name="fakemaster")
+ self._master_id = master_id
+ self.config = config.MasterConfig()
+ self.caches.get_cache = FakeCache
+ self.pbmanager = FakePBManager()
- # set up caches
- fakemaster.caches.get_cache = FakeCache
+ def getObjectId(self):
+ return defer.succeed(self._master_id)
- # and a getObjectId method
- fakemaster.getObjectId = (lambda : defer.succeed(master_id))
+ # work around http://code.google.com/p/mock/issues/detail?id=105
+ def _get_child_mock(self, **kw):
+ return mock.Mock(**kw)
- # and some config - this class's constructor is good enough to trust
- fakemaster.config = config.MasterConfig()
-
- return fakemaster
+# Leave this alias, in case we want to add more behavior later
+make_master = FakeMaster
diff --git a/master/buildbot/test/fake/pbmanager.py b/master/buildbot/test/fake/pbmanager.py
new file mode 100644
index 000000000000..e91b7e5ecdf2
--- /dev/null
+++ b/master/buildbot/test/fake/pbmanager.py
@@ -0,0 +1,47 @@
+# This file is part of Buildbot. Buildbot is free software: you can
+# redistribute it and/or modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation, version 2.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc., 51
+# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Copyright Buildbot Team Members
+
+from twisted.application import service
+from twisted.internet import defer
+
+class FakePBManager(service.MultiService):
+
+ def __init__(self):
+ service.MultiService.__init__(self)
+ self.setName("fake-pbmanager")
+ self._registrations = []
+ self._unregistrations = []
+
+ def register(self, portstr, username, password, pfactory):
+ if (portstr, username) not in self._registrations:
+ reg = FakeRegistration(self, portstr, username)
+ self._registrations.append((portstr,username,password))
+ return reg
+ else:
+ raise KeyError, ("username '%s' is already registered on port %s"
+ % (username, portstr))
+
+ def _unregister(self, portstr, username):
+ self._unregistrations.append((portstr, username))
+ return defer.succeed(None)
+
+class FakeRegistration(object):
+ def __init__(self, pbmanager, portstr, username):
+ self._portstr = portstr
+ self._username = username
+ self._pbmanager = pbmanager
+
+ def unregister(self):
+ self._pbmanager._unregister(self._portstr, self._username)
diff --git a/master/buildbot/test/unit/test_buildslave.py b/master/buildbot/test/unit/test_buildslave.py
index 75f4d96c3b9d..f4068a7b5c67 100644
--- a/master/buildbot/test/unit/test_buildslave.py
+++ b/master/buildbot/test/unit/test_buildslave.py
@@ -17,7 +17,7 @@
from twisted.trial import unittest
from twisted.internet import defer
from buildbot import buildslave, config
-from buildbot.test.fake import fakemaster
+from buildbot.test.fake import fakemaster, pbmanager
class AbstractBuildSlave(unittest.TestCase):
@@ -68,15 +68,11 @@ def do_test_reconfigService(self, old, old_port, new, new_port):
old.master = master
if old_port:
self.old_registration = old.registration = \
- mock.Mock(name='old_registration')
+ pbmanager.FakeRegistration(master.pbmanager, old_port, old.slavename)
old.registered_port = old_port
old.missing_timer = mock.Mock(name='missing_timer')
old.startService()
- self.new_registration = mock.Mock(name='new_registration')
- master.pbmanager.register = mock.Mock(
- side_effect=lambda *args : self.new_registration)
-
new_config = mock.Mock()
new_config.slavePortnum = new_port
new_config.slaves = [ new ]
@@ -113,7 +109,7 @@ def test_reconfigService_attrs(self):
self.assertEqual(old.missing_timeout, 121)
self.assertEqual(old.properties.getProperty('a'), 'c')
self.assertEqual(old.keepalive_interval, 61)
- self.assertFalse(self.master.pbmanager.register.called)
+ self.assertEqual(self.master.pbmanager._registrations, [])
self.assertTrue(old.updateSlave.called)
@defer.deferredGenerator
@@ -136,7 +132,7 @@ def test_reconfigService_initial_registration(self):
yield wfd
wfd.getResult()
- self.assertTrue(self.master.pbmanager.register.called)
+ self.assertEqual(self.master.pbmanager._registrations, [('tcp:1234', 'bot', 'pass')])
@defer.deferredGenerator
def test_reconfigService_reregister_password(self):
@@ -149,8 +145,8 @@ def test_reconfigService_reregister_password(self):
wfd.getResult()
self.assertEqual(old.password, 'newpass')
- self.assertTrue(self.old_registration.unregister.called)
- self.assertTrue(self.master.pbmanager.register.called)
+ self.assertEqual(self.master.pbmanager._unregistrations, [('tcp:1234', 'bot')])
+ self.assertEqual(self.master.pbmanager._registrations, [('tcp:1234', 'bot', 'newpass')])
@defer.deferredGenerator
def test_reconfigService_reregister_port(self):
@@ -162,8 +158,25 @@ def test_reconfigService_reregister_port(self):
yield wfd
wfd.getResult()
- self.assertTrue(self.old_registration.unregister.called)
- self.assertTrue(self.master.pbmanager.register.called)
+ self.assertEqual(self.master.pbmanager._unregistrations, [('tcp:1234', 'bot')])
+ self.assertEqual(self.master.pbmanager._registrations, [('tcp:5678', 'bot', 'pass')])
+
+ @defer.inlineCallbacks
+ def test_stopService(self):
+ master = self.master = fakemaster.make_master()
+ slave = self.ConcreteBuildSlave('bot', 'pass')
+ slave.master = master
+ slave.startService()
+
+ config = mock.Mock()
+ config.slavePortnum = "tcp:1234"
+ config.slaves = [ slave ]
+
+ yield slave.reconfigService(config)
+ yield slave.stopService()
+
+ self.assertEqual(self.master.pbmanager._unregistrations, [('tcp:1234', 'bot')])
+ self.assertEqual(self.master.pbmanager._registrations, [('tcp:1234', 'bot', 'pass')])
# FIXME: Test that reconfig properly deals with
# 1) locks
|
feast-dev__feast-4085 | Remove numpy <1.25 dependency in setup.py
In setup.py, I can see that the dependency for pandas has already been updated from
"pandas>=1.4.3,<2" (which is still in the current PyPI version) to "pandas>=1.4.3,<3", but numpy hasn't, which will break the installation if I am using e.g. pandas 2.2.1, that requires numpy (>=1.26.0,<2)
## Problem
"numpy>=1.22,<1.25"
## Solution
"numpy>=1.22,<2"
## Steps to reproduce
poetry add git+https://github.com/feast-dev/feast.git
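
To make the conflict concrete, here is a minimal sketch (using the third-party `packaging` library purely for illustration; the version pins are the ones quoted above) showing that no numpy release can satisfy both requirements at once:
```python
from packaging.specifiers import SpecifierSet

feast_pin = SpecifierSet(">=1.22,<1.25")   # feast's numpy pin before the fix
pandas_pin = SpecifierSet(">=1.26.0,<2")   # numpy requirement of pandas 2.2.1

# The two ranges do not overlap, so dependency resolution has to fail:
print("1.24.4" in feast_pin, "1.24.4" in pandas_pin)   # True False
print("1.26.4" in feast_pin, "1.26.4" in pandas_pin)   # False True
```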
| [
{
"content": "# Copyright 2019 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport glob\nimport os\nimport pathlib\nimport re\nimport shutil\nimport subprocess\nimport sys\nfrom distutils.cmd import Command\nfrom pathlib import Path\n\nfrom setuptools import find_packages\n\ntry:\n from setuptools import setup\n from setuptools.command.build_ext import build_ext as _build_ext\n from setuptools.command.build_py import build_py\n from setuptools.command.develop import develop\n from setuptools.command.install import install\n\nexcept ImportError:\n from distutils.command.build_py import build_py\n from distutils.core import setup\n\nNAME = \"feast\"\nDESCRIPTION = \"Python SDK for Feast\"\nURL = \"https://github.com/feast-dev/feast\"\nAUTHOR = \"Feast\"\nREQUIRES_PYTHON = \">=3.9.0\"\n\nREQUIRED = [\n \"click>=7.0.0,<9.0.0\",\n \"colorama>=0.3.9,<1\",\n \"dill~=0.3.0\",\n \"mypy-protobuf>=3.1\",\n \"Jinja2>=2,<4\",\n \"jsonschema\",\n \"mmh3\",\n \"numpy>=1.22,<1.25\",\n \"pandas>=1.4.3,<3\",\n # Higher than 4.23.4 seems to cause a seg fault\n \"protobuf>=4.24.0,<5.0.0\",\n \"pyarrow>=4\",\n \"pydantic>=2.0.0\",\n \"pygments>=2.12.0,<3\",\n \"PyYAML>=5.4.0,<7\",\n \"requests\",\n \"SQLAlchemy[mypy]>1\",\n \"tabulate>=0.8.0,<1\",\n \"tenacity>=7,<9\",\n \"toml>=0.10.0,<1\",\n \"tqdm>=4,<5\",\n \"typeguard>=4.0.0\",\n \"fastapi>=0.68.0\",\n \"uvicorn[standard]>=0.14.0,<1\",\n \"gunicorn; platform_system != 'Windows'\",\n # https://github.com/dask/dask/issues/10996\n \"dask>=2021.1.0,<2024.3.0\",\n \"bowler\", # Needed for automatic repo upgrades\n \"importlib-resources>=6.0.0,<7\",\n \"importlib_metadata>=6.8.0,<7\",\n]\n\nGCP_REQUIRED = [\n \"google-api-core>=1.23.0,<3\",\n \"googleapis-common-protos>=1.52.0,<2\",\n \"google-cloud-bigquery[pandas]>=2,<3.13.0\",\n \"google-cloud-bigquery-storage >= 2.0.0,<3\",\n \"google-cloud-datastore>=2.1.0,<3\",\n \"google-cloud-storage>=1.34.0,<3\",\n \"google-cloud-bigtable>=2.11.0,<3\",\n \"fsspec<=2024.1.0\",\n]\n\nREDIS_REQUIRED = [\n \"redis>=4.2.2,<5\",\n \"hiredis>=2.0.0,<3\",\n]\n\nAWS_REQUIRED = [\"boto3>=1.17.0,<2\", \"docker>=5.0.2\", \"fsspec<=2024.1.0\"]\n\nBYTEWAX_REQUIRED = [\"bytewax==0.15.1\", \"docker>=5.0.2\", \"kubernetes<=20.13.0\"]\n\nSNOWFLAKE_REQUIRED = [\n \"snowflake-connector-python[pandas]>=3.7,<4\",\n]\n\nSPARK_REQUIRED = [\n \"pyspark>=3.0.0,<4\",\n]\n\nTRINO_REQUIRED = [\"trino>=0.305.0,<0.400.0\", \"regex\"]\n\nPOSTGRES_REQUIRED = [\n \"psycopg2-binary>=2.8.3,<3\",\n]\n\nMYSQL_REQUIRED = [\"pymysql\", \"types-PyMySQL\"]\n\nHBASE_REQUIRED = [\n \"happybase>=1.2.0,<3\",\n]\n\nCASSANDRA_REQUIRED = [\n \"cassandra-driver>=3.24.0,<4\",\n]\n\nGE_REQUIRED = [\"great_expectations>=0.15.41\"]\n\nAZURE_REQUIRED = [\n \"azure-storage-blob>=0.37.0\",\n \"azure-identity>=1.6.1\",\n \"SQLAlchemy>=1.4.19\",\n \"pyodbc>=4.0.30\",\n \"pymssql\",\n]\n\nROCKSET_REQUIRED = [\n \"rockset>=1.0.3\",\n]\n\nHAZELCAST_REQUIRED = [\n \"hazelcast-python-client>=5.1\",\n]\n\nIBIS_REQUIRED = [\n \"ibis-framework\",\n 
\"ibis-substrait\",\n]\n\nGRPCIO_REQUIRED = [\n \"grpcio>=1.56.2,<2\",\n \"grpcio-tools>=1.56.2,<2\",\n \"grpcio-reflection>=1.56.2,<2\",\n \"grpcio-health-checking>=1.56.2,<2\",\n]\n\nDUCKDB_REQUIRED = [\n \"ibis-framework[duckdb]\"\n]\n\nCI_REQUIRED = (\n [\n \"build\",\n \"virtualenv==20.23.0\",\n \"cryptography>=35.0,<43\",\n \"ruff>=0.3.3\",\n \"grpcio-testing>=1.56.2,<2\",\n # FastAPI does not correctly pull starlette dependency on httpx see thread(https://github.com/tiangolo/fastapi/issues/5656).\n \"httpx>=0.23.3\",\n \"minio==7.1.0\",\n \"mock==2.0.0\",\n \"moto<5\",\n \"mypy>=1.4.1\",\n \"urllib3>=1.25.4,<3\",\n \"psutil==5.9.0\",\n \"py>=1.11.0\", # https://github.com/pytest-dev/pytest/issues/10420\n \"pytest>=6.0.0,<8\",\n \"pytest-cov\",\n \"pytest-xdist\",\n \"pytest-benchmark>=3.4.1,<4\",\n \"pytest-lazy-fixture==0.6.3\",\n \"pytest-timeout==1.4.2\",\n \"pytest-ordering~=0.6.0\",\n \"pytest-mock==1.10.4\",\n \"pytest-env\",\n \"Sphinx>4.0.0,<7\",\n \"testcontainers>=3.5,<4\",\n \"firebase-admin>=5.2.0,<6\",\n \"pre-commit<3.3.2\",\n \"assertpy==1.1\",\n \"pip-tools\",\n \"pybindgen\",\n \"types-protobuf~=3.19.22\",\n \"types-python-dateutil\",\n \"types-pytz\",\n \"types-PyYAML\",\n \"types-redis\",\n \"types-requests<2.31.0\",\n \"types-setuptools\",\n \"types-tabulate\",\n \"virtualenv<20.24.2\",\n ]\n + GCP_REQUIRED\n + REDIS_REQUIRED\n + AWS_REQUIRED\n + BYTEWAX_REQUIRED\n + SNOWFLAKE_REQUIRED\n + SPARK_REQUIRED\n + POSTGRES_REQUIRED\n + MYSQL_REQUIRED\n + TRINO_REQUIRED\n + GE_REQUIRED\n + HBASE_REQUIRED\n + CASSANDRA_REQUIRED\n + AZURE_REQUIRED\n + ROCKSET_REQUIRED\n + HAZELCAST_REQUIRED\n + IBIS_REQUIRED\n + GRPCIO_REQUIRED\n + DUCKDB_REQUIRED\n)\n\nDOCS_REQUIRED = CI_REQUIRED\nDEV_REQUIRED = CI_REQUIRED\n\n# Get git repo root directory\nrepo_root = str(pathlib.Path(__file__).resolve().parent)\n\n# README file from Feast repo root directory\nREADME_FILE = os.path.join(repo_root, \"README.md\")\nwith open(README_FILE, \"r\", encoding=\"utf8\") as f:\n LONG_DESCRIPTION = f.read()\n\n# Add Support for parsing tags that have a prefix containing '/' (ie 'sdk/go') to setuptools_scm.\n# Regex modified from default tag regex in:\n# https://github.com/pypa/setuptools_scm/blob/2a1b46d38fb2b8aeac09853e660bcd0d7c1bc7be/src/setuptools_scm/config.py#L9\nTAG_REGEX = re.compile(\n r\"^(?:[\\/\\w-]+)?(?P<version>[vV]?\\d+(?:\\.\\d+){0,2}[^\\+]*)(?:\\+.*)?$\"\n)\n\n# Only set use_scm_version if git executable exists (setting this variable causes pip to use git under the hood)\nif shutil.which(\"git\"):\n use_scm_version = {\"root\": \".\", \"relative_to\": __file__, \"tag_regex\": TAG_REGEX}\nelse:\n use_scm_version = None\n\nPROTO_SUBDIRS = [\"core\", \"registry\", \"serving\", \"types\", \"storage\"]\nPYTHON_CODE_PREFIX = \"sdk/python\"\n\n\nclass BuildPythonProtosCommand(Command):\n description = \"Builds the proto files into Python files.\"\n user_options = [\n (\"inplace\", \"i\", \"Write generated proto files to source directory.\"),\n ]\n\n def initialize_options(self):\n self.python_protoc = [\n sys.executable,\n \"-m\",\n \"grpc_tools.protoc\",\n ] # find_executable(\"protoc\")\n self.proto_folder = os.path.join(repo_root, \"protos\")\n self.sub_folders = PROTO_SUBDIRS\n self.build_lib = None\n self.inplace = 0\n\n def finalize_options(self):\n self.set_undefined_options(\"build\", (\"build_lib\", \"build_lib\"))\n\n @property\n def python_folder(self):\n if self.inplace:\n return os.path.join(\n os.path.dirname(__file__) or os.getcwd(), \"sdk/python/feast/protos\"\n )\n\n 
return os.path.join(self.build_lib, \"feast/protos\")\n\n def _generate_python_protos(self, path: str):\n proto_files = glob.glob(os.path.join(self.proto_folder, path))\n Path(self.python_folder).mkdir(parents=True, exist_ok=True)\n subprocess.check_call(\n self.python_protoc\n + [\n \"-I\",\n self.proto_folder,\n \"--python_out\",\n self.python_folder,\n \"--grpc_python_out\",\n self.python_folder,\n \"--mypy_out\",\n self.python_folder,\n ]\n + proto_files\n )\n\n def run(self):\n for sub_folder in self.sub_folders:\n self._generate_python_protos(f\"feast/{sub_folder}/*.proto\")\n # We need the __init__ files for each of the generated subdirs\n # so that they are regular packages, and don't need the `--namespace-packages` flags\n # when being typechecked using mypy.\n with open(f\"{self.python_folder}/feast/{sub_folder}/__init__.py\", \"w\"):\n pass\n\n with open(f\"{self.python_folder}/__init__.py\", \"w\"):\n pass\n with open(f\"{self.python_folder}/feast/__init__.py\", \"w\"):\n pass\n\n for path in Path(self.python_folder).rglob(\"*.py\"):\n for folder in self.sub_folders:\n # Read in the file\n with open(path, \"r\") as file:\n filedata = file.read()\n\n # Replace the target string\n filedata = filedata.replace(\n f\"from feast.{folder}\", f\"from feast.protos.feast.{folder}\"\n )\n\n # Write the file out again\n with open(path, \"w\") as file:\n file.write(filedata)\n\n\nclass BuildCommand(build_py):\n \"\"\"Custom build command.\"\"\"\n\n def run(self):\n self.run_command(\"build_python_protos\")\n\n self.run_command(\"build_ext\")\n build_py.run(self)\n\n\nclass DevelopCommand(develop):\n \"\"\"Custom develop command.\"\"\"\n\n def run(self):\n self.reinitialize_command(\"build_python_protos\", inplace=1)\n self.run_command(\"build_python_protos\")\n\n develop.run(self)\n\n\nsetup(\n name=NAME,\n author=AUTHOR,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n python_requires=REQUIRES_PYTHON,\n url=URL,\n packages=find_packages(\n where=PYTHON_CODE_PREFIX, exclude=(\"java\", \"infra\", \"sdk/python/tests\", \"ui\")\n ),\n package_dir={\"\": PYTHON_CODE_PREFIX},\n install_requires=REQUIRED,\n # https://stackoverflow.com/questions/28509965/setuptools-development-requirements\n # Install dev requirements with: pip install -e .[dev]\n extras_require={\n \"dev\": DEV_REQUIRED,\n \"ci\": CI_REQUIRED,\n \"gcp\": GCP_REQUIRED,\n \"aws\": AWS_REQUIRED,\n \"bytewax\": BYTEWAX_REQUIRED,\n \"redis\": REDIS_REQUIRED,\n \"snowflake\": SNOWFLAKE_REQUIRED,\n \"spark\": SPARK_REQUIRED,\n \"trino\": TRINO_REQUIRED,\n \"postgres\": POSTGRES_REQUIRED,\n \"azure\": AZURE_REQUIRED,\n \"mysql\": MYSQL_REQUIRED,\n \"ge\": GE_REQUIRED,\n \"hbase\": HBASE_REQUIRED,\n \"docs\": DOCS_REQUIRED,\n \"cassandra\": CASSANDRA_REQUIRED,\n \"hazelcast\": HAZELCAST_REQUIRED,\n \"grpcio\": GRPCIO_REQUIRED,\n \"rockset\": ROCKSET_REQUIRED,\n \"ibis\": IBIS_REQUIRED,\n \"duckdb\": DUCKDB_REQUIRED\n },\n include_package_data=True,\n license=\"Apache\",\n classifiers=[\n # Trove classifiers\n # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.9\",\n ],\n entry_points={\"console_scripts\": [\"feast=feast.cli:cli\"]},\n use_scm_version=use_scm_version,\n setup_requires=[\n \"setuptools_scm\",\n \"grpcio>=1.56.2,<2\",\n \"grpcio-tools>=1.56.2,<2\",\n 
\"mypy-protobuf>=3.1\",\n \"pybindgen==0.22.0\",\n ],\n cmdclass={\n \"build_python_protos\": BuildPythonProtosCommand,\n \"build_py\": BuildCommand,\n \"develop\": DevelopCommand,\n },\n)\n",
"path": "setup.py"
}
] | [
{
"content": "# Copyright 2019 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport glob\nimport os\nimport pathlib\nimport re\nimport shutil\nimport subprocess\nimport sys\nfrom distutils.cmd import Command\nfrom pathlib import Path\n\nfrom setuptools import find_packages\n\ntry:\n from setuptools import setup\n from setuptools.command.build_ext import build_ext as _build_ext\n from setuptools.command.build_py import build_py\n from setuptools.command.develop import develop\n from setuptools.command.install import install\n\nexcept ImportError:\n from distutils.command.build_py import build_py\n from distutils.core import setup\n\nNAME = \"feast\"\nDESCRIPTION = \"Python SDK for Feast\"\nURL = \"https://github.com/feast-dev/feast\"\nAUTHOR = \"Feast\"\nREQUIRES_PYTHON = \">=3.9.0\"\n\nREQUIRED = [\n \"click>=7.0.0,<9.0.0\",\n \"colorama>=0.3.9,<1\",\n \"dill~=0.3.0\",\n \"mypy-protobuf>=3.1\",\n \"Jinja2>=2,<4\",\n \"jsonschema\",\n \"mmh3\",\n \"numpy>=1.22,<2\",\n \"pandas>=1.4.3,<3\",\n # Higher than 4.23.4 seems to cause a seg fault\n \"protobuf>=4.24.0,<5.0.0\",\n \"pyarrow>=4\",\n \"pydantic>=2.0.0\",\n \"pygments>=2.12.0,<3\",\n \"PyYAML>=5.4.0,<7\",\n \"requests\",\n \"SQLAlchemy[mypy]>1\",\n \"tabulate>=0.8.0,<1\",\n \"tenacity>=7,<9\",\n \"toml>=0.10.0,<1\",\n \"tqdm>=4,<5\",\n \"typeguard>=4.0.0\",\n \"fastapi>=0.68.0\",\n \"uvicorn[standard]>=0.14.0,<1\",\n \"gunicorn; platform_system != 'Windows'\",\n # https://github.com/dask/dask/issues/10996\n \"dask>=2021.1.0,<2024.3.0\",\n \"bowler\", # Needed for automatic repo upgrades\n \"importlib-resources>=6.0.0,<7\",\n \"importlib_metadata>=6.8.0,<7\",\n]\n\nGCP_REQUIRED = [\n \"google-api-core>=1.23.0,<3\",\n \"googleapis-common-protos>=1.52.0,<2\",\n \"google-cloud-bigquery[pandas]>=2,<3.13.0\",\n \"google-cloud-bigquery-storage >= 2.0.0,<3\",\n \"google-cloud-datastore>=2.1.0,<3\",\n \"google-cloud-storage>=1.34.0,<3\",\n \"google-cloud-bigtable>=2.11.0,<3\",\n \"fsspec<=2024.1.0\",\n]\n\nREDIS_REQUIRED = [\n \"redis>=4.2.2,<5\",\n \"hiredis>=2.0.0,<3\",\n]\n\nAWS_REQUIRED = [\"boto3>=1.17.0,<2\", \"docker>=5.0.2\", \"fsspec<=2024.1.0\"]\n\nBYTEWAX_REQUIRED = [\"bytewax==0.15.1\", \"docker>=5.0.2\", \"kubernetes<=20.13.0\"]\n\nSNOWFLAKE_REQUIRED = [\n \"snowflake-connector-python[pandas]>=3.7,<4\",\n]\n\nSPARK_REQUIRED = [\n \"pyspark>=3.0.0,<4\",\n]\n\nTRINO_REQUIRED = [\"trino>=0.305.0,<0.400.0\", \"regex\"]\n\nPOSTGRES_REQUIRED = [\n \"psycopg2-binary>=2.8.3,<3\",\n]\n\nMYSQL_REQUIRED = [\"pymysql\", \"types-PyMySQL\"]\n\nHBASE_REQUIRED = [\n \"happybase>=1.2.0,<3\",\n]\n\nCASSANDRA_REQUIRED = [\n \"cassandra-driver>=3.24.0,<4\",\n]\n\nGE_REQUIRED = [\"great_expectations>=0.15.41\"]\n\nAZURE_REQUIRED = [\n \"azure-storage-blob>=0.37.0\",\n \"azure-identity>=1.6.1\",\n \"SQLAlchemy>=1.4.19\",\n \"pyodbc>=4.0.30\",\n \"pymssql\",\n]\n\nROCKSET_REQUIRED = [\n \"rockset>=1.0.3\",\n]\n\nHAZELCAST_REQUIRED = [\n \"hazelcast-python-client>=5.1\",\n]\n\nIBIS_REQUIRED = [\n \"ibis-framework\",\n 
\"ibis-substrait\",\n]\n\nGRPCIO_REQUIRED = [\n \"grpcio>=1.56.2,<2\",\n \"grpcio-tools>=1.56.2,<2\",\n \"grpcio-reflection>=1.56.2,<2\",\n \"grpcio-health-checking>=1.56.2,<2\",\n]\n\nDUCKDB_REQUIRED = [\n \"ibis-framework[duckdb]\"\n]\n\nCI_REQUIRED = (\n [\n \"build\",\n \"virtualenv==20.23.0\",\n \"cryptography>=35.0,<43\",\n \"ruff>=0.3.3\",\n \"grpcio-testing>=1.56.2,<2\",\n # FastAPI does not correctly pull starlette dependency on httpx see thread(https://github.com/tiangolo/fastapi/issues/5656).\n \"httpx>=0.23.3\",\n \"minio==7.1.0\",\n \"mock==2.0.0\",\n \"moto<5\",\n \"mypy>=1.4.1\",\n \"urllib3>=1.25.4,<3\",\n \"psutil==5.9.0\",\n \"py>=1.11.0\", # https://github.com/pytest-dev/pytest/issues/10420\n \"pytest>=6.0.0,<8\",\n \"pytest-cov\",\n \"pytest-xdist\",\n \"pytest-benchmark>=3.4.1,<4\",\n \"pytest-lazy-fixture==0.6.3\",\n \"pytest-timeout==1.4.2\",\n \"pytest-ordering~=0.6.0\",\n \"pytest-mock==1.10.4\",\n \"pytest-env\",\n \"Sphinx>4.0.0,<7\",\n \"testcontainers>=3.5,<4\",\n \"firebase-admin>=5.2.0,<6\",\n \"pre-commit<3.3.2\",\n \"assertpy==1.1\",\n \"pip-tools\",\n \"pybindgen\",\n \"types-protobuf~=3.19.22\",\n \"types-python-dateutil\",\n \"types-pytz\",\n \"types-PyYAML\",\n \"types-redis\",\n \"types-requests<2.31.0\",\n \"types-setuptools\",\n \"types-tabulate\",\n \"virtualenv<20.24.2\",\n ]\n + GCP_REQUIRED\n + REDIS_REQUIRED\n + AWS_REQUIRED\n + BYTEWAX_REQUIRED\n + SNOWFLAKE_REQUIRED\n + SPARK_REQUIRED\n + POSTGRES_REQUIRED\n + MYSQL_REQUIRED\n + TRINO_REQUIRED\n + GE_REQUIRED\n + HBASE_REQUIRED\n + CASSANDRA_REQUIRED\n + AZURE_REQUIRED\n + ROCKSET_REQUIRED\n + HAZELCAST_REQUIRED\n + IBIS_REQUIRED\n + GRPCIO_REQUIRED\n + DUCKDB_REQUIRED\n)\n\nDOCS_REQUIRED = CI_REQUIRED\nDEV_REQUIRED = CI_REQUIRED\n\n# Get git repo root directory\nrepo_root = str(pathlib.Path(__file__).resolve().parent)\n\n# README file from Feast repo root directory\nREADME_FILE = os.path.join(repo_root, \"README.md\")\nwith open(README_FILE, \"r\", encoding=\"utf8\") as f:\n LONG_DESCRIPTION = f.read()\n\n# Add Support for parsing tags that have a prefix containing '/' (ie 'sdk/go') to setuptools_scm.\n# Regex modified from default tag regex in:\n# https://github.com/pypa/setuptools_scm/blob/2a1b46d38fb2b8aeac09853e660bcd0d7c1bc7be/src/setuptools_scm/config.py#L9\nTAG_REGEX = re.compile(\n r\"^(?:[\\/\\w-]+)?(?P<version>[vV]?\\d+(?:\\.\\d+){0,2}[^\\+]*)(?:\\+.*)?$\"\n)\n\n# Only set use_scm_version if git executable exists (setting this variable causes pip to use git under the hood)\nif shutil.which(\"git\"):\n use_scm_version = {\"root\": \".\", \"relative_to\": __file__, \"tag_regex\": TAG_REGEX}\nelse:\n use_scm_version = None\n\nPROTO_SUBDIRS = [\"core\", \"registry\", \"serving\", \"types\", \"storage\"]\nPYTHON_CODE_PREFIX = \"sdk/python\"\n\n\nclass BuildPythonProtosCommand(Command):\n description = \"Builds the proto files into Python files.\"\n user_options = [\n (\"inplace\", \"i\", \"Write generated proto files to source directory.\"),\n ]\n\n def initialize_options(self):\n self.python_protoc = [\n sys.executable,\n \"-m\",\n \"grpc_tools.protoc\",\n ] # find_executable(\"protoc\")\n self.proto_folder = os.path.join(repo_root, \"protos\")\n self.sub_folders = PROTO_SUBDIRS\n self.build_lib = None\n self.inplace = 0\n\n def finalize_options(self):\n self.set_undefined_options(\"build\", (\"build_lib\", \"build_lib\"))\n\n @property\n def python_folder(self):\n if self.inplace:\n return os.path.join(\n os.path.dirname(__file__) or os.getcwd(), \"sdk/python/feast/protos\"\n )\n\n 
return os.path.join(self.build_lib, \"feast/protos\")\n\n def _generate_python_protos(self, path: str):\n proto_files = glob.glob(os.path.join(self.proto_folder, path))\n Path(self.python_folder).mkdir(parents=True, exist_ok=True)\n subprocess.check_call(\n self.python_protoc\n + [\n \"-I\",\n self.proto_folder,\n \"--python_out\",\n self.python_folder,\n \"--grpc_python_out\",\n self.python_folder,\n \"--mypy_out\",\n self.python_folder,\n ]\n + proto_files\n )\n\n def run(self):\n for sub_folder in self.sub_folders:\n self._generate_python_protos(f\"feast/{sub_folder}/*.proto\")\n # We need the __init__ files for each of the generated subdirs\n # so that they are regular packages, and don't need the `--namespace-packages` flags\n # when being typechecked using mypy.\n with open(f\"{self.python_folder}/feast/{sub_folder}/__init__.py\", \"w\"):\n pass\n\n with open(f\"{self.python_folder}/__init__.py\", \"w\"):\n pass\n with open(f\"{self.python_folder}/feast/__init__.py\", \"w\"):\n pass\n\n for path in Path(self.python_folder).rglob(\"*.py\"):\n for folder in self.sub_folders:\n # Read in the file\n with open(path, \"r\") as file:\n filedata = file.read()\n\n # Replace the target string\n filedata = filedata.replace(\n f\"from feast.{folder}\", f\"from feast.protos.feast.{folder}\"\n )\n\n # Write the file out again\n with open(path, \"w\") as file:\n file.write(filedata)\n\n\nclass BuildCommand(build_py):\n \"\"\"Custom build command.\"\"\"\n\n def run(self):\n self.run_command(\"build_python_protos\")\n\n self.run_command(\"build_ext\")\n build_py.run(self)\n\n\nclass DevelopCommand(develop):\n \"\"\"Custom develop command.\"\"\"\n\n def run(self):\n self.reinitialize_command(\"build_python_protos\", inplace=1)\n self.run_command(\"build_python_protos\")\n\n develop.run(self)\n\n\nsetup(\n name=NAME,\n author=AUTHOR,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n python_requires=REQUIRES_PYTHON,\n url=URL,\n packages=find_packages(\n where=PYTHON_CODE_PREFIX, exclude=(\"java\", \"infra\", \"sdk/python/tests\", \"ui\")\n ),\n package_dir={\"\": PYTHON_CODE_PREFIX},\n install_requires=REQUIRED,\n # https://stackoverflow.com/questions/28509965/setuptools-development-requirements\n # Install dev requirements with: pip install -e .[dev]\n extras_require={\n \"dev\": DEV_REQUIRED,\n \"ci\": CI_REQUIRED,\n \"gcp\": GCP_REQUIRED,\n \"aws\": AWS_REQUIRED,\n \"bytewax\": BYTEWAX_REQUIRED,\n \"redis\": REDIS_REQUIRED,\n \"snowflake\": SNOWFLAKE_REQUIRED,\n \"spark\": SPARK_REQUIRED,\n \"trino\": TRINO_REQUIRED,\n \"postgres\": POSTGRES_REQUIRED,\n \"azure\": AZURE_REQUIRED,\n \"mysql\": MYSQL_REQUIRED,\n \"ge\": GE_REQUIRED,\n \"hbase\": HBASE_REQUIRED,\n \"docs\": DOCS_REQUIRED,\n \"cassandra\": CASSANDRA_REQUIRED,\n \"hazelcast\": HAZELCAST_REQUIRED,\n \"grpcio\": GRPCIO_REQUIRED,\n \"rockset\": ROCKSET_REQUIRED,\n \"ibis\": IBIS_REQUIRED,\n \"duckdb\": DUCKDB_REQUIRED\n },\n include_package_data=True,\n license=\"Apache\",\n classifiers=[\n # Trove classifiers\n # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.9\",\n ],\n entry_points={\"console_scripts\": [\"feast=feast.cli:cli\"]},\n use_scm_version=use_scm_version,\n setup_requires=[\n \"setuptools_scm\",\n \"grpcio>=1.56.2,<2\",\n \"grpcio-tools>=1.56.2,<2\",\n 
\"mypy-protobuf>=3.1\",\n \"pybindgen==0.22.0\",\n ],\n cmdclass={\n \"build_python_protos\": BuildPythonProtosCommand,\n \"build_py\": BuildCommand,\n \"develop\": DevelopCommand,\n },\n)\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index f94fb25bb55..b0e9c0c6af4 100644
--- a/setup.py
+++ b/setup.py
@@ -48,7 +48,7 @@
"Jinja2>=2,<4",
"jsonschema",
"mmh3",
- "numpy>=1.22,<1.25",
+ "numpy>=1.22,<2",
"pandas>=1.4.3,<3",
# Higher than 4.23.4 seems to cause a seg fault
"protobuf>=4.24.0,<5.0.0",
|
readthedocs__readthedocs.org-4853 | Confusing error message to end user
In https://github.com/rtfd/readthedocs.org/issues/4071#issuecomment-405939492 I realized that we tell the user there is a problem parsing the YAML file, but the problem is in fact in one of the options set from the web admin dashboard.
Example:

There is no `requirements_file` entry in the YAML file (https://github.com/geopandas/geopandas/blob/master/readthedocs.yml), but the field is set under the `Admin -> Advanced Settings` form.
We need to improve this message to something more user-friendly that expresses the real error. It is not an error in parsing the YAML file: the file was parsed properly, but the problem is with the value of one of the fields.
| [
{
"content": "# -*- coding: utf-8 -*-\n\"\"\"Exceptions raised when building documentation.\"\"\"\n\nfrom __future__ import division, print_function, unicode_literals\n\nfrom django.utils.translation import ugettext_noop\n\n\nclass BuildEnvironmentException(Exception):\n message = None\n status_code = None\n\n def __init__(self, message=None, **kwargs):\n self.status_code = kwargs.pop('status_code', None) or self.status_code or 1\n message = message or self.get_default_message()\n super(BuildEnvironmentException, self).__init__(message, **kwargs)\n\n def get_default_message(self):\n return self.message\n\n\nclass BuildEnvironmentError(BuildEnvironmentException):\n GENERIC_WITH_BUILD_ID = ugettext_noop(\n 'There was a problem with Read the Docs while building your documentation. '\n 'Please try again later. '\n 'However, if this problem persists, '\n 'please report this to us with your build id ({build_id}).',\n )\n\n\nclass BuildEnvironmentCreationFailed(BuildEnvironmentError):\n message = ugettext_noop('Build environment creation failed')\n\n\nclass VersionLockedError(BuildEnvironmentError):\n message = ugettext_noop('Version locked, retrying in 5 minutes.')\n status_code = 423\n\n\nclass ProjectBuildsSkippedError(BuildEnvironmentError):\n message = ugettext_noop('Builds for this project are temporarily disabled')\n\n\nclass YAMLParseError(BuildEnvironmentError):\n GENERIC_WITH_PARSE_EXCEPTION = ugettext_noop(\n 'Problem parsing YAML configuration. {exception}',\n )\n\n\nclass BuildTimeoutError(BuildEnvironmentError):\n message = ugettext_noop('Build exited due to time out')\n\n\nclass BuildEnvironmentWarning(BuildEnvironmentException):\n pass\n\n\nclass MkDocsYAMLParseError(BuildEnvironmentError):\n GENERIC_WITH_PARSE_EXCEPTION = ugettext_noop(\n 'Problem parsing MkDocs YAML configuration. {exception}',\n )\n",
"path": "readthedocs/doc_builder/exceptions.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n\"\"\"Exceptions raised when building documentation.\"\"\"\n\nfrom __future__ import division, print_function, unicode_literals\n\nfrom django.utils.translation import ugettext_noop\n\n\nclass BuildEnvironmentException(Exception):\n message = None\n status_code = None\n\n def __init__(self, message=None, **kwargs):\n self.status_code = kwargs.pop('status_code', None) or self.status_code or 1\n message = message or self.get_default_message()\n super(BuildEnvironmentException, self).__init__(message, **kwargs)\n\n def get_default_message(self):\n return self.message\n\n\nclass BuildEnvironmentError(BuildEnvironmentException):\n GENERIC_WITH_BUILD_ID = ugettext_noop(\n 'There was a problem with Read the Docs while building your documentation. '\n 'Please try again later. '\n 'However, if this problem persists, '\n 'please report this to us with your build id ({build_id}).',\n )\n\n\nclass BuildEnvironmentCreationFailed(BuildEnvironmentError):\n message = ugettext_noop('Build environment creation failed')\n\n\nclass VersionLockedError(BuildEnvironmentError):\n message = ugettext_noop('Version locked, retrying in 5 minutes.')\n status_code = 423\n\n\nclass ProjectBuildsSkippedError(BuildEnvironmentError):\n message = ugettext_noop('Builds for this project are temporarily disabled')\n\n\nclass YAMLParseError(BuildEnvironmentError):\n GENERIC_WITH_PARSE_EXCEPTION = ugettext_noop(\n 'Problem in your project\\'s configuration. {exception}',\n )\n\n\nclass BuildTimeoutError(BuildEnvironmentError):\n message = ugettext_noop('Build exited due to time out')\n\n\nclass BuildEnvironmentWarning(BuildEnvironmentException):\n pass\n\n\nclass MkDocsYAMLParseError(BuildEnvironmentError):\n GENERIC_WITH_PARSE_EXCEPTION = ugettext_noop(\n 'Problem parsing MkDocs YAML configuration. {exception}',\n )\n",
"path": "readthedocs/doc_builder/exceptions.py"
}
] | diff --git a/readthedocs/doc_builder/exceptions.py b/readthedocs/doc_builder/exceptions.py
index 4897fd41daa..ce2ce3d844b 100644
--- a/readthedocs/doc_builder/exceptions.py
+++ b/readthedocs/doc_builder/exceptions.py
@@ -43,7 +43,7 @@ class ProjectBuildsSkippedError(BuildEnvironmentError):
class YAMLParseError(BuildEnvironmentError):
GENERIC_WITH_PARSE_EXCEPTION = ugettext_noop(
- 'Problem parsing YAML configuration. {exception}',
+ 'Problem in your project\'s configuration. {exception}',
)
|
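
As background, a minimal, self-contained sketch of the message-template pattern these exception classes use (Django's `ugettext_noop` and the intermediate `BuildEnvironmentError` class are omitted, and the call site is hypothetical, not taken from the PR):
```python
class BuildEnvironmentException(Exception):
    message = None
    status_code = None

    def __init__(self, message=None, **kwargs):
        self.status_code = kwargs.pop('status_code', None) or self.status_code or 1
        message = message or self.get_default_message()
        super().__init__(message)

    def get_default_message(self):
        return self.message


class YAMLParseError(BuildEnvironmentException):
    GENERIC_WITH_PARSE_EXCEPTION = (
        "Problem in your project's configuration. {exception}"
    )


# Hypothetical call site: the formatted message no longer claims a YAML parsing
# failure, so errors caused by web-form settings read correctly as well.
err = YAMLParseError(
    YAMLParseError.GENERIC_WITH_PARSE_EXCEPTION.format(
        exception='Requirements file "doc/requirements.txt" not found.',
    )
)
print(err)              # Problem in your project's configuration. Requirements file "doc/requirements.txt" not found.
print(err.status_code)  # 1 (default)
```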
apache__airflow-9699 | TimeSensor triggers immediately when used over midnight (UTC)
**Apache Airflow version**: 1.10.10 (issue exists in current master as well)
**Environment**: does not seem relevant
**What happened**:
The TimeSensor does trigger if the current time is later than the defined trigger time. Looking at the [source code](https://github.com/apache/airflow/blob/master/airflow/sensors/time_sensor.py), the trigger rule is defined as
```
return timezone.utcnow().time() > self.target_time
```
This leads to problems when the DAG runs over midnight UTC. For example, suppose the following DAG:
```
from datetime import datetime, time

import pendulum
from airflow import DAG
from airflow.sensors.time_sensor import TimeSensor

with DAG('foo',
         default_args={'start_date': datetime(2020, 7, 1, tzinfo=pendulum.timezone("Europe/Berlin"))},
         schedule_interval="0 0 * * *") as dag:
    # in summer, Europe/Berlin is two hours ahead of UTC, so 04:00 local is 02:00 UTC:
    time_04h00_local = TimeSensor(task_id="time_04h00_local", target_time=time(hour=2, minute=0))
```
This DAG will be triggered at 22:00 UTC. Then, according to the trigger rule:
```
22:00 UTC > 2:00 UTC
```
Hence, the TimeSensor will be triggered immediately.
**What you expected to happen**:
The TimeSensor should trigger on the following day if `target_time < next_execution_date.time()`.
**Possible workarounds**:
One can always use the TimeDeltaSensor to achieve a similar effect, though this results in code that is not as readable.
| [
{
"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom airflow.sensors.base_sensor_operator import BaseSensorOperator\nfrom airflow.utils import timezone\nfrom airflow.utils.decorators import apply_defaults\n\n\nclass TimeSensor(BaseSensorOperator):\n \"\"\"\n Waits until the specified time of the day.\n\n :param target_time: time after which the job succeeds\n :type target_time: datetime.time\n \"\"\"\n\n @apply_defaults\n def __init__(self, target_time, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.target_time = target_time\n\n def poke(self, context):\n self.log.info('Checking if the time (%s) has come', self.target_time)\n return timezone.utcnow().time() > self.target_time\n",
"path": "airflow/sensors/time_sensor.py"
}
] | [
{
"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom airflow.sensors.base_sensor_operator import BaseSensorOperator\nfrom airflow.utils import timezone\nfrom airflow.utils.decorators import apply_defaults\n\n\nclass TimeSensor(BaseSensorOperator):\n \"\"\"\n Waits until the specified time of the day.\n\n :param target_time: time after which the job succeeds\n :type target_time: datetime.time\n \"\"\"\n\n @apply_defaults\n def __init__(self, target_time, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.target_time = target_time\n\n def poke(self, context):\n self.log.info('Checking if the time (%s) has come', self.target_time)\n return timezone.make_naive(timezone.utcnow()).time() > self.target_time\n",
"path": "airflow/sensors/time_sensor.py"
}
] | diff --git a/UPDATING.md b/UPDATING.md
index c5097aeb34521..b58eaf1bb9e70 100644
--- a/UPDATING.md
+++ b/UPDATING.md
@@ -1475,6 +1475,12 @@ arguments, please change `store_serialized_dags` to `read_dags_from_db`.
Similarly, if you were using `DagBag().store_serialized_dags` property, change it to
`DagBag().read_dags_from_db`.
+### TimeSensor will consider default_timezone setting.
+
+Previously `TimeSensor` always compared the `target_time` with the current time in UTC.
+
+Now it will compare `target_time` with the current time in the timezone set by `default_timezone` under the `core` section of the config.
+
## Airflow 1.10.11
diff --git a/airflow/sensors/time_sensor.py b/airflow/sensors/time_sensor.py
index 210dc00aad4ca..69feaaefafb2a 100644
--- a/airflow/sensors/time_sensor.py
+++ b/airflow/sensors/time_sensor.py
@@ -36,4 +36,4 @@ def __init__(self, target_time, *args, **kwargs):
def poke(self, context):
self.log.info('Checking if the time (%s) has come', self.target_time)
- return timezone.utcnow().time() > self.target_time
+ return timezone.make_naive(timezone.utcnow()).time() > self.target_time
|