in_source_id (string, 13–58 chars) | issue (string, 3–241k chars) | before_files (list, 0–3 items) | after_files (list, 0–3 items) | pr_diff (string, 109–107M chars, nullable)
---|---|---|---|---
carpentries__amy-696 | Can't assign person to something when lookup fails
This is the direct cause of this error:
```
Internal Server Error: /workshops/request/65/assign
Traceback (most recent call last):
File "/home/amy/amy_site/venv/lib/python3.4/site-packages/django/core/handlers/base.py", line 132, in get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/home/amy/amy_site/venv/lib/python3.4/site-packages/django/contrib/auth/decorators.py", line 22, in _wrapped_view
return view_func(request, *args, **kwargs)
File "/home/amy/amy_site/venv/lib/python3.4/site-packages/django/contrib/auth/decorators.py", line 22, in _wrapped_view
return view_func(request, *args, **kwargs)
File "./workshops/views.py", line 1989, in eventrequest_assign
assign(request, event_req, person_id)
File "./workshops/util.py", line 737, in assign
person = Person.objects.get(pk=person_id)
File "/home/amy/amy_site/venv/lib/python3.4/site-packages/django/db/models/manager.py", line 127, in manager_method
return getattr(self.get_queryset(), name)(*args, **kwargs)
File "/home/amy/amy_site/venv/lib/python3.4/site-packages/django/db/models/query.py", line 325, in get
clone = self.filter(*args, **kwargs)
File "/home/amy/amy_site/venv/lib/python3.4/site-packages/django/db/models/query.py", line 679, in filter
return self._filter_or_exclude(False, *args, **kwargs)
File "/home/amy/amy_site/venv/lib/python3.4/site-packages/django/db/models/query.py", line 697, in _filter_or_exclude
clone.query.add_q(Q(*args, **kwargs))
File "/home/amy/amy_site/venv/lib/python3.4/site-packages/django/db/models/sql/query.py", line 1310, in add_q
clause, require_inner = self._add_q(where_part, self.used_aliases)
File "/home/amy/amy_site/venv/lib/python3.4/site-packages/django/db/models/sql/query.py", line 1338, in _add_q
allow_joins=allow_joins, split_subq=split_subq,
File "/home/amy/amy_site/venv/lib/python3.4/site-packages/django/db/models/sql/query.py", line 1209, in build_filter
condition = self.build_lookup(lookups, col, value)
File "/home/amy/amy_site/venv/lib/python3.4/site-packages/django/db/models/sql/query.py", line 1102, in build_lookup
return final_lookup(lhs, rhs)
File "/home/amy/amy_site/venv/lib/python3.4/site-packages/django/db/models/lookups.py", line 101, in __init__
self.rhs = self.get_prep_lookup()
File "/home/amy/amy_site/venv/lib/python3.4/site-packages/django/db/models/lookups.py", line 139, in get_prep_lookup
return self.lhs.output_field.get_prep_lookup(self.lookup_name, self.rhs)
File "/home/amy/amy_site/venv/lib/python3.4/site-packages/django/db/models/fields/__init__.py", line 727, in get_prep_lookup
return self.get_prep_value(value)
File "/home/amy/amy_site/venv/lib/python3.4/site-packages/django/db/models/fields/__init__.py", line 985, in get_prep_value
return int(value)
ValueError: invalid literal for int() with base 10: ''
```
There needs to be a "get-or-404" mechanism in `workshops.util.assign`; it would prevent this error. (A sketch of that idea follows the diff at the end of this row.)
| [
{
"content": "# coding: utf-8\nfrom collections import namedtuple, defaultdict\nimport csv\nimport datetime\nfrom itertools import chain\nimport re\nimport yaml\n\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.paginator import (\n EmptyPage, PageNotAnInteger, Paginator as DjangoPaginator,\n)\nfrom django.core.validators import ValidationError\nfrom django.db import IntegrityError, transaction\nfrom django.db.models import Q\nfrom django.http import Http404\nfrom django.shortcuts import render\n\nfrom workshops.models import Event, Role, Person, Task, Award, Badge\n\n\nITEMS_PER_PAGE = 25\n\nWORD_SPLIT = re.compile(r'''([\\s<>\"']+)''')\nSIMPLE_EMAIL = re.compile(r'^\\S+@\\S+\\.\\S+$')\n\nNUM_TRIES = 100\n\nALLOWED_TAG_NAMES = [\n 'slug', 'startdate', 'enddate', 'country', 'venue', 'address',\n 'latlng', 'language', 'eventbrite', 'instructor', 'helper', 'contact',\n]\n\n\nclass InternalError(Exception):\n pass\n\n\ndef upload_person_task_csv(stream):\n \"\"\"Read people from CSV and return a JSON-serializable list of dicts.\n\n The input `stream` should be a file-like object that returns\n Unicode data.\n\n \"Serializability\" is required because we put this data into session. See\n https://docs.djangoproject.com/en/1.7/topics/http/sessions/ for details.\n\n Also return a list of fields from Person.PERSON_UPLOAD_FIELDS for which\n no data was given.\n \"\"\"\n\n result = []\n reader = csv.DictReader(stream)\n empty_fields = set()\n\n for row in reader:\n # skip empty lines in the CSV\n if not any(row.values()):\n continue\n\n entry = {}\n for col in Person.PERSON_UPLOAD_FIELDS:\n try:\n entry[col] = row[col].strip()\n except (KeyError, IndexError, AttributeError):\n # either `col` is not in `entry`, or not in `row`, or\n # `.strip()` doesn't work (e.g. `row[col]` gives `None` instead\n # of string)\n entry[col] = None\n empty_fields.add(col)\n\n for col in Person.PERSON_TASK_EXTRA_FIELDS:\n entry[col] = row.get(col, None)\n entry['errors'] = None\n\n # it will be set in the `verify_upload_person_task`\n entry['username'] = ''\n\n result.append(entry)\n\n return result, list(empty_fields)\n\n\ndef verify_upload_person_task(data):\n \"\"\"\n Verify that uploaded data is correct. Show errors by populating ``errors``\n dictionary item. 
This function changes ``data`` in place.\n \"\"\"\n\n errors_occur = False\n for item in data:\n errors = []\n info = []\n\n event = item.get('event', None)\n existing_event = None\n if event:\n try:\n existing_event = Event.objects.get(slug=event)\n except Event.DoesNotExist:\n errors.append('Event with slug {0} does not exist.'\n .format(event))\n\n role = item.get('role', None)\n existing_role = None\n if role:\n try:\n existing_role = Role.objects.get(name=role)\n except Role.DoesNotExist:\n errors.append('Role with name {0} does not exist.'\n .format(role))\n except Role.MultipleObjectsReturned:\n errors.append('More than one role named {0} exists.'\n .format(role))\n\n # check if the user exists, and if so: check if existing user's\n # personal and family names are the same as uploaded\n email = item.get('email', None)\n personal = item.get('personal', None)\n family = item.get('family', None)\n person = None\n\n if email:\n try:\n # check if first and last name matches person in the database\n person = Person.objects.get(email__iexact=email)\n\n for which, actual, uploaded in (\n ('personal', person.personal, personal),\n ('family', person.family, family)\n ):\n if (actual == uploaded) or (not actual and not uploaded):\n pass\n else:\n errors.append('{0} mismatch: database \"{1}\" '\n 'vs uploaded \"{2}\".'\n .format(which, actual, uploaded))\n\n except Person.DoesNotExist:\n # in this case we need to add a new person\n pass\n\n else:\n if existing_event and person and existing_role:\n # person, their role and a corresponding event exist, so\n # let's check if the task exists\n try:\n Task.objects.get(event=existing_event, person=person,\n role=existing_role)\n except Task.DoesNotExist:\n info.append('Task will be created.')\n else:\n info.append('Task already exists.')\n else:\n info.append('It\\'s highly recommended to add an email address.')\n\n if person:\n # force username from existing record\n item['username'] = person.username\n item['person_exists'] = True\n\n else:\n # force a newly created username\n if not item.get('username'):\n item['username'] = create_username(personal, family)\n item['person_exists'] = False\n\n info.append('Person and task will be created.')\n\n try:\n # let's check if there's someone else named this way\n similar_person = Person.objects.get(personal=personal,\n family=family)\n\n except Person.DoesNotExist:\n pass\n\n except Person.MultipleObjectsReturned:\n persons = [\n str(person) for person in\n Person.objects.filter(personal=personal, family=family)\n ]\n info.append('There\\'s a couple of matching persons in the '\n 'database: {}. '\n 'Use email to merge.'.format(', '.join(persons)))\n\n else:\n info.append('There\\'s a matching person in the database: {}. 
'\n 'Use their email to merge.'.format(similar_person))\n\n # let's check what Person model validators want to say\n try:\n p = Person(personal=personal, family=family, email=email,\n username=item['username'])\n p.clean_fields(exclude=['password'])\n except ValidationError as e:\n for k, v in e.message_dict.items():\n errors.append('{}: {}'.format(k, v))\n\n if not role:\n errors.append('Must have a role.')\n\n if not event:\n errors.append('Must have an event.')\n\n if errors:\n errors_occur = True\n item['errors'] = errors\n\n if info:\n item['info'] = info\n\n return errors_occur\n\n\ndef create_uploaded_persons_tasks(data):\n \"\"\"\n Create persons and tasks from upload data.\n \"\"\"\n\n # Quick sanity check.\n if any([row.get('errors') for row in data]):\n raise InternalError('Uploaded data contains errors, cancelling upload')\n\n persons_created = []\n tasks_created = []\n events = set()\n\n with transaction.atomic():\n for row in data:\n try:\n fields = {key: row[key] for key in Person.PERSON_UPLOAD_FIELDS}\n fields['username'] = row['username']\n\n if fields['email']:\n # we should use existing Person or create one\n p, created = Person.objects.get_or_create(\n email__iexact=fields['email'], defaults=fields\n )\n\n if created:\n persons_created.append(p)\n\n else:\n # we should create a new Person without any email provided\n p = Person(**fields)\n p.save()\n persons_created.append(p)\n\n if row['event'] and row['role']:\n e = Event.objects.get(slug=row['event'])\n r = Role.objects.get(name=row['role'])\n\n # is the number of learners attending the event changed,\n # we should update ``event.attendance``\n if row['role'] == 'learner':\n events.add(e)\n\n t, created = Task.objects.get_or_create(person=p, event=e,\n role=r)\n if created:\n tasks_created.append(t)\n\n except IntegrityError as e:\n raise IntegrityError('{0} (for {1})'.format(str(e), row))\n\n except ObjectDoesNotExist as e:\n raise ObjectDoesNotExist('{0} (for {1})'.format(str(e), row))\n\n for event in events:\n # if event.attendance is lower than number of learners, then\n # update the attendance\n update_event_attendance_from_tasks(event)\n\n return persons_created, tasks_created\n\n\ndef create_username(personal, family, tries=NUM_TRIES):\n '''Generate unique username.'''\n stem = normalize_name(family) + '_' + normalize_name(personal)\n\n counter = None\n for i in range(tries): # let's limit ourselves to only 100 tries\n try:\n if counter is None:\n username = stem\n counter = 1\n else:\n counter += 1\n username = '{0}_{1}'.format(stem, counter)\n Person.objects.get(username=username)\n except ObjectDoesNotExist:\n return username\n\n raise InternalError('Cannot find a non-repeating username'\n '(tried {} usernames): {}.'.format(tries, username))\n\n\ndef normalize_name(name):\n '''Get rid of spaces, funky characters, etc.'''\n name = name.strip()\n for (accented, flat) in [(' ', '-')]:\n name = name.replace(accented, flat)\n\n # remove all non-alphanumeric, non-hyphen chars\n name = re.sub(r'[^\\w\\-]', '', name, flags=re.A)\n\n # We should use lower-cased username, because it directly corresponds to\n # some files Software Carpentry stores about some people - and, as we know,\n # some filesystems are not case-sensitive.\n return name.lower()\n\n\nclass Paginator(DjangoPaginator):\n \"\"\"Everything should work as in django.core.paginator.Paginator, except\n this class provides additional generator for nicer set of pages.\"\"\"\n\n _page_number = None\n\n def page(self, number):\n \"\"\"Overridden to store 
retrieved page number somewhere.\"\"\"\n self._page_number = number\n return super().page(number)\n\n def paginate_sections(self):\n \"\"\"Divide pagination range into 3 sections.\n\n Each section should contain approx. 5 links. If sections are\n overlapping, they're merged.\n The results might be:\n * L…M…R\n * LM…R\n * L…MR\n * LMR\n where L - left section, M - middle section, R - right section, and \"…\"\n stands for a separator.\n \"\"\"\n index = int(self._page_number) or 1\n items = self.page_range\n length = self._num_pages\n\n L = items[0:5]\n\n if index - 3 == 5:\n # Fix when two sets, L_s and M_s, are disjoint but make a sequence\n # [... 3 4, 5 6 ...], then there should not be dots between them\n M = items[index-4:index+4] or items[0:index+1]\n else:\n M = items[index-3:index+4] or items[0:index+1]\n\n if index + 4 == length - 5:\n # Fix when two sets, M_s and R_s, are disjoint but make a sequence\n # [... 3 4, 5 6 ...], then there should not be dots between them\n R = items[-6:]\n else:\n R = items[-5:]\n\n L_s = set(L)\n M_s = set(M)\n R_s = set(R)\n\n dots = [None]\n\n D1 = L_s.isdisjoint(M_s)\n D2 = M_s.isdisjoint(R_s)\n D3 = L_s.isdisjoint(R_s)\n\n if D1 and D2 and D3:\n # L…M…R\n pagination = chain(L, dots, M, dots, R)\n elif not D1 and D2 and D3:\n # LM…R\n pagination = chain(sorted(L_s | M_s), dots, R)\n elif D1 and not D2 and D3:\n # L…MR\n pagination = chain(L, dots, sorted(M_s | R_s))\n elif not D3:\n # tough situation, we may have split something wrong,\n # so lets just display all pages\n pagination = items\n else:\n # LMR\n pagination = iter(sorted(L_s | M_s | R_s))\n\n return pagination\n\n\ndef get_pagination_items(request, all_objects):\n '''Select paginated items.'''\n\n # Get parameters.\n items = request.GET.get('items_per_page', ITEMS_PER_PAGE)\n if items != 'all':\n try:\n items = int(items)\n except ValueError:\n items = ITEMS_PER_PAGE\n else:\n # Show everything.\n items = all_objects.count()\n\n # Figure out where we are.\n page = request.GET.get('page')\n\n # Show selected items.\n paginator = Paginator(all_objects, items)\n\n # Select the pages.\n try:\n result = paginator.page(page)\n\n # If page is not an integer, deliver first page.\n except PageNotAnInteger:\n result = paginator.page(1)\n\n # If page is out of range, deliver last page of results.\n except EmptyPage:\n result = paginator.page(paginator.num_pages)\n\n return result\n\n\ndef merge_persons(person_from, person_to):\n for award in person_from.award_set.all():\n try:\n award.person = person_to\n award.save()\n except IntegrityError:\n # unique constraints fail (probably)\n pass\n\n for task in person_from.task_set.all():\n try:\n task.person = person_to\n task.save()\n except IntegrityError:\n # unique constraints fail (probably)\n pass\n\n # update only unique lessons\n person_from.qualification_set.exclude(lesson__in=person_to.lessons.all()) \\\n .update(person=person_to)\n\n person_to.domains.add(*person_from.domains.all())\n\n # removes tasks, awards, qualifications in a cascading way\n person_from.delete()\n\n\nclass WrongWorkshopURL(ValueError):\n \"\"\"Raised when we fall back to reading tags from event's YAML front matter,\n which requires a link to GitHub raw hosted file, but we can't get that link\n because provided URL doesn't match Event.WEBSITE_REGEX\n (see `generate_url_to_event_index` below).\"\"\"\n\n def __str__(self):\n return ('Event\\'s URL doesn\\'t match Github website format '\n '\"http://user.github.io/2015-12-08-workshop\".')\n\n\ndef 
generate_url_to_event_index(website_url):\n \"\"\"Given URL to workshop's website, generate a URL to its raw `index.html`\n file in GitHub repository.\"\"\"\n template = ('https://raw.githubusercontent.com/{name}/{repo}'\n '/gh-pages/index.html')\n regex = Event.WEBSITE_REGEX\n\n results = regex.match(website_url)\n if results:\n return template.format(**results.groupdict()), results.group('repo')\n raise WrongWorkshopURL()\n\n\ndef find_tags_on_event_index(content):\n \"\"\"Given workshop's raw `index.html`, find and take YAML tags that\n have workshop-related data.\"\"\"\n try:\n first, header, last = content.split('---')\n tags = yaml.load(header.strip())\n\n # get tags to the form returned by `find_tags_on_event_website`\n # because YAML tries to interpret values from index's header\n filtered_tags = {key: value for key, value in tags.items()\n if key in ALLOWED_TAG_NAMES}\n for key, value in filtered_tags.items():\n if isinstance(value, int):\n filtered_tags[key] = str(value)\n elif isinstance(value, datetime.date):\n filtered_tags[key] = '{:%Y-%m-%d}'.format(value)\n elif isinstance(value, list):\n filtered_tags[key] = ', '.join(value)\n\n return filtered_tags\n\n except (ValueError, yaml.scanner.ScannerError):\n # can't unpack or header is not YML format\n return dict()\n\n\ndef find_tags_on_event_website(content):\n \"\"\"Given website content, find and take <meta> tags that have\n workshop-related data.\"\"\"\n\n R = r'<meta name=\"(?P<name>[\\w-]+)\" content=\"(?P<content>.+)\" />$'\n regexp = re.compile(R, re.M)\n\n return {name: content for name, content in regexp.findall(content)\n if name in ALLOWED_TAG_NAMES}\n\n\ndef parse_tags_from_event_website(tags):\n \"\"\"Simple preprocessing of the tags from event website.\"\"\"\n # no compatibility with old-style names\n country = tags.get('country', '').upper()[0:2]\n if len(country) < 2:\n country = ''\n language = tags.get('language', '').upper()[0:2]\n if len(language) < 2:\n language = ''\n\n try:\n latitude, _ = tags.get('latlng', '').split(',')\n latitude = float(latitude.strip())\n except (ValueError, AttributeError):\n # value error: can't convert string to float\n # attribute error: object doesn't have \"split\" or \"strip\" methods\n latitude = None\n try:\n _, longitude = tags.get('latlng', '').split(',')\n longitude = float(longitude.strip())\n except (ValueError, AttributeError):\n # value error: can't convert string to float\n # attribute error: object doesn't have \"split\" or \"strip\" methods\n longitude = None\n\n try:\n reg_key = tags.get('eventbrite', '')\n reg_key = int(reg_key)\n except (ValueError, TypeError):\n # value error: can't convert string to int\n # type error: can't convert None to int\n reg_key = None\n\n try:\n start = tags.get('startdate', '')\n start = datetime.datetime.strptime(start, '%Y-%m-%d').date()\n except ValueError:\n start = None\n\n try:\n end = tags.get('enddate', '')\n end = datetime.datetime.strptime(end, '%Y-%m-%d').date()\n except ValueError:\n end = None\n\n # Split string of comma-separated names into a list, but return empty list\n # instead of [''] when there are no instructors/helpers.\n instructors = tags.get('instructor', '').split('|')\n instructors = [instructor.strip() for instructor in instructors]\n instructors = [] if not any(instructors) else instructors\n helpers = tags.get('helper', '').split('|')\n helpers = [helper.strip() for helper in helpers]\n helpers = [] if not any(helpers) else helpers\n\n return {\n 'slug': tags.get('slug', ''),\n 'language': language,\n 
'start': start,\n 'end': end,\n 'country': country,\n 'venue': tags.get('venue', ''),\n 'address': tags.get('address', ''),\n 'latitude': latitude,\n 'longitude': longitude,\n 'reg_key': reg_key,\n 'instructors': instructors,\n 'helpers': helpers,\n 'contact': tags.get('contact', ''),\n }\n\n\ndef validate_tags_from_event_website(tags):\n errors = []\n\n Requirement = namedtuple(\n 'Requirement',\n ['name', 'display', 'required', 'match_format'],\n )\n\n DATE_FMT = r'^\\d{4}-\\d{2}-\\d{2}$'\n SLUG_FMT = r'^\\d{4}-\\d{2}-\\d{2}-.+$'\n TWOCHAR_FMT = r'^\\w\\w$'\n FRACTION_FMT = r'[-+]?[0-9]*\\.?[0-9]*'\n requirements = [\n Requirement('slug', 'workshop name', True, SLUG_FMT),\n Requirement('language', None, False, TWOCHAR_FMT),\n Requirement('startdate', 'start date', True, DATE_FMT),\n Requirement('enddate', 'end date', False, DATE_FMT),\n Requirement('country', None, True, TWOCHAR_FMT),\n Requirement('venue', None, True, None),\n Requirement('address', None, True, None),\n Requirement('latlng', 'latitude / longitude', True,\n '^' + FRACTION_FMT + r',\\s?' + FRACTION_FMT + '$'),\n Requirement('instructor', None, True, None),\n Requirement('helper', None, True, None),\n Requirement('contact', None, True, None),\n Requirement('eventbrite', 'Eventbrite event ID', False, r'^\\d+$'),\n ]\n\n for requirement in requirements:\n d_ = requirement._asdict()\n name_ = ('{display} ({name})'.format(**d_)\n if requirement.display\n else '{name}'.format(**d_))\n type_ = 'required' if requirement.required else 'optional'\n value_ = tags.get(requirement.name)\n\n if not value_:\n errors.append('Missing {} tag {}.'.format(type_, name_))\n\n if value_ == 'FIXME':\n errors.append('Placeholder value \"FIXME\" for {} tag {}.'\n .format(type_, name_))\n else:\n try:\n if not re.match(requirement.match_format, value_):\n errors.append(\n 'Invalid value \"{}\" for {} tag {}: should be in '\n 'format \"{}\".'\n .format(value_, type_, name_, requirement.match_format)\n )\n except (re.error, TypeError):\n pass\n\n return errors\n\n\ndef update_event_attendance_from_tasks(event):\n \"\"\"Increase event.attendance if there's more learner tasks belonging to the\n event.\"\"\"\n learners = event.task_set.filter(role__name='learner').count()\n Event.objects \\\n .filter(pk=event.pk) \\\n .filter(Q(attendance__lt=learners) | Q(attendance__isnull=True)) \\\n .update(attendance=learners)\n\n\ndef universal_date_format(date):\n return '{:%Y-%m-%d}'.format(date)\n\n\ndef get_members(earliest, latest):\n '''Get everyone who is a member of the Software Carpentry Foundation.'''\n\n member_badge = Badge.objects.get(name='member')\n instructor_badges = Badge.objects.instructor_badges()\n instructor_role = Role.objects.get(name='instructor')\n\n # Everyone who is an explicit member.\n explicit = Person.objects.filter(badges__in=[member_badge]).distinct()\n\n # Everyone who qualifies by having taught recently.\n implicit = Person.objects.filter(\n task__role=instructor_role,\n badges__in=instructor_badges,\n task__event__start__gte=earliest,\n task__event__start__lte=latest\n ).distinct()\n\n # Merge the two sets.\n return explicit | implicit\n\n\ndef default_membership_cutoff():\n \"Calculate a default cutoff dates for members finding with `get_members`.\"\n earliest = datetime.date.today() - 2 * datetime.timedelta(days=365)\n latest = datetime.date.today()\n return earliest, latest\n\n\ndef find_emails(text):\n \"\"\"Find emails in the text. 
This is based on Django's own\n `django.utils.html.urlize`.\"\"\"\n # Split into tokens in case someone uses for example\n # 'Name <[email protected]>' format.\n emails = []\n\n for word in WORD_SPLIT.split(text):\n if SIMPLE_EMAIL.match(word):\n local, domain = word.rsplit('@', 1)\n try:\n domain = domain.encode('idna').decode('ascii')\n except UnicodeError:\n continue\n emails.append('{}@{}'.format(local, domain))\n\n return emails\n\n\ndef assignment_selection(request):\n \"\"\"Parse `assigned_to` query param depending on the logged-in user.\"\"\"\n user = request.user\n is_admin = user.groups.filter(name='administrators').exists()\n\n # it's always possible to assign something entirely else\n # in the `?assigned_to` query param\n\n if is_admin:\n # One of the administrators.\n # They should be presented with their events by default.\n assigned_to = request.GET.get('assigned_to', 'me')\n\n elif user.is_superuser:\n # A superuser. Should see all events by default\n assigned_to = request.GET.get('assigned_to', 'all')\n\n else:\n # Normal user (for example subcommittee members).\n assigned_to = 'all'\n\n return assigned_to, is_admin\n\n\ndef failed_to_delete(request, object, protected_objects, back=None):\n context = {\n 'title': 'Failed to delete',\n 'back': back or object.get_absolute_url,\n 'object': object,\n 'refs': defaultdict(list),\n }\n\n for obj in protected_objects:\n # e.g. for model Award its plural name is 'awards'\n name = str(obj.__class__._meta.verbose_name_plural)\n context['refs'][name].append(obj)\n\n # this trick enables looping through defaultdict instance\n context['refs'].default_factory = None\n\n return render(request, 'workshops/failed_to_delete.html', context)\n\n\ndef assign(request, obj, person_id):\n \"\"\"Set obj.assigned_to. This view helper works with both POST and GET\n requests:\n\n * POST: read person ID from POST's person_1\n * GET: read person_id from URL\n * both: if person_id is None then make event.assigned_to empty\n * otherwise assign matching person.\n\n This is not a view, but it's used in some.\"\"\"\n try:\n if request.method == \"POST\":\n person_id = request.POST.get('person_1', None)\n\n if person_id is None:\n obj.assigned_to = None\n else:\n person = Person.objects.get(pk=person_id)\n obj.assigned_to = person\n\n obj.save()\n\n except Person.DoesNotExist:\n raise Http404(\"No person found matching the query.\")\n",
"path": "workshops/util.py"
}
] | [
{
"content": "# coding: utf-8\nfrom collections import namedtuple, defaultdict\nimport csv\nimport datetime\nfrom itertools import chain\nimport re\nimport yaml\n\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.paginator import (\n EmptyPage, PageNotAnInteger, Paginator as DjangoPaginator,\n)\nfrom django.core.validators import ValidationError\nfrom django.db import IntegrityError, transaction\nfrom django.db.models import Q\nfrom django.http import Http404\nfrom django.shortcuts import render\n\nfrom workshops.models import Event, Role, Person, Task, Award, Badge\n\n\nITEMS_PER_PAGE = 25\n\nWORD_SPLIT = re.compile(r'''([\\s<>\"']+)''')\nSIMPLE_EMAIL = re.compile(r'^\\S+@\\S+\\.\\S+$')\n\nNUM_TRIES = 100\n\nALLOWED_TAG_NAMES = [\n 'slug', 'startdate', 'enddate', 'country', 'venue', 'address',\n 'latlng', 'language', 'eventbrite', 'instructor', 'helper', 'contact',\n]\n\n\nclass InternalError(Exception):\n pass\n\n\ndef upload_person_task_csv(stream):\n \"\"\"Read people from CSV and return a JSON-serializable list of dicts.\n\n The input `stream` should be a file-like object that returns\n Unicode data.\n\n \"Serializability\" is required because we put this data into session. See\n https://docs.djangoproject.com/en/1.7/topics/http/sessions/ for details.\n\n Also return a list of fields from Person.PERSON_UPLOAD_FIELDS for which\n no data was given.\n \"\"\"\n\n result = []\n reader = csv.DictReader(stream)\n empty_fields = set()\n\n for row in reader:\n # skip empty lines in the CSV\n if not any(row.values()):\n continue\n\n entry = {}\n for col in Person.PERSON_UPLOAD_FIELDS:\n try:\n entry[col] = row[col].strip()\n except (KeyError, IndexError, AttributeError):\n # either `col` is not in `entry`, or not in `row`, or\n # `.strip()` doesn't work (e.g. `row[col]` gives `None` instead\n # of string)\n entry[col] = None\n empty_fields.add(col)\n\n for col in Person.PERSON_TASK_EXTRA_FIELDS:\n entry[col] = row.get(col, None)\n entry['errors'] = None\n\n # it will be set in the `verify_upload_person_task`\n entry['username'] = ''\n\n result.append(entry)\n\n return result, list(empty_fields)\n\n\ndef verify_upload_person_task(data):\n \"\"\"\n Verify that uploaded data is correct. Show errors by populating ``errors``\n dictionary item. 
This function changes ``data`` in place.\n \"\"\"\n\n errors_occur = False\n for item in data:\n errors = []\n info = []\n\n event = item.get('event', None)\n existing_event = None\n if event:\n try:\n existing_event = Event.objects.get(slug=event)\n except Event.DoesNotExist:\n errors.append('Event with slug {0} does not exist.'\n .format(event))\n\n role = item.get('role', None)\n existing_role = None\n if role:\n try:\n existing_role = Role.objects.get(name=role)\n except Role.DoesNotExist:\n errors.append('Role with name {0} does not exist.'\n .format(role))\n except Role.MultipleObjectsReturned:\n errors.append('More than one role named {0} exists.'\n .format(role))\n\n # check if the user exists, and if so: check if existing user's\n # personal and family names are the same as uploaded\n email = item.get('email', None)\n personal = item.get('personal', None)\n family = item.get('family', None)\n person = None\n\n if email:\n try:\n # check if first and last name matches person in the database\n person = Person.objects.get(email__iexact=email)\n\n for which, actual, uploaded in (\n ('personal', person.personal, personal),\n ('family', person.family, family)\n ):\n if (actual == uploaded) or (not actual and not uploaded):\n pass\n else:\n errors.append('{0} mismatch: database \"{1}\" '\n 'vs uploaded \"{2}\".'\n .format(which, actual, uploaded))\n\n except Person.DoesNotExist:\n # in this case we need to add a new person\n pass\n\n else:\n if existing_event and person and existing_role:\n # person, their role and a corresponding event exist, so\n # let's check if the task exists\n try:\n Task.objects.get(event=existing_event, person=person,\n role=existing_role)\n except Task.DoesNotExist:\n info.append('Task will be created.')\n else:\n info.append('Task already exists.')\n else:\n info.append('It\\'s highly recommended to add an email address.')\n\n if person:\n # force username from existing record\n item['username'] = person.username\n item['person_exists'] = True\n\n else:\n # force a newly created username\n if not item.get('username'):\n item['username'] = create_username(personal, family)\n item['person_exists'] = False\n\n info.append('Person and task will be created.')\n\n try:\n # let's check if there's someone else named this way\n similar_person = Person.objects.get(personal=personal,\n family=family)\n\n except Person.DoesNotExist:\n pass\n\n except Person.MultipleObjectsReturned:\n persons = [\n str(person) for person in\n Person.objects.filter(personal=personal, family=family)\n ]\n info.append('There\\'s a couple of matching persons in the '\n 'database: {}. '\n 'Use email to merge.'.format(', '.join(persons)))\n\n else:\n info.append('There\\'s a matching person in the database: {}. 
'\n 'Use their email to merge.'.format(similar_person))\n\n # let's check what Person model validators want to say\n try:\n p = Person(personal=personal, family=family, email=email,\n username=item['username'])\n p.clean_fields(exclude=['password'])\n except ValidationError as e:\n for k, v in e.message_dict.items():\n errors.append('{}: {}'.format(k, v))\n\n if not role:\n errors.append('Must have a role.')\n\n if not event:\n errors.append('Must have an event.')\n\n if errors:\n errors_occur = True\n item['errors'] = errors\n\n if info:\n item['info'] = info\n\n return errors_occur\n\n\ndef create_uploaded_persons_tasks(data):\n \"\"\"\n Create persons and tasks from upload data.\n \"\"\"\n\n # Quick sanity check.\n if any([row.get('errors') for row in data]):\n raise InternalError('Uploaded data contains errors, cancelling upload')\n\n persons_created = []\n tasks_created = []\n events = set()\n\n with transaction.atomic():\n for row in data:\n try:\n fields = {key: row[key] for key in Person.PERSON_UPLOAD_FIELDS}\n fields['username'] = row['username']\n\n if fields['email']:\n # we should use existing Person or create one\n p, created = Person.objects.get_or_create(\n email__iexact=fields['email'], defaults=fields\n )\n\n if created:\n persons_created.append(p)\n\n else:\n # we should create a new Person without any email provided\n p = Person(**fields)\n p.save()\n persons_created.append(p)\n\n if row['event'] and row['role']:\n e = Event.objects.get(slug=row['event'])\n r = Role.objects.get(name=row['role'])\n\n # is the number of learners attending the event changed,\n # we should update ``event.attendance``\n if row['role'] == 'learner':\n events.add(e)\n\n t, created = Task.objects.get_or_create(person=p, event=e,\n role=r)\n if created:\n tasks_created.append(t)\n\n except IntegrityError as e:\n raise IntegrityError('{0} (for {1})'.format(str(e), row))\n\n except ObjectDoesNotExist as e:\n raise ObjectDoesNotExist('{0} (for {1})'.format(str(e), row))\n\n for event in events:\n # if event.attendance is lower than number of learners, then\n # update the attendance\n update_event_attendance_from_tasks(event)\n\n return persons_created, tasks_created\n\n\ndef create_username(personal, family, tries=NUM_TRIES):\n '''Generate unique username.'''\n stem = normalize_name(family) + '_' + normalize_name(personal)\n\n counter = None\n for i in range(tries): # let's limit ourselves to only 100 tries\n try:\n if counter is None:\n username = stem\n counter = 1\n else:\n counter += 1\n username = '{0}_{1}'.format(stem, counter)\n Person.objects.get(username=username)\n except ObjectDoesNotExist:\n return username\n\n raise InternalError('Cannot find a non-repeating username'\n '(tried {} usernames): {}.'.format(tries, username))\n\n\ndef normalize_name(name):\n '''Get rid of spaces, funky characters, etc.'''\n name = name.strip()\n for (accented, flat) in [(' ', '-')]:\n name = name.replace(accented, flat)\n\n # remove all non-alphanumeric, non-hyphen chars\n name = re.sub(r'[^\\w\\-]', '', name, flags=re.A)\n\n # We should use lower-cased username, because it directly corresponds to\n # some files Software Carpentry stores about some people - and, as we know,\n # some filesystems are not case-sensitive.\n return name.lower()\n\n\nclass Paginator(DjangoPaginator):\n \"\"\"Everything should work as in django.core.paginator.Paginator, except\n this class provides additional generator for nicer set of pages.\"\"\"\n\n _page_number = None\n\n def page(self, number):\n \"\"\"Overridden to store 
retrieved page number somewhere.\"\"\"\n self._page_number = number\n return super().page(number)\n\n def paginate_sections(self):\n \"\"\"Divide pagination range into 3 sections.\n\n Each section should contain approx. 5 links. If sections are\n overlapping, they're merged.\n The results might be:\n * L…M…R\n * LM…R\n * L…MR\n * LMR\n where L - left section, M - middle section, R - right section, and \"…\"\n stands for a separator.\n \"\"\"\n index = int(self._page_number) or 1\n items = self.page_range\n length = self._num_pages\n\n L = items[0:5]\n\n if index - 3 == 5:\n # Fix when two sets, L_s and M_s, are disjoint but make a sequence\n # [... 3 4, 5 6 ...], then there should not be dots between them\n M = items[index-4:index+4] or items[0:index+1]\n else:\n M = items[index-3:index+4] or items[0:index+1]\n\n if index + 4 == length - 5:\n # Fix when two sets, M_s and R_s, are disjoint but make a sequence\n # [... 3 4, 5 6 ...], then there should not be dots between them\n R = items[-6:]\n else:\n R = items[-5:]\n\n L_s = set(L)\n M_s = set(M)\n R_s = set(R)\n\n dots = [None]\n\n D1 = L_s.isdisjoint(M_s)\n D2 = M_s.isdisjoint(R_s)\n D3 = L_s.isdisjoint(R_s)\n\n if D1 and D2 and D3:\n # L…M…R\n pagination = chain(L, dots, M, dots, R)\n elif not D1 and D2 and D3:\n # LM…R\n pagination = chain(sorted(L_s | M_s), dots, R)\n elif D1 and not D2 and D3:\n # L…MR\n pagination = chain(L, dots, sorted(M_s | R_s))\n elif not D3:\n # tough situation, we may have split something wrong,\n # so lets just display all pages\n pagination = items\n else:\n # LMR\n pagination = iter(sorted(L_s | M_s | R_s))\n\n return pagination\n\n\ndef get_pagination_items(request, all_objects):\n '''Select paginated items.'''\n\n # Get parameters.\n items = request.GET.get('items_per_page', ITEMS_PER_PAGE)\n if items != 'all':\n try:\n items = int(items)\n except ValueError:\n items = ITEMS_PER_PAGE\n else:\n # Show everything.\n items = all_objects.count()\n\n # Figure out where we are.\n page = request.GET.get('page')\n\n # Show selected items.\n paginator = Paginator(all_objects, items)\n\n # Select the pages.\n try:\n result = paginator.page(page)\n\n # If page is not an integer, deliver first page.\n except PageNotAnInteger:\n result = paginator.page(1)\n\n # If page is out of range, deliver last page of results.\n except EmptyPage:\n result = paginator.page(paginator.num_pages)\n\n return result\n\n\ndef merge_persons(person_from, person_to):\n for award in person_from.award_set.all():\n try:\n award.person = person_to\n award.save()\n except IntegrityError:\n # unique constraints fail (probably)\n pass\n\n for task in person_from.task_set.all():\n try:\n task.person = person_to\n task.save()\n except IntegrityError:\n # unique constraints fail (probably)\n pass\n\n # update only unique lessons\n person_from.qualification_set.exclude(lesson__in=person_to.lessons.all()) \\\n .update(person=person_to)\n\n person_to.domains.add(*person_from.domains.all())\n\n # removes tasks, awards, qualifications in a cascading way\n person_from.delete()\n\n\nclass WrongWorkshopURL(ValueError):\n \"\"\"Raised when we fall back to reading tags from event's YAML front matter,\n which requires a link to GitHub raw hosted file, but we can't get that link\n because provided URL doesn't match Event.WEBSITE_REGEX\n (see `generate_url_to_event_index` below).\"\"\"\n\n def __str__(self):\n return ('Event\\'s URL doesn\\'t match Github website format '\n '\"http://user.github.io/2015-12-08-workshop\".')\n\n\ndef 
generate_url_to_event_index(website_url):\n \"\"\"Given URL to workshop's website, generate a URL to its raw `index.html`\n file in GitHub repository.\"\"\"\n template = ('https://raw.githubusercontent.com/{name}/{repo}'\n '/gh-pages/index.html')\n regex = Event.WEBSITE_REGEX\n\n results = regex.match(website_url)\n if results:\n return template.format(**results.groupdict()), results.group('repo')\n raise WrongWorkshopURL()\n\n\ndef find_tags_on_event_index(content):\n \"\"\"Given workshop's raw `index.html`, find and take YAML tags that\n have workshop-related data.\"\"\"\n try:\n first, header, last = content.split('---')\n tags = yaml.load(header.strip())\n\n # get tags to the form returned by `find_tags_on_event_website`\n # because YAML tries to interpret values from index's header\n filtered_tags = {key: value for key, value in tags.items()\n if key in ALLOWED_TAG_NAMES}\n for key, value in filtered_tags.items():\n if isinstance(value, int):\n filtered_tags[key] = str(value)\n elif isinstance(value, datetime.date):\n filtered_tags[key] = '{:%Y-%m-%d}'.format(value)\n elif isinstance(value, list):\n filtered_tags[key] = ', '.join(value)\n\n return filtered_tags\n\n except (ValueError, yaml.scanner.ScannerError):\n # can't unpack or header is not YML format\n return dict()\n\n\ndef find_tags_on_event_website(content):\n \"\"\"Given website content, find and take <meta> tags that have\n workshop-related data.\"\"\"\n\n R = r'<meta name=\"(?P<name>[\\w-]+)\" content=\"(?P<content>.+)\" />$'\n regexp = re.compile(R, re.M)\n\n return {name: content for name, content in regexp.findall(content)\n if name in ALLOWED_TAG_NAMES}\n\n\ndef parse_tags_from_event_website(tags):\n \"\"\"Simple preprocessing of the tags from event website.\"\"\"\n # no compatibility with old-style names\n country = tags.get('country', '').upper()[0:2]\n if len(country) < 2:\n country = ''\n language = tags.get('language', '').upper()[0:2]\n if len(language) < 2:\n language = ''\n\n try:\n latitude, _ = tags.get('latlng', '').split(',')\n latitude = float(latitude.strip())\n except (ValueError, AttributeError):\n # value error: can't convert string to float\n # attribute error: object doesn't have \"split\" or \"strip\" methods\n latitude = None\n try:\n _, longitude = tags.get('latlng', '').split(',')\n longitude = float(longitude.strip())\n except (ValueError, AttributeError):\n # value error: can't convert string to float\n # attribute error: object doesn't have \"split\" or \"strip\" methods\n longitude = None\n\n try:\n reg_key = tags.get('eventbrite', '')\n reg_key = int(reg_key)\n except (ValueError, TypeError):\n # value error: can't convert string to int\n # type error: can't convert None to int\n reg_key = None\n\n try:\n start = tags.get('startdate', '')\n start = datetime.datetime.strptime(start, '%Y-%m-%d').date()\n except ValueError:\n start = None\n\n try:\n end = tags.get('enddate', '')\n end = datetime.datetime.strptime(end, '%Y-%m-%d').date()\n except ValueError:\n end = None\n\n # Split string of comma-separated names into a list, but return empty list\n # instead of [''] when there are no instructors/helpers.\n instructors = tags.get('instructor', '').split('|')\n instructors = [instructor.strip() for instructor in instructors]\n instructors = [] if not any(instructors) else instructors\n helpers = tags.get('helper', '').split('|')\n helpers = [helper.strip() for helper in helpers]\n helpers = [] if not any(helpers) else helpers\n\n return {\n 'slug': tags.get('slug', ''),\n 'language': language,\n 
'start': start,\n 'end': end,\n 'country': country,\n 'venue': tags.get('venue', ''),\n 'address': tags.get('address', ''),\n 'latitude': latitude,\n 'longitude': longitude,\n 'reg_key': reg_key,\n 'instructors': instructors,\n 'helpers': helpers,\n 'contact': tags.get('contact', ''),\n }\n\n\ndef validate_tags_from_event_website(tags):\n errors = []\n\n Requirement = namedtuple(\n 'Requirement',\n ['name', 'display', 'required', 'match_format'],\n )\n\n DATE_FMT = r'^\\d{4}-\\d{2}-\\d{2}$'\n SLUG_FMT = r'^\\d{4}-\\d{2}-\\d{2}-.+$'\n TWOCHAR_FMT = r'^\\w\\w$'\n FRACTION_FMT = r'[-+]?[0-9]*\\.?[0-9]*'\n requirements = [\n Requirement('slug', 'workshop name', True, SLUG_FMT),\n Requirement('language', None, False, TWOCHAR_FMT),\n Requirement('startdate', 'start date', True, DATE_FMT),\n Requirement('enddate', 'end date', False, DATE_FMT),\n Requirement('country', None, True, TWOCHAR_FMT),\n Requirement('venue', None, True, None),\n Requirement('address', None, True, None),\n Requirement('latlng', 'latitude / longitude', True,\n '^' + FRACTION_FMT + r',\\s?' + FRACTION_FMT + '$'),\n Requirement('instructor', None, True, None),\n Requirement('helper', None, True, None),\n Requirement('contact', None, True, None),\n Requirement('eventbrite', 'Eventbrite event ID', False, r'^\\d+$'),\n ]\n\n for requirement in requirements:\n d_ = requirement._asdict()\n name_ = ('{display} ({name})'.format(**d_)\n if requirement.display\n else '{name}'.format(**d_))\n type_ = 'required' if requirement.required else 'optional'\n value_ = tags.get(requirement.name)\n\n if not value_:\n errors.append('Missing {} tag {}.'.format(type_, name_))\n\n if value_ == 'FIXME':\n errors.append('Placeholder value \"FIXME\" for {} tag {}.'\n .format(type_, name_))\n else:\n try:\n if not re.match(requirement.match_format, value_):\n errors.append(\n 'Invalid value \"{}\" for {} tag {}: should be in '\n 'format \"{}\".'\n .format(value_, type_, name_, requirement.match_format)\n )\n except (re.error, TypeError):\n pass\n\n return errors\n\n\ndef update_event_attendance_from_tasks(event):\n \"\"\"Increase event.attendance if there's more learner tasks belonging to the\n event.\"\"\"\n learners = event.task_set.filter(role__name='learner').count()\n Event.objects \\\n .filter(pk=event.pk) \\\n .filter(Q(attendance__lt=learners) | Q(attendance__isnull=True)) \\\n .update(attendance=learners)\n\n\ndef universal_date_format(date):\n return '{:%Y-%m-%d}'.format(date)\n\n\ndef get_members(earliest, latest):\n '''Get everyone who is a member of the Software Carpentry Foundation.'''\n\n member_badge = Badge.objects.get(name='member')\n instructor_badges = Badge.objects.instructor_badges()\n instructor_role = Role.objects.get(name='instructor')\n\n # Everyone who is an explicit member.\n explicit = Person.objects.filter(badges__in=[member_badge]).distinct()\n\n # Everyone who qualifies by having taught recently.\n implicit = Person.objects.filter(\n task__role=instructor_role,\n badges__in=instructor_badges,\n task__event__start__gte=earliest,\n task__event__start__lte=latest\n ).distinct()\n\n # Merge the two sets.\n return explicit | implicit\n\n\ndef default_membership_cutoff():\n \"Calculate a default cutoff dates for members finding with `get_members`.\"\n earliest = datetime.date.today() - 2 * datetime.timedelta(days=365)\n latest = datetime.date.today()\n return earliest, latest\n\n\ndef find_emails(text):\n \"\"\"Find emails in the text. 
This is based on Django's own\n `django.utils.html.urlize`.\"\"\"\n # Split into tokens in case someone uses for example\n # 'Name <[email protected]>' format.\n emails = []\n\n for word in WORD_SPLIT.split(text):\n if SIMPLE_EMAIL.match(word):\n local, domain = word.rsplit('@', 1)\n try:\n domain = domain.encode('idna').decode('ascii')\n except UnicodeError:\n continue\n emails.append('{}@{}'.format(local, domain))\n\n return emails\n\n\ndef assignment_selection(request):\n \"\"\"Parse `assigned_to` query param depending on the logged-in user.\"\"\"\n user = request.user\n is_admin = user.groups.filter(name='administrators').exists()\n\n # it's always possible to assign something entirely else\n # in the `?assigned_to` query param\n\n if is_admin:\n # One of the administrators.\n # They should be presented with their events by default.\n assigned_to = request.GET.get('assigned_to', 'me')\n\n elif user.is_superuser:\n # A superuser. Should see all events by default\n assigned_to = request.GET.get('assigned_to', 'all')\n\n else:\n # Normal user (for example subcommittee members).\n assigned_to = 'all'\n\n return assigned_to, is_admin\n\n\ndef failed_to_delete(request, object, protected_objects, back=None):\n context = {\n 'title': 'Failed to delete',\n 'back': back or object.get_absolute_url,\n 'object': object,\n 'refs': defaultdict(list),\n }\n\n for obj in protected_objects:\n # e.g. for model Award its plural name is 'awards'\n name = str(obj.__class__._meta.verbose_name_plural)\n context['refs'][name].append(obj)\n\n # this trick enables looping through defaultdict instance\n context['refs'].default_factory = None\n\n return render(request, 'workshops/failed_to_delete.html', context)\n\n\ndef assign(request, obj, person_id):\n \"\"\"Set obj.assigned_to. This view helper works with both POST and GET\n requests:\n\n * POST: read person ID from POST's person_1\n * GET: read person_id from URL\n * both: if person_id is None then make event.assigned_to empty\n * otherwise assign matching person.\n\n This is not a view, but it's used in some.\"\"\"\n try:\n if request.method == \"POST\":\n person_id = request.POST.get('person_1', None)\n\n if person_id is None:\n obj.assigned_to = None\n else:\n person = Person.objects.get(pk=person_id)\n obj.assigned_to = person\n\n obj.save()\n\n except (Person.DoesNotExist, ValueError):\n raise Http404(\"No person found matching the query.\")\n",
"path": "workshops/util.py"
}
] | diff --git a/workshops/test/test_util.py b/workshops/test/test_util.py
index d4e33f889..ad4d06371 100644
--- a/workshops/test/test_util.py
+++ b/workshops/test/test_util.py
@@ -5,8 +5,9 @@
from django.contrib.auth.models import Group
from django.contrib.sessions.serializers import JSONSerializer
-from django.test import TestCase, RequestFactory
from django.core.urlresolvers import reverse
+from django.http import Http404
+from django.test import TestCase, RequestFactory
from ..models import Host, Event, Role, Person, Task, Badge, Award
from ..util import (
@@ -23,6 +24,7 @@
create_username,
InternalError,
Paginator,
+ assign,
)
from .base import TestBase
@@ -1008,3 +1010,62 @@ def test_long_no_breaks(self):
# None is a break, it appears as '...' in the paginator widget
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]
)
+
+
+class TestAssignUtil(TestCase):
+ def setUp(self):
+ """Set up RequestFactory for making fast fake requests."""
+ Person.objects.create_user('test_user', 'User', 'Test', 'user@test')
+ self.factory = RequestFactory()
+ self.event = Event.objects.create(
+ slug='event-for-assignment', host=Host.objects.first())
+
+ def test_no_integer_pk(self):
+ """Ensure we fail with 404 when person PK is string, not integer."""
+ tests = [
+ (self.factory.get('/'), 'alpha'),
+ (self.factory.post('/', {'person_1': 'alpha'}), None),
+ ]
+ for request, person_id in tests:
+ with self.subTest(method=request.method):
+ with self.assertRaises(Http404):
+ assign(request, self.event, person_id=person_id)
+
+ # just reset the link, for safety sake
+ self.event.assigned_to = None
+ self.event.save()
+
+ def test_assigning(self):
+ """Ensure that with assignment is set correctly."""
+ first_person = Person.objects.first()
+ tests = [
+ (self.factory.get('/'), first_person.pk),
+ (self.factory.post('/', {'person_1': first_person.pk}), None),
+ ]
+ for request, person_id in tests:
+ with self.subTest(method=request.method):
+ # just reset the link, for safety sake
+ self.event.assigned_to = None
+ self.event.save()
+
+ assign(request, self.event, person_id=person_id)
+ self.event.refresh_from_db()
+ self.assertEqual(self.event.assigned_to, first_person)
+
+ def test_removing_assignment(self):
+ """Ensure that with person_id=None, the assignment is removed."""
+ first_person = Person.objects.first()
+ tests = [
+ (self.factory.get('/'), None),
+ (self.factory.post('/'), None),
+ ]
+ for request, person_id in tests:
+ with self.subTest(method=request.method):
+ # just re-set the link to first person, for safety sake
+ self.event.assigned_to = first_person
+ self.event.save()
+
+ assign(request, self.event, person_id=person_id)
+
+ self.event.refresh_from_db()
+ self.assertEqual(self.event.assigned_to, None)
diff --git a/workshops/util.py b/workshops/util.py
index 3aee0556c..ad5d1d7dd 100644
--- a/workshops/util.py
+++ b/workshops/util.py
@@ -762,5 +762,5 @@ def assign(request, obj, person_id):
obj.save()
- except Person.DoesNotExist:
+ except (Person.DoesNotExist, ValueError):
raise Http404("No person found matching the query.")
|
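The merged fix above catches `ValueError` alongside `Person.DoesNotExist`. The "get-or-404" idea suggested in the issue could also be expressed with Django's built-in `get_object_or_404` shortcut; the helper below is illustrative only (its name is made up here, and it is not the code that was merged), and in the Django 1.x version used by this project it still needs an explicit `ValueError` guard, because `get_object_or_404` only converts `DoesNotExist` into `Http404`.

```python
# Illustrative sketch only: ``lookup_person_or_404`` is a made-up helper name;
# the actual fix (see the diff above) catches ValueError directly inside
# workshops.util.assign.
from django.http import Http404
from django.shortcuts import get_object_or_404

from workshops.models import Person


def lookup_person_or_404(person_id):
    """Return the Person with the given primary key, or raise Http404.

    get_object_or_404 turns Person.DoesNotExist into Http404, but an empty or
    non-numeric ``person_id`` (as in the traceback above) still raises
    ValueError from the integer primary-key lookup, so that case is caught
    here too.
    """
    try:
        return get_object_or_404(Person, pk=person_id)
    except ValueError:
        raise Http404('No person found matching the query.')
```

Either form keeps the `assign` helper from turning an empty `person_1` form value into a 500 response.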
mozilla__telemetry-analysis-service-474 | Create view with job histories
There have been several jobs failing silently. Those jobs will soon generate alerts (#201), but it would still be convenient to have a master view in the dashboard that shows the history and status of all scheduled jobs. Furthermore, every user should be able to see the history of their own jobs.
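A possible shape for such a dashboard view, sketched against the `SparkJob`/`SparkJobRun` models shown in the `before_files` column of this row; the class name, template path, and pagination size are assumptions for illustration, not the implementation that was eventually merged.

```python
# Hypothetical sketch, not the merged implementation: it assumes the
# SparkJobRun model shown below (its ``spark_job`` foreign key, and a
# ``created_at`` field implied by ``get_latest_by = 'created_at'``) plus
# SparkJob's ``created_by`` field; the template path is made up here.
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import ListView

from .models import SparkJobRun


class SparkJobRunHistoryView(LoginRequiredMixin, ListView):
    """List job runs, newest first; non-staff users see only their own jobs."""
    model = SparkJobRun
    template_name = 'jobs/run_history.html'  # illustrative template path
    paginate_by = 50

    def get_queryset(self):
        queryset = (
            SparkJobRun.objects
            .select_related('spark_job')
            .order_by('-created_at')
        )
        if not self.request.user.is_staff:
            # every user should be able to see the history of their own jobs
            queryset = queryset.filter(spark_job__created_by=self.request.user)
        return queryset
```

Wiring it up would only need a URL pattern pointing at `SparkJobRunHistoryView.as_view()` and a template that iterates over `object_list`.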
| [
{
"content": "# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, you can obtain one at http://mozilla.org/MPL/2.0/.\nfrom datetime import timedelta\n\nimport urlman\nfrom autorepr import autorepr, autostr\nfrom django.core.urlresolvers import reverse\nfrom django.db import models, transaction\nfrom django.utils import timezone\nfrom django.utils.functional import cached_property\n\nfrom ..clusters.models import Cluster, EMRReleaseModel\nfrom ..clusters.provisioners import ClusterProvisioner\nfrom ..models import CreatedByModel, EditedAtModel, ForgivingOneToOneField\n\nfrom .provisioners import SparkJobProvisioner\nfrom .queries import SparkJobQuerySet, SparkJobRunQuerySet\n\nDEFAULT_STATUS = ''\n\n\nclass SparkJob(EMRReleaseModel, CreatedByModel, EditedAtModel):\n INTERVAL_DAILY = 24\n INTERVAL_WEEKLY = INTERVAL_DAILY * 7\n INTERVAL_MONTHLY = INTERVAL_DAILY * 30\n INTERVAL_CHOICES = [\n (INTERVAL_DAILY, 'Daily'),\n (INTERVAL_WEEKLY, 'Weekly'),\n (INTERVAL_MONTHLY, 'Monthly'),\n ]\n RESULT_PRIVATE = 'private'\n RESULT_PUBLIC = 'public'\n RESULT_VISIBILITY_CHOICES = [\n (RESULT_PRIVATE, 'Private'),\n (RESULT_PUBLIC, 'Public'),\n ]\n identifier = models.CharField(\n max_length=100,\n help_text=\"Job name, used to uniqely identify individual jobs.\",\n unique=True,\n )\n description = models.TextField(\n help_text='Job description.',\n default='',\n )\n notebook_s3_key = models.CharField(\n max_length=800,\n help_text=\"S3 key of the notebook after uploading it to the Spark code bucket.\"\n )\n result_visibility = models.CharField( # can currently be \"public\" or \"private\"\n max_length=50,\n help_text=\"Whether notebook results are uploaded to a public or private bucket\",\n choices=RESULT_VISIBILITY_CHOICES,\n default=RESULT_PRIVATE,\n )\n size = models.IntegerField(\n help_text=\"Number of computers to use to run the job.\"\n )\n interval_in_hours = models.IntegerField(\n help_text=\"Interval at which the job should run, in hours.\",\n choices=INTERVAL_CHOICES,\n default=INTERVAL_DAILY,\n )\n job_timeout = models.IntegerField(\n help_text=\"Number of hours before the job times out.\",\n )\n start_date = models.DateTimeField(\n help_text=\"Date/time that the job should start being scheduled to run.\"\n )\n end_date = models.DateTimeField(\n blank=True,\n null=True,\n help_text=\"Date/time that the job should stop being scheduled to run, null if no end date.\"\n )\n expired_date = models.DateTimeField(\n blank=True,\n null=True,\n help_text=\"Date/time that the job was expired.\"\n )\n is_enabled = models.BooleanField(\n default=True,\n help_text=\"Whether the job should run or not.\"\n )\n\n objects = SparkJobQuerySet.as_manager()\n\n class Meta:\n permissions = [\n ('view_sparkjob', 'Can view Spark job'),\n ]\n\n class urls(urlman.Urls):\n\n def delete(self):\n return reverse('jobs-delete', kwargs={'id': self.id})\n\n def detail(self):\n return reverse('jobs-detail', kwargs={'id': self.id})\n\n def download(self):\n return reverse('jobs-download', kwargs={'id': self.id})\n\n def edit(self):\n return reverse('jobs-edit', kwargs={'id': self.id})\n\n def run(self):\n return reverse('jobs-run', kwargs={'id': self.id})\n\n __str__ = autostr('{self.identifier}')\n\n __repr__ = autorepr(['identifier', 'size', 'is_enabled'])\n\n def get_absolute_url(self):\n return self.urls.detail\n\n @property\n def provisioner(self):\n return SparkJobProvisioner()\n\n # TEMPORARY till we have 1:1 relationship to 
cluster object\n # and we can then ask for spark_job.cluster.provisioner\n @property\n def cluster_provisioner(self):\n return ClusterProvisioner()\n\n @property\n def schedule(self):\n from .schedules import SparkJobSchedule\n return SparkJobSchedule(self)\n\n def has_future_end_date(self, now):\n # no end date means it'll always be due\n if self.end_date is None:\n return True\n return self.end_date >= now\n\n @property\n def has_never_run(self):\n \"\"\"\n Whether the job has run before.\n Looks at both the cluster status and our own record when\n we asked it to run.\n \"\"\"\n return (self.latest_run is None or\n self.latest_run.status == DEFAULT_STATUS or\n self.latest_run.scheduled_date is None)\n\n @property\n def has_finished(self):\n \"\"\"Whether the job's cluster is terminated or failed\"\"\"\n return (self.latest_run and\n self.latest_run.status in Cluster.FINAL_STATUS_LIST)\n\n @property\n def has_timed_out(self):\n \"\"\"\n Whether the current job run has been running longer than the\n job's timeout allows.\n \"\"\"\n if self.has_never_run:\n # Job isn't even running at the moment and never ran before\n return False\n timeout_delta = timedelta(hours=self.job_timeout)\n max_run_time = self.latest_run.scheduled_date + timeout_delta\n timed_out = timezone.now() >= max_run_time\n return not self.is_runnable and timed_out\n\n @property\n def is_due(self):\n \"\"\"\n Whether the start date is in the past and the end date is in the\n future.\n \"\"\"\n now = timezone.now()\n has_past_start_date = self.start_date <= now\n return has_past_start_date and self.has_future_end_date(now)\n\n @property\n def is_runnable(self):\n \"\"\"\n Either the job has never run before or was never finished.\n\n This is checked right before the actual provisioning.\n \"\"\"\n return self.has_never_run or self.has_finished\n\n @property\n def should_run(self):\n \"\"\"Whether the scheduled Spark job should run.\"\"\"\n return self.is_runnable and self.is_enabled and self.is_due\n\n @property\n def is_public(self):\n return self.result_visibility == self.RESULT_PUBLIC\n\n @property\n def is_active(self):\n return (self.latest_run and\n self.latest_run.status in Cluster.ACTIVE_STATUS_LIST)\n\n @property\n def notebook_name(self):\n return self.notebook_s3_key.rsplit('/', 1)[-1]\n\n @cached_property\n def notebook_s3_object(self):\n return self.provisioner.get(self.notebook_s3_key)\n\n @cached_property\n def results(self):\n return self.provisioner.results(self.identifier, self.is_public)\n\n def get_latest_run(self):\n try:\n return self.runs.latest()\n except SparkJobRun.DoesNotExist:\n return None\n latest_run = cached_property(get_latest_run, name='latest_run')\n\n def run(self):\n \"\"\"Actually run the scheduled Spark job.\"\"\"\n # if the job ran before and is still running, don't start it again\n if not self.is_runnable:\n return\n jobflow_id = self.provisioner.run(\n user_username=self.created_by.username,\n user_email=self.created_by.email,\n identifier=self.identifier,\n emr_release=self.emr_release.version,\n size=self.size,\n notebook_key=self.notebook_s3_key,\n is_public=self.is_public,\n job_timeout=self.job_timeout,\n )\n # Create new job history record.\n run = self.runs.create(\n spark_job=self,\n jobflow_id=jobflow_id,\n scheduled_date=timezone.now(),\n emr_release_version=self.emr_release.version,\n size=self.size,\n )\n # Remove the cached latest run to this objects will requery it.\n try:\n delattr(self, 'latest_run')\n except AttributeError: # pragma: no cover\n pass # It didn't have 
a `latest_run` and that's ok.\n run.update_status()\n\n def expire(self):\n # TODO disable the job as well once it's easy to re-enable the job\n deleted = self.schedule.delete()\n self.expired_date = timezone.now()\n self.save()\n return deleted\n\n def terminate(self):\n \"\"\"Stop the currently running scheduled Spark job.\"\"\"\n if self.latest_run:\n self.cluster_provisioner.stop(self.latest_run.jobflow_id)\n\n def first_run(self):\n if self.latest_run:\n return None\n from .tasks import run_job\n return run_job.apply_async(\n args=(self.pk,),\n kwargs={'first_run': True},\n # make sure we run this task only when we expect it\n # may be in the future, may be in the past\n # but definitely at a specific time\n eta=self.start_date,\n )\n\n def save(self, *args, **kwargs):\n # whether the job is being created for the first time\n first_save = self.pk is None\n # resetting expired_date in case a user resets the end_date\n if self.expired_date and self.end_date and self.end_date > timezone.now():\n self.expired_date = None\n super().save(*args, **kwargs)\n # first remove if it exists\n self.schedule.delete()\n # and then add it, but only if the end date is in the future\n if self.has_future_end_date(timezone.now()):\n self.schedule.add()\n if first_save:\n transaction.on_commit(self.first_run)\n\n def delete(self, *args, **kwargs):\n # make sure to shut down the cluster if it's currently running\n self.terminate()\n # make sure to clean up the job notebook from storage\n self.provisioner.remove(self.notebook_s3_key)\n self.schedule.delete()\n super().delete(*args, **kwargs)\n\n\nclass SparkJobRun(EditedAtModel):\n\n spark_job = models.ForeignKey(\n SparkJob,\n on_delete=models.CASCADE,\n related_name='runs',\n related_query_name='runs',\n )\n jobflow_id = models.CharField(\n max_length=50,\n blank=True,\n null=True,\n )\n emr_release_version = models.CharField(\n max_length=50,\n blank=True,\n null=True,\n )\n size = models.IntegerField(\n help_text=\"Number of computers used to run the job.\",\n blank=True,\n null=True,\n )\n status = models.CharField(\n max_length=50,\n blank=True,\n default=DEFAULT_STATUS,\n )\n scheduled_date = models.DateTimeField(\n blank=True,\n null=True,\n help_text=\"Date/time that the job was scheduled.\",\n )\n run_date = models.DateTimeField(\n blank=True,\n null=True,\n help_text=\"Date/time that the job was run.\",\n )\n finished_at = models.DateTimeField(\n blank=True,\n null=True,\n help_text=\"Date/time that the job was terminated or failed.\",\n )\n\n objects = SparkJobRunQuerySet.as_manager()\n\n class Meta:\n get_latest_by = 'created_at'\n\n __str__ = autostr('{self.jobflow_id}')\n\n def spark_job_identifier(self):\n return self.spark_job.identifier\n\n __repr__ = autorepr(\n ['jobflow_id', 'spark_job_identifier', 'emr_release_version', 'size'],\n spark_job_identifier=spark_job_identifier,\n )\n\n @property\n def info(self):\n return self.spark_job.cluster_provisioner.info(self.jobflow_id)\n\n def update_status(self, info=None):\n \"\"\"\n Updates latest status and life cycle datetimes.\n \"\"\"\n if info is None:\n info = self.info\n if self.status != info['state']:\n self.status = info['state']\n if self.status == Cluster.STATUS_RUNNING:\n self.run_date = timezone.now()\n elif self.status in Cluster.FINAL_STATUS_LIST:\n # set the terminated date to now\n self.finished_at = info.get('end_datetime', timezone.now())\n # if the job cluster terminated with error raise the alarm\n if self.status == Cluster.STATUS_TERMINATED_WITH_ERRORS:\n 
SparkJobRunAlert.objects.create(\n run=self,\n reason_code=info['state_change_reason_code'],\n reason_message=info['state_change_reason_message'],\n )\n self.save()\n return self.status\n\n\nclass SparkJobRunAlert(EditedAtModel):\n \"\"\"\n A data model to store job run alerts for later processing by an\n async job that sends out emails.\n \"\"\"\n run = ForgivingOneToOneField(\n SparkJobRun,\n on_delete=models.CASCADE,\n related_name='alert', # run.alert & alert.run\n primary_key=True,\n )\n reason_code = models.CharField(\n max_length=50,\n blank=True,\n null=True,\n help_text=\"The reason code for the creation of the alert.\",\n )\n reason_message = models.TextField(\n default='',\n help_text=\"The reason message for the creation of the alert.\",\n )\n mail_sent_date = models.DateTimeField(\n blank=True,\n null=True,\n help_text=\"The datetime the alert email was sent.\",\n )\n",
"path": "atmo/jobs/models.py"
}
] | [
{
"content": "# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, you can obtain one at http://mozilla.org/MPL/2.0/.\nfrom datetime import timedelta\n\nimport urlman\nfrom autorepr import autorepr, autostr\nfrom django.core.urlresolvers import reverse\nfrom django.db import models, transaction\nfrom django.utils import timezone\nfrom django.utils.functional import cached_property\n\nfrom ..clusters.models import Cluster, EMRReleaseModel\nfrom ..clusters.provisioners import ClusterProvisioner\nfrom ..models import CreatedByModel, EditedAtModel, ForgivingOneToOneField\n\nfrom .provisioners import SparkJobProvisioner\nfrom .queries import SparkJobQuerySet, SparkJobRunQuerySet\n\nDEFAULT_STATUS = ''\n\n\nclass SparkJob(EMRReleaseModel, CreatedByModel, EditedAtModel):\n INTERVAL_DAILY = 24\n INTERVAL_WEEKLY = INTERVAL_DAILY * 7\n INTERVAL_MONTHLY = INTERVAL_DAILY * 30\n INTERVAL_CHOICES = [\n (INTERVAL_DAILY, 'Daily'),\n (INTERVAL_WEEKLY, 'Weekly'),\n (INTERVAL_MONTHLY, 'Monthly'),\n ]\n RESULT_PRIVATE = 'private'\n RESULT_PUBLIC = 'public'\n RESULT_VISIBILITY_CHOICES = [\n (RESULT_PRIVATE, 'Private'),\n (RESULT_PUBLIC, 'Public'),\n ]\n identifier = models.CharField(\n max_length=100,\n help_text=\"Job name, used to uniqely identify individual jobs.\",\n unique=True,\n )\n description = models.TextField(\n help_text='Job description.',\n default='',\n )\n notebook_s3_key = models.CharField(\n max_length=800,\n help_text=\"S3 key of the notebook after uploading it to the Spark code bucket.\"\n )\n result_visibility = models.CharField( # can currently be \"public\" or \"private\"\n max_length=50,\n help_text=\"Whether notebook results are uploaded to a public or private bucket\",\n choices=RESULT_VISIBILITY_CHOICES,\n default=RESULT_PRIVATE,\n )\n size = models.IntegerField(\n help_text=\"Number of computers to use to run the job.\"\n )\n interval_in_hours = models.IntegerField(\n help_text=\"Interval at which the job should run, in hours.\",\n choices=INTERVAL_CHOICES,\n default=INTERVAL_DAILY,\n )\n job_timeout = models.IntegerField(\n help_text=\"Number of hours before the job times out.\",\n )\n start_date = models.DateTimeField(\n help_text=\"Date/time that the job should start being scheduled to run.\"\n )\n end_date = models.DateTimeField(\n blank=True,\n null=True,\n help_text=\"Date/time that the job should stop being scheduled to run, null if no end date.\"\n )\n expired_date = models.DateTimeField(\n blank=True,\n null=True,\n help_text=\"Date/time that the job was expired.\"\n )\n is_enabled = models.BooleanField(\n default=True,\n help_text=\"Whether the job should run or not.\"\n )\n\n objects = SparkJobQuerySet.as_manager()\n\n class Meta:\n permissions = [\n ('view_sparkjob', 'Can view Spark job'),\n ]\n\n class urls(urlman.Urls):\n\n def delete(self):\n return reverse('jobs-delete', kwargs={'id': self.id})\n\n def detail(self):\n return reverse('jobs-detail', kwargs={'id': self.id})\n\n def download(self):\n return reverse('jobs-download', kwargs={'id': self.id})\n\n def edit(self):\n return reverse('jobs-edit', kwargs={'id': self.id})\n\n def run(self):\n return reverse('jobs-run', kwargs={'id': self.id})\n\n __str__ = autostr('{self.identifier}')\n\n __repr__ = autorepr(['identifier', 'size', 'is_enabled'])\n\n def get_absolute_url(self):\n return self.urls.detail\n\n @property\n def provisioner(self):\n return SparkJobProvisioner()\n\n # TEMPORARY till we have 1:1 relationship to 
cluster object\n # and we can then ask for spark_job.cluster.provisioner\n @property\n def cluster_provisioner(self):\n return ClusterProvisioner()\n\n @property\n def schedule(self):\n from .schedules import SparkJobSchedule\n return SparkJobSchedule(self)\n\n def has_future_end_date(self, now):\n # no end date means it'll always be due\n if self.end_date is None:\n return True\n return self.end_date >= now\n\n @property\n def has_never_run(self):\n \"\"\"\n Whether the job has run before.\n Looks at both the cluster status and our own record when\n we asked it to run.\n \"\"\"\n return (self.latest_run is None or\n self.latest_run.status == DEFAULT_STATUS or\n self.latest_run.scheduled_date is None)\n\n @property\n def has_finished(self):\n \"\"\"Whether the job's cluster is terminated or failed\"\"\"\n return (self.latest_run and\n self.latest_run.status in Cluster.FINAL_STATUS_LIST)\n\n @property\n def has_timed_out(self):\n \"\"\"\n Whether the current job run has been running longer than the\n job's timeout allows.\n \"\"\"\n if self.has_never_run:\n # Job isn't even running at the moment and never ran before\n return False\n timeout_delta = timedelta(hours=self.job_timeout)\n max_run_time = self.latest_run.scheduled_date + timeout_delta\n timed_out = timezone.now() >= max_run_time\n return not self.is_runnable and timed_out\n\n @property\n def is_due(self):\n \"\"\"\n Whether the start date is in the past and the end date is in the\n future.\n \"\"\"\n now = timezone.now()\n has_past_start_date = self.start_date <= now\n return has_past_start_date and self.has_future_end_date(now)\n\n @property\n def is_runnable(self):\n \"\"\"\n Either the job has never run before or was never finished.\n\n This is checked right before the actual provisioning.\n \"\"\"\n return self.has_never_run or self.has_finished\n\n @property\n def should_run(self):\n \"\"\"Whether the scheduled Spark job should run.\"\"\"\n return self.is_runnable and self.is_enabled and self.is_due\n\n @property\n def is_public(self):\n return self.result_visibility == self.RESULT_PUBLIC\n\n @property\n def is_active(self):\n return (self.latest_run and\n self.latest_run.status in Cluster.ACTIVE_STATUS_LIST)\n\n @property\n def notebook_name(self):\n return self.notebook_s3_key.rsplit('/', 1)[-1]\n\n @cached_property\n def notebook_s3_object(self):\n return self.provisioner.get(self.notebook_s3_key)\n\n @cached_property\n def results(self):\n return self.provisioner.results(self.identifier, self.is_public)\n\n def get_latest_run(self):\n try:\n return self.runs.latest()\n except SparkJobRun.DoesNotExist:\n return None\n latest_run = cached_property(get_latest_run, name='latest_run')\n\n def run(self):\n \"\"\"Actually run the scheduled Spark job.\"\"\"\n # if the job ran before and is still running, don't start it again\n if not self.is_runnable:\n return\n jobflow_id = self.provisioner.run(\n user_username=self.created_by.username,\n user_email=self.created_by.email,\n identifier=self.identifier,\n emr_release=self.emr_release.version,\n size=self.size,\n notebook_key=self.notebook_s3_key,\n is_public=self.is_public,\n job_timeout=self.job_timeout,\n )\n # Create new job history record.\n run = self.runs.create(\n spark_job=self,\n jobflow_id=jobflow_id,\n scheduled_date=timezone.now(),\n emr_release_version=self.emr_release.version,\n size=self.size,\n )\n # Remove the cached latest run to this objects will requery it.\n try:\n delattr(self, 'latest_run')\n except AttributeError: # pragma: no cover\n pass # It didn't have 
a `latest_run` and that's ok.\n run.update_status()\n\n def expire(self):\n # TODO disable the job as well once it's easy to re-enable the job\n deleted = self.schedule.delete()\n self.expired_date = timezone.now()\n self.save()\n return deleted\n\n def terminate(self):\n \"\"\"Stop the currently running scheduled Spark job.\"\"\"\n if self.latest_run:\n self.cluster_provisioner.stop(self.latest_run.jobflow_id)\n\n def first_run(self):\n if self.latest_run:\n return None\n from .tasks import run_job\n return run_job.apply_async(\n args=(self.pk,),\n kwargs={'first_run': True},\n # make sure we run this task only when we expect it\n # may be in the future, may be in the past\n # but definitely at a specific time\n eta=self.start_date,\n )\n\n def save(self, *args, **kwargs):\n # whether the job is being created for the first time\n first_save = self.pk is None\n # resetting expired_date in case a user resets the end_date\n if self.expired_date and self.end_date and self.end_date > timezone.now():\n self.expired_date = None\n super().save(*args, **kwargs)\n # first remove if it exists\n self.schedule.delete()\n # and then add it, but only if the end date is in the future\n if self.has_future_end_date(timezone.now()):\n self.schedule.add()\n if first_save:\n transaction.on_commit(self.first_run)\n\n def delete(self, *args, **kwargs):\n # make sure to shut down the cluster if it's currently running\n self.terminate()\n # make sure to clean up the job notebook from storage\n self.provisioner.remove(self.notebook_s3_key)\n self.schedule.delete()\n super().delete(*args, **kwargs)\n\n\nclass SparkJobRun(EditedAtModel):\n\n spark_job = models.ForeignKey(\n SparkJob,\n on_delete=models.CASCADE,\n related_name='runs',\n related_query_name='runs',\n )\n jobflow_id = models.CharField(\n max_length=50,\n blank=True,\n null=True,\n )\n emr_release_version = models.CharField(\n max_length=50,\n blank=True,\n null=True,\n )\n size = models.IntegerField(\n help_text=\"Number of computers used to run the job.\",\n blank=True,\n null=True,\n )\n status = models.CharField(\n max_length=50,\n blank=True,\n default=DEFAULT_STATUS,\n )\n scheduled_date = models.DateTimeField(\n blank=True,\n null=True,\n help_text=\"Date/time that the job was scheduled.\",\n )\n run_date = models.DateTimeField(\n blank=True,\n null=True,\n help_text=\"Date/time that the job was run.\",\n )\n finished_at = models.DateTimeField(\n blank=True,\n null=True,\n help_text=\"Date/time that the job was terminated or failed.\",\n )\n\n objects = SparkJobRunQuerySet.as_manager()\n\n class Meta:\n get_latest_by = 'created_at'\n ordering = ['-created_at']\n\n __str__ = autostr('{self.jobflow_id}')\n\n def spark_job_identifier(self):\n return self.spark_job.identifier\n\n __repr__ = autorepr(\n ['jobflow_id', 'spark_job_identifier', 'emr_release_version', 'size'],\n spark_job_identifier=spark_job_identifier,\n )\n\n @property\n def info(self):\n return self.spark_job.cluster_provisioner.info(self.jobflow_id)\n\n def update_status(self, info=None):\n \"\"\"\n Updates latest status and life cycle datetimes.\n \"\"\"\n if info is None:\n info = self.info\n if self.status != info['state']:\n self.status = info['state']\n if self.status == Cluster.STATUS_RUNNING:\n self.run_date = timezone.now()\n elif self.status in Cluster.FINAL_STATUS_LIST:\n # set the terminated date to now\n self.finished_at = info.get('end_datetime', timezone.now())\n # if the job cluster terminated with error raise the alarm\n if self.status == 
Cluster.STATUS_TERMINATED_WITH_ERRORS:\n SparkJobRunAlert.objects.create(\n run=self,\n reason_code=info['state_change_reason_code'],\n reason_message=info['state_change_reason_message'],\n )\n self.save()\n return self.status\n\n\nclass SparkJobRunAlert(EditedAtModel):\n \"\"\"\n A data model to store job run alerts for later processing by an\n async job that sends out emails.\n \"\"\"\n run = ForgivingOneToOneField(\n SparkJobRun,\n on_delete=models.CASCADE,\n related_name='alert', # run.alert & alert.run\n primary_key=True,\n )\n reason_code = models.CharField(\n max_length=50,\n blank=True,\n null=True,\n help_text=\"The reason code for the creation of the alert.\",\n )\n reason_message = models.TextField(\n default='',\n help_text=\"The reason message for the creation of the alert.\",\n )\n mail_sent_date = models.DateTimeField(\n blank=True,\n null=True,\n help_text=\"The datetime the alert email was sent.\",\n )\n",
"path": "atmo/jobs/models.py"
}
] | diff --git a/atmo/jobs/models.py b/atmo/jobs/models.py
index 126aca0a..fb638199 100644
--- a/atmo/jobs/models.py
+++ b/atmo/jobs/models.py
@@ -345,6 +345,7 @@ class SparkJobRun(EditedAtModel):
class Meta:
get_latest_by = 'created_at'
+ ordering = ['-created_at']
__str__ = autostr('{self.jobflow_id}')
diff --git a/atmo/templates/atmo/jobs/detail.html b/atmo/templates/atmo/jobs/detail.html
index 51763c29..dc74bdfb 100644
--- a/atmo/templates/atmo/jobs/detail.html
+++ b/atmo/templates/atmo/jobs/detail.html
@@ -36,6 +36,7 @@ <h2>
<a href="#notebook" class="btn btn-sm btn-default active" aria-controls="notebook" role="tab" data-toggle="tab">Notebook</a>
<a href="#results" class="btn btn-sm btn-default" aria-controls="results" role="tab" data-toggle="tab">Results</a>
<a href="#logs" class="btn btn-sm btn-default" aria-controls="logs" role="tab" data-toggle="tab">Logs</a>
+ <a href="#runs" class="btn btn-sm btn-default" aria-controls="runs" role="tab" data-toggle="tab">Runs</a>
</div>
<div class="btn-group"
role="group"
@@ -163,6 +164,38 @@ <h4>
{% endfor %}
</ul>
</div>
+ <div role="tabpanel" class="tab-pane" id="runs">
+ {% if spark_job.has_never_run %}
+ <p>No job run history yet.</p>
+ {% else %}
+ <table class="table table-striped">
+ <thead>
+ <tr>
+ <th>Jobflow ID</th>
+ <th>EMR version</th>
+ <th>Cluster size</th>
+ <th>Last status</th>
+ <th>Scheduled</th>
+ <th>Ran</th>
+ <th>Finished</th>
+ </tr>
+ </thead>
+ <tbody>
+ {% for run in spark_job.runs.all %}
+ <tr>
+ <td><a href="https://{{ settings.AWS_CONFIG.AWS_REGION }}.console.aws.amazon.com/elasticmapreduce/home?region={{ settings.AWS_CONFIG.AWS_REGION }}#cluster-details:{{ run.jobflow_id }}">{{ run.jobflow_id }}</a></td>
+ <td>{{ run.emr_release_version }}</td>
+ <td>{{ run.size|default:"n/a" }}</td>
+ <td>{{ run.status }}</td>
+ <td>{{ run.scheduled_date|default:"n/a" }}</td>
+ <td>{{ run.run_date|default:"n/a" }}</td>
+ <td>{{ run.finished_at|default:"n/a" }}</td>
+ </tr>
+ {% endfor %}
+ </tbody>
+ </table>
+ {% endif %}
+ </div>
</div>
</div>
<div class="col-sm-3">
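For context on the model change in this diff: `SparkJobRun.Meta` already sets `get_latest_by = 'created_at'` (used by `self.runs.latest()` in `SparkJob.get_latest_run`), and the added `ordering = ['-created_at']` makes the unqualified queryset that the new "Runs" tab iterates over (`spark_job.runs.all`) come back newest-first. A minimal sketch of how the two Meta options behave — the job lookup and identifier are hypothetical, only the model and manager names come from the files above:
```python
# Hypothetical Django shell session against the SparkJob/SparkJobRun models above.
spark_job = SparkJob.objects.get(identifier="my-job")  # "my-job" is a made-up identifier

latest = spark_job.runs.latest()  # resolved through Meta.get_latest_by = 'created_at'
history = spark_job.runs.all()    # newest-first via the added Meta.ordering = ['-created_at']

# Same iteration order the new "Runs" tab template relies on:
for run in history:
    print(run.jobflow_id, run.status, run.scheduled_date)
```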
|
sanic-org__sanic-878 | 0.5.5 release request
Because 0.5.4 has an actual protocol parsing problem (#755), I request that 0.5.5 be released quickly.
It causes actual request loss and unhandleable 400 errors for Sanic users (unless they apply a local patch to Sanic).
| [
{
"content": "from sanic.app import Sanic\nfrom sanic.blueprints import Blueprint\n\n__version__ = '0.5.4'\n\n__all__ = ['Sanic', 'Blueprint']\n",
"path": "sanic/__init__.py"
}
] | [
{
"content": "from sanic.app import Sanic\nfrom sanic.blueprints import Blueprint\n\n__version__ = '0.6.0'\n\n__all__ = ['Sanic', 'Blueprint']\n",
"path": "sanic/__init__.py"
}
] | diff --git a/sanic/__init__.py b/sanic/__init__.py
index 4cc0710ff7..8f35a28367 100644
--- a/sanic/__init__.py
+++ b/sanic/__init__.py
@@ -1,6 +1,6 @@
from sanic.app import Sanic
from sanic.blueprints import Blueprint
-__version__ = '0.5.4'
+__version__ = '0.6.0'
__all__ = ['Sanic', 'Blueprint']
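The change itself is only the version-string bump that ships the fix. For downstream projects bitten by #755, an interim measure until they can upgrade is to refuse to start on the affected release — a hypothetical guard, assuming only the `sanic.__version__` attribute shown in the files above:
```python
# Hypothetical downstream safeguard against the broken 0.5.4 release.
import sanic

if sanic.__version__ == "0.5.4":
    raise RuntimeError(
        "sanic 0.5.4 has a protocol parsing bug (#755); upgrade to 0.6.0 or later"
    )
```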
|
pypa__setuptools-4184 | [BUG] "'extras_require' must be a dictionary..." regression
### setuptools version
setuptools==67.0.0
### Python version
Python 3.10
### OS
Linux / Ubuntu
### Additional environment information
_No response_
### Description
With the latest setuptools I am unable to use a package (btchip-python) as input to pip-compile for requirements hash generation. It errors with:
> error in btchip-python setup command: 'extras_require' must be a dictionary whose values are strings or lists of strings containing valid project/version requirement specifiers.
If I downgrade setuptools to a version I know works, 65.5.0, the package works again.
The problem part of the project's setup.py file is:
```
extras_require = {
'smartcard': [ 'python-pyscard>=1.6.12-4build1' ]
},
```
It is triggered by the following command that pip-compile runs:
> python3 setup.py egg_info
### Expected behavior
Unless there is an obvious reason for setuptools to reject values it accepted before, I expect it to continue to accept them and not make breaking changes.
### How to Reproduce
The simplest reproduction case is taking the erroring command from pip-compile and running it in a local copy of the package.
1. pip3 install setuptools==67.0.0
2. git clone https://github.com/LedgerHQ/btchip-python.git
3. cd btchip-python
4. python3 setup.py egg_info
5. Observe that it errors with the `extras_require` complaint.
Then with the older setuptools.
1. pip3 install setuptools==65.5.0
2. python3 setup.py egg_info
3. Observe that it generates the eggy stuff.
### Output
```console
vboxuser@Ubuntu2204:~/_src/btchip-python$ python3 setup.py egg_info
error in btchip-python setup command: 'extras_require' must be a dictionary whose values are strings or lists of strings containing valid project/version requirement specifiers.
vboxuser@Ubuntu2204:~/_src/btchip-python$ pip3 show setuptools
Name: setuptools
Version: 67.0.0
Summary: Easily download, build, install, upgrade, and uninstall Python packages
Home-page: https://github.com/pypa/setuptools
Author: Python Packaging Authority
Author-email: [email protected]
License:
Location: /home/vboxuser/.local/lib/python3.10/site-packages
Requires:
Required-by: pip-tools
vboxuser@Ubuntu2204:~/_src/btchip-python$ pip3 install setuptools==65.5.0
Defaulting to user installation because normal site-packages is not writeable
Collecting setuptools==65.5.0
Downloading setuptools-65.5.0-py3-none-any.whl (1.2 MB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1.2/1.2 MB 1.4 MB/s eta 0:00:00
Installing collected packages: setuptools
Attempting uninstall: setuptools
Found existing installation: setuptools 67.0.0
Uninstalling setuptools-67.0.0:
Successfully uninstalled setuptools-67.0.0
Successfully installed setuptools-65.5.0
vboxuser@Ubuntu2204:~/_src/btchip-python$ which pip3
/home/vboxuser/.local/bin/pip3
vboxuser@Ubuntu2204:~/_src/btchip-python$ python3 setup.py egg_info
running egg_info
creating btchip_python.egg-info
writing btchip_python.egg-info/PKG-INFO
writing dependency_links to btchip_python.egg-info/dependency_links.txt
writing requirements to btchip_python.egg-info/requires.txt
writing top-level names to btchip_python.egg-info/top_level.txt
writing manifest file 'btchip_python.egg-info/SOURCES.txt'
reading manifest file 'btchip_python.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
adding license file 'LICENSE'
writing manifest file 'btchip_python.egg-info/SOURCES.txt'
vboxuser@Ubuntu2204:~/_src/btchip-python$
```
| [
{
"content": "\"\"\"Translation layer between pyproject config and setuptools distribution and\nmetadata objects.\n\nThe distribution and metadata objects are modeled after (an old version of)\ncore metadata, therefore configs in the format specified for ``pyproject.toml``\nneed to be processed before being applied.\n\n**PRIVATE MODULE**: API reserved for setuptools internal usage only.\n\"\"\"\n\nimport logging\nimport os\nfrom collections.abc import Mapping\nfrom email.headerregistry import Address\nfrom functools import partial, reduce\nfrom inspect import cleandoc\nfrom itertools import chain\nfrom types import MappingProxyType\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Dict,\n List,\n Optional,\n Set,\n Tuple,\n Type,\n Union,\n cast,\n)\n\nfrom ..errors import RemovedConfigError\nfrom ..warnings import SetuptoolsWarning\n\nif TYPE_CHECKING:\n from setuptools._importlib import metadata # noqa\n from setuptools.dist import Distribution # noqa\n\nEMPTY: Mapping = MappingProxyType({}) # Immutable dict-like\n_Path = Union[os.PathLike, str]\n_DictOrStr = Union[dict, str]\n_CorrespFn = Callable[[\"Distribution\", Any, _Path], None]\n_Correspondence = Union[str, _CorrespFn]\n\n_logger = logging.getLogger(__name__)\n\n\ndef apply(dist: \"Distribution\", config: dict, filename: _Path) -> \"Distribution\":\n \"\"\"Apply configuration dict read with :func:`read_configuration`\"\"\"\n\n if not config:\n return dist # short-circuit unrelated pyproject.toml file\n\n root_dir = os.path.dirname(filename) or \".\"\n\n _apply_project_table(dist, config, root_dir)\n _apply_tool_table(dist, config, filename)\n\n current_directory = os.getcwd()\n os.chdir(root_dir)\n try:\n dist._finalize_requires()\n dist._finalize_license_files()\n finally:\n os.chdir(current_directory)\n\n return dist\n\n\ndef _apply_project_table(dist: \"Distribution\", config: dict, root_dir: _Path):\n project_table = config.get(\"project\", {}).copy()\n if not project_table:\n return # short-circuit\n\n _handle_missing_dynamic(dist, project_table)\n _unify_entry_points(project_table)\n\n for field, value in project_table.items():\n norm_key = json_compatible_key(field)\n corresp = PYPROJECT_CORRESPONDENCE.get(norm_key, norm_key)\n if callable(corresp):\n corresp(dist, value, root_dir)\n else:\n _set_config(dist, corresp, value)\n\n\ndef _apply_tool_table(dist: \"Distribution\", config: dict, filename: _Path):\n tool_table = config.get(\"tool\", {}).get(\"setuptools\", {})\n if not tool_table:\n return # short-circuit\n\n for field, value in tool_table.items():\n norm_key = json_compatible_key(field)\n\n if norm_key in TOOL_TABLE_REMOVALS:\n suggestion = cleandoc(TOOL_TABLE_REMOVALS[norm_key])\n msg = f\"\"\"\n The parameter `tool.setuptools.{field}` was long deprecated\n and has been removed from `pyproject.toml`.\n \"\"\"\n raise RemovedConfigError(\"\\n\".join([cleandoc(msg), suggestion]))\n\n norm_key = TOOL_TABLE_RENAMES.get(norm_key, norm_key)\n _set_config(dist, norm_key, value)\n\n _copy_command_options(config, dist, filename)\n\n\ndef _handle_missing_dynamic(dist: \"Distribution\", project_table: dict):\n \"\"\"Be temporarily forgiving with ``dynamic`` fields not listed in ``dynamic``\"\"\"\n dynamic = set(project_table.get(\"dynamic\", []))\n for field, getter in _PREVIOUSLY_DEFINED.items():\n if not (field in project_table or field in dynamic):\n value = getter(dist)\n if value:\n _MissingDynamic.emit(field=field, value=value)\n project_table[field] = _RESET_PREVIOUSLY_DEFINED.get(field)\n\n\ndef 
json_compatible_key(key: str) -> str:\n \"\"\"As defined in :pep:`566#json-compatible-metadata`\"\"\"\n return key.lower().replace(\"-\", \"_\")\n\n\ndef _set_config(dist: \"Distribution\", field: str, value: Any):\n setter = getattr(dist.metadata, f\"set_{field}\", None)\n if setter:\n setter(value)\n elif hasattr(dist.metadata, field) or field in SETUPTOOLS_PATCHES:\n setattr(dist.metadata, field, value)\n else:\n setattr(dist, field, value)\n\n\n_CONTENT_TYPES = {\n \".md\": \"text/markdown\",\n \".rst\": \"text/x-rst\",\n \".txt\": \"text/plain\",\n}\n\n\ndef _guess_content_type(file: str) -> Optional[str]:\n _, ext = os.path.splitext(file.lower())\n if not ext:\n return None\n\n if ext in _CONTENT_TYPES:\n return _CONTENT_TYPES[ext]\n\n valid = \", \".join(f\"{k} ({v})\" for k, v in _CONTENT_TYPES.items())\n msg = f\"only the following file extensions are recognized: {valid}.\"\n raise ValueError(f\"Undefined content type for {file}, {msg}\")\n\n\ndef _long_description(dist: \"Distribution\", val: _DictOrStr, root_dir: _Path):\n from setuptools.config import expand\n\n if isinstance(val, str):\n file: Union[str, list] = val\n text = expand.read_files(file, root_dir)\n ctype = _guess_content_type(val)\n else:\n file = val.get(\"file\") or []\n text = val.get(\"text\") or expand.read_files(file, root_dir)\n ctype = val[\"content-type\"]\n\n _set_config(dist, \"long_description\", text)\n\n if ctype:\n _set_config(dist, \"long_description_content_type\", ctype)\n\n if file:\n dist._referenced_files.add(cast(str, file))\n\n\ndef _license(dist: \"Distribution\", val: dict, root_dir: _Path):\n from setuptools.config import expand\n\n if \"file\" in val:\n _set_config(dist, \"license\", expand.read_files([val[\"file\"]], root_dir))\n dist._referenced_files.add(val[\"file\"])\n else:\n _set_config(dist, \"license\", val[\"text\"])\n\n\ndef _people(dist: \"Distribution\", val: List[dict], _root_dir: _Path, kind: str):\n field = []\n email_field = []\n for person in val:\n if \"name\" not in person:\n email_field.append(person[\"email\"])\n elif \"email\" not in person:\n field.append(person[\"name\"])\n else:\n addr = Address(display_name=person[\"name\"], addr_spec=person[\"email\"])\n email_field.append(str(addr))\n\n if field:\n _set_config(dist, kind, \", \".join(field))\n if email_field:\n _set_config(dist, f\"{kind}_email\", \", \".join(email_field))\n\n\ndef _project_urls(dist: \"Distribution\", val: dict, _root_dir):\n _set_config(dist, \"project_urls\", val)\n\n\ndef _python_requires(dist: \"Distribution\", val: dict, _root_dir):\n from setuptools.extern.packaging.specifiers import SpecifierSet\n\n _set_config(dist, \"python_requires\", SpecifierSet(val))\n\n\ndef _dependencies(dist: \"Distribution\", val: list, _root_dir):\n if getattr(dist, \"install_requires\", []):\n msg = \"`install_requires` overwritten in `pyproject.toml` (dependencies)\"\n SetuptoolsWarning.emit(msg)\n dist.install_requires = val\n\n\ndef _optional_dependencies(dist: \"Distribution\", val: dict, _root_dir):\n existing = getattr(dist, \"extras_require\", None) or {}\n dist.extras_require = {**existing, **val}\n\n\ndef _unify_entry_points(project_table: dict):\n project = project_table\n entry_points = project.pop(\"entry-points\", project.pop(\"entry_points\", {}))\n renaming = {\"scripts\": \"console_scripts\", \"gui_scripts\": \"gui_scripts\"}\n for key, value in list(project.items()): # eager to allow modifications\n norm_key = json_compatible_key(key)\n if norm_key in renaming:\n # Don't skip even if value 
is empty (reason: reset missing `dynamic`)\n entry_points[renaming[norm_key]] = project.pop(key)\n\n if entry_points:\n project[\"entry-points\"] = {\n name: [f\"{k} = {v}\" for k, v in group.items()]\n for name, group in entry_points.items()\n if group # now we can skip empty groups\n }\n # Sometimes this will set `project[\"entry-points\"] = {}`, and that is\n # intentional (for reseting configurations that are missing `dynamic`).\n\n\ndef _copy_command_options(pyproject: dict, dist: \"Distribution\", filename: _Path):\n tool_table = pyproject.get(\"tool\", {})\n cmdclass = tool_table.get(\"setuptools\", {}).get(\"cmdclass\", {})\n valid_options = _valid_command_options(cmdclass)\n\n cmd_opts = dist.command_options\n for cmd, config in pyproject.get(\"tool\", {}).get(\"distutils\", {}).items():\n cmd = json_compatible_key(cmd)\n valid = valid_options.get(cmd, set())\n cmd_opts.setdefault(cmd, {})\n for key, value in config.items():\n key = json_compatible_key(key)\n cmd_opts[cmd][key] = (str(filename), value)\n if key not in valid:\n # To avoid removing options that are specified dynamically we\n # just log a warn...\n _logger.warning(f\"Command option {cmd}.{key} is not defined\")\n\n\ndef _valid_command_options(cmdclass: Mapping = EMPTY) -> Dict[str, Set[str]]:\n from .._importlib import metadata\n from setuptools.dist import Distribution\n\n valid_options = {\"global\": _normalise_cmd_options(Distribution.global_options)}\n\n unloaded_entry_points = metadata.entry_points(group='distutils.commands')\n loaded_entry_points = (_load_ep(ep) for ep in unloaded_entry_points)\n entry_points = (ep for ep in loaded_entry_points if ep)\n for cmd, cmd_class in chain(entry_points, cmdclass.items()):\n opts = valid_options.get(cmd, set())\n opts = opts | _normalise_cmd_options(getattr(cmd_class, \"user_options\", []))\n valid_options[cmd] = opts\n\n return valid_options\n\n\ndef _load_ep(ep: \"metadata.EntryPoint\") -> Optional[Tuple[str, Type]]:\n # Ignore all the errors\n try:\n return (ep.name, ep.load())\n except Exception as ex:\n msg = f\"{ex.__class__.__name__} while trying to load entry-point {ep.name}\"\n _logger.warning(f\"{msg}: {ex}\")\n return None\n\n\ndef _normalise_cmd_option_key(name: str) -> str:\n return json_compatible_key(name).strip(\"_=\")\n\n\ndef _normalise_cmd_options(desc: List[Tuple[str, Optional[str], str]]) -> Set[str]:\n return {_normalise_cmd_option_key(fancy_option[0]) for fancy_option in desc}\n\n\ndef _get_previous_entrypoints(dist: \"Distribution\") -> Dict[str, list]:\n ignore = (\"console_scripts\", \"gui_scripts\")\n value = getattr(dist, \"entry_points\", None) or {}\n return {k: v for k, v in value.items() if k not in ignore}\n\n\ndef _get_previous_scripts(dist: \"Distribution\") -> Optional[list]:\n value = getattr(dist, \"entry_points\", None) or {}\n return value.get(\"console_scripts\")\n\n\ndef _get_previous_gui_scripts(dist: \"Distribution\") -> Optional[list]:\n value = getattr(dist, \"entry_points\", None) or {}\n return value.get(\"gui_scripts\")\n\n\ndef _attrgetter(attr):\n \"\"\"\n Similar to ``operator.attrgetter`` but returns None if ``attr`` is not found\n >>> from types import SimpleNamespace\n >>> obj = SimpleNamespace(a=42, b=SimpleNamespace(c=13))\n >>> _attrgetter(\"a\")(obj)\n 42\n >>> _attrgetter(\"b.c\")(obj)\n 13\n >>> _attrgetter(\"d\")(obj) is None\n True\n \"\"\"\n return partial(reduce, lambda acc, x: getattr(acc, x, None), attr.split(\".\"))\n\n\ndef _some_attrgetter(*items):\n \"\"\"\n Return the first \"truth-y\" attribute or 
None\n >>> from types import SimpleNamespace\n >>> obj = SimpleNamespace(a=42, b=SimpleNamespace(c=13))\n >>> _some_attrgetter(\"d\", \"a\", \"b.c\")(obj)\n 42\n >>> _some_attrgetter(\"d\", \"e\", \"b.c\", \"a\")(obj)\n 13\n >>> _some_attrgetter(\"d\", \"e\", \"f\")(obj) is None\n True\n \"\"\"\n\n def _acessor(obj):\n values = (_attrgetter(i)(obj) for i in items)\n return next((i for i in values if i is not None), None)\n\n return _acessor\n\n\nPYPROJECT_CORRESPONDENCE: Dict[str, _Correspondence] = {\n \"readme\": _long_description,\n \"license\": _license,\n \"authors\": partial(_people, kind=\"author\"),\n \"maintainers\": partial(_people, kind=\"maintainer\"),\n \"urls\": _project_urls,\n \"dependencies\": _dependencies,\n \"optional_dependencies\": _optional_dependencies,\n \"requires_python\": _python_requires,\n}\n\nTOOL_TABLE_RENAMES = {\"script_files\": \"scripts\"}\nTOOL_TABLE_REMOVALS = {\n \"namespace_packages\": \"\"\"\n Please migrate to implicit native namespaces instead.\n See https://packaging.python.org/en/latest/guides/packaging-namespace-packages/.\n \"\"\",\n}\n\nSETUPTOOLS_PATCHES = {\n \"long_description_content_type\",\n \"project_urls\",\n \"provides_extras\",\n \"license_file\",\n \"license_files\",\n}\n\n_PREVIOUSLY_DEFINED = {\n \"name\": _attrgetter(\"metadata.name\"),\n \"version\": _attrgetter(\"metadata.version\"),\n \"description\": _attrgetter(\"metadata.description\"),\n \"readme\": _attrgetter(\"metadata.long_description\"),\n \"requires-python\": _some_attrgetter(\"python_requires\", \"metadata.python_requires\"),\n \"license\": _attrgetter(\"metadata.license\"),\n \"authors\": _some_attrgetter(\"metadata.author\", \"metadata.author_email\"),\n \"maintainers\": _some_attrgetter(\"metadata.maintainer\", \"metadata.maintainer_email\"),\n \"keywords\": _attrgetter(\"metadata.keywords\"),\n \"classifiers\": _attrgetter(\"metadata.classifiers\"),\n \"urls\": _attrgetter(\"metadata.project_urls\"),\n \"entry-points\": _get_previous_entrypoints,\n \"scripts\": _get_previous_scripts,\n \"gui-scripts\": _get_previous_gui_scripts,\n \"dependencies\": _attrgetter(\"install_requires\"),\n \"optional-dependencies\": _attrgetter(\"extras_require\"),\n}\n\n\n_RESET_PREVIOUSLY_DEFINED: dict = {\n # Fix improper setting: given in `setup.py`, but not listed in `dynamic`\n # dict: pyproject name => value to which reset\n \"license\": {},\n \"authors\": [],\n \"maintainers\": [],\n \"keywords\": [],\n \"classifiers\": [],\n \"urls\": {},\n \"entry-points\": {},\n \"scripts\": {},\n \"gui-scripts\": {},\n \"dependencies\": [],\n \"optional-dependencies\": [],\n}\n\n\nclass _MissingDynamic(SetuptoolsWarning):\n _SUMMARY = \"`{field}` defined outside of `pyproject.toml` is ignored.\"\n\n _DETAILS = \"\"\"\n The following seems to be defined outside of `pyproject.toml`:\n\n `{field} = {value!r}`\n\n According to the spec (see the link below), however, setuptools CANNOT\n consider this value unless `{field}` is listed as `dynamic`.\n\n https://packaging.python.org/en/latest/specifications/declaring-project-metadata/\n\n To prevent this problem, you can list `{field}` under `dynamic` or alternatively\n remove the `[project]` table from your file and rely entirely on other means of\n configuration.\n \"\"\"\n # TODO: Consider removing this check in the future?\n # There is a trade-off here between improving \"debug-ability\" and the cost\n # of running/testing/maintaining these unnecessary checks...\n\n @classmethod\n def details(cls, field: str, value: Any) -> str:\n return 
cls._DETAILS.format(field=field, value=value)\n",
"path": "setuptools/config/_apply_pyprojecttoml.py"
}
] | [
{
"content": "\"\"\"Translation layer between pyproject config and setuptools distribution and\nmetadata objects.\n\nThe distribution and metadata objects are modeled after (an old version of)\ncore metadata, therefore configs in the format specified for ``pyproject.toml``\nneed to be processed before being applied.\n\n**PRIVATE MODULE**: API reserved for setuptools internal usage only.\n\"\"\"\n\nimport logging\nimport os\nfrom collections.abc import Mapping\nfrom email.headerregistry import Address\nfrom functools import partial, reduce\nfrom inspect import cleandoc\nfrom itertools import chain\nfrom types import MappingProxyType\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Dict,\n List,\n Optional,\n Set,\n Tuple,\n Type,\n Union,\n cast,\n)\n\nfrom ..errors import RemovedConfigError\nfrom ..warnings import SetuptoolsWarning\n\nif TYPE_CHECKING:\n from setuptools._importlib import metadata # noqa\n from setuptools.dist import Distribution # noqa\n\nEMPTY: Mapping = MappingProxyType({}) # Immutable dict-like\n_Path = Union[os.PathLike, str]\n_DictOrStr = Union[dict, str]\n_CorrespFn = Callable[[\"Distribution\", Any, _Path], None]\n_Correspondence = Union[str, _CorrespFn]\n\n_logger = logging.getLogger(__name__)\n\n\ndef apply(dist: \"Distribution\", config: dict, filename: _Path) -> \"Distribution\":\n \"\"\"Apply configuration dict read with :func:`read_configuration`\"\"\"\n\n if not config:\n return dist # short-circuit unrelated pyproject.toml file\n\n root_dir = os.path.dirname(filename) or \".\"\n\n _apply_project_table(dist, config, root_dir)\n _apply_tool_table(dist, config, filename)\n\n current_directory = os.getcwd()\n os.chdir(root_dir)\n try:\n dist._finalize_requires()\n dist._finalize_license_files()\n finally:\n os.chdir(current_directory)\n\n return dist\n\n\ndef _apply_project_table(dist: \"Distribution\", config: dict, root_dir: _Path):\n project_table = config.get(\"project\", {}).copy()\n if not project_table:\n return # short-circuit\n\n _handle_missing_dynamic(dist, project_table)\n _unify_entry_points(project_table)\n\n for field, value in project_table.items():\n norm_key = json_compatible_key(field)\n corresp = PYPROJECT_CORRESPONDENCE.get(norm_key, norm_key)\n if callable(corresp):\n corresp(dist, value, root_dir)\n else:\n _set_config(dist, corresp, value)\n\n\ndef _apply_tool_table(dist: \"Distribution\", config: dict, filename: _Path):\n tool_table = config.get(\"tool\", {}).get(\"setuptools\", {})\n if not tool_table:\n return # short-circuit\n\n for field, value in tool_table.items():\n norm_key = json_compatible_key(field)\n\n if norm_key in TOOL_TABLE_REMOVALS:\n suggestion = cleandoc(TOOL_TABLE_REMOVALS[norm_key])\n msg = f\"\"\"\n The parameter `tool.setuptools.{field}` was long deprecated\n and has been removed from `pyproject.toml`.\n \"\"\"\n raise RemovedConfigError(\"\\n\".join([cleandoc(msg), suggestion]))\n\n norm_key = TOOL_TABLE_RENAMES.get(norm_key, norm_key)\n _set_config(dist, norm_key, value)\n\n _copy_command_options(config, dist, filename)\n\n\ndef _handle_missing_dynamic(dist: \"Distribution\", project_table: dict):\n \"\"\"Be temporarily forgiving with ``dynamic`` fields not listed in ``dynamic``\"\"\"\n dynamic = set(project_table.get(\"dynamic\", []))\n for field, getter in _PREVIOUSLY_DEFINED.items():\n if not (field in project_table or field in dynamic):\n value = getter(dist)\n if value:\n _MissingDynamic.emit(field=field, value=value)\n project_table[field] = _RESET_PREVIOUSLY_DEFINED.get(field)\n\n\ndef 
json_compatible_key(key: str) -> str:\n \"\"\"As defined in :pep:`566#json-compatible-metadata`\"\"\"\n return key.lower().replace(\"-\", \"_\")\n\n\ndef _set_config(dist: \"Distribution\", field: str, value: Any):\n setter = getattr(dist.metadata, f\"set_{field}\", None)\n if setter:\n setter(value)\n elif hasattr(dist.metadata, field) or field in SETUPTOOLS_PATCHES:\n setattr(dist.metadata, field, value)\n else:\n setattr(dist, field, value)\n\n\n_CONTENT_TYPES = {\n \".md\": \"text/markdown\",\n \".rst\": \"text/x-rst\",\n \".txt\": \"text/plain\",\n}\n\n\ndef _guess_content_type(file: str) -> Optional[str]:\n _, ext = os.path.splitext(file.lower())\n if not ext:\n return None\n\n if ext in _CONTENT_TYPES:\n return _CONTENT_TYPES[ext]\n\n valid = \", \".join(f\"{k} ({v})\" for k, v in _CONTENT_TYPES.items())\n msg = f\"only the following file extensions are recognized: {valid}.\"\n raise ValueError(f\"Undefined content type for {file}, {msg}\")\n\n\ndef _long_description(dist: \"Distribution\", val: _DictOrStr, root_dir: _Path):\n from setuptools.config import expand\n\n if isinstance(val, str):\n file: Union[str, list] = val\n text = expand.read_files(file, root_dir)\n ctype = _guess_content_type(val)\n else:\n file = val.get(\"file\") or []\n text = val.get(\"text\") or expand.read_files(file, root_dir)\n ctype = val[\"content-type\"]\n\n _set_config(dist, \"long_description\", text)\n\n if ctype:\n _set_config(dist, \"long_description_content_type\", ctype)\n\n if file:\n dist._referenced_files.add(cast(str, file))\n\n\ndef _license(dist: \"Distribution\", val: dict, root_dir: _Path):\n from setuptools.config import expand\n\n if \"file\" in val:\n _set_config(dist, \"license\", expand.read_files([val[\"file\"]], root_dir))\n dist._referenced_files.add(val[\"file\"])\n else:\n _set_config(dist, \"license\", val[\"text\"])\n\n\ndef _people(dist: \"Distribution\", val: List[dict], _root_dir: _Path, kind: str):\n field = []\n email_field = []\n for person in val:\n if \"name\" not in person:\n email_field.append(person[\"email\"])\n elif \"email\" not in person:\n field.append(person[\"name\"])\n else:\n addr = Address(display_name=person[\"name\"], addr_spec=person[\"email\"])\n email_field.append(str(addr))\n\n if field:\n _set_config(dist, kind, \", \".join(field))\n if email_field:\n _set_config(dist, f\"{kind}_email\", \", \".join(email_field))\n\n\ndef _project_urls(dist: \"Distribution\", val: dict, _root_dir):\n _set_config(dist, \"project_urls\", val)\n\n\ndef _python_requires(dist: \"Distribution\", val: dict, _root_dir):\n from setuptools.extern.packaging.specifiers import SpecifierSet\n\n _set_config(dist, \"python_requires\", SpecifierSet(val))\n\n\ndef _dependencies(dist: \"Distribution\", val: list, _root_dir):\n if getattr(dist, \"install_requires\", []):\n msg = \"`install_requires` overwritten in `pyproject.toml` (dependencies)\"\n SetuptoolsWarning.emit(msg)\n dist.install_requires = val\n\n\ndef _optional_dependencies(dist: \"Distribution\", val: dict, _root_dir):\n existing = getattr(dist, \"extras_require\", None) or {}\n dist.extras_require = {**existing, **val}\n\n\ndef _unify_entry_points(project_table: dict):\n project = project_table\n entry_points = project.pop(\"entry-points\", project.pop(\"entry_points\", {}))\n renaming = {\"scripts\": \"console_scripts\", \"gui_scripts\": \"gui_scripts\"}\n for key, value in list(project.items()): # eager to allow modifications\n norm_key = json_compatible_key(key)\n if norm_key in renaming:\n # Don't skip even if value 
is empty (reason: reset missing `dynamic`)\n entry_points[renaming[norm_key]] = project.pop(key)\n\n if entry_points:\n project[\"entry-points\"] = {\n name: [f\"{k} = {v}\" for k, v in group.items()]\n for name, group in entry_points.items()\n if group # now we can skip empty groups\n }\n # Sometimes this will set `project[\"entry-points\"] = {}`, and that is\n # intentional (for reseting configurations that are missing `dynamic`).\n\n\ndef _copy_command_options(pyproject: dict, dist: \"Distribution\", filename: _Path):\n tool_table = pyproject.get(\"tool\", {})\n cmdclass = tool_table.get(\"setuptools\", {}).get(\"cmdclass\", {})\n valid_options = _valid_command_options(cmdclass)\n\n cmd_opts = dist.command_options\n for cmd, config in pyproject.get(\"tool\", {}).get(\"distutils\", {}).items():\n cmd = json_compatible_key(cmd)\n valid = valid_options.get(cmd, set())\n cmd_opts.setdefault(cmd, {})\n for key, value in config.items():\n key = json_compatible_key(key)\n cmd_opts[cmd][key] = (str(filename), value)\n if key not in valid:\n # To avoid removing options that are specified dynamically we\n # just log a warn...\n _logger.warning(f\"Command option {cmd}.{key} is not defined\")\n\n\ndef _valid_command_options(cmdclass: Mapping = EMPTY) -> Dict[str, Set[str]]:\n from .._importlib import metadata\n from setuptools.dist import Distribution\n\n valid_options = {\"global\": _normalise_cmd_options(Distribution.global_options)}\n\n unloaded_entry_points = metadata.entry_points(group='distutils.commands')\n loaded_entry_points = (_load_ep(ep) for ep in unloaded_entry_points)\n entry_points = (ep for ep in loaded_entry_points if ep)\n for cmd, cmd_class in chain(entry_points, cmdclass.items()):\n opts = valid_options.get(cmd, set())\n opts = opts | _normalise_cmd_options(getattr(cmd_class, \"user_options\", []))\n valid_options[cmd] = opts\n\n return valid_options\n\n\ndef _load_ep(ep: \"metadata.EntryPoint\") -> Optional[Tuple[str, Type]]:\n # Ignore all the errors\n try:\n return (ep.name, ep.load())\n except Exception as ex:\n msg = f\"{ex.__class__.__name__} while trying to load entry-point {ep.name}\"\n _logger.warning(f\"{msg}: {ex}\")\n return None\n\n\ndef _normalise_cmd_option_key(name: str) -> str:\n return json_compatible_key(name).strip(\"_=\")\n\n\ndef _normalise_cmd_options(desc: List[Tuple[str, Optional[str], str]]) -> Set[str]:\n return {_normalise_cmd_option_key(fancy_option[0]) for fancy_option in desc}\n\n\ndef _get_previous_entrypoints(dist: \"Distribution\") -> Dict[str, list]:\n ignore = (\"console_scripts\", \"gui_scripts\")\n value = getattr(dist, \"entry_points\", None) or {}\n return {k: v for k, v in value.items() if k not in ignore}\n\n\ndef _get_previous_scripts(dist: \"Distribution\") -> Optional[list]:\n value = getattr(dist, \"entry_points\", None) or {}\n return value.get(\"console_scripts\")\n\n\ndef _get_previous_gui_scripts(dist: \"Distribution\") -> Optional[list]:\n value = getattr(dist, \"entry_points\", None) or {}\n return value.get(\"gui_scripts\")\n\n\ndef _attrgetter(attr):\n \"\"\"\n Similar to ``operator.attrgetter`` but returns None if ``attr`` is not found\n >>> from types import SimpleNamespace\n >>> obj = SimpleNamespace(a=42, b=SimpleNamespace(c=13))\n >>> _attrgetter(\"a\")(obj)\n 42\n >>> _attrgetter(\"b.c\")(obj)\n 13\n >>> _attrgetter(\"d\")(obj) is None\n True\n \"\"\"\n return partial(reduce, lambda acc, x: getattr(acc, x, None), attr.split(\".\"))\n\n\ndef _some_attrgetter(*items):\n \"\"\"\n Return the first \"truth-y\" attribute or 
None\n >>> from types import SimpleNamespace\n >>> obj = SimpleNamespace(a=42, b=SimpleNamespace(c=13))\n >>> _some_attrgetter(\"d\", \"a\", \"b.c\")(obj)\n 42\n >>> _some_attrgetter(\"d\", \"e\", \"b.c\", \"a\")(obj)\n 13\n >>> _some_attrgetter(\"d\", \"e\", \"f\")(obj) is None\n True\n \"\"\"\n\n def _acessor(obj):\n values = (_attrgetter(i)(obj) for i in items)\n return next((i for i in values if i is not None), None)\n\n return _acessor\n\n\nPYPROJECT_CORRESPONDENCE: Dict[str, _Correspondence] = {\n \"readme\": _long_description,\n \"license\": _license,\n \"authors\": partial(_people, kind=\"author\"),\n \"maintainers\": partial(_people, kind=\"maintainer\"),\n \"urls\": _project_urls,\n \"dependencies\": _dependencies,\n \"optional_dependencies\": _optional_dependencies,\n \"requires_python\": _python_requires,\n}\n\nTOOL_TABLE_RENAMES = {\"script_files\": \"scripts\"}\nTOOL_TABLE_REMOVALS = {\n \"namespace_packages\": \"\"\"\n Please migrate to implicit native namespaces instead.\n See https://packaging.python.org/en/latest/guides/packaging-namespace-packages/.\n \"\"\",\n}\n\nSETUPTOOLS_PATCHES = {\n \"long_description_content_type\",\n \"project_urls\",\n \"provides_extras\",\n \"license_file\",\n \"license_files\",\n}\n\n_PREVIOUSLY_DEFINED = {\n \"name\": _attrgetter(\"metadata.name\"),\n \"version\": _attrgetter(\"metadata.version\"),\n \"description\": _attrgetter(\"metadata.description\"),\n \"readme\": _attrgetter(\"metadata.long_description\"),\n \"requires-python\": _some_attrgetter(\"python_requires\", \"metadata.python_requires\"),\n \"license\": _attrgetter(\"metadata.license\"),\n \"authors\": _some_attrgetter(\"metadata.author\", \"metadata.author_email\"),\n \"maintainers\": _some_attrgetter(\"metadata.maintainer\", \"metadata.maintainer_email\"),\n \"keywords\": _attrgetter(\"metadata.keywords\"),\n \"classifiers\": _attrgetter(\"metadata.classifiers\"),\n \"urls\": _attrgetter(\"metadata.project_urls\"),\n \"entry-points\": _get_previous_entrypoints,\n \"scripts\": _get_previous_scripts,\n \"gui-scripts\": _get_previous_gui_scripts,\n \"dependencies\": _attrgetter(\"install_requires\"),\n \"optional-dependencies\": _attrgetter(\"extras_require\"),\n}\n\n\n_RESET_PREVIOUSLY_DEFINED: dict = {\n # Fix improper setting: given in `setup.py`, but not listed in `dynamic`\n # dict: pyproject name => value to which reset\n \"license\": {},\n \"authors\": [],\n \"maintainers\": [],\n \"keywords\": [],\n \"classifiers\": [],\n \"urls\": {},\n \"entry-points\": {},\n \"scripts\": {},\n \"gui-scripts\": {},\n \"dependencies\": [],\n \"optional-dependencies\": {},\n}\n\n\nclass _MissingDynamic(SetuptoolsWarning):\n _SUMMARY = \"`{field}` defined outside of `pyproject.toml` is ignored.\"\n\n _DETAILS = \"\"\"\n The following seems to be defined outside of `pyproject.toml`:\n\n `{field} = {value!r}`\n\n According to the spec (see the link below), however, setuptools CANNOT\n consider this value unless `{field}` is listed as `dynamic`.\n\n https://packaging.python.org/en/latest/specifications/declaring-project-metadata/\n\n To prevent this problem, you can list `{field}` under `dynamic` or alternatively\n remove the `[project]` table from your file and rely entirely on other means of\n configuration.\n \"\"\"\n # TODO: Consider removing this check in the future?\n # There is a trade-off here between improving \"debug-ability\" and the cost\n # of running/testing/maintaining these unnecessary checks...\n\n @classmethod\n def details(cls, field: str, value: Any) -> str:\n return 
cls._DETAILS.format(field=field, value=value)\n",
"path": "setuptools/config/_apply_pyprojecttoml.py"
}
] | diff --git a/setuptools/config/_apply_pyprojecttoml.py b/setuptools/config/_apply_pyprojecttoml.py
index b562f91759..4aec5f1377 100644
--- a/setuptools/config/_apply_pyprojecttoml.py
+++ b/setuptools/config/_apply_pyprojecttoml.py
@@ -409,7 +409,7 @@ def _acessor(obj):
"scripts": {},
"gui-scripts": {},
"dependencies": [],
- "optional-dependencies": [],
+ "optional-dependencies": {},
}
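The one-line fix changes the placeholder that `_handle_missing_dynamic` assigns when it resets a previously defined `extras_require` from a list to a dict. The apply layer shown above treats that value as a mapping (`_optional_dependencies` merges it with `{**existing, **val}`), and the reported error explicitly demands a dictionary, so an empty dict is the type-correct reset value. A small, simplified illustration of the shape difference (not the actual setuptools validation code):
```python
# Simplified illustration: the reset value must merge like a mapping,
# the way _optional_dependencies combines it with any existing extras.
existing = {"smartcard": ["python-pyscard>=1.6.12-4build1"]}

print({**existing, **{}})   # patched reset value ({}): merges cleanly, extras preserved

try:
    {**existing, **[]}      # old reset value ([]): a list is not a mapping
except TypeError as exc:
    print(exc)
```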
|
sanic-org__sanic-1045 | 0.6.1 release to PyPi
Hey folks,
There have been a bunch of substantive changes in the past few months; I think they warrant a release of 0.6.1 (or 0.7, considering there may be large changes in PRs like #939). Any chance we could get a new candidate uploaded to PyPi?
If there's a better place to ask this, I'm happy to head there.
| [
{
"content": "from sanic.app import Sanic\nfrom sanic.blueprints import Blueprint\n\n__version__ = '0.6.0'\n\n__all__ = ['Sanic', 'Blueprint']\n",
"path": "sanic/__init__.py"
}
] | [
{
"content": "from sanic.app import Sanic\nfrom sanic.blueprints import Blueprint\n\n__version__ = '0.7.0'\n\n__all__ = ['Sanic', 'Blueprint']\n",
"path": "sanic/__init__.py"
}
] | diff --git a/sanic/__init__.py b/sanic/__init__.py
index 8f35a28367..78bc7bd9f5 100644
--- a/sanic/__init__.py
+++ b/sanic/__init__.py
@@ -1,6 +1,6 @@
from sanic.app import Sanic
from sanic.blueprints import Blueprint
-__version__ = '0.6.0'
+__version__ = '0.7.0'
__all__ = ['Sanic', 'Blueprint']
|
biopython__biopython-3366 | `Bio.Phylo.write` in format `nexus` does not export comments.
### Setup
I am reporting a problem with Biopython version 1.77, Python version 3.7, and operating
system Ubuntu 16.04 as follows:
```python
from io import StringIO
from Bio import Phylo
t = Phylo.read(StringIO("((A,B),C);"), 'newick')
for ni,n in enumerate(t.get_terminals()):
n.comment = f"[&node_number={ni}]"
out = StringIO()
Phylo.write(t, out, "nexus")
print(out.getvalue())
```
The output is
```
#NEXUS
Begin Taxa;
Dimensions NTax=3;
TaxLabels A B C;
End;
Begin Trees;
Tree tree1=((A:0.00000,B:0.00000):0.00000,C:0.00000):0.00000;
End;
```
missing the comments I attached to the nodes.
### Expected behaviour
The output should look like this:
```
Begin Taxa;
Dimensions NTax=3;
TaxLabels A B C;
End;
Begin Trees;
Tree tree1=((A[&node_number=0]:0.00000,B[&node_number=1]:0.00000):0.00000,C[&node_number=2]:0.00000):0.00000;
End;
```
In fact, doing the reverse and parsing this tree with `Bio.Phylo.read` reads the comments correctly.
```python
nexus_in = StringIO(
"""
Begin Taxa;
Dimensions NTax=3;
TaxLabels A B C;
End;
Begin Trees;
Tree tree1=((A[&node_number=0]:0.00000,B[&node_number=1]:0.00000):0.00000,C[&node_number=2]:0.00000):0.00000;
End;
""")
t2 = Phylo.read(nexus_in, 'nexus')
for n in t2.get_terminals():
print(n.name, n.comment)
```
has the expected output
```
A [&node_number=0]
B [&node_number=1]
C [&node_number=2]
```
### Actual behaviour
`Bio.Phylo.write` in `nexus` format ignores comments, while `Bio.Phylo.read` correctly parses them from the input.
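A hint at where this happens is visible in the Newick writer included below: its `_get_comment` helper looks up `clade.coment` (note the missing letter) and silently swallows the resulting `AttributeError`, so it always returns an empty string and the comment never reaches the serialized tree. A corrected lookup might look like the sketch below — an illustration only, not necessarily the exact change that was merged:
```python
def _get_comment(clade):
    # Fetch the clade's comment; the shipped helper reads ``clade.coment``,
    # so it always takes the AttributeError branch and returns "".
    comment = getattr(clade, "comment", None)
    if comment:
        return _format_comment(str(comment))  # _format_comment escapes the brackets
    return ""
```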
### Steps to reproduce
The above code-snippets are sufficient to reproduce the unexpected behaviour.
| [
{
"content": "# Copyright (C) 2009 by Eric Talevich ([email protected])\n# Based on Bio.Nexus, copyright 2005-2008 by Frank Kauff & Cymon J. Cox.\n# All rights reserved.\n#\n# This file is part of the Biopython distribution and governed by your\n# choice of the \"Biopython License Agreement\" or the \"BSD 3-Clause License\".\n# Please see the LICENSE file that should have been included as part of this\n# package.\n\n\"\"\"I/O function wrappers for the Newick file format.\n\nSee: http://evolution.genetics.washington.edu/phylip/newick_doc.html\n\"\"\"\n\nimport re\nfrom io import StringIO\n\nfrom Bio.Phylo import Newick\n\n\nclass NewickError(Exception):\n \"\"\"Exception raised when Newick object construction cannot continue.\"\"\"\n\n pass\n\n\ntokens = [\n (r\"\\(\", \"open parens\"),\n (r\"\\)\", \"close parens\"),\n (r\"[^\\s\\(\\)\\[\\]\\'\\:\\;\\,]+\", \"unquoted node label\"),\n (r\"\\:\\ ?[+-]?[0-9]*\\.?[0-9]+([eE][+-]?[0-9]+)?\", \"edge length\"),\n (r\"\\,\", \"comma\"),\n (r\"\\[(\\\\.|[^\\]])*\\]\", \"comment\"),\n (r\"\\'(\\\\.|[^\\'])*\\'\", \"quoted node label\"),\n (r\"\\;\", \"semicolon\"),\n (r\"\\n\", \"newline\"),\n]\ntokenizer = re.compile(\"(%s)\" % \"|\".join(token[0] for token in tokens))\ntoken_dict = {name: re.compile(token) for token, name in tokens}\n\n\n# ---------------------------------------------------------\n# Public API\n\n\ndef parse(handle, **kwargs):\n \"\"\"Iterate over the trees in a Newick file handle.\n\n :returns: generator of Bio.Phylo.Newick.Tree objects.\n\n \"\"\"\n return Parser(handle).parse(**kwargs)\n\n\ndef write(trees, handle, plain=False, **kwargs):\n \"\"\"Write a trees in Newick format to the given file handle.\n\n :returns: number of trees written.\n\n \"\"\"\n return Writer(trees).write(handle, plain=plain, **kwargs)\n\n\n# ---------------------------------------------------------\n# Input\n\n\ndef _parse_confidence(text):\n if text.isdigit():\n return int(text)\n # NB: Could make this more consistent by treating as a percentage\n # return int(text) / 100.\n try:\n return float(text)\n # NB: This should be in [0.0, 1.0], but who knows what people will do\n # assert 0 <= current_clade.confidence <= 1\n except ValueError:\n return None\n\n\ndef _format_comment(text):\n return \"[%s]\" % (text.replace(\"[\", \"\\\\[\").replace(\"]\", \"\\\\]\"))\n\n\ndef _get_comment(clade):\n try:\n comment = clade.coment\n except AttributeError:\n pass\n else:\n if comment:\n return _format_comment(str(comment))\n return \"\"\n\n\nclass Parser:\n \"\"\"Parse a Newick tree given a file handle.\n\n Based on the parser in ``Bio.Nexus.Trees``.\n \"\"\"\n\n def __init__(self, handle):\n \"\"\"Initialize file handle for the Newick Tree.\"\"\"\n if handle.read(0) != \"\":\n raise ValueError(\"Newick files must be opened in text mode\") from None\n self.handle = handle\n\n @classmethod\n def from_string(cls, treetext):\n \"\"\"Instantiate the Newick Tree class from the given string.\"\"\"\n handle = StringIO(treetext)\n return cls(handle)\n\n def parse(\n self, values_are_confidence=False, comments_are_confidence=False, rooted=False\n ):\n \"\"\"Parse the text stream this object was initialized with.\"\"\"\n self.values_are_confidence = values_are_confidence\n self.comments_are_confidence = comments_are_confidence\n self.rooted = rooted\n buf = \"\"\n for line in self.handle:\n buf += line.rstrip()\n if buf.endswith(\";\"):\n yield self._parse_tree(buf)\n buf = \"\"\n if buf:\n # Last tree is missing a terminal ';' character -- that's OK\n yield 
self._parse_tree(buf)\n\n def _parse_tree(self, text):\n \"\"\"Parse the text representation into an Tree object (PRIVATE).\"\"\"\n tokens = re.finditer(tokenizer, text.strip())\n\n new_clade = self.new_clade\n root_clade = new_clade()\n\n current_clade = root_clade\n entering_branch_length = False\n\n lp_count = 0\n rp_count = 0\n for match in tokens:\n token = match.group()\n\n if token.startswith(\"'\"):\n # quoted label; add characters to clade name\n current_clade.name = token[1:-1]\n\n elif token.startswith(\"[\"):\n # comment\n current_clade.comment = token[1:-1]\n if self.comments_are_confidence:\n # Try to use this comment as a numeric support value\n current_clade.confidence = _parse_confidence(current_clade.comment)\n\n elif token == \"(\":\n # start a new clade, which is a child of the current clade\n current_clade = new_clade(current_clade)\n entering_branch_length = False\n lp_count += 1\n\n elif token == \",\":\n # if the current clade is the root, then the external parentheses\n # are missing and a new root should be created\n if current_clade is root_clade:\n root_clade = new_clade()\n current_clade.parent = root_clade\n # start a new child clade at the same level as the current clade\n parent = self.process_clade(current_clade)\n current_clade = new_clade(parent)\n entering_branch_length = False\n\n elif token == \")\":\n # done adding children for this parent clade\n parent = self.process_clade(current_clade)\n if not parent:\n raise NewickError(\"Parenthesis mismatch.\")\n current_clade = parent\n entering_branch_length = False\n rp_count += 1\n\n elif token == \";\":\n break\n\n elif token.startswith(\":\"):\n # branch length or confidence\n value = float(token[1:])\n if self.values_are_confidence:\n current_clade.confidence = value\n else:\n current_clade.branch_length = value\n\n elif token == \"\\n\":\n pass\n\n else:\n # unquoted node label\n current_clade.name = token\n\n if not lp_count == rp_count:\n raise NewickError(\"Number of open/close parentheses do not match.\")\n\n # if ; token broke out of for loop, there should be no remaining tokens\n try:\n next_token = next(tokens)\n raise NewickError(\n \"Text after semicolon in Newick tree: %s\" % next_token.group()\n )\n except StopIteration:\n pass\n\n self.process_clade(current_clade)\n self.process_clade(root_clade)\n return Newick.Tree(root=root_clade, rooted=self.rooted)\n\n def new_clade(self, parent=None):\n \"\"\"Return new Newick.Clade, optionally with temporary reference to parent.\"\"\"\n clade = Newick.Clade()\n if parent:\n clade.parent = parent\n return clade\n\n def process_clade(self, clade):\n \"\"\"Remove node's parent and return it. 
Final processing of parsed clade.\"\"\"\n if (\n (clade.name)\n and not (self.values_are_confidence or self.comments_are_confidence)\n and (clade.confidence is None)\n and (clade.clades)\n ):\n clade.confidence = _parse_confidence(clade.name)\n if clade.confidence is not None:\n clade.name = None\n\n try:\n parent = clade.parent\n except AttributeError:\n pass\n else:\n parent.clades.append(clade)\n del clade.parent\n return parent\n\n\n# ---------------------------------------------------------\n# Output\n\n\nclass Writer:\n \"\"\"Based on the writer in Bio.Nexus.Trees (str, to_string).\"\"\"\n\n def __init__(self, trees):\n \"\"\"Initialize parameter for Tree Writer object.\"\"\"\n self.trees = trees\n\n def write(self, handle, **kwargs):\n \"\"\"Write this instance's trees to a file handle.\"\"\"\n count = 0\n for treestr in self.to_strings(**kwargs):\n handle.write(treestr + \"\\n\")\n count += 1\n return count\n\n def to_strings(\n self,\n confidence_as_branch_length=False,\n branch_length_only=False,\n plain=False,\n plain_newick=True,\n ladderize=None,\n max_confidence=1.0,\n format_confidence=\"%1.2f\",\n format_branch_length=\"%1.5f\",\n ):\n \"\"\"Return an iterable of PAUP-compatible tree lines.\"\"\"\n # If there's a conflict in the arguments, we override plain=True\n if confidence_as_branch_length or branch_length_only:\n plain = False\n make_info_string = self._info_factory(\n plain,\n confidence_as_branch_length,\n branch_length_only,\n max_confidence,\n format_confidence,\n format_branch_length,\n )\n\n def newickize(clade):\n \"\"\"Convert a node tree to a Newick tree string, recursively.\"\"\"\n label = clade.name or \"\"\n if label:\n unquoted_label = re.match(token_dict[\"unquoted node label\"], label)\n if (not unquoted_label) or (unquoted_label.end() < len(label)):\n label = \"'%s'\" % label.replace(\"\\\\\", \"\\\\\\\\\").replace(\"'\", \"\\\\'\")\n\n if clade.is_terminal(): # terminal\n return label + make_info_string(clade, terminal=True)\n else:\n subtrees = (newickize(sub) for sub in clade)\n return \"(%s)%s\" % (\",\".join(subtrees), label + make_info_string(clade))\n\n # Convert each tree to a string\n for tree in self.trees:\n if ladderize in (\"left\", \"LEFT\", \"right\", \"RIGHT\"):\n # Nexus compatibility shim, kind of\n tree.ladderize(reverse=(ladderize in (\"right\", \"RIGHT\")))\n rawtree = newickize(tree.root) + \";\"\n if plain_newick:\n yield rawtree\n continue\n # Nexus-style (?) notation before the raw Newick tree\n treeline = [\"tree\", (tree.name or \"a_tree\"), \"=\"]\n if tree.weight != 1:\n treeline.append(\"[&W%s]\" % round(float(tree.weight), 3))\n if tree.rooted:\n treeline.append(\"[&R]\")\n treeline.append(rawtree)\n yield \" \".join(treeline)\n\n def _info_factory(\n self,\n plain,\n confidence_as_branch_length,\n branch_length_only,\n max_confidence,\n format_confidence,\n format_branch_length,\n ):\n \"\"\"Return a function that creates a nicely formatted node tag (PRIVATE).\"\"\"\n if plain:\n # Plain tree only. That's easy.\n def make_info_string(clade, terminal=False):\n return _get_comment(clade)\n\n elif confidence_as_branch_length:\n # Support as branchlengths (eg. 
PAUP), ignore actual branchlengths\n def make_info_string(clade, terminal=False):\n if terminal:\n # terminal branches have 100% support\n return (\":\" + format_confidence % max_confidence) + _get_comment(\n clade\n )\n else:\n return (\":\" + format_confidence % clade.confidence) + _get_comment(\n clade\n )\n\n elif branch_length_only:\n # write only branchlengths, ignore support\n def make_info_string(clade, terminal=False):\n return (\n \":\" + format_branch_length % clade.branch_length\n ) + _get_comment(clade)\n\n else:\n # write support and branchlengths (e.g. .con tree of mrbayes)\n def make_info_string(clade, terminal=False):\n if (\n terminal\n or not hasattr(clade, \"confidence\")\n or clade.confidence is None\n ):\n return (\":\" + format_branch_length) % (\n clade.branch_length or 0.0\n ) + _get_comment(clade)\n else:\n return (format_confidence + \":\" + format_branch_length) % (\n clade.confidence,\n clade.branch_length or 0.0,\n ) + _get_comment(clade)\n\n return make_info_string\n",
"path": "Bio/Phylo/NewickIO.py"
}
] | [
{
"content": "# Copyright (C) 2009 by Eric Talevich ([email protected])\n# Based on Bio.Nexus, copyright 2005-2008 by Frank Kauff & Cymon J. Cox.\n# All rights reserved.\n#\n# This file is part of the Biopython distribution and governed by your\n# choice of the \"Biopython License Agreement\" or the \"BSD 3-Clause License\".\n# Please see the LICENSE file that should have been included as part of this\n# package.\n\n\"\"\"I/O function wrappers for the Newick file format.\n\nSee: http://evolution.genetics.washington.edu/phylip/newick_doc.html\n\"\"\"\n\nimport re\nfrom io import StringIO\n\nfrom Bio.Phylo import Newick\n\n\nclass NewickError(Exception):\n \"\"\"Exception raised when Newick object construction cannot continue.\"\"\"\n\n pass\n\n\ntokens = [\n (r\"\\(\", \"open parens\"),\n (r\"\\)\", \"close parens\"),\n (r\"[^\\s\\(\\)\\[\\]\\'\\:\\;\\,]+\", \"unquoted node label\"),\n (r\"\\:\\ ?[+-]?[0-9]*\\.?[0-9]+([eE][+-]?[0-9]+)?\", \"edge length\"),\n (r\"\\,\", \"comma\"),\n (r\"\\[(\\\\.|[^\\]])*\\]\", \"comment\"),\n (r\"\\'(\\\\.|[^\\'])*\\'\", \"quoted node label\"),\n (r\"\\;\", \"semicolon\"),\n (r\"\\n\", \"newline\"),\n]\ntokenizer = re.compile(\"(%s)\" % \"|\".join(token[0] for token in tokens))\ntoken_dict = {name: re.compile(token) for token, name in tokens}\n\n\n# ---------------------------------------------------------\n# Public API\n\n\ndef parse(handle, **kwargs):\n \"\"\"Iterate over the trees in a Newick file handle.\n\n :returns: generator of Bio.Phylo.Newick.Tree objects.\n\n \"\"\"\n return Parser(handle).parse(**kwargs)\n\n\ndef write(trees, handle, plain=False, **kwargs):\n \"\"\"Write a trees in Newick format to the given file handle.\n\n :returns: number of trees written.\n\n \"\"\"\n return Writer(trees).write(handle, plain=plain, **kwargs)\n\n\n# ---------------------------------------------------------\n# Input\n\n\ndef _parse_confidence(text):\n if text.isdigit():\n return int(text)\n # NB: Could make this more consistent by treating as a percentage\n # return int(text) / 100.\n try:\n return float(text)\n # NB: This should be in [0.0, 1.0], but who knows what people will do\n # assert 0 <= current_clade.confidence <= 1\n except ValueError:\n return None\n\n\ndef _format_comment(text):\n return \"[%s]\" % (text.replace(\"[\", \"\\\\[\").replace(\"]\", \"\\\\]\"))\n\n\ndef _get_comment(clade):\n try:\n comment = clade.comment\n except AttributeError:\n pass\n else:\n if comment:\n return _format_comment(str(comment))\n return \"\"\n\n\nclass Parser:\n \"\"\"Parse a Newick tree given a file handle.\n\n Based on the parser in ``Bio.Nexus.Trees``.\n \"\"\"\n\n def __init__(self, handle):\n \"\"\"Initialize file handle for the Newick Tree.\"\"\"\n if handle.read(0) != \"\":\n raise ValueError(\"Newick files must be opened in text mode\") from None\n self.handle = handle\n\n @classmethod\n def from_string(cls, treetext):\n \"\"\"Instantiate the Newick Tree class from the given string.\"\"\"\n handle = StringIO(treetext)\n return cls(handle)\n\n def parse(\n self, values_are_confidence=False, comments_are_confidence=False, rooted=False\n ):\n \"\"\"Parse the text stream this object was initialized with.\"\"\"\n self.values_are_confidence = values_are_confidence\n self.comments_are_confidence = comments_are_confidence\n self.rooted = rooted\n buf = \"\"\n for line in self.handle:\n buf += line.rstrip()\n if buf.endswith(\";\"):\n yield self._parse_tree(buf)\n buf = \"\"\n if buf:\n # Last tree is missing a terminal ';' character -- that's OK\n yield 
self._parse_tree(buf)\n\n def _parse_tree(self, text):\n \"\"\"Parse the text representation into an Tree object (PRIVATE).\"\"\"\n tokens = re.finditer(tokenizer, text.strip())\n\n new_clade = self.new_clade\n root_clade = new_clade()\n\n current_clade = root_clade\n entering_branch_length = False\n\n lp_count = 0\n rp_count = 0\n for match in tokens:\n token = match.group()\n\n if token.startswith(\"'\"):\n # quoted label; add characters to clade name\n current_clade.name = token[1:-1]\n\n elif token.startswith(\"[\"):\n # comment\n current_clade.comment = token[1:-1]\n if self.comments_are_confidence:\n # Try to use this comment as a numeric support value\n current_clade.confidence = _parse_confidence(current_clade.comment)\n\n elif token == \"(\":\n # start a new clade, which is a child of the current clade\n current_clade = new_clade(current_clade)\n entering_branch_length = False\n lp_count += 1\n\n elif token == \",\":\n # if the current clade is the root, then the external parentheses\n # are missing and a new root should be created\n if current_clade is root_clade:\n root_clade = new_clade()\n current_clade.parent = root_clade\n # start a new child clade at the same level as the current clade\n parent = self.process_clade(current_clade)\n current_clade = new_clade(parent)\n entering_branch_length = False\n\n elif token == \")\":\n # done adding children for this parent clade\n parent = self.process_clade(current_clade)\n if not parent:\n raise NewickError(\"Parenthesis mismatch.\")\n current_clade = parent\n entering_branch_length = False\n rp_count += 1\n\n elif token == \";\":\n break\n\n elif token.startswith(\":\"):\n # branch length or confidence\n value = float(token[1:])\n if self.values_are_confidence:\n current_clade.confidence = value\n else:\n current_clade.branch_length = value\n\n elif token == \"\\n\":\n pass\n\n else:\n # unquoted node label\n current_clade.name = token\n\n if not lp_count == rp_count:\n raise NewickError(\"Number of open/close parentheses do not match.\")\n\n # if ; token broke out of for loop, there should be no remaining tokens\n try:\n next_token = next(tokens)\n raise NewickError(\n \"Text after semicolon in Newick tree: %s\" % next_token.group()\n )\n except StopIteration:\n pass\n\n self.process_clade(current_clade)\n self.process_clade(root_clade)\n return Newick.Tree(root=root_clade, rooted=self.rooted)\n\n def new_clade(self, parent=None):\n \"\"\"Return new Newick.Clade, optionally with temporary reference to parent.\"\"\"\n clade = Newick.Clade()\n if parent:\n clade.parent = parent\n return clade\n\n def process_clade(self, clade):\n \"\"\"Remove node's parent and return it. 
Final processing of parsed clade.\"\"\"\n if (\n (clade.name)\n and not (self.values_are_confidence or self.comments_are_confidence)\n and (clade.confidence is None)\n and (clade.clades)\n ):\n clade.confidence = _parse_confidence(clade.name)\n if clade.confidence is not None:\n clade.name = None\n\n try:\n parent = clade.parent\n except AttributeError:\n pass\n else:\n parent.clades.append(clade)\n del clade.parent\n return parent\n\n\n# ---------------------------------------------------------\n# Output\n\n\nclass Writer:\n \"\"\"Based on the writer in Bio.Nexus.Trees (str, to_string).\"\"\"\n\n def __init__(self, trees):\n \"\"\"Initialize parameter for Tree Writer object.\"\"\"\n self.trees = trees\n\n def write(self, handle, **kwargs):\n \"\"\"Write this instance's trees to a file handle.\"\"\"\n count = 0\n for treestr in self.to_strings(**kwargs):\n handle.write(treestr + \"\\n\")\n count += 1\n return count\n\n def to_strings(\n self,\n confidence_as_branch_length=False,\n branch_length_only=False,\n plain=False,\n plain_newick=True,\n ladderize=None,\n max_confidence=1.0,\n format_confidence=\"%1.2f\",\n format_branch_length=\"%1.5f\",\n ):\n \"\"\"Return an iterable of PAUP-compatible tree lines.\"\"\"\n # If there's a conflict in the arguments, we override plain=True\n if confidence_as_branch_length or branch_length_only:\n plain = False\n make_info_string = self._info_factory(\n plain,\n confidence_as_branch_length,\n branch_length_only,\n max_confidence,\n format_confidence,\n format_branch_length,\n )\n\n def newickize(clade):\n \"\"\"Convert a node tree to a Newick tree string, recursively.\"\"\"\n label = clade.name or \"\"\n if label:\n unquoted_label = re.match(token_dict[\"unquoted node label\"], label)\n if (not unquoted_label) or (unquoted_label.end() < len(label)):\n label = \"'%s'\" % label.replace(\"\\\\\", \"\\\\\\\\\").replace(\"'\", \"\\\\'\")\n\n if clade.is_terminal(): # terminal\n return label + make_info_string(clade, terminal=True)\n else:\n subtrees = (newickize(sub) for sub in clade)\n return \"(%s)%s\" % (\",\".join(subtrees), label + make_info_string(clade))\n\n # Convert each tree to a string\n for tree in self.trees:\n if ladderize in (\"left\", \"LEFT\", \"right\", \"RIGHT\"):\n # Nexus compatibility shim, kind of\n tree.ladderize(reverse=(ladderize in (\"right\", \"RIGHT\")))\n rawtree = newickize(tree.root) + \";\"\n if plain_newick:\n yield rawtree\n continue\n # Nexus-style (?) notation before the raw Newick tree\n treeline = [\"tree\", (tree.name or \"a_tree\"), \"=\"]\n if tree.weight != 1:\n treeline.append(\"[&W%s]\" % round(float(tree.weight), 3))\n if tree.rooted:\n treeline.append(\"[&R]\")\n treeline.append(rawtree)\n yield \" \".join(treeline)\n\n def _info_factory(\n self,\n plain,\n confidence_as_branch_length,\n branch_length_only,\n max_confidence,\n format_confidence,\n format_branch_length,\n ):\n \"\"\"Return a function that creates a nicely formatted node tag (PRIVATE).\"\"\"\n if plain:\n # Plain tree only. That's easy.\n def make_info_string(clade, terminal=False):\n return _get_comment(clade)\n\n elif confidence_as_branch_length:\n # Support as branchlengths (eg. 
PAUP), ignore actual branchlengths\n def make_info_string(clade, terminal=False):\n if terminal:\n # terminal branches have 100% support\n return (\":\" + format_confidence % max_confidence) + _get_comment(\n clade\n )\n else:\n return (\":\" + format_confidence % clade.confidence) + _get_comment(\n clade\n )\n\n elif branch_length_only:\n # write only branchlengths, ignore support\n def make_info_string(clade, terminal=False):\n return (\n \":\" + format_branch_length % clade.branch_length\n ) + _get_comment(clade)\n\n else:\n # write support and branchlengths (e.g. .con tree of mrbayes)\n def make_info_string(clade, terminal=False):\n if (\n terminal\n or not hasattr(clade, \"confidence\")\n or clade.confidence is None\n ):\n return (\":\" + format_branch_length) % (\n clade.branch_length or 0.0\n ) + _get_comment(clade)\n else:\n return (format_confidence + \":\" + format_branch_length) % (\n clade.confidence,\n clade.branch_length or 0.0,\n ) + _get_comment(clade)\n\n return make_info_string\n",
"path": "Bio/Phylo/NewickIO.py"
}
] | diff --git a/Bio/Phylo/NewickIO.py b/Bio/Phylo/NewickIO.py
index 99b77816756..b9af509cc29 100644
--- a/Bio/Phylo/NewickIO.py
+++ b/Bio/Phylo/NewickIO.py
@@ -84,7 +84,7 @@ def _format_comment(text):
 
 def _get_comment(clade):
     try:
-        comment = clade.coment
+        comment = clade.comment
     except AttributeError:
         pass
     else:
|
sanic-org__sanic-1292 | New release on PyPI?
Hello,
I was looking for a tool to auto-reload my code while developing, and I found this commit: https://github.com/channelcat/sanic/commit/52c2a8484e6aa5fa13aaade49e1f2597dd006e15
So Sanic has integrated it since December 07, 2017, but the latest version on PyPI dates from the day before (https://github.com/channelcat/sanic/commit/1ea3ab7fe8ab03a6ddf4d75a3de8cb719f4c584c): https://pypi.org/project/Sanic/#history
Is it possible to release a new version on PyPI, please? Other features (like the UUID support in routes) are also interesting :)
Thanks in advance!
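Not part of the original request, but a quick way to confirm which release pip actually pulled from PyPI (anything still reporting 0.7.0 predates the auto-reload commit linked above); this is just a minimal, hedged sketch of a Sanic app plus a version check:

```python
import sanic
from sanic import Sanic
from sanic.response import text

# Report the installed release; 0.7.0 is the last version published to PyPI
# at the time of this issue, so the auto-reload work is not included in it.
print(sanic.__version__)

app = Sanic(__name__)


@app.route("/")
async def index(request):
    return text("hello from Sanic {}".format(sanic.__version__))


if __name__ == "__main__":
    app.run(host="127.0.0.1", port=8000)
```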
| [
{
"content": "from sanic.app import Sanic\nfrom sanic.blueprints import Blueprint\n\n__version__ = '0.7.0'\n\n__all__ = ['Sanic', 'Blueprint']\n",
"path": "sanic/__init__.py"
}
] | [
{
"content": "from sanic.app import Sanic\nfrom sanic.blueprints import Blueprint\n\n__version__ = '0.8.0'\n\n__all__ = ['Sanic', 'Blueprint']\n",
"path": "sanic/__init__.py"
}
] | diff --git a/sanic/__init__.py b/sanic/__init__.py
index 78bc7bd9f5..5e6ff4daff 100644
--- a/sanic/__init__.py
+++ b/sanic/__init__.py
@@ -1,6 +1,6 @@
 from sanic.app import Sanic
 from sanic.blueprints import Blueprint
 
-__version__ = '0.7.0'
+__version__ = '0.8.0'
 
 __all__ = ['Sanic', 'Blueprint']
|
biolab__orange3-2988 | cannot install add-ons from the menu
##### Orange version
Latest version, on Linux
##### Expected behavior
Getting a list of add-ons via Options > Add-ons.
##### Actual behavior
A pop-up appears that says "Error encountered", with the following traceback:
```
Traceback (most recent call last):
  File "/home/roudenko/.local/lib/python3.5/site-packages/Orange/canvas/application/canvasmain.py", line 1625, in open_addons
    if not have_install_permissions():
  File "/home/roudenko/.local/lib/python3.5/site-packages/Orange/canvas/application/addons.py", line 755, in have_install_permissions
    with open(fn, "w"):
FileNotFoundError: [Errno 2] No such file or directory: '/usr/lib/python3.5/site-packages/test_write_19468'
```
##### Steps to reproduce the behavior
I guess the problem is related to the fact that I installed Orange locally.
Indeed, I do not even have the directory /usr/lib/python3.5/site-packages.
##### Additional info (worksheets, data, screenshots, ...)
In this case, is the only way to get add-ons to install them from your Git repositories using pip?
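Not from the report itself — below is a minimal sketch of how the permission check could tolerate a missing site-packages directory. It is an assumption about one possible fix, not necessarily the patch that was merged: any `OSError` (including `FileNotFoundError` and `PermissionError`) is treated the same as lacking write access.

```python
import os
import sysconfig


def have_install_permissions():
    """Check whether we can create a file in the site-packages folder.

    Sketch of a more forgiving variant: a missing directory (as with a
    purely local/user install) is treated like a lack of permissions
    instead of propagating FileNotFoundError to the caller.
    """
    try:
        fn = os.path.join(sysconfig.get_path("purelib"),
                          "test_write_" + str(os.getpid()))
        with open(fn, "w"):
            pass
        os.remove(fn)
        return True
    except OSError:  # PermissionError and FileNotFoundError are both OSError subclasses
        return False
```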
| [
{
"content": "import sys\nimport sysconfig\nimport os\nimport logging\nimport re\nimport errno\nimport shlex\nimport subprocess\nimport itertools\nimport concurrent.futures\n\nfrom collections import namedtuple, deque\nfrom xml.sax.saxutils import escape\nfrom distutils import version\nimport urllib.request\nimport xmlrpc.client\n\nimport pkg_resources\n\ntry:\n import docutils.core\nexcept ImportError:\n docutils = None\n\nfrom AnyQt.QtWidgets import (\n QWidget, QDialog, QLabel, QLineEdit, QTreeView, QHeaderView,\n QTextBrowser, QDialogButtonBox, QProgressDialog,\n QVBoxLayout, QStyle, QStyledItemDelegate, QStyleOptionViewItem,\n QApplication, QHBoxLayout, QCheckBox\n)\n\nfrom AnyQt.QtGui import (\n QStandardItemModel, QStandardItem, QPalette, QTextOption\n)\n\nfrom AnyQt.QtCore import (\n QSortFilterProxyModel, QItemSelectionModel,\n Qt, QObject, QMetaObject, QEvent, QSize, QTimer, QThread, Q_ARG,\n QSettings)\nfrom AnyQt.QtCore import pyqtSignal as Signal, pyqtSlot as Slot\n\nfrom ..gui.utils import message_warning, message_information, \\\n message_critical as message_error, \\\n OSX_NSURL_toLocalFile\nfrom ..help.manager import get_dist_meta, trim, parse_meta\n\nlog = logging.getLogger(__name__)\n\nOFFICIAL_ADDONS = [\n \"Orange3-Bioinformatics\",\n \"Orange3-Prototypes\",\n \"Orange3-Text\",\n \"Orange3-Network\",\n \"Orange3-Associate\",\n \"Orange-Spectroscopy\",\n \"Orange3-Textable\",\n \"Orange3-Educational\",\n \"Orange3-Geo\",\n \"Orange3-ImageAnalytics\",\n \"Orange3-Timeseries\",\n]\n\nInstallable = namedtuple(\n \"Installable\",\n [\"name\",\n \"version\",\n \"summary\",\n \"description\",\n \"package_url\",\n \"release_urls\"]\n)\n\nReleaseUrl = namedtuple(\n \"ReleaseUrl\",\n [\"filename\",\n \"url\",\n \"size\",\n \"python_version\",\n \"package_type\"\n ]\n)\n\nAvailable = namedtuple(\n \"Available\",\n [\"installable\"]\n)\n\nInstalled = namedtuple(\n \"Installed\",\n [\"installable\",\n \"local\"]\n)\n\n\ndef is_updatable(item):\n if isinstance(item, Available):\n return False\n elif item.installable is None:\n return False\n else:\n inst, dist = item\n try:\n v1 = version.StrictVersion(dist.version)\n v2 = version.StrictVersion(inst.version)\n except ValueError:\n pass\n else:\n return v1 < v2\n\n return (version.LooseVersion(dist.version) <\n version.LooseVersion(inst.version))\n\n\nclass TristateCheckItemDelegate(QStyledItemDelegate):\n \"\"\"\n A QStyledItemDelegate which properly toggles Qt.ItemIsTristate check\n state transitions on user interaction.\n \"\"\"\n def editorEvent(self, event, model, option, index):\n flags = model.flags(index)\n if not flags & Qt.ItemIsUserCheckable or \\\n not option.state & QStyle.State_Enabled or \\\n not flags & Qt.ItemIsEnabled:\n return False\n\n checkstate = model.data(index, Qt.CheckStateRole)\n if checkstate is None:\n return False\n\n widget = option.widget\n style = widget.style() if widget else QApplication.style()\n if event.type() in {QEvent.MouseButtonPress, QEvent.MouseButtonRelease,\n QEvent.MouseButtonDblClick}:\n pos = event.pos()\n opt = QStyleOptionViewItem(option)\n self.initStyleOption(opt, index)\n rect = style.subElementRect(\n QStyle.SE_ItemViewItemCheckIndicator, opt, widget)\n\n if event.button() != Qt.LeftButton or not rect.contains(pos):\n return False\n\n if event.type() in {QEvent.MouseButtonPress,\n QEvent.MouseButtonDblClick}:\n return True\n\n elif event.type() == QEvent.KeyPress:\n if event.key() != Qt.Key_Space and event.key() != Qt.Key_Select:\n return False\n else:\n return False\n\n if 
model.flags(index) & Qt.ItemIsTristate:\n checkstate = (checkstate + 1) % 3\n else:\n checkstate = \\\n Qt.Unchecked if checkstate == Qt.Checked else Qt.Checked\n\n return model.setData(index, checkstate, Qt.CheckStateRole)\n\n\ndef get_meta_from_archive(path):\n \"\"\"Return project name, version and summary extracted from\n sdist or wheel metadata in a ZIP or tar.gz archive, or None if metadata\n can't be found.\"\"\"\n\n def is_metadata(fname):\n return fname.endswith(('PKG-INFO', 'METADATA'))\n\n meta = None\n if path.endswith(('.zip', '.whl')):\n from zipfile import ZipFile\n with ZipFile(path) as archive:\n meta = next(filter(is_metadata, archive.namelist()), None)\n if meta:\n meta = archive.read(meta).decode('utf-8')\n elif path.endswith(('.tar.gz', '.tgz')):\n import tarfile\n with tarfile.open(path) as archive:\n meta = next(filter(is_metadata, archive.getnames()), None)\n if meta:\n meta = archive.extractfile(meta).read().decode('utf-8')\n if meta:\n meta = parse_meta(meta)\n return [meta.get(key, '')\n for key in ('Name', 'Version', 'Description', 'Summary')]\n\n\ndef cleanup(name, sep=\"-\"):\n \"\"\"Used for sanitizing addon names. The function removes Orange/Orange3\n from the name and adds spaces before upper letters of the leftover to\n separate its words.\"\"\"\n prefix, separator, postfix = name.partition(sep)\n name = postfix if separator == sep else prefix\n return \" \".join(re.findall(\"[A-Z][a-z]*\", name[0].upper() + name[1:]))\n\n\nclass SortFilterProxyTrusted(QSortFilterProxyModel):\n\n show_only_trusted = True\n\n def set_show_only_trusted(self, s):\n self.show_only_trusted = s\n self.invalidateFilter()\n\n def filterAcceptsRow(self, source_row, source_parent):\n if self.show_only_trusted:\n model = self.sourceModel()\n item = self.sourceModel().data(model.index(source_row, 1), Qt.UserRole)\n if isinstance(item, Available) and item.installable.name not in OFFICIAL_ADDONS:\n return False\n return super().filterAcceptsRow(source_row, source_parent)\n\n\nclass AddonManagerWidget(QWidget):\n\n statechanged = Signal()\n\n def __init__(self, parent=None, **kwargs):\n super(AddonManagerWidget, self).__init__(parent, **kwargs)\n self.__items = []\n self.setLayout(QVBoxLayout())\n\n self.__header = QLabel(\n wordWrap=True,\n textFormat=Qt.RichText\n )\n self.__search = QLineEdit(\n placeholderText=self.tr(\"Filter\")\n )\n self.__only_trusted = QCheckBox(\n self.tr(\"Show only trusted add-ons\"),\n )\n\n topline = QHBoxLayout()\n topline.addWidget(self.__search)\n topline.addWidget(self.__only_trusted)\n self.layout().addLayout(topline)\n\n self.__only_trusted.setChecked(True)\n self.show_only_trusted = True\n self.__only_trusted.stateChanged.connect(self._show_only_trusted_changed)\n\n self.__view = view = QTreeView(\n rootIsDecorated=False,\n editTriggers=QTreeView.NoEditTriggers,\n selectionMode=QTreeView.SingleSelection,\n alternatingRowColors=True\n )\n self.__view.setItemDelegateForColumn(0, TristateCheckItemDelegate())\n self.layout().addWidget(view)\n\n self.__model = model = QStandardItemModel()\n model.setHorizontalHeaderLabels([\"\", \"Name\", \"Version\", \"Action\"])\n model.dataChanged.connect(self.__data_changed)\n self.__proxy = proxy = SortFilterProxyTrusted(\n filterKeyColumn=1,\n filterCaseSensitivity=Qt.CaseInsensitive\n )\n proxy.setSourceModel(model)\n self.__search.textChanged.connect(proxy.setFilterFixedString)\n\n view.setModel(proxy)\n view.selectionModel().selectionChanged.connect(\n self.__update_details\n )\n header = self.__view.header()\n 
header.setSectionResizeMode(0, QHeaderView.Fixed)\n header.setSectionResizeMode(2, QHeaderView.ResizeToContents)\n\n self.__details = QTextBrowser(\n frameShape=QTextBrowser.NoFrame,\n readOnly=True,\n lineWrapMode=QTextBrowser.WidgetWidth,\n openExternalLinks=True,\n )\n\n self.__details.setWordWrapMode(QTextOption.WordWrap)\n palette = QPalette(self.palette())\n palette.setColor(QPalette.Base, Qt.transparent)\n self.__details.setPalette(palette)\n self.layout().addWidget(self.__details)\n\n def _show_only_trusted_changed(self):\n self.__proxy.set_show_only_trusted(self.__only_trusted.isChecked())\n\n def set_items(self, items):\n self.__items = items\n model = self.__model\n model.clear()\n model.setHorizontalHeaderLabels([\"\", \"Name\", \"Version\", \"Action\"])\n\n for item in items:\n if isinstance(item, Installed):\n installed = True\n ins, dist = item\n name = dist.project_name\n summary = get_dist_meta(dist).get(\"Summary\", \"\")\n version = ins.version if ins is not None else dist.version\n else:\n installed = False\n (ins,) = item\n dist = None\n name = ins.name\n summary = ins.summary\n version = ins.version\n\n updatable = is_updatable(item)\n\n item1 = QStandardItem()\n item1.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable |\n Qt.ItemIsUserCheckable |\n (Qt.ItemIsTristate if updatable else 0))\n\n if installed and updatable:\n item1.setCheckState(Qt.PartiallyChecked)\n elif installed:\n item1.setCheckState(Qt.Checked)\n else:\n item1.setCheckState(Qt.Unchecked)\n\n item2 = QStandardItem(cleanup(name))\n\n item2.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable)\n item2.setToolTip(summary)\n item2.setData(item, Qt.UserRole)\n\n if updatable:\n version = \"{} < {}\".format(dist.version, ins.version)\n\n item3 = QStandardItem(version)\n item3.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable)\n\n item4 = QStandardItem()\n item4.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable)\n\n model.appendRow([item1, item2, item3, item4])\n\n self.__view.resizeColumnToContents(0)\n self.__view.setColumnWidth(\n 1, max(150, self.__view.sizeHintForColumn(1)))\n self.__view.setColumnWidth(\n 2, max(150, self.__view.sizeHintForColumn(2)))\n\n if self.__items:\n self.__view.selectionModel().select(\n self.__view.model().index(0, 0),\n QItemSelectionModel.Select | QItemSelectionModel.Rows\n )\n\n def item_state(self):\n steps = []\n for i, item in enumerate(self.__items):\n modelitem = self.__model.item(i, 0)\n state = modelitem.checkState()\n if modelitem.flags() & Qt.ItemIsTristate and state == Qt.Checked:\n steps.append((Upgrade, item))\n elif isinstance(item, Available) and state == Qt.Checked:\n steps.append((Install, item))\n elif isinstance(item, Installed) and state == Qt.Unchecked:\n steps.append((Uninstall, item))\n\n return steps\n\n def __selected_row(self):\n indices = self.__view.selectedIndexes()\n if indices:\n proxy = self.__view.model()\n indices = [proxy.mapToSource(index) for index in indices]\n return indices[0].row()\n else:\n return -1\n\n def set_install_projects(self, names):\n \"\"\"Mark for installation the add-ons that match any of names\"\"\"\n model = self.__model\n for row in range(model.rowCount()):\n item = model.item(row, 1)\n if item.text() in names:\n model.item(row, 0).setCheckState(Qt.Checked)\n\n def __data_changed(self, topleft, bottomright):\n rows = range(topleft.row(), bottomright.row() + 1)\n for i in rows:\n modelitem = self.__model.item(i, 0)\n actionitem = self.__model.item(i, 3)\n item = self.__items[i]\n\n state = modelitem.checkState()\n flags = 
modelitem.flags()\n\n if flags & Qt.ItemIsTristate and state == Qt.Checked:\n actionitem.setText(\"Update\")\n elif isinstance(item, Available) and state == Qt.Checked:\n actionitem.setText(\"Install\")\n elif isinstance(item, Installed) and state == Qt.Unchecked:\n actionitem.setText(\"Uninstall\")\n else:\n actionitem.setText(\"\")\n self.statechanged.emit()\n\n def __update_details(self):\n index = self.__selected_row()\n if index == -1:\n self.__details.setText(\"\")\n else:\n item = self.__model.item(index, 1)\n item = item.data(Qt.UserRole)\n assert isinstance(item, (Installed, Available))\n text = self._detailed_text(item)\n self.__details.setText(text)\n\n def _detailed_text(self, item):\n if isinstance(item, Installed):\n remote, dist = item\n if remote is None:\n meta = get_dist_meta(dist)\n description = meta.get(\"Description\") or meta.get('Summary')\n else:\n description = remote.description\n else:\n description = item[0].description\n\n if docutils is not None:\n try:\n html = docutils.core.publish_string(\n trim(description),\n writer_name=\"html\",\n settings_overrides={\n \"output-encoding\": \"utf-8\",\n # \"embed-stylesheet\": False,\n # \"stylesheet\": [],\n # \"stylesheet_path\": []\n }\n ).decode(\"utf-8\")\n\n except docutils.utils.SystemMessage:\n html = \"<pre>{}<pre>\".format(escape(description))\n except Exception:\n html = \"<pre>{}<pre>\".format(escape(description))\n else:\n html = \"<pre>{}<pre>\".format(escape(description))\n return html\n\n def sizeHint(self):\n return QSize(480, 420)\n\n\ndef method_queued(method, sig, conntype=Qt.QueuedConnection):\n name = method.__name__\n obj = method.__self__\n assert isinstance(obj, QObject)\n\n def call(*args):\n args = [Q_ARG(atype, arg) for atype, arg in zip(sig, args)]\n return QMetaObject.invokeMethod(obj, name, conntype, *args)\n\n return call\n\n\nclass AddonManagerDialog(QDialog):\n _packages = None\n\n def __init__(self, parent=None, **kwargs):\n super().__init__(parent, acceptDrops=True, **kwargs)\n self.setLayout(QVBoxLayout())\n self.layout().setContentsMargins(0, 0, 0, 0)\n\n self.addonwidget = AddonManagerWidget()\n self.layout().addWidget(self.addonwidget)\n\n info_bar = QWidget()\n info_layout = QHBoxLayout()\n info_bar.setLayout(info_layout)\n self.layout().addWidget(info_bar)\n\n buttons = QDialogButtonBox(\n orientation=Qt.Horizontal,\n standardButtons=QDialogButtonBox.Ok | QDialogButtonBox.Cancel\n )\n buttons.accepted.connect(self.__accepted)\n buttons.rejected.connect(self.reject)\n\n self.layout().addWidget(buttons)\n\n self._executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)\n if AddonManagerDialog._packages is None:\n self._f_pypi_addons = self._executor.submit(list_pypi_addons)\n else:\n self._f_pypi_addons = concurrent.futures.Future()\n self._f_pypi_addons.set_result(AddonManagerDialog._packages)\n\n self._f_pypi_addons.add_done_callback(\n method_queued(self._set_packages, (object,))\n )\n\n self.__progress = None # type: Optional[QProgressDialog]\n self.__thread = None\n self.__installer = None\n\n if not self._f_pypi_addons.done():\n self.__progressDialog()\n\n def __progressDialog(self):\n if self.__progress is None:\n self.__progress = QProgressDialog(\n self,\n minimum=0, maximum=0,\n labelText=self.tr(\"Retrieving package list\"),\n sizeGripEnabled=False,\n windowTitle=\"Progress\",\n )\n self.__progress.setWindowModality(Qt.WindowModal)\n self.__progress.canceled.connect(self.reject)\n self.__progress.hide()\n\n return self.__progress\n\n @Slot(object)\n def 
_set_packages(self, f):\n if self.__progress is not None:\n self.__progress.hide()\n self.__progress.deleteLater()\n self.__progress = None\n\n try:\n packages = f.result()\n except (IOError, OSError, ValueError) as err:\n message_warning(\n \"Could not retrieve package list\",\n title=\"Error\",\n informative_text=str(err),\n parent=self\n )\n log.error(str(err), exc_info=True)\n packages = []\n except Exception:\n raise\n else:\n AddonManagerDialog._packages = packages\n\n installed = list_installed_addons()\n dists = {dist.project_name: dist for dist in installed}\n packages = {pkg.name: pkg for pkg in packages}\n\n # For every pypi available distribution not listed by\n # list_installed_addons, check if it is actually already\n # installed.\n ws = pkg_resources.WorkingSet()\n for pkg_name in set(packages.keys()).difference(set(dists.keys())):\n try:\n d = ws.find(pkg_resources.Requirement.parse(pkg_name))\n except pkg_resources.VersionConflict:\n pass\n except ValueError:\n # Requirements.parse error ?\n pass\n else:\n if d is not None:\n dists[d.project_name] = d\n\n project_names = unique(\n itertools.chain(packages.keys(), dists.keys())\n )\n\n items = []\n for name in project_names:\n if name in dists and name in packages:\n item = Installed(packages[name], dists[name])\n elif name in dists:\n item = Installed(None, dists[name])\n elif name in packages:\n item = Available(packages[name])\n else:\n assert False\n items.append(item)\n\n self.addonwidget.set_items(items)\n\n def showEvent(self, event):\n super().showEvent(event)\n\n if not self._f_pypi_addons.done() and self.__progress is not None:\n QTimer.singleShot(0, self.__progress.show)\n\n def done(self, retcode):\n super().done(retcode)\n self._f_pypi_addons.cancel()\n self._executor.shutdown(wait=False)\n if self.__thread is not None:\n self.__thread.quit()\n self.__thread.wait(1000)\n\n def closeEvent(self, event):\n super().closeEvent(event)\n if self.__progress is not None:\n self.__progress.hide()\n self._f_pypi_addons.cancel()\n self._executor.shutdown(wait=False)\n\n if self.__thread is not None:\n self.__thread.quit()\n self.__thread.wait(1000)\n\n ADDON_EXTENSIONS = ('.zip', '.whl', '.tar.gz')\n\n def dragEnterEvent(self, event):\n urls = event.mimeData().urls()\n if any((OSX_NSURL_toLocalFile(url) or url.toLocalFile())\n .endswith(self.ADDON_EXTENSIONS) for url in urls):\n event.acceptProposedAction()\n\n def dropEvent(self, event):\n \"\"\"Allow dropping add-ons (zip or wheel archives) on this dialog to\n install them\"\"\"\n packages = []\n names = []\n for url in event.mimeData().urls():\n path = OSX_NSURL_toLocalFile(url) or url.toLocalFile()\n if path.endswith(self.ADDON_EXTENSIONS):\n name, vers, summary, descr = (get_meta_from_archive(path) or\n (os.path.basename(path), '', '', ''))\n names.append(cleanup(name))\n packages.append(\n Installable(name, vers, summary,\n descr or summary, path, [path]))\n future = concurrent.futures.Future()\n future.set_result((AddonManagerDialog._packages or []) + packages)\n self._set_packages(future)\n self.addonwidget.set_install_projects(names)\n\n def __accepted(self):\n steps = self.addonwidget.item_state()\n\n if steps:\n # Move all uninstall steps to the front\n steps = sorted(\n steps, key=lambda step: 0 if step[0] == Uninstall else 1\n )\n self.__installer = Installer(steps=steps)\n self.__thread = QThread(self)\n self.__thread.start()\n\n self.__installer.moveToThread(self.__thread)\n self.__installer.finished.connect(self.__on_installer_finished)\n 
self.__installer.error.connect(self.__on_installer_error)\n\n progress = self.__progressDialog()\n self.__installer.installStatusChanged.connect(progress.setLabelText)\n progress.show()\n progress.setLabelText(\"Installing\")\n\n self.__installer.start()\n\n else:\n self.accept()\n\n def __on_installer_error(self, command, pkg, retcode, output):\n message_error(\n \"An error occurred while running a subprocess\", title=\"Error\",\n informative_text=\"{} exited with non zero status.\".format(command),\n details=\"\".join(output),\n parent=self\n )\n self.reject()\n\n def __on_installer_finished(self):\n message = \"Please restart Orange for changes to take effect.\"\n message_information(message, parent=self)\n self.accept()\n\n\nclass SafeUrllibTransport(xmlrpc.client.Transport):\n \"\"\"Urllib for HTTPS connections that automatically handles proxies.\"\"\"\n\n def single_request(self, host, handler, request_body, verbose=False):\n req = urllib.request.Request('https://%s%s' % (host, handler), request_body)\n req.add_header('User-agent', self.user_agent)\n req.add_header('Content-Type', 'text/xml')\n self.verbose = verbose\n opener = urllib.request.build_opener()\n return self.parse_response(opener.open(req))\n\n\ndef list_pypi_addons():\n \"\"\"\n List add-ons available on pypi.\n \"\"\"\n from ..config import ADDON_PYPI_SEARCH_SPEC\n\n pypi = xmlrpc.client.ServerProxy(\n \"https://pypi.python.org/pypi/\",\n transport=SafeUrllibTransport()\n )\n addons = pypi.search(ADDON_PYPI_SEARCH_SPEC)\n\n for addon in OFFICIAL_ADDONS:\n if not any(a for a in addons if a['name'] == addon):\n addons.append({\"name\": addon, \"version\": '0'})\n\n multicall = xmlrpc.client.MultiCall(pypi)\n for addon in addons:\n name = addon[\"name\"]\n multicall.package_releases(name)\n\n releases = multicall()\n multicall = xmlrpc.client.MultiCall(pypi)\n for addon, versions in zip(addons, releases):\n # Workaround for PyPI bug of search not returning the latest versions\n # https://bitbucket.org/pypa/pypi/issues/326/my-package-doesnt-appear-in-the-search\n version_ = max(versions, key=version.LooseVersion)\n\n name = addon[\"name\"]\n multicall.release_data(name, version_)\n\n results = list(multicall())\n packages = []\n\n for release in results:\n if release:\n # ignore releases without actual source/wheel/egg files,\n # or with empty metadata (deleted from PyPi?).\n packages.append(\n Installable(release[\"name\"], release[\"version\"],\n release[\"summary\"], release[\"description\"],\n release[\"package_url\"],\n release[\"package_url\"])\n )\n return packages\n\n\ndef list_installed_addons():\n from ..config import ADDON_ENTRY\n workingset = pkg_resources.WorkingSet(sys.path)\n return [ep.dist for ep in\n workingset.iter_entry_points(ADDON_ENTRY)]\n\n\ndef unique(iterable):\n seen = set()\n\n def observed(el):\n observed = el in seen\n seen.add(el)\n return observed\n\n return (el for el in iterable if not observed(el))\n\n\ndef have_install_permissions():\n \"\"\"Check if we can create a file in the site-packages folder.\n This works on a Win7 miniconda install, where os.access did not. 
\"\"\"\n try:\n fn = os.path.join(sysconfig.get_path(\"purelib\"), \"test_write_\" + str(os.getpid()))\n with open(fn, \"w\"):\n pass\n os.remove(fn)\n return True\n except PermissionError:\n return False\n\n\nInstall, Upgrade, Uninstall = 1, 2, 3\n\n\nclass CommandFailed(Exception):\n def __init__(self, cmd, retcode, output):\n if not isinstance(cmd, str):\n cmd = \" \".join(map(shlex.quote, cmd))\n self.cmd = cmd\n self.retcode = retcode\n self.output = output\n\n\nclass Installer(QObject):\n installStatusChanged = Signal(str)\n started = Signal()\n finished = Signal()\n error = Signal(str, object, int, list)\n\n def __init__(self, parent=None, steps=[]):\n QObject.__init__(self, parent)\n self.__interupt = False\n self.__queue = deque(steps)\n self.pip = PipInstaller()\n self.conda = CondaInstaller()\n\n def start(self):\n QTimer.singleShot(0, self._next)\n\n def interupt(self):\n self.__interupt = True\n\n def setStatusMessage(self, message):\n self.__statusMessage = message\n self.installStatusChanged.emit(message)\n\n @Slot()\n def _next(self):\n command, pkg = self.__queue.popleft()\n try:\n if command == Install:\n self.setStatusMessage(\n \"Installing {}\".format(cleanup(pkg.installable.name)))\n if self.conda:\n self.conda.install(pkg.installable, raise_on_fail=False)\n self.pip.install(pkg.installable)\n elif command == Upgrade:\n self.setStatusMessage(\n \"Upgrading {}\".format(cleanup(pkg.installable.name)))\n if self.conda:\n self.conda.upgrade(pkg.installable, raise_on_fail=False)\n self.pip.upgrade(pkg.installable)\n elif command == Uninstall:\n self.setStatusMessage(\n \"Uninstalling {}\".format(cleanup(pkg.local.project_name)))\n if self.conda:\n try:\n self.conda.uninstall(pkg.local, raise_on_fail=True)\n except CommandFailed:\n self.pip.uninstall(pkg.local)\n else:\n self.pip.uninstall(pkg.local)\n except CommandFailed as ex:\n self.error.emit(\n \"Command failed: python {}\".format(ex.cmd),\n pkg, ex.retcode, ex.output\n )\n return\n\n if self.__queue:\n QTimer.singleShot(0, self._next)\n else:\n self.finished.emit()\n\n\nclass PipInstaller:\n\n def __init__(self):\n arguments = QSettings().value('add-ons/pip-install-arguments', '', type=str)\n self.arguments = shlex.split(arguments)\n\n def install(self, pkg):\n cmd = [\"python\", \"-m\", \"pip\", \"install\"]\n cmd.extend(self.arguments)\n if pkg.package_url.startswith(\"http://\"):\n cmd.append(pkg.name)\n else:\n # Package url is path to the (local) wheel\n cmd.append(pkg.package_url)\n\n run_command(cmd)\n\n def upgrade(self, package):\n # This is done in two steps to avoid upgrading\n # all of the dependencies - faster\n self.upgrade_no_deps(package)\n self.install(package)\n\n def upgrade_no_deps(self, package):\n cmd = [\"python\", \"-m\", \"pip\", \"install\", \"--upgrade\", \"--no-deps\"]\n cmd.extend(self.arguments)\n cmd.append(package.name)\n\n run_command(cmd)\n\n def uninstall(self, dist):\n cmd = [\"python\", \"-m\", \"pip\", \"uninstall\", \"--yes\", dist.project_name]\n run_command(cmd)\n\n\nclass CondaInstaller:\n def __init__(self):\n enabled = QSettings().value('add-ons/allow-conda',\n True, type=bool)\n if enabled:\n self.conda = self._find_conda()\n else:\n self.conda = None\n\n def _find_conda(self):\n executable = sys.executable\n bin = os.path.dirname(executable)\n\n # posix\n conda = os.path.join(bin, \"conda\")\n if os.path.exists(conda):\n return conda\n\n # windows\n conda = os.path.join(bin, \"Scripts\", \"conda.bat\")\n if os.path.exists(conda):\n # \"activate\" conda environment orange 
is running in\n os.environ[\"CONDA_PREFIX\"] = bin\n os.environ[\"CONDA_DEFAULT_ENV\"] = bin\n return conda\n\n def install(self, pkg, raise_on_fail=False):\n cmd = [self.conda, \"install\", \"--yes\", \"--quiet\",\n self._normalize(pkg.name)]\n run_command(cmd, raise_on_fail=raise_on_fail)\n\n def upgrade(self, pkg, raise_on_fail=False):\n cmd = [self.conda, \"upgrade\", \"--yes\", \"--quiet\",\n self._normalize(pkg.name)]\n run_command(cmd, raise_on_fail=raise_on_fail)\n\n def uninstall(self, dist, raise_on_fail=False):\n cmd = [self.conda, \"uninstall\", \"--yes\",\n self._normalize(dist.project_name)]\n run_command(cmd, raise_on_fail=raise_on_fail)\n\n def _normalize(self, name):\n # Conda 4.3.30 is inconsistent, upgrade command is case sensitive\n # while install and uninstall are not. We assume that all conda\n # package names are lowercase which fixes the problems (for now)\n return name.lower()\n\n def __bool__(self):\n return bool(self.conda)\n\n\ndef run_command(command, raise_on_fail=True):\n \"\"\"Run command in a subprocess.\n\n Return `process` return code and output once it completes.\n \"\"\"\n log.info(\"Running %s\", \" \".join(command))\n\n if command[0] == \"python\":\n process = python_process(command[1:])\n else:\n process = create_process(command)\n\n output = []\n while process.poll() is None:\n try:\n line = process.stdout.readline()\n except IOError as ex:\n if ex.errno != errno.EINTR:\n raise\n else:\n output.append(line)\n print(line, end=\"\")\n # Read remaining output if any\n line = process.stdout.read()\n if line:\n output.append(line)\n print(line, end=\"\")\n\n if process.returncode != 0:\n log.info(\"Command %s failed with %s\",\n \" \".join(command), process.returncode)\n log.debug(\"Output:\\n%s\", \"\\n\".join(output))\n if raise_on_fail:\n raise CommandFailed(command, process.returncode, output)\n\n return process.returncode, output\n\n\ndef python_process(args, script_name=None, **kwargs):\n \"\"\"\n Run a `sys.executable` in a subprocess with `args`.\n \"\"\"\n executable = sys.executable\n if os.name == \"nt\" and os.path.basename(executable) == \"pythonw.exe\":\n # Don't run the script with a 'gui' (detached) process.\n dirname = os.path.dirname(executable)\n executable = os.path.join(dirname, \"python.exe\")\n\n if script_name is not None:\n script = script_name\n else:\n script = executable\n\n return create_process(\n [script] + args,\n executable=executable\n )\n\n\ndef create_process(cmd, executable=None, **kwargs):\n if hasattr(subprocess, \"STARTUPINFO\"):\n # do not open a new console window for command on windows\n startupinfo = subprocess.STARTUPINFO()\n startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW\n kwargs[\"startupinfo\"] = startupinfo\n\n return subprocess.Popen(\n cmd,\n executable=executable,\n cwd=None,\n stderr=subprocess.STDOUT,\n stdout=subprocess.PIPE,\n bufsize=-1,\n universal_newlines=True,\n **kwargs\n )\n",
"path": "Orange/canvas/application/addons.py"
}
] | [
{
"content": "import sys\nimport sysconfig\nimport os\nimport logging\nimport re\nimport errno\nimport shlex\nimport subprocess\nimport itertools\nimport concurrent.futures\n\nfrom collections import namedtuple, deque\nfrom xml.sax.saxutils import escape\nfrom distutils import version\nimport urllib.request\nimport xmlrpc.client\n\nimport pkg_resources\n\ntry:\n import docutils.core\nexcept ImportError:\n docutils = None\n\nfrom AnyQt.QtWidgets import (\n QWidget, QDialog, QLabel, QLineEdit, QTreeView, QHeaderView,\n QTextBrowser, QDialogButtonBox, QProgressDialog,\n QVBoxLayout, QStyle, QStyledItemDelegate, QStyleOptionViewItem,\n QApplication, QHBoxLayout, QCheckBox\n)\n\nfrom AnyQt.QtGui import (\n QStandardItemModel, QStandardItem, QPalette, QTextOption\n)\n\nfrom AnyQt.QtCore import (\n QSortFilterProxyModel, QItemSelectionModel,\n Qt, QObject, QMetaObject, QEvent, QSize, QTimer, QThread, Q_ARG,\n QSettings)\nfrom AnyQt.QtCore import pyqtSignal as Signal, pyqtSlot as Slot\n\nfrom ..gui.utils import message_warning, message_information, \\\n message_critical as message_error, \\\n OSX_NSURL_toLocalFile\nfrom ..help.manager import get_dist_meta, trim, parse_meta\n\nlog = logging.getLogger(__name__)\n\nOFFICIAL_ADDONS = [\n \"Orange3-Bioinformatics\",\n \"Orange3-Prototypes\",\n \"Orange3-Text\",\n \"Orange3-Network\",\n \"Orange3-Associate\",\n \"Orange-Spectroscopy\",\n \"Orange3-Textable\",\n \"Orange3-Educational\",\n \"Orange3-Geo\",\n \"Orange3-ImageAnalytics\",\n \"Orange3-Timeseries\",\n]\n\nInstallable = namedtuple(\n \"Installable\",\n [\"name\",\n \"version\",\n \"summary\",\n \"description\",\n \"package_url\",\n \"release_urls\"]\n)\n\nReleaseUrl = namedtuple(\n \"ReleaseUrl\",\n [\"filename\",\n \"url\",\n \"size\",\n \"python_version\",\n \"package_type\"\n ]\n)\n\nAvailable = namedtuple(\n \"Available\",\n [\"installable\"]\n)\n\nInstalled = namedtuple(\n \"Installed\",\n [\"installable\",\n \"local\"]\n)\n\n\ndef is_updatable(item):\n if isinstance(item, Available):\n return False\n elif item.installable is None:\n return False\n else:\n inst, dist = item\n try:\n v1 = version.StrictVersion(dist.version)\n v2 = version.StrictVersion(inst.version)\n except ValueError:\n pass\n else:\n return v1 < v2\n\n return (version.LooseVersion(dist.version) <\n version.LooseVersion(inst.version))\n\n\nclass TristateCheckItemDelegate(QStyledItemDelegate):\n \"\"\"\n A QStyledItemDelegate which properly toggles Qt.ItemIsTristate check\n state transitions on user interaction.\n \"\"\"\n def editorEvent(self, event, model, option, index):\n flags = model.flags(index)\n if not flags & Qt.ItemIsUserCheckable or \\\n not option.state & QStyle.State_Enabled or \\\n not flags & Qt.ItemIsEnabled:\n return False\n\n checkstate = model.data(index, Qt.CheckStateRole)\n if checkstate is None:\n return False\n\n widget = option.widget\n style = widget.style() if widget else QApplication.style()\n if event.type() in {QEvent.MouseButtonPress, QEvent.MouseButtonRelease,\n QEvent.MouseButtonDblClick}:\n pos = event.pos()\n opt = QStyleOptionViewItem(option)\n self.initStyleOption(opt, index)\n rect = style.subElementRect(\n QStyle.SE_ItemViewItemCheckIndicator, opt, widget)\n\n if event.button() != Qt.LeftButton or not rect.contains(pos):\n return False\n\n if event.type() in {QEvent.MouseButtonPress,\n QEvent.MouseButtonDblClick}:\n return True\n\n elif event.type() == QEvent.KeyPress:\n if event.key() != Qt.Key_Space and event.key() != Qt.Key_Select:\n return False\n else:\n return False\n\n if 
model.flags(index) & Qt.ItemIsTristate:\n checkstate = (checkstate + 1) % 3\n else:\n checkstate = \\\n Qt.Unchecked if checkstate == Qt.Checked else Qt.Checked\n\n return model.setData(index, checkstate, Qt.CheckStateRole)\n\n\ndef get_meta_from_archive(path):\n \"\"\"Return project name, version and summary extracted from\n sdist or wheel metadata in a ZIP or tar.gz archive, or None if metadata\n can't be found.\"\"\"\n\n def is_metadata(fname):\n return fname.endswith(('PKG-INFO', 'METADATA'))\n\n meta = None\n if path.endswith(('.zip', '.whl')):\n from zipfile import ZipFile\n with ZipFile(path) as archive:\n meta = next(filter(is_metadata, archive.namelist()), None)\n if meta:\n meta = archive.read(meta).decode('utf-8')\n elif path.endswith(('.tar.gz', '.tgz')):\n import tarfile\n with tarfile.open(path) as archive:\n meta = next(filter(is_metadata, archive.getnames()), None)\n if meta:\n meta = archive.extractfile(meta).read().decode('utf-8')\n if meta:\n meta = parse_meta(meta)\n return [meta.get(key, '')\n for key in ('Name', 'Version', 'Description', 'Summary')]\n\n\ndef cleanup(name, sep=\"-\"):\n \"\"\"Used for sanitizing addon names. The function removes Orange/Orange3\n from the name and adds spaces before upper letters of the leftover to\n separate its words.\"\"\"\n prefix, separator, postfix = name.partition(sep)\n name = postfix if separator == sep else prefix\n return \" \".join(re.findall(\"[A-Z][a-z]*\", name[0].upper() + name[1:]))\n\n\nclass SortFilterProxyTrusted(QSortFilterProxyModel):\n\n show_only_trusted = True\n\n def set_show_only_trusted(self, s):\n self.show_only_trusted = s\n self.invalidateFilter()\n\n def filterAcceptsRow(self, source_row, source_parent):\n if self.show_only_trusted:\n model = self.sourceModel()\n item = self.sourceModel().data(model.index(source_row, 1), Qt.UserRole)\n if isinstance(item, Available) and item.installable.name not in OFFICIAL_ADDONS:\n return False\n return super().filterAcceptsRow(source_row, source_parent)\n\n\nclass AddonManagerWidget(QWidget):\n\n statechanged = Signal()\n\n def __init__(self, parent=None, **kwargs):\n super(AddonManagerWidget, self).__init__(parent, **kwargs)\n self.__items = []\n self.setLayout(QVBoxLayout())\n\n self.__header = QLabel(\n wordWrap=True,\n textFormat=Qt.RichText\n )\n self.__search = QLineEdit(\n placeholderText=self.tr(\"Filter\")\n )\n self.__only_trusted = QCheckBox(\n self.tr(\"Show only trusted add-ons\"),\n )\n\n topline = QHBoxLayout()\n topline.addWidget(self.__search)\n topline.addWidget(self.__only_trusted)\n self.layout().addLayout(topline)\n\n self.__only_trusted.setChecked(True)\n self.show_only_trusted = True\n self.__only_trusted.stateChanged.connect(self._show_only_trusted_changed)\n\n self.__view = view = QTreeView(\n rootIsDecorated=False,\n editTriggers=QTreeView.NoEditTriggers,\n selectionMode=QTreeView.SingleSelection,\n alternatingRowColors=True\n )\n self.__view.setItemDelegateForColumn(0, TristateCheckItemDelegate())\n self.layout().addWidget(view)\n\n self.__model = model = QStandardItemModel()\n model.setHorizontalHeaderLabels([\"\", \"Name\", \"Version\", \"Action\"])\n model.dataChanged.connect(self.__data_changed)\n self.__proxy = proxy = SortFilterProxyTrusted(\n filterKeyColumn=1,\n filterCaseSensitivity=Qt.CaseInsensitive\n )\n proxy.setSourceModel(model)\n self.__search.textChanged.connect(proxy.setFilterFixedString)\n\n view.setModel(proxy)\n view.selectionModel().selectionChanged.connect(\n self.__update_details\n )\n header = self.__view.header()\n 
header.setSectionResizeMode(0, QHeaderView.Fixed)\n header.setSectionResizeMode(2, QHeaderView.ResizeToContents)\n\n self.__details = QTextBrowser(\n frameShape=QTextBrowser.NoFrame,\n readOnly=True,\n lineWrapMode=QTextBrowser.WidgetWidth,\n openExternalLinks=True,\n )\n\n self.__details.setWordWrapMode(QTextOption.WordWrap)\n palette = QPalette(self.palette())\n palette.setColor(QPalette.Base, Qt.transparent)\n self.__details.setPalette(palette)\n self.layout().addWidget(self.__details)\n\n def _show_only_trusted_changed(self):\n self.__proxy.set_show_only_trusted(self.__only_trusted.isChecked())\n\n def set_items(self, items):\n self.__items = items\n model = self.__model\n model.clear()\n model.setHorizontalHeaderLabels([\"\", \"Name\", \"Version\", \"Action\"])\n\n for item in items:\n if isinstance(item, Installed):\n installed = True\n ins, dist = item\n name = dist.project_name\n summary = get_dist_meta(dist).get(\"Summary\", \"\")\n version = ins.version if ins is not None else dist.version\n else:\n installed = False\n (ins,) = item\n dist = None\n name = ins.name\n summary = ins.summary\n version = ins.version\n\n updatable = is_updatable(item)\n\n item1 = QStandardItem()\n item1.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable |\n Qt.ItemIsUserCheckable |\n (Qt.ItemIsTristate if updatable else 0))\n\n if installed and updatable:\n item1.setCheckState(Qt.PartiallyChecked)\n elif installed:\n item1.setCheckState(Qt.Checked)\n else:\n item1.setCheckState(Qt.Unchecked)\n\n item2 = QStandardItem(cleanup(name))\n\n item2.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable)\n item2.setToolTip(summary)\n item2.setData(item, Qt.UserRole)\n\n if updatable:\n version = \"{} < {}\".format(dist.version, ins.version)\n\n item3 = QStandardItem(version)\n item3.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable)\n\n item4 = QStandardItem()\n item4.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable)\n\n model.appendRow([item1, item2, item3, item4])\n\n self.__view.resizeColumnToContents(0)\n self.__view.setColumnWidth(\n 1, max(150, self.__view.sizeHintForColumn(1)))\n self.__view.setColumnWidth(\n 2, max(150, self.__view.sizeHintForColumn(2)))\n\n if self.__items:\n self.__view.selectionModel().select(\n self.__view.model().index(0, 0),\n QItemSelectionModel.Select | QItemSelectionModel.Rows\n )\n\n def item_state(self):\n steps = []\n for i, item in enumerate(self.__items):\n modelitem = self.__model.item(i, 0)\n state = modelitem.checkState()\n if modelitem.flags() & Qt.ItemIsTristate and state == Qt.Checked:\n steps.append((Upgrade, item))\n elif isinstance(item, Available) and state == Qt.Checked:\n steps.append((Install, item))\n elif isinstance(item, Installed) and state == Qt.Unchecked:\n steps.append((Uninstall, item))\n\n return steps\n\n def __selected_row(self):\n indices = self.__view.selectedIndexes()\n if indices:\n proxy = self.__view.model()\n indices = [proxy.mapToSource(index) for index in indices]\n return indices[0].row()\n else:\n return -1\n\n def set_install_projects(self, names):\n \"\"\"Mark for installation the add-ons that match any of names\"\"\"\n model = self.__model\n for row in range(model.rowCount()):\n item = model.item(row, 1)\n if item.text() in names:\n model.item(row, 0).setCheckState(Qt.Checked)\n\n def __data_changed(self, topleft, bottomright):\n rows = range(topleft.row(), bottomright.row() + 1)\n for i in rows:\n modelitem = self.__model.item(i, 0)\n actionitem = self.__model.item(i, 3)\n item = self.__items[i]\n\n state = modelitem.checkState()\n flags = 
modelitem.flags()\n\n if flags & Qt.ItemIsTristate and state == Qt.Checked:\n actionitem.setText(\"Update\")\n elif isinstance(item, Available) and state == Qt.Checked:\n actionitem.setText(\"Install\")\n elif isinstance(item, Installed) and state == Qt.Unchecked:\n actionitem.setText(\"Uninstall\")\n else:\n actionitem.setText(\"\")\n self.statechanged.emit()\n\n def __update_details(self):\n index = self.__selected_row()\n if index == -1:\n self.__details.setText(\"\")\n else:\n item = self.__model.item(index, 1)\n item = item.data(Qt.UserRole)\n assert isinstance(item, (Installed, Available))\n text = self._detailed_text(item)\n self.__details.setText(text)\n\n def _detailed_text(self, item):\n if isinstance(item, Installed):\n remote, dist = item\n if remote is None:\n meta = get_dist_meta(dist)\n description = meta.get(\"Description\") or meta.get('Summary')\n else:\n description = remote.description\n else:\n description = item[0].description\n\n if docutils is not None:\n try:\n html = docutils.core.publish_string(\n trim(description),\n writer_name=\"html\",\n settings_overrides={\n \"output-encoding\": \"utf-8\",\n # \"embed-stylesheet\": False,\n # \"stylesheet\": [],\n # \"stylesheet_path\": []\n }\n ).decode(\"utf-8\")\n\n except docutils.utils.SystemMessage:\n html = \"<pre>{}<pre>\".format(escape(description))\n except Exception:\n html = \"<pre>{}<pre>\".format(escape(description))\n else:\n html = \"<pre>{}<pre>\".format(escape(description))\n return html\n\n def sizeHint(self):\n return QSize(480, 420)\n\n\ndef method_queued(method, sig, conntype=Qt.QueuedConnection):\n name = method.__name__\n obj = method.__self__\n assert isinstance(obj, QObject)\n\n def call(*args):\n args = [Q_ARG(atype, arg) for atype, arg in zip(sig, args)]\n return QMetaObject.invokeMethod(obj, name, conntype, *args)\n\n return call\n\n\nclass AddonManagerDialog(QDialog):\n _packages = None\n\n def __init__(self, parent=None, **kwargs):\n super().__init__(parent, acceptDrops=True, **kwargs)\n self.setLayout(QVBoxLayout())\n self.layout().setContentsMargins(0, 0, 0, 0)\n\n self.addonwidget = AddonManagerWidget()\n self.layout().addWidget(self.addonwidget)\n\n info_bar = QWidget()\n info_layout = QHBoxLayout()\n info_bar.setLayout(info_layout)\n self.layout().addWidget(info_bar)\n\n buttons = QDialogButtonBox(\n orientation=Qt.Horizontal,\n standardButtons=QDialogButtonBox.Ok | QDialogButtonBox.Cancel\n )\n buttons.accepted.connect(self.__accepted)\n buttons.rejected.connect(self.reject)\n\n self.layout().addWidget(buttons)\n\n self._executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)\n if AddonManagerDialog._packages is None:\n self._f_pypi_addons = self._executor.submit(list_pypi_addons)\n else:\n self._f_pypi_addons = concurrent.futures.Future()\n self._f_pypi_addons.set_result(AddonManagerDialog._packages)\n\n self._f_pypi_addons.add_done_callback(\n method_queued(self._set_packages, (object,))\n )\n\n self.__progress = None # type: Optional[QProgressDialog]\n self.__thread = None\n self.__installer = None\n\n if not self._f_pypi_addons.done():\n self.__progressDialog()\n\n def __progressDialog(self):\n if self.__progress is None:\n self.__progress = QProgressDialog(\n self,\n minimum=0, maximum=0,\n labelText=self.tr(\"Retrieving package list\"),\n sizeGripEnabled=False,\n windowTitle=\"Progress\",\n )\n self.__progress.setWindowModality(Qt.WindowModal)\n self.__progress.canceled.connect(self.reject)\n self.__progress.hide()\n\n return self.__progress\n\n @Slot(object)\n def 
_set_packages(self, f):\n if self.__progress is not None:\n self.__progress.hide()\n self.__progress.deleteLater()\n self.__progress = None\n\n try:\n packages = f.result()\n except (IOError, OSError, ValueError) as err:\n message_warning(\n \"Could not retrieve package list\",\n title=\"Error\",\n informative_text=str(err),\n parent=self\n )\n log.error(str(err), exc_info=True)\n packages = []\n except Exception:\n raise\n else:\n AddonManagerDialog._packages = packages\n\n installed = list_installed_addons()\n dists = {dist.project_name: dist for dist in installed}\n packages = {pkg.name: pkg for pkg in packages}\n\n # For every pypi available distribution not listed by\n # list_installed_addons, check if it is actually already\n # installed.\n ws = pkg_resources.WorkingSet()\n for pkg_name in set(packages.keys()).difference(set(dists.keys())):\n try:\n d = ws.find(pkg_resources.Requirement.parse(pkg_name))\n except pkg_resources.VersionConflict:\n pass\n except ValueError:\n # Requirements.parse error ?\n pass\n else:\n if d is not None:\n dists[d.project_name] = d\n\n project_names = unique(\n itertools.chain(packages.keys(), dists.keys())\n )\n\n items = []\n for name in project_names:\n if name in dists and name in packages:\n item = Installed(packages[name], dists[name])\n elif name in dists:\n item = Installed(None, dists[name])\n elif name in packages:\n item = Available(packages[name])\n else:\n assert False\n items.append(item)\n\n self.addonwidget.set_items(items)\n\n def showEvent(self, event):\n super().showEvent(event)\n\n if not self._f_pypi_addons.done() and self.__progress is not None:\n QTimer.singleShot(0, self.__progress.show)\n\n def done(self, retcode):\n super().done(retcode)\n self._f_pypi_addons.cancel()\n self._executor.shutdown(wait=False)\n if self.__thread is not None:\n self.__thread.quit()\n self.__thread.wait(1000)\n\n def closeEvent(self, event):\n super().closeEvent(event)\n if self.__progress is not None:\n self.__progress.hide()\n self._f_pypi_addons.cancel()\n self._executor.shutdown(wait=False)\n\n if self.__thread is not None:\n self.__thread.quit()\n self.__thread.wait(1000)\n\n ADDON_EXTENSIONS = ('.zip', '.whl', '.tar.gz')\n\n def dragEnterEvent(self, event):\n urls = event.mimeData().urls()\n if any((OSX_NSURL_toLocalFile(url) or url.toLocalFile())\n .endswith(self.ADDON_EXTENSIONS) for url in urls):\n event.acceptProposedAction()\n\n def dropEvent(self, event):\n \"\"\"Allow dropping add-ons (zip or wheel archives) on this dialog to\n install them\"\"\"\n packages = []\n names = []\n for url in event.mimeData().urls():\n path = OSX_NSURL_toLocalFile(url) or url.toLocalFile()\n if path.endswith(self.ADDON_EXTENSIONS):\n name, vers, summary, descr = (get_meta_from_archive(path) or\n (os.path.basename(path), '', '', ''))\n names.append(cleanup(name))\n packages.append(\n Installable(name, vers, summary,\n descr or summary, path, [path]))\n future = concurrent.futures.Future()\n future.set_result((AddonManagerDialog._packages or []) + packages)\n self._set_packages(future)\n self.addonwidget.set_install_projects(names)\n\n def __accepted(self):\n steps = self.addonwidget.item_state()\n\n if steps:\n # Move all uninstall steps to the front\n steps = sorted(\n steps, key=lambda step: 0 if step[0] == Uninstall else 1\n )\n self.__installer = Installer(steps=steps)\n self.__thread = QThread(self)\n self.__thread.start()\n\n self.__installer.moveToThread(self.__thread)\n self.__installer.finished.connect(self.__on_installer_finished)\n 
self.__installer.error.connect(self.__on_installer_error)\n\n progress = self.__progressDialog()\n self.__installer.installStatusChanged.connect(progress.setLabelText)\n progress.show()\n progress.setLabelText(\"Installing\")\n\n self.__installer.start()\n\n else:\n self.accept()\n\n def __on_installer_error(self, command, pkg, retcode, output):\n message_error(\n \"An error occurred while running a subprocess\", title=\"Error\",\n informative_text=\"{} exited with non zero status.\".format(command),\n details=\"\".join(output),\n parent=self\n )\n self.reject()\n\n def __on_installer_finished(self):\n message = \"Please restart Orange for changes to take effect.\"\n message_information(message, parent=self)\n self.accept()\n\n\nclass SafeUrllibTransport(xmlrpc.client.Transport):\n \"\"\"Urllib for HTTPS connections that automatically handles proxies.\"\"\"\n\n def single_request(self, host, handler, request_body, verbose=False):\n req = urllib.request.Request('https://%s%s' % (host, handler), request_body)\n req.add_header('User-agent', self.user_agent)\n req.add_header('Content-Type', 'text/xml')\n self.verbose = verbose\n opener = urllib.request.build_opener()\n return self.parse_response(opener.open(req))\n\n\ndef list_pypi_addons():\n \"\"\"\n List add-ons available on pypi.\n \"\"\"\n from ..config import ADDON_PYPI_SEARCH_SPEC\n\n pypi = xmlrpc.client.ServerProxy(\n \"https://pypi.python.org/pypi/\",\n transport=SafeUrllibTransport()\n )\n addons = pypi.search(ADDON_PYPI_SEARCH_SPEC)\n\n for addon in OFFICIAL_ADDONS:\n if not any(a for a in addons if a['name'] == addon):\n addons.append({\"name\": addon, \"version\": '0'})\n\n multicall = xmlrpc.client.MultiCall(pypi)\n for addon in addons:\n name = addon[\"name\"]\n multicall.package_releases(name)\n\n releases = multicall()\n multicall = xmlrpc.client.MultiCall(pypi)\n for addon, versions in zip(addons, releases):\n # Workaround for PyPI bug of search not returning the latest versions\n # https://bitbucket.org/pypa/pypi/issues/326/my-package-doesnt-appear-in-the-search\n version_ = max(versions, key=version.LooseVersion)\n\n name = addon[\"name\"]\n multicall.release_data(name, version_)\n\n results = list(multicall())\n packages = []\n\n for release in results:\n if release:\n # ignore releases without actual source/wheel/egg files,\n # or with empty metadata (deleted from PyPi?).\n packages.append(\n Installable(release[\"name\"], release[\"version\"],\n release[\"summary\"], release[\"description\"],\n release[\"package_url\"],\n release[\"package_url\"])\n )\n return packages\n\n\ndef list_installed_addons():\n from ..config import ADDON_ENTRY\n workingset = pkg_resources.WorkingSet(sys.path)\n return [ep.dist for ep in\n workingset.iter_entry_points(ADDON_ENTRY)]\n\n\ndef unique(iterable):\n seen = set()\n\n def observed(el):\n observed = el in seen\n seen.add(el)\n return observed\n\n return (el for el in iterable if not observed(el))\n\n\ndef have_install_permissions():\n \"\"\"Check if we can create a file in the site-packages folder.\n This works on a Win7 miniconda install, where os.access did not. 
\"\"\"\n try:\n fn = os.path.join(sysconfig.get_path(\"purelib\"), \"test_write_\" + str(os.getpid()))\n with open(fn, \"w\"):\n pass\n os.remove(fn)\n return True\n except OSError:\n return False\n\n\nInstall, Upgrade, Uninstall = 1, 2, 3\n\n\nclass CommandFailed(Exception):\n def __init__(self, cmd, retcode, output):\n if not isinstance(cmd, str):\n cmd = \" \".join(map(shlex.quote, cmd))\n self.cmd = cmd\n self.retcode = retcode\n self.output = output\n\n\nclass Installer(QObject):\n installStatusChanged = Signal(str)\n started = Signal()\n finished = Signal()\n error = Signal(str, object, int, list)\n\n def __init__(self, parent=None, steps=[]):\n QObject.__init__(self, parent)\n self.__interupt = False\n self.__queue = deque(steps)\n self.pip = PipInstaller()\n self.conda = CondaInstaller()\n\n def start(self):\n QTimer.singleShot(0, self._next)\n\n def interupt(self):\n self.__interupt = True\n\n def setStatusMessage(self, message):\n self.__statusMessage = message\n self.installStatusChanged.emit(message)\n\n @Slot()\n def _next(self):\n command, pkg = self.__queue.popleft()\n try:\n if command == Install:\n self.setStatusMessage(\n \"Installing {}\".format(cleanup(pkg.installable.name)))\n if self.conda:\n self.conda.install(pkg.installable, raise_on_fail=False)\n self.pip.install(pkg.installable)\n elif command == Upgrade:\n self.setStatusMessage(\n \"Upgrading {}\".format(cleanup(pkg.installable.name)))\n if self.conda:\n self.conda.upgrade(pkg.installable, raise_on_fail=False)\n self.pip.upgrade(pkg.installable)\n elif command == Uninstall:\n self.setStatusMessage(\n \"Uninstalling {}\".format(cleanup(pkg.local.project_name)))\n if self.conda:\n try:\n self.conda.uninstall(pkg.local, raise_on_fail=True)\n except CommandFailed:\n self.pip.uninstall(pkg.local)\n else:\n self.pip.uninstall(pkg.local)\n except CommandFailed as ex:\n self.error.emit(\n \"Command failed: python {}\".format(ex.cmd),\n pkg, ex.retcode, ex.output\n )\n return\n\n if self.__queue:\n QTimer.singleShot(0, self._next)\n else:\n self.finished.emit()\n\n\nclass PipInstaller:\n\n def __init__(self):\n arguments = QSettings().value('add-ons/pip-install-arguments', '', type=str)\n self.arguments = shlex.split(arguments)\n\n def install(self, pkg):\n cmd = [\"python\", \"-m\", \"pip\", \"install\"]\n cmd.extend(self.arguments)\n if pkg.package_url.startswith(\"http://\"):\n cmd.append(pkg.name)\n else:\n # Package url is path to the (local) wheel\n cmd.append(pkg.package_url)\n\n run_command(cmd)\n\n def upgrade(self, package):\n # This is done in two steps to avoid upgrading\n # all of the dependencies - faster\n self.upgrade_no_deps(package)\n self.install(package)\n\n def upgrade_no_deps(self, package):\n cmd = [\"python\", \"-m\", \"pip\", \"install\", \"--upgrade\", \"--no-deps\"]\n cmd.extend(self.arguments)\n cmd.append(package.name)\n\n run_command(cmd)\n\n def uninstall(self, dist):\n cmd = [\"python\", \"-m\", \"pip\", \"uninstall\", \"--yes\", dist.project_name]\n run_command(cmd)\n\n\nclass CondaInstaller:\n def __init__(self):\n enabled = QSettings().value('add-ons/allow-conda',\n True, type=bool)\n if enabled:\n self.conda = self._find_conda()\n else:\n self.conda = None\n\n def _find_conda(self):\n executable = sys.executable\n bin = os.path.dirname(executable)\n\n # posix\n conda = os.path.join(bin, \"conda\")\n if os.path.exists(conda):\n return conda\n\n # windows\n conda = os.path.join(bin, \"Scripts\", \"conda.bat\")\n if os.path.exists(conda):\n # \"activate\" conda environment orange is 
running in\n os.environ[\"CONDA_PREFIX\"] = bin\n os.environ[\"CONDA_DEFAULT_ENV\"] = bin\n return conda\n\n def install(self, pkg, raise_on_fail=False):\n cmd = [self.conda, \"install\", \"--yes\", \"--quiet\",\n self._normalize(pkg.name)]\n run_command(cmd, raise_on_fail=raise_on_fail)\n\n def upgrade(self, pkg, raise_on_fail=False):\n cmd = [self.conda, \"upgrade\", \"--yes\", \"--quiet\",\n self._normalize(pkg.name)]\n run_command(cmd, raise_on_fail=raise_on_fail)\n\n def uninstall(self, dist, raise_on_fail=False):\n cmd = [self.conda, \"uninstall\", \"--yes\",\n self._normalize(dist.project_name)]\n run_command(cmd, raise_on_fail=raise_on_fail)\n\n def _normalize(self, name):\n # Conda 4.3.30 is inconsistent, upgrade command is case sensitive\n # while install and uninstall are not. We assume that all conda\n # package names are lowercase which fixes the problems (for now)\n return name.lower()\n\n def __bool__(self):\n return bool(self.conda)\n\n\ndef run_command(command, raise_on_fail=True):\n \"\"\"Run command in a subprocess.\n\n Return `process` return code and output once it completes.\n \"\"\"\n log.info(\"Running %s\", \" \".join(command))\n\n if command[0] == \"python\":\n process = python_process(command[1:])\n else:\n process = create_process(command)\n\n output = []\n while process.poll() is None:\n try:\n line = process.stdout.readline()\n except IOError as ex:\n if ex.errno != errno.EINTR:\n raise\n else:\n output.append(line)\n print(line, end=\"\")\n # Read remaining output if any\n line = process.stdout.read()\n if line:\n output.append(line)\n print(line, end=\"\")\n\n if process.returncode != 0:\n log.info(\"Command %s failed with %s\",\n \" \".join(command), process.returncode)\n log.debug(\"Output:\\n%s\", \"\\n\".join(output))\n if raise_on_fail:\n raise CommandFailed(command, process.returncode, output)\n\n return process.returncode, output\n\n\ndef python_process(args, script_name=None, **kwargs):\n \"\"\"\n Run a `sys.executable` in a subprocess with `args`.\n \"\"\"\n executable = sys.executable\n if os.name == \"nt\" and os.path.basename(executable) == \"pythonw.exe\":\n # Don't run the script with a 'gui' (detached) process.\n dirname = os.path.dirname(executable)\n executable = os.path.join(dirname, \"python.exe\")\n\n if script_name is not None:\n script = script_name\n else:\n script = executable\n\n return create_process(\n [script] + args,\n executable=executable\n )\n\n\ndef create_process(cmd, executable=None, **kwargs):\n if hasattr(subprocess, \"STARTUPINFO\"):\n # do not open a new console window for command on windows\n startupinfo = subprocess.STARTUPINFO()\n startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW\n kwargs[\"startupinfo\"] = startupinfo\n\n return subprocess.Popen(\n cmd,\n executable=executable,\n cwd=None,\n stderr=subprocess.STDOUT,\n stdout=subprocess.PIPE,\n bufsize=-1,\n universal_newlines=True,\n **kwargs\n )\n",
"path": "Orange/canvas/application/addons.py"
}
] | diff --git a/Orange/canvas/application/addons.py b/Orange/canvas/application/addons.py
index 63dcb9e1ee7..41af8d096e0 100644
--- a/Orange/canvas/application/addons.py
+++ b/Orange/canvas/application/addons.py
@@ -756,7 +756,7 @@ def have_install_permissions():
pass
os.remove(fn)
return True
- except PermissionError:
+ except OSError:
return False
|
vega__altair-3387 | minimum pyarrow version enforced even if pandas is installed
The error we are facing in an environment says:
```python
RuntimeError: The pyarrow package must be version 11.0.0 or greater. Found version 6.0.1
```
It is caused by these lines:
https://github.com/altair-viz/altair/blob/main/altair/utils/core.py#L591-L592
```python
# if data is specified and type is not, infer type from data
if "type" not in attrs:
if pyarrow_available() and data is not None and isinstance(data, DataFrameLike):
...
elif isinstance(data, pd.DataFrame):
# Fallback if pyarrow is not installed or if pandas is older than 1.5
```
In that particular environment pandas is installed by default and we are not able to upgrade pyarrow.
Now the Altair specification errors out, because the code never tries the pandas approach once it has found a pyarrow version that is too old.
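For illustration, a minimal sketch (hypothetical helper name, not Altair's actual code) of a guard that treats a too-old pyarrow the same as a missing one, so that the `pd.DataFrame` fallback above can still be reached:
```python
from importlib.metadata import version as importlib_version
from packaging.version import Version

def usable_pyarrow(min_version: str = "11.0.0") -> bool:
    """Return True only when pyarrow is importable *and* new enough."""
    try:
        import pyarrow.interchange  # noqa: F401
    except ImportError:
        return False
    return Version(importlib_version("pyarrow")) >= Version(min_version)
```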
| [
{
"content": "from types import ModuleType\nfrom packaging.version import Version\nfrom importlib.metadata import version as importlib_version\n\n\ndef import_vegafusion() -> ModuleType:\n min_version = \"1.5.0\"\n try:\n version = importlib_version(\"vegafusion\")\n embed_version = importlib_version(\"vegafusion-python-embed\")\n if version != embed_version or Version(version) < Version(min_version):\n raise RuntimeError(\n \"The versions of the vegafusion and vegafusion-python-embed packages must match\\n\"\n f\"and must be version {min_version} or greater.\\n\"\n f\"Found:\\n\"\n f\" - vegafusion=={version}\\n\"\n f\" - vegafusion-python-embed=={embed_version}\\n\"\n )\n import vegafusion as vf # type: ignore\n\n return vf\n except ImportError as err:\n raise ImportError(\n 'The \"vegafusion\" data transformer and chart.transformed_data feature requires\\n'\n f\"version {min_version} or greater of the 'vegafusion-python-embed' and 'vegafusion' packages.\\n\"\n \"These can be installed with pip using:\\n\"\n f' pip install \"vegafusion[embed]>={min_version}\"\\n'\n \"Or with conda using:\\n\"\n f' conda install -c conda-forge \"vegafusion-python-embed>={min_version}\" '\n f'\"vegafusion>={min_version}\"\\n\\n'\n f\"ImportError: {err.args[0]}\"\n ) from err\n\n\ndef import_vl_convert() -> ModuleType:\n min_version = \"1.3.0\"\n try:\n version = importlib_version(\"vl-convert-python\")\n if Version(version) < Version(min_version):\n raise RuntimeError(\n f\"The vl-convert-python package must be version {min_version} or greater. \"\n f\"Found version {version}\"\n )\n import vl_convert as vlc\n\n return vlc\n except ImportError as err:\n raise ImportError(\n f\"The vl-convert Vega-Lite compiler and file export feature requires\\n\"\n f\"version {min_version} or greater of the 'vl-convert-python' package. \\n\"\n f\"This can be installed with pip using:\\n\"\n f' pip install \"vl-convert-python>={min_version}\"\\n'\n \"or conda:\\n\"\n f' conda install -c conda-forge \"vl-convert-python>={min_version}\"\\n\\n'\n f\"ImportError: {err.args[0]}\"\n ) from err\n\n\ndef vl_version_for_vl_convert() -> str:\n from ..vegalite import SCHEMA_VERSION\n\n # Compute VlConvert's vl_version string (of the form 'v5_2')\n # from SCHEMA_VERSION (of the form 'v5.2.0')\n return \"_\".join(SCHEMA_VERSION.split(\".\")[:2])\n\n\ndef import_pyarrow_interchange() -> ModuleType:\n min_version = \"11.0.0\"\n try:\n version = importlib_version(\"pyarrow\")\n\n if Version(version) < Version(min_version):\n raise RuntimeError(\n f\"The pyarrow package must be version {min_version} or greater. \"\n f\"Found version {version}\"\n )\n import pyarrow.interchange as pi\n\n return pi\n except ImportError as err:\n raise ImportError(\n f\"Usage of the DataFrame Interchange Protocol requires\\n\"\n f\"version {min_version} or greater of the pyarrow package. \\n\"\n f\"This can be installed with pip using:\\n\"\n f' pip install \"pyarrow>={min_version}\"\\n'\n \"or conda:\\n\"\n f' conda install -c conda-forge \"pyarrow>={min_version}\"\\n\\n'\n f\"ImportError: {err.args[0]}\"\n ) from err\n\n\ndef pyarrow_available() -> bool:\n try:\n import_pyarrow_interchange()\n return True\n except ImportError:\n return False\n",
"path": "altair/utils/_importers.py"
}
] | [
{
"content": "from types import ModuleType\nfrom packaging.version import Version\nfrom importlib.metadata import version as importlib_version\n\n\ndef import_vegafusion() -> ModuleType:\n min_version = \"1.5.0\"\n try:\n version = importlib_version(\"vegafusion\")\n embed_version = importlib_version(\"vegafusion-python-embed\")\n if version != embed_version or Version(version) < Version(min_version):\n raise RuntimeError(\n \"The versions of the vegafusion and vegafusion-python-embed packages must match\\n\"\n f\"and must be version {min_version} or greater.\\n\"\n f\"Found:\\n\"\n f\" - vegafusion=={version}\\n\"\n f\" - vegafusion-python-embed=={embed_version}\\n\"\n )\n import vegafusion as vf # type: ignore\n\n return vf\n except ImportError as err:\n raise ImportError(\n 'The \"vegafusion\" data transformer and chart.transformed_data feature requires\\n'\n f\"version {min_version} or greater of the 'vegafusion-python-embed' and 'vegafusion' packages.\\n\"\n \"These can be installed with pip using:\\n\"\n f' pip install \"vegafusion[embed]>={min_version}\"\\n'\n \"Or with conda using:\\n\"\n f' conda install -c conda-forge \"vegafusion-python-embed>={min_version}\" '\n f'\"vegafusion>={min_version}\"\\n\\n'\n f\"ImportError: {err.args[0]}\"\n ) from err\n\n\ndef import_vl_convert() -> ModuleType:\n min_version = \"1.3.0\"\n try:\n version = importlib_version(\"vl-convert-python\")\n if Version(version) < Version(min_version):\n raise RuntimeError(\n f\"The vl-convert-python package must be version {min_version} or greater. \"\n f\"Found version {version}\"\n )\n import vl_convert as vlc\n\n return vlc\n except ImportError as err:\n raise ImportError(\n f\"The vl-convert Vega-Lite compiler and file export feature requires\\n\"\n f\"version {min_version} or greater of the 'vl-convert-python' package. \\n\"\n f\"This can be installed with pip using:\\n\"\n f' pip install \"vl-convert-python>={min_version}\"\\n'\n \"or conda:\\n\"\n f' conda install -c conda-forge \"vl-convert-python>={min_version}\"\\n\\n'\n f\"ImportError: {err.args[0]}\"\n ) from err\n\n\ndef vl_version_for_vl_convert() -> str:\n from ..vegalite import SCHEMA_VERSION\n\n # Compute VlConvert's vl_version string (of the form 'v5_2')\n # from SCHEMA_VERSION (of the form 'v5.2.0')\n return \"_\".join(SCHEMA_VERSION.split(\".\")[:2])\n\n\ndef import_pyarrow_interchange() -> ModuleType:\n min_version = \"11.0.0\"\n try:\n version = importlib_version(\"pyarrow\")\n\n if Version(version) < Version(min_version):\n raise RuntimeError(\n f\"The pyarrow package must be version {min_version} or greater. \"\n f\"Found version {version}\"\n )\n import pyarrow.interchange as pi\n\n return pi\n except ImportError as err:\n raise ImportError(\n f\"Usage of the DataFrame Interchange Protocol requires\\n\"\n f\"version {min_version} or greater of the pyarrow package. \\n\"\n f\"This can be installed with pip using:\\n\"\n f' pip install \"pyarrow>={min_version}\"\\n'\n \"or conda:\\n\"\n f' conda install -c conda-forge \"pyarrow>={min_version}\"\\n\\n'\n f\"ImportError: {err.args[0]}\"\n ) from err\n\n\ndef pyarrow_available() -> bool:\n try:\n import_pyarrow_interchange()\n return True\n except (ImportError, RuntimeError):\n return False\n",
"path": "altair/utils/_importers.py"
}
] | diff --git a/altair/utils/_importers.py b/altair/utils/_importers.py
index 718fa9129..b7fa8a958 100644
--- a/altair/utils/_importers.py
+++ b/altair/utils/_importers.py
@@ -93,5 +93,5 @@ def pyarrow_available() -> bool:
try:
import_pyarrow_interchange()
return True
- except ImportError:
+ except (ImportError, RuntimeError):
return False
|
opendatacube__datacube-core-1331 | Code includes Type Annotations, but they're not made available for type checking (PEP561)
**Summary**
The ODC code is fairly well annotated with [Python type hints](https://typing.readthedocs.io/en/latest/), but these type hints aren't made available for use in downstream packages by following [PEP 561 – Distributing and Packaging Type Information | peps.python.org](https://peps.python.org/pep-0561/).
**Proposal**
Since ODC Core includes inline type hints with the code, we need to follow [packaging type information - PEP561](https://peps.python.org/pep-0561/#packaging-type-information) by adding an empty `datacube/py.typed` file, and ensuring it's distributed by adding it to `package_data` in `setup.py`.
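As a condensed sketch (only the relevant arguments shown), the proposal amounts to shipping the empty marker file via `package_data`:
```python
from setuptools import setup, find_packages

setup(
    name="datacube",
    packages=find_packages(),
    package_data={
        "": ["*.yaml", "*/*.yaml"],
        "datacube": ["py.typed"],  # empty PEP 561 marker file
    },
    # ... remaining arguments unchanged
)
```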
| [
{
"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'hypothesis',\n 'pycodestyle',\n 'pylint',\n 'pytest',\n 'pytest-cov',\n 'pytest-timeout',\n 'pytest-httpserver',\n 'moto',\n]\ndoc_require = [\n 'Sphinx',\n 'sphinx_rtd_theme',\n 'sphinx_autodoc_typehints', # Propagate mypy info into docs\n 'sphinx-click',\n 'recommonmark',\n 'setuptools', # version related dependencies\n 'setuptools_scm[toml]',\n]\n\nextras_require = {\n 'performance': ['ciso8601', 'bottleneck'],\n 'distributed': ['distributed', 'dask[distributed]'],\n 'doc': doc_require,\n 's3': ['boto3', 'botocore'],\n 'test': tests_require,\n 'cf': ['compliance-checker>=4.0.0'],\n}\n\nextras_require['dev'] = sorted(set(sum([extras_require[k] for k in [\n 'test',\n 'doc',\n 'performance',\n 's3',\n 'distributed',\n]], [])))\n\n# An 'all' option, following ipython naming conventions.\nextras_require['all'] = sorted(set(sum(extras_require.values(), [])))\n\nextra_plugins = dict(read=[], write=[], index=[])\n\nsetup(\n name='datacube',\n python_requires='>=3.8.0',\n\n url='https://github.com/opendatacube/datacube-core',\n author='Open Data Cube',\n maintainer='Open Data Cube',\n maintainer_email='',\n description='An analysis environment for satellite and other earth observation data',\n long_description=open('README.rst').read(),\n long_description_content_type='text/x-rst',\n license='Apache License 2.0',\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Scientific/Engineering :: GIS\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n ],\n\n packages=find_packages(\n exclude=('tests', 'tests.*',\n 'integration_tests', 'integration_tests.*')\n ),\n package_data={\n '': ['*.yaml', '*/*.yaml'],\n },\n scripts=[],\n install_requires=[\n 'affine',\n 'pyproj>=2.5',\n 'shapely>=1.6.4',\n 'cachetools',\n 'click>=5.0',\n 'cloudpickle>=0.4',\n 'dask[array]',\n 'distributed',\n 'jsonschema',\n 'netcdf4',\n 'numpy',\n 'psycopg2',\n 'lark',\n 'pandas',\n 'python-dateutil',\n 'pyyaml',\n 'rasterio>=1.3.2', # Warping broken in 1.3.0 and 1.3.1\n 'sqlalchemy',\n 'GeoAlchemy2',\n 'toolz',\n 'xarray>=0.9,!=2022.6.0', # >0.9 fixes most problems with `crs` attributes being lost\n ],\n extras_require=extras_require,\n tests_require=tests_require,\n\n entry_points={\n 'console_scripts': [\n 'datacube = datacube.scripts.cli_app:cli',\n 'datacube-search = datacube.scripts.search_tool:cli',\n 'datacube-worker = datacube.execution.worker:main',\n ],\n 'datacube.plugins.io.read': [\n 'netcdf = datacube.drivers.netcdf.driver:reader_driver_init',\n *extra_plugins['read'],\n ],\n 'datacube.plugins.io.write': [\n 'netcdf = datacube.drivers.netcdf.driver:writer_driver_init',\n *extra_plugins['write'],\n ],\n 'datacube.plugins.index': [\n 'default = datacube.index.postgres.index:index_driver_init',\n 'null = datacube.index.null.index:index_driver_init',\n 'memory = 
datacube.index.memory.index:index_driver_init',\n 'postgis = datacube.index.postgis.index:index_driver_init',\n *extra_plugins['index'],\n ],\n },\n)\n",
"path": "setup.py"
}
] | [
{
"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'hypothesis',\n 'pycodestyle',\n 'pylint',\n 'pytest',\n 'pytest-cov',\n 'pytest-timeout',\n 'pytest-httpserver',\n 'moto',\n]\ndoc_require = [\n 'Sphinx',\n 'sphinx_rtd_theme',\n 'sphinx_autodoc_typehints', # Propagate mypy info into docs\n 'sphinx-click',\n 'recommonmark',\n 'setuptools', # version related dependencies\n 'setuptools_scm[toml]',\n]\n\nextras_require = {\n 'performance': ['ciso8601', 'bottleneck'],\n 'distributed': ['distributed', 'dask[distributed]'],\n 'doc': doc_require,\n 's3': ['boto3', 'botocore'],\n 'test': tests_require,\n 'cf': ['compliance-checker>=4.0.0'],\n}\n\nextras_require['dev'] = sorted(set(sum([extras_require[k] for k in [\n 'test',\n 'doc',\n 'performance',\n 's3',\n 'distributed',\n]], [])))\n\n# An 'all' option, following ipython naming conventions.\nextras_require['all'] = sorted(set(sum(extras_require.values(), [])))\n\nextra_plugins = dict(read=[], write=[], index=[])\n\nsetup(\n name='datacube',\n python_requires='>=3.8.0',\n\n url='https://github.com/opendatacube/datacube-core',\n author='Open Data Cube',\n maintainer='Open Data Cube',\n maintainer_email='',\n description='An analysis environment for satellite and other earth observation data',\n long_description=open('README.rst').read(),\n long_description_content_type='text/x-rst',\n license='Apache License 2.0',\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Scientific/Engineering :: GIS\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n ],\n\n packages=find_packages(\n exclude=('tests', 'tests.*',\n 'integration_tests', 'integration_tests.*')\n ),\n package_data={\n '': ['*.yaml', '*/*.yaml'],\n 'datacube': ['py.typed'],\n },\n scripts=[],\n install_requires=[\n 'affine',\n 'pyproj>=2.5',\n 'shapely>=1.6.4',\n 'cachetools',\n 'click>=5.0',\n 'cloudpickle>=0.4',\n 'dask[array]',\n 'distributed',\n 'jsonschema',\n 'netcdf4',\n 'numpy',\n 'psycopg2',\n 'lark',\n 'pandas',\n 'python-dateutil',\n 'pyyaml',\n 'rasterio>=1.3.2', # Warping broken in 1.3.0 and 1.3.1\n 'sqlalchemy',\n 'GeoAlchemy2',\n 'toolz',\n 'xarray>=0.9,!=2022.6.0', # >0.9 fixes most problems with `crs` attributes being lost\n ],\n extras_require=extras_require,\n tests_require=tests_require,\n\n entry_points={\n 'console_scripts': [\n 'datacube = datacube.scripts.cli_app:cli',\n 'datacube-search = datacube.scripts.search_tool:cli',\n 'datacube-worker = datacube.execution.worker:main',\n ],\n 'datacube.plugins.io.read': [\n 'netcdf = datacube.drivers.netcdf.driver:reader_driver_init',\n *extra_plugins['read'],\n ],\n 'datacube.plugins.io.write': [\n 'netcdf = datacube.drivers.netcdf.driver:writer_driver_init',\n *extra_plugins['write'],\n ],\n 'datacube.plugins.index': [\n 'default = datacube.index.postgres.index:index_driver_init',\n 'null = datacube.index.null.index:index_driver_init',\n 'memory = 
datacube.index.memory.index:index_driver_init',\n 'postgis = datacube.index.postgis.index:index_driver_init',\n *extra_plugins['index'],\n ],\n },\n)\n",
"path": "setup.py"
}
] | diff --git a/datacube/py.typed b/datacube/py.typed
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/docs/about/whats_new.rst b/docs/about/whats_new.rst
index ff1cab2c0d..1be5b7418a 100644
--- a/docs/about/whats_new.rst
+++ b/docs/about/whats_new.rst
@@ -12,6 +12,9 @@ v1.8.next
- Extend `patch_url` argument to `dc.load()` and `dc.load_data()` to Dask loading. (:pull:`1323`)
- Add `sphinx.ext.autoselectionlabel` extension to readthedoc conf to support `:ref:` command (:pull:`1325`)
- Add `pyspellcheck` for `.rst` documentation files and fix typos (:pull:`1327`)
+- Follow PEP561_ to make type hints available to other packages (:issue:`1330`)
+
+.. _PEP561: https://peps.python.org/pep-0561/
v1.8.8 (5 October 2022)
=======================
diff --git a/setup.py b/setup.py
index d315bdf69b..2721f9506a 100755
--- a/setup.py
+++ b/setup.py
@@ -82,6 +82,7 @@
),
package_data={
'': ['*.yaml', '*/*.yaml'],
+ 'datacube': ['py.typed'],
},
scripts=[],
install_requires=[
|
scikit-image__scikit-image-938 | Draw circle does not obey shape argument - v0.93
In the previous version this worked fine, but I just installed the new version 0.93 and draw.circle is not working properly. When I apply the circle to a 1024x1024 image with the following arguments:
rr,cc=circle(-5.2796287128712879E+02, 1.5003712871287132E+02, 9.8910961199417170E+02, (1024,1024))
I get negative values in the rr and cc arrays, which obviously leads to errors when applied to:
img[rr,cc]=0
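For reference, the clipping that the `shape` argument is expected to perform is what the `_coords_inside_image` helper in `skimage/draw/draw.py` does; a minimal standalone sketch of that expected behaviour:
```python
import numpy as np

def clip_to_image(rr, cc, shape):
    """Keep only the coordinates that fall inside an image of the given shape."""
    mask = (rr >= 0) & (rr < shape[0]) & (cc >= 0) & (cc < shape[1])
    return rr[mask], cc[mask]

# With shape=(1024, 1024) passed to circle(), no index in rr or cc
# should fall outside [0, 1024) after this kind of clipping.
```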
| [
{
"content": "# coding: utf-8\nimport numpy as np\n\n\ndef _coords_inside_image(rr, cc, shape):\n mask = (rr >= 0) & (rr < shape[0]) & (cc >= 0) & (cc < shape[1])\n return rr[mask], cc[mask]\n\n\ndef ellipse(cy, cx, yradius, xradius, shape=None):\n \"\"\"Generate coordinates of pixels within ellipse.\n\n Parameters\n ----------\n cy, cx : double\n Centre coordinate of ellipse.\n yradius, xradius : double\n Minor and major semi-axes. ``(x/xradius)**2 + (y/yradius)**2 = 1``.\n shape : tuple, optional\n Image shape which is used to determine maximum extents of output pixel\n coordinates. This is useful for ellipses which exceed the image size.\n By default the full extents of the ellipse are used.\n\n Returns\n -------\n rr, cc : ndarray of int\n Pixel coordinates of ellipse.\n May be used to directly index into an array, e.g.\n ``img[rr, cc] = 1``.\n\n Examples\n --------\n >>> from skimage.draw import ellipse\n >>> img = np.zeros((10, 10), dtype=np.uint8)\n >>> rr, cc = ellipse(5, 5, 3, 4)\n >>> img[rr, cc] = 1\n >>> img\n array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 1, 1, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 1, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 1, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 1, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)\n\n \"\"\"\n\n dr = 1 / float(yradius)\n dc = 1 / float(xradius)\n\n r, c = np.ogrid[-1:1:dr, -1:1:dc]\n rr, cc = np.nonzero(r ** 2 + c ** 2 < 1)\n\n rr.flags.writeable = True\n cc.flags.writeable = True\n rr += cy - yradius\n cc += cx - xradius\n\n if shape is not None:\n _coords_inside_image(rr, cc, shape)\n\n return rr, cc\n\n\ndef circle(cy, cx, radius, shape=None):\n \"\"\"Generate coordinates of pixels within circle.\n\n Parameters\n ----------\n cy, cx : double\n Centre coordinate of circle.\n radius: double\n Radius of circle.\n shape : tuple, optional\n Image shape which is used to determine maximum extents of output pixel\n coordinates. 
This is useful for circles which exceed the image size.\n By default the full extents of the circle are used.\n\n Returns\n -------\n rr, cc : ndarray of int\n Pixel coordinates of circle.\n May be used to directly index into an array, e.g.\n ``img[rr, cc] = 1``.\n Notes\n -----\n This function is a wrapper for skimage.draw.ellipse()\n\n Examples\n --------\n >>> from skimage.draw import circle\n >>> img = np.zeros((10, 10), dtype=np.uint8)\n >>> rr, cc = circle(4, 4, 5)\n >>> img[rr, cc] = 1\n >>> img\n array([[0, 0, 1, 1, 1, 1, 1, 0, 0, 0],\n [0, 1, 1, 1, 1, 1, 1, 1, 0, 0],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n [0, 1, 1, 1, 1, 1, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 1, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)\n\n \"\"\"\n\n return ellipse(cy, cx, radius, radius, shape)\n\n\ndef set_color(img, coords, color):\n \"\"\"Set pixel color in the image at the given coordinates.\n\n Coordinates that exceed the shape of the image will be ignored.\n\n Parameters\n ----------\n img : (M, N, D) ndarray\n Image\n coords : ((P,) ndarray, (P,) ndarray)\n Coordinates of pixels to be colored.\n color : (D,) ndarray\n Color to be assigned to coordinates in the image.\n\n Returns\n -------\n img : (M, N, D) ndarray\n The updated image.\n\n Examples\n --------\n >>> from skimage.draw import line, set_color\n >>> img = np.zeros((10, 10), dtype=np.uint8)\n >>> rr, cc = line(1, 1, 20, 20)\n >>> set_color(img, (rr, cc), 1)\n >>> img\n array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 1]], dtype=uint8)\n\n \"\"\"\n\n rr, cc = coords\n rr, cc = _coords_inside_image(rr, cc, img.shape)\n img[rr, cc] = color\n",
"path": "skimage/draw/draw.py"
}
] | [
{
"content": "# coding: utf-8\nimport numpy as np\n\n\ndef _coords_inside_image(rr, cc, shape):\n mask = (rr >= 0) & (rr < shape[0]) & (cc >= 0) & (cc < shape[1])\n return rr[mask], cc[mask]\n\n\ndef ellipse(cy, cx, yradius, xradius, shape=None):\n \"\"\"Generate coordinates of pixels within ellipse.\n\n Parameters\n ----------\n cy, cx : double\n Centre coordinate of ellipse.\n yradius, xradius : double\n Minor and major semi-axes. ``(x/xradius)**2 + (y/yradius)**2 = 1``.\n shape : tuple, optional\n Image shape which is used to determine maximum extents of output pixel\n coordinates. This is useful for ellipses which exceed the image size.\n By default the full extents of the ellipse are used.\n\n Returns\n -------\n rr, cc : ndarray of int\n Pixel coordinates of ellipse.\n May be used to directly index into an array, e.g.\n ``img[rr, cc] = 1``.\n\n Examples\n --------\n >>> from skimage.draw import ellipse\n >>> img = np.zeros((10, 10), dtype=np.uint8)\n >>> rr, cc = ellipse(5, 5, 3, 4)\n >>> img[rr, cc] = 1\n >>> img\n array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 1, 1, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 1, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 1, 1, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 1, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)\n\n \"\"\"\n\n dr = 1 / float(yradius)\n dc = 1 / float(xradius)\n\n r, c = np.ogrid[-1:1:dr, -1:1:dc]\n rr, cc = np.nonzero(r ** 2 + c ** 2 < 1)\n\n rr.flags.writeable = True\n cc.flags.writeable = True\n rr += cy - yradius\n cc += cx - xradius\n\n if shape is not None:\n return _coords_inside_image(rr, cc, shape)\n\n return rr, cc\n\n\ndef circle(cy, cx, radius, shape=None):\n \"\"\"Generate coordinates of pixels within circle.\n\n Parameters\n ----------\n cy, cx : double\n Centre coordinate of circle.\n radius: double\n Radius of circle.\n shape : tuple, optional\n Image shape which is used to determine maximum extents of output pixel\n coordinates. 
This is useful for circles which exceed the image size.\n By default the full extents of the circle are used.\n\n Returns\n -------\n rr, cc : ndarray of int\n Pixel coordinates of circle.\n May be used to directly index into an array, e.g.\n ``img[rr, cc] = 1``.\n Notes\n -----\n This function is a wrapper for skimage.draw.ellipse()\n\n Examples\n --------\n >>> from skimage.draw import circle\n >>> img = np.zeros((10, 10), dtype=np.uint8)\n >>> rr, cc = circle(4, 4, 5)\n >>> img[rr, cc] = 1\n >>> img\n array([[0, 0, 1, 1, 1, 1, 1, 0, 0, 0],\n [0, 1, 1, 1, 1, 1, 1, 1, 0, 0],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n [0, 1, 1, 1, 1, 1, 1, 1, 0, 0],\n [0, 0, 1, 1, 1, 1, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)\n\n \"\"\"\n\n return ellipse(cy, cx, radius, radius, shape)\n\n\ndef set_color(img, coords, color):\n \"\"\"Set pixel color in the image at the given coordinates.\n\n Coordinates that exceed the shape of the image will be ignored.\n\n Parameters\n ----------\n img : (M, N, D) ndarray\n Image\n coords : ((P,) ndarray, (P,) ndarray)\n Coordinates of pixels to be colored.\n color : (D,) ndarray\n Color to be assigned to coordinates in the image.\n\n Returns\n -------\n img : (M, N, D) ndarray\n The updated image.\n\n Examples\n --------\n >>> from skimage.draw import line, set_color\n >>> img = np.zeros((10, 10), dtype=np.uint8)\n >>> rr, cc = line(1, 1, 20, 20)\n >>> set_color(img, (rr, cc), 1)\n >>> img\n array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 1]], dtype=uint8)\n\n \"\"\"\n\n rr, cc = coords\n rr, cc = _coords_inside_image(rr, cc, img.shape)\n img[rr, cc] = color\n",
"path": "skimage/draw/draw.py"
}
] | diff --git a/skimage/draw/draw.py b/skimage/draw/draw.py
index cbf3ced2426..e61df40c329 100644
--- a/skimage/draw/draw.py
+++ b/skimage/draw/draw.py
@@ -60,7 +60,7 @@ def ellipse(cy, cx, yradius, xradius, shape=None):
cc += cx - xradius
if shape is not None:
- _coords_inside_image(rr, cc, shape)
+ return _coords_inside_image(rr, cc, shape)
return rr, cc
diff --git a/skimage/draw/tests/test_draw.py b/skimage/draw/tests/test_draw.py
index 2d739f0ffb8..3cb8c2a6199 100644
--- a/skimage/draw/tests/test_draw.py
+++ b/skimage/draw/tests/test_draw.py
@@ -325,6 +325,33 @@ def test_ellipse():
assert_array_equal(img, img_)
+def test_ellipse_with_shape():
+ img = np.zeros((15, 15), 'uint8')
+
+ rr, cc = ellipse(7, 7, 3, 10, shape=img.shape)
+ img[rr, cc] = 1
+
+ img_ = np.array(
+ [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
+ )
+
+ assert_array_equal(img, img_)
+
+
def test_ellipse_perimeter_dot_zeroangle():
# dot, angle == 0
img = np.zeros((30, 15), 'uint8')
|
voxel51__fiftyone-1652 | [BUG] `ImportError: cannot import name 'soft_unicode' from 'markupsafe'`
When `Jinja2<3` is installed, a user will encounter the following error:
```py
>>> import fiftyone as fo
Uncaught exception
Traceback (most recent call last):
File "/home/user/.local/lib/python3.8/site-packages/fiftyone/service/main.py", line 43, in <module>
from fiftyone.core.service import Service
File "/home/user/.local/lib/python3.8/site-packages/fiftyone/__init__.py", line 25, in <module>
from fiftyone.__public__ import *
File "/home/user/.local/lib/python3.8/site-packages/fiftyone/__public__.py", line 172, in <module>
from .core.session import (
File "/home/user/.local/lib/python3.8/site-packages/fiftyone/core/session.py", line 16, in <module>
from jinja2 import Template
File "/home/user/.local/lib/python3.8/site-packages/jinja2/__init__.py", line 12, in <module>
from .environment import Environment
File "/home/user/.local/lib/python3.8/site-packages/jinja2/environment.py", line 25, in <module>
from .defaults import BLOCK_END_STRING
File "/home/user/.local/lib/python3.8/site-packages/jinja2/defaults.py", line 3, in <module>
from .filters import FILTERS as DEFAULT_FILTERS # noqa: F401
File "/home/user/.local/lib/python3.8/site-packages/jinja2/filters.py", line 13, in <module>
from markupsafe import soft_unicode
ImportError: cannot import name 'soft_unicode' from 'markupsafe' (/home/user/.local/lib/python3.8/site-packages/markupsafe/__init__.py)
```
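The underlying incompatibility is that markupsafe 2.1 removed the long-deprecated `soft_unicode` alias, which Jinja2 2.x still imports (Jinja2 3.x does not). A sketch of the corresponding requirement pin (illustrative, matching the `Jinja2<3` condition in the title):
```python
# setup.py install_requires — require a Jinja2 that no longer imports
# soft_unicode from markupsafe:
INSTALL_REQUIRES = [
    "Jinja2>=3",
    # ... other dependencies unchanged
]
```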
| [
{
"content": "#!/usr/bin/env python\n\"\"\"\nInstalls FiftyOne.\n\n| Copyright 2017-2022, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\nimport os\nfrom pkg_resources import DistributionNotFound, get_distribution\nimport re\nfrom setuptools import setup, find_packages\n\n\nVERSION = \"0.15.0\" # updated by https://github.com/voxel51/fiftyone/pull/1615\n\n\ndef get_version():\n if \"RELEASE_VERSION\" in os.environ:\n version = os.environ[\"RELEASE_VERSION\"]\n if not version.startswith(VERSION):\n raise ValueError(\n \"Release version does not match version: %s and %s\"\n % (version, VERSION)\n )\n return version\n\n return VERSION\n\n\nINSTALL_REQUIRES = [\n # third-party packages\n \"aiofiles\",\n \"argcomplete\",\n \"boto3\",\n \"Deprecated\",\n \"eventlet\",\n \"future\",\n \"Jinja2\",\n \"kaleido\",\n \"matplotlib\",\n \"mongoengine==0.20.0\",\n \"motor>=2.3,<3\",\n \"ndjson\",\n \"numpy\",\n \"packaging\",\n \"pandas\",\n \"Pillow>=6.2\",\n \"plotly>=4.14,<5\",\n \"pprintpp\",\n \"psutil\",\n \"pymongo>=3.11,<4\",\n \"pytz\",\n \"PyYAML\",\n \"retrying\",\n \"scikit-learn\",\n \"scikit-image\",\n \"setuptools\",\n \"tabulate\",\n \"tornado>=5.1.1,<7\",\n \"xmltodict\",\n \"universal-analytics-python3>=1.0.1,<2\",\n # internal packages\n \"fiftyone-brain>=0.8,<0.9\",\n \"fiftyone-db>=0.3,<0.4\",\n \"voxel51-eta>=0.6.3,<0.7\",\n]\n\n\nCHOOSE_INSTALL_REQUIRES = [\n (\n (\n \"opencv-python\",\n \"opencv-contrib-python\",\n \"opencv-contrib-python-headless\",\n ),\n \"opencv-python-headless\",\n )\n]\n\n\ndef choose_requirement(mains, secondary):\n chosen = secondary\n for main in mains:\n try:\n name = re.split(r\"[!<>=]\", main)[0]\n get_distribution(name)\n chosen = main\n break\n except DistributionNotFound:\n pass\n\n return str(chosen)\n\n\ndef get_install_requirements(install_requires, choose_install_requires):\n for mains, secondary in choose_install_requires:\n install_requires.append(choose_requirement(mains, secondary))\n\n return install_requires\n\n\nEXTRAS_REQUIREMENTS = {\"desktop\": [\"fiftyone-desktop>=0.19.2,<0.20\"]}\n\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\n\nsetup(\n name=\"fiftyone\",\n version=get_version(),\n description=(\n \"FiftyOne: the open-source tool for building high-quality datasets \"\n \"and computer vision models\"\n ),\n author=\"Voxel51, Inc.\",\n author_email=\"[email protected]\",\n url=\"https://github.com/voxel51/fiftyone\",\n extras_require=EXTRAS_REQUIREMENTS,\n license=\"Apache\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n packages=find_packages(\n exclude=[\"app\", \"eta\", \"package\", \"requirements\", \"tests\", \"tools\"]\n )\n + [\"fiftyone.recipes\", \"fiftyone.tutorials\"],\n package_dir={\n \"fiftyone.recipes\": \"docs/source/recipes\",\n \"fiftyone.tutorials\": \"docs/source/tutorials\",\n },\n install_requires=get_install_requirements(\n INSTALL_REQUIRES, CHOOSE_INSTALL_REQUIRES\n ),\n include_package_data=True,\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Scientific/Engineering :: Image Processing\",\n \"Topic :: Scientific/Engineering :: Image Recognition\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n \"Topic :: Scientific/Engineering :: Visualization\",\n \"Operating System :: MacOS :: 
MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n ],\n entry_points={\"console_scripts\": [\"fiftyone=fiftyone.core.cli:main\"]},\n python_requires=\">=3.6\",\n)\n",
"path": "setup.py"
}
] | [
{
"content": "#!/usr/bin/env python\n\"\"\"\nInstalls FiftyOne.\n\n| Copyright 2017-2022, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\nimport os\nfrom pkg_resources import DistributionNotFound, get_distribution\nimport re\nfrom setuptools import setup, find_packages\n\n\nVERSION = \"0.15.0\" # updated by https://github.com/voxel51/fiftyone/pull/1615\n\n\ndef get_version():\n if \"RELEASE_VERSION\" in os.environ:\n version = os.environ[\"RELEASE_VERSION\"]\n if not version.startswith(VERSION):\n raise ValueError(\n \"Release version does not match version: %s and %s\"\n % (version, VERSION)\n )\n return version\n\n return VERSION\n\n\nINSTALL_REQUIRES = [\n # third-party packages\n \"aiofiles\",\n \"argcomplete\",\n \"boto3\",\n \"Deprecated\",\n \"eventlet\",\n \"future\",\n \"Jinja2>=3\",\n \"kaleido\",\n \"matplotlib\",\n \"mongoengine==0.20.0\",\n \"motor>=2.3,<3\",\n \"ndjson\",\n \"numpy\",\n \"packaging\",\n \"pandas\",\n \"Pillow>=6.2\",\n \"plotly>=4.14,<5\",\n \"pprintpp\",\n \"psutil\",\n \"pymongo>=3.11,<4\",\n \"pytz\",\n \"PyYAML\",\n \"retrying\",\n \"scikit-learn\",\n \"scikit-image\",\n \"setuptools\",\n \"tabulate\",\n \"tornado>=5.1.1,<7\",\n \"xmltodict\",\n \"universal-analytics-python3>=1.0.1,<2\",\n # internal packages\n \"fiftyone-brain>=0.8,<0.9\",\n \"fiftyone-db>=0.3,<0.4\",\n \"voxel51-eta>=0.6.3,<0.7\",\n]\n\n\nCHOOSE_INSTALL_REQUIRES = [\n (\n (\n \"opencv-python\",\n \"opencv-contrib-python\",\n \"opencv-contrib-python-headless\",\n ),\n \"opencv-python-headless\",\n )\n]\n\n\ndef choose_requirement(mains, secondary):\n chosen = secondary\n for main in mains:\n try:\n name = re.split(r\"[!<>=]\", main)[0]\n get_distribution(name)\n chosen = main\n break\n except DistributionNotFound:\n pass\n\n return str(chosen)\n\n\ndef get_install_requirements(install_requires, choose_install_requires):\n for mains, secondary in choose_install_requires:\n install_requires.append(choose_requirement(mains, secondary))\n\n return install_requires\n\n\nEXTRAS_REQUIREMENTS = {\"desktop\": [\"fiftyone-desktop>=0.19.2,<0.20\"]}\n\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\n\nsetup(\n name=\"fiftyone\",\n version=get_version(),\n description=(\n \"FiftyOne: the open-source tool for building high-quality datasets \"\n \"and computer vision models\"\n ),\n author=\"Voxel51, Inc.\",\n author_email=\"[email protected]\",\n url=\"https://github.com/voxel51/fiftyone\",\n extras_require=EXTRAS_REQUIREMENTS,\n license=\"Apache\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n packages=find_packages(\n exclude=[\"app\", \"eta\", \"package\", \"requirements\", \"tests\", \"tools\"]\n )\n + [\"fiftyone.recipes\", \"fiftyone.tutorials\"],\n package_dir={\n \"fiftyone.recipes\": \"docs/source/recipes\",\n \"fiftyone.tutorials\": \"docs/source/tutorials\",\n },\n install_requires=get_install_requirements(\n INSTALL_REQUIRES, CHOOSE_INSTALL_REQUIRES\n ),\n include_package_data=True,\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Scientific/Engineering :: Image Processing\",\n \"Topic :: Scientific/Engineering :: Image Recognition\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n \"Topic :: Scientific/Engineering :: Visualization\",\n \"Operating System :: MacOS 
:: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n ],\n entry_points={\"console_scripts\": [\"fiftyone=fiftyone.core.cli:main\"]},\n python_requires=\">=3.6\",\n)\n",
"path": "setup.py"
}
] | diff --git a/requirements/common.txt b/requirements/common.txt
index b21b0cc4e47..596d5b1b5ec 100644
--- a/requirements/common.txt
+++ b/requirements/common.txt
@@ -5,7 +5,7 @@ Deprecated==1.2.11
eventlet==0.31.0
Flask==1.1.2
httpx==0.7.7
-Jinja2==2.11.3
+Jinja2==3.0.3
kaleido==0.2.1
matplotlib==3.2.1
mongoengine==0.20.0
diff --git a/requirements/docs.txt b/requirements/docs.txt
index 99beeb1acf0..9d54669015c 100644
--- a/requirements/docs.txt
+++ b/requirements/docs.txt
@@ -2,7 +2,7 @@ autodocsumm==0.2.7
docutils==0.16
ipykernel==5.3.0
jupyter-client==6.1.3
-nbsphinx==0.8.5
+nbsphinx==0.8.8
sphinx-tabs==1.2.1
Sphinx==2.4.4
sphinxcontrib-napoleon==0.7
diff --git a/setup.py b/setup.py
index e50d01ddd21..4a896778029 100644
--- a/setup.py
+++ b/setup.py
@@ -36,7 +36,7 @@ def get_version():
"Deprecated",
"eventlet",
"future",
- "Jinja2",
+ "Jinja2>=3",
"kaleido",
"matplotlib",
"mongoengine==0.20.0",
|
aio-libs__aiohttp-1888 | In unit tests, Application comparisons can report false positive
Comparison between Application instances is performed at the MutableMapping level. MutableMapping says that, like dict objects, two instances are equal if they have the same keys with matching values. This means that `web.Application() == web.Application()` will return True.
See:
```python
>>> a = aiohttp.web.Application()
>>> b = aiohttp.web.Application()
>>> a == b
True
>>> a["foo"] = "bar"
>>> a == b
False
>>> b["foo"] = "bar"
>>> a == b
True
```
I think the following unit tests are assuming a different behaviour:
* test_subapp_middlewares
* test_subapp_on_response_prepare
* test_subapp_on_startup
* test_subapp_on_shutdown
* test_subapp_on_cleanup
A change has been submitted for `test_subapp_middlewares` in #1854 to fix that. While the solution may or may not be accepted as is, all tests should be fixed.
Also, maybe an additional `test_application_equal` should be implemented, to ensure the expected behaviour. Unless the `web.Application.__eq__` special method gets implemented to change the current behaviour, it should look something like this:
```python
def test_application_equal():
app1 = web.Application()
app2 = web.Application()
assert app1 == app2
app1["foo"] = "bar"
assert app1 != app2
```
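To make the root cause concrete, here is a minimal stand-in (not aiohttp code) showing that any `MutableMapping` subclass inherits content-based equality from `Mapping`:
```python
from collections.abc import MutableMapping

class StateBag(MutableMapping):
    """Toy stand-in for Application's dict-like state."""
    def __init__(self):
        self._state = {}
    def __getitem__(self, key):
        return self._state[key]
    def __setitem__(self, key, value):
        self._state[key] = value
    def __delitem__(self, key):
        del self._state[key]
    def __iter__(self):
        return iter(self._state)
    def __len__(self):
        return len(self._state)

a, b = StateBag(), StateBag()
assert a == b        # equal by contents (both empty)...
assert a is not b    # ...even though they are distinct instances
```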
| [
{
"content": "import asyncio\nimport os\nimport socket\nimport stat\nimport sys\nimport warnings\nfrom argparse import ArgumentParser\nfrom collections import Iterable, MutableMapping\nfrom importlib import import_module\n\nfrom yarl import URL\n\nfrom . import (hdrs, web_exceptions, web_fileresponse, web_middlewares,\n web_protocol, web_request, web_response, web_server,\n web_urldispatcher, web_ws)\nfrom .abc import AbstractMatchInfo, AbstractRouter\nfrom .helpers import FrozenList\nfrom .http import HttpVersion # noqa\nfrom .log import access_logger, web_logger\nfrom .signals import FuncSignal, PostSignal, PreSignal, Signal\nfrom .web_exceptions import * # noqa\nfrom .web_fileresponse import * # noqa\nfrom .web_middlewares import * # noqa\nfrom .web_protocol import * # noqa\nfrom .web_request import * # noqa\nfrom .web_response import * # noqa\nfrom .web_server import Server\nfrom .web_urldispatcher import * # noqa\nfrom .web_urldispatcher import PrefixedSubAppResource\nfrom .web_ws import * # noqa\n\n__all__ = (web_protocol.__all__ +\n web_fileresponse.__all__ +\n web_request.__all__ +\n web_response.__all__ +\n web_exceptions.__all__ +\n web_urldispatcher.__all__ +\n web_ws.__all__ +\n web_server.__all__ +\n web_middlewares.__all__ +\n ('Application', 'HttpVersion', 'MsgType'))\n\n\nclass Application(MutableMapping):\n def __init__(self, *,\n logger=web_logger,\n router=None,\n middlewares=(),\n handler_args=None,\n client_max_size=1024**2,\n secure_proxy_ssl_header=None,\n loop=None,\n debug=...):\n if router is None:\n router = web_urldispatcher.UrlDispatcher()\n assert isinstance(router, AbstractRouter), router\n\n if loop is not None:\n warnings.warn(\"loop argument is deprecated\", ResourceWarning)\n\n self._debug = debug\n self._router = router\n self._secure_proxy_ssl_header = secure_proxy_ssl_header\n self._loop = loop\n self._handler_args = handler_args\n self.logger = logger\n\n self._middlewares = FrozenList(middlewares)\n self._state = {}\n self._frozen = False\n self._subapps = []\n\n self._on_pre_signal = PreSignal()\n self._on_post_signal = PostSignal()\n self._on_loop_available = FuncSignal(self)\n self._on_response_prepare = Signal(self)\n self._on_startup = Signal(self)\n self._on_shutdown = Signal(self)\n self._on_cleanup = Signal(self)\n self._client_max_size = client_max_size\n\n # MutableMapping API\n\n def __getitem__(self, key):\n return self._state[key]\n\n def _check_frozen(self):\n if self._frozen:\n warnings.warn(\"Changing state of started or joined \"\n \"application is deprecated\",\n DeprecationWarning,\n stacklevel=3)\n\n def __setitem__(self, key, value):\n self._check_frozen()\n self._state[key] = value\n\n def __delitem__(self, key):\n self._check_frozen()\n del self._state[key]\n\n def __len__(self):\n return len(self._state)\n\n def __iter__(self):\n return iter(self._state)\n\n ########\n @property\n def loop(self):\n return self._loop\n\n def _set_loop(self, loop):\n if loop is None:\n loop = asyncio.get_event_loop()\n if self._loop is not None and self._loop is not loop:\n raise RuntimeError(\n \"web.Application instance initialized with different loop\")\n\n self._loop = loop\n self._on_loop_available.send(self)\n\n # set loop debug\n if self._debug is ...:\n self._debug = loop.get_debug()\n\n # set loop to sub applications\n for subapp in self._subapps:\n subapp._set_loop(loop)\n\n @property\n def frozen(self):\n return self._frozen\n\n def freeze(self):\n if self._frozen:\n return\n\n self._frozen = True\n self._middlewares = 
tuple(reversed(self._middlewares))\n self._router.freeze()\n self._on_loop_available.freeze()\n self._on_pre_signal.freeze()\n self._on_post_signal.freeze()\n self._on_response_prepare.freeze()\n self._on_startup.freeze()\n self._on_shutdown.freeze()\n self._on_cleanup.freeze()\n\n for subapp in self._subapps:\n subapp.freeze()\n\n @property\n def debug(self):\n return self._debug\n\n def _reg_subapp_signals(self, subapp):\n\n def reg_handler(signame):\n subsig = getattr(subapp, signame)\n\n @asyncio.coroutine\n def handler(app):\n yield from subsig.send(subapp)\n appsig = getattr(self, signame)\n appsig.append(handler)\n\n reg_handler('on_startup')\n reg_handler('on_shutdown')\n reg_handler('on_cleanup')\n\n def add_subapp(self, prefix, subapp):\n if self.frozen:\n raise RuntimeError(\n \"Cannot add sub application to frozen application\")\n if subapp.frozen:\n raise RuntimeError(\"Cannot add frozen application\")\n if prefix.endswith('/'):\n prefix = prefix[:-1]\n if prefix in ('', '/'):\n raise ValueError(\"Prefix cannot be empty\")\n\n resource = PrefixedSubAppResource(prefix, subapp)\n self.router.register_resource(resource)\n self._reg_subapp_signals(subapp)\n self._subapps.append(subapp)\n if self._loop is not None:\n subapp._set_loop(self._loop)\n return resource\n\n @property\n def on_loop_available(self):\n return self._on_loop_available\n\n @property\n def on_response_prepare(self):\n return self._on_response_prepare\n\n @property\n def on_pre_signal(self):\n return self._on_pre_signal\n\n @property\n def on_post_signal(self):\n return self._on_post_signal\n\n @property\n def on_startup(self):\n return self._on_startup\n\n @property\n def on_shutdown(self):\n return self._on_shutdown\n\n @property\n def on_cleanup(self):\n return self._on_cleanup\n\n @property\n def router(self):\n return self._router\n\n @property\n def middlewares(self):\n return self._middlewares\n\n def make_handler(self, *, loop=None,\n secure_proxy_ssl_header=None, **kwargs):\n self._set_loop(loop)\n self.freeze()\n\n kwargs['debug'] = self.debug\n if self._handler_args:\n for k, v in self._handler_args.items():\n kwargs[k] = v\n\n if secure_proxy_ssl_header:\n self._secure_proxy_ssl_header = secure_proxy_ssl_header\n return Server(self._handle, request_factory=self._make_request,\n loop=self.loop, **kwargs)\n\n @asyncio.coroutine\n def startup(self):\n \"\"\"Causes on_startup signal\n\n Should be called in the event loop along with the request handler.\n \"\"\"\n yield from self.on_startup.send(self)\n\n @asyncio.coroutine\n def shutdown(self):\n \"\"\"Causes on_shutdown signal\n\n Should be called before cleanup()\n \"\"\"\n yield from self.on_shutdown.send(self)\n\n @asyncio.coroutine\n def cleanup(self):\n \"\"\"Causes on_cleanup signal\n\n Should be called after shutdown()\n \"\"\"\n yield from self.on_cleanup.send(self)\n\n def _make_request(self, message, payload, protocol, writer, task,\n _cls=web_request.Request):\n return _cls(\n message, payload, protocol, writer, protocol._time_service, task,\n secure_proxy_ssl_header=self._secure_proxy_ssl_header,\n client_max_size=self._client_max_size)\n\n @asyncio.coroutine\n def _handle(self, request):\n match_info = yield from self._router.resolve(request)\n assert isinstance(match_info, AbstractMatchInfo), match_info\n match_info.add_app(self)\n\n if __debug__:\n match_info.freeze()\n\n resp = None\n request._match_info = match_info\n expect = request.headers.get(hdrs.EXPECT)\n if expect:\n resp = yield from match_info.expect_handler(request)\n yield 
from request.writer.drain()\n\n if resp is None:\n handler = match_info.handler\n for app in match_info.apps[::-1]:\n for factory in app._middlewares:\n handler = yield from factory(app, handler)\n\n resp = yield from handler(request)\n\n assert isinstance(resp, web_response.StreamResponse), \\\n (\"Handler {!r} should return response instance, \"\n \"got {!r} [middlewares {!r}]\").format(\n match_info.handler, type(resp),\n [middleware for middleware in app.middlewares\n for app in match_info.apps])\n return resp\n\n def __call__(self):\n \"\"\"gunicorn compatibility\"\"\"\n return self\n\n def __repr__(self):\n return \"<Application 0x{:x}>\".format(id(self))\n\n\ndef run_app(app, *, host=None, port=None, path=None, sock=None,\n shutdown_timeout=60.0, ssl_context=None,\n print=print, backlog=128, access_log_format=None,\n access_log=access_logger, loop=None):\n \"\"\"Run an app locally\"\"\"\n user_supplied_loop = loop is not None\n if loop is None:\n loop = asyncio.get_event_loop()\n\n make_handler_kwargs = dict()\n if access_log_format is not None:\n make_handler_kwargs['access_log_format'] = access_log_format\n handler = app.make_handler(loop=loop, access_log=access_log,\n **make_handler_kwargs)\n\n loop.run_until_complete(app.startup())\n\n scheme = 'https' if ssl_context else 'http'\n base_url = URL('{}://localhost'.format(scheme)).with_port(port)\n\n if path is None:\n paths = ()\n elif isinstance(path, (str, bytes, bytearray, memoryview))\\\n or not isinstance(path, Iterable):\n paths = (path,)\n else:\n paths = path\n\n if sock is None:\n socks = ()\n elif not isinstance(sock, Iterable):\n socks = (sock,)\n else:\n socks = sock\n\n if host is None:\n if (paths or socks) and not port:\n hosts = ()\n else:\n hosts = (\"0.0.0.0\",)\n elif isinstance(host, (str, bytes, bytearray, memoryview))\\\n or not isinstance(host, Iterable):\n hosts = (host,)\n else:\n hosts = host\n\n if hosts and port is None:\n port = 8443 if ssl_context else 8080\n\n server_creations = []\n uris = [str(base_url.with_host(host)) for host in hosts]\n if hosts:\n # Multiple hosts bound to same server is available in most loop\n # implementations, but only send multiple if we have multiple.\n host_binding = hosts[0] if len(hosts) == 1 else hosts\n server_creations.append(\n loop.create_server(\n handler, host_binding, port, ssl=ssl_context, backlog=backlog\n )\n )\n for path in paths:\n # Most loop implementations don't support multiple paths bound in same\n # server, so create a server for each.\n server_creations.append(\n loop.create_unix_server(\n handler, path, ssl=ssl_context, backlog=backlog\n )\n )\n uris.append('{}://unix:{}:'.format(scheme, path))\n\n # Clean up prior socket path if stale and not abstract.\n # CPython 3.5.3+'s event loop already does this. 
See\n # https://github.com/python/asyncio/issues/425\n if path[0] not in (0, '\\x00'): # pragma: no branch\n try:\n if stat.S_ISSOCK(os.stat(path).st_mode):\n os.remove(path)\n except FileNotFoundError:\n pass\n for sock in socks:\n server_creations.append(\n loop.create_server(\n handler, sock=sock, ssl=ssl_context, backlog=backlog\n )\n )\n\n if hasattr(socket, 'AF_UNIX') and sock.family == socket.AF_UNIX:\n uris.append('{}://unix:{}:'.format(scheme, sock.getsockname()))\n else:\n host, port = sock.getsockname()\n uris.append(str(base_url.with_host(host).with_port(port)))\n\n servers = loop.run_until_complete(\n asyncio.gather(*server_creations, loop=loop)\n )\n\n print(\"======== Running on {} ========\\n\"\n \"(Press CTRL+C to quit)\".format(', '.join(uris)))\n\n try:\n loop.run_forever()\n except KeyboardInterrupt: # pragma: no cover\n pass\n finally:\n server_closures = []\n for srv in servers:\n srv.close()\n server_closures.append(srv.wait_closed())\n loop.run_until_complete(asyncio.gather(*server_closures, loop=loop))\n loop.run_until_complete(app.shutdown())\n loop.run_until_complete(handler.shutdown(shutdown_timeout))\n loop.run_until_complete(app.cleanup())\n if not user_supplied_loop:\n loop.close()\n\n\ndef main(argv):\n arg_parser = ArgumentParser(\n description=\"aiohttp.web Application server\",\n prog=\"aiohttp.web\"\n )\n arg_parser.add_argument(\n \"entry_func\",\n help=(\"Callable returning the `aiohttp.web.Application` instance to \"\n \"run. Should be specified in the 'module:function' syntax.\"),\n metavar=\"entry-func\"\n )\n arg_parser.add_argument(\n \"-H\", \"--hostname\",\n help=\"TCP/IP hostname to serve on (default: %(default)r)\",\n default=\"localhost\"\n )\n arg_parser.add_argument(\n \"-P\", \"--port\",\n help=\"TCP/IP port to serve on (default: %(default)r)\",\n type=int,\n default=\"8080\"\n )\n arg_parser.add_argument(\n \"-U\", \"--path\",\n help=\"Unix file system path to serve on. Specifying a path will cause \"\n \"hostname and port arguments to be ignored.\",\n )\n args, extra_argv = arg_parser.parse_known_args(argv)\n\n # Import logic\n mod_str, _, func_str = args.entry_func.partition(\":\")\n if not func_str or not mod_str:\n arg_parser.error(\n \"'entry-func' not in 'module:function' syntax\"\n )\n if mod_str.startswith(\".\"):\n arg_parser.error(\"relative module names not supported\")\n try:\n module = import_module(mod_str)\n except ImportError as ex:\n arg_parser.error(\"unable to import %s: %s\" % (mod_str, ex))\n try:\n func = getattr(module, func_str)\n except AttributeError:\n arg_parser.error(\"module %r has no attribute %r\" % (mod_str, func_str))\n\n # Compatibility logic\n if args.path is not None and not hasattr(socket, 'AF_UNIX'):\n arg_parser.error(\"file system paths not supported by your operating\"\n \" environment\")\n\n app = func(extra_argv)\n run_app(app, host=args.hostname, port=args.port, path=args.path)\n arg_parser.exit(message=\"Stopped\\n\")\n\n\nif __name__ == \"__main__\": # pragma: no branch\n main(sys.argv[1:]) # pragma: no cover\n",
"path": "aiohttp/web.py"
}
] | [
{
"content": "import asyncio\nimport os\nimport socket\nimport stat\nimport sys\nimport warnings\nfrom argparse import ArgumentParser\nfrom collections import Iterable, MutableMapping\nfrom importlib import import_module\n\nfrom yarl import URL\n\nfrom . import (hdrs, web_exceptions, web_fileresponse, web_middlewares,\n web_protocol, web_request, web_response, web_server,\n web_urldispatcher, web_ws)\nfrom .abc import AbstractMatchInfo, AbstractRouter\nfrom .helpers import FrozenList\nfrom .http import HttpVersion # noqa\nfrom .log import access_logger, web_logger\nfrom .signals import FuncSignal, PostSignal, PreSignal, Signal\nfrom .web_exceptions import * # noqa\nfrom .web_fileresponse import * # noqa\nfrom .web_middlewares import * # noqa\nfrom .web_protocol import * # noqa\nfrom .web_request import * # noqa\nfrom .web_response import * # noqa\nfrom .web_server import Server\nfrom .web_urldispatcher import * # noqa\nfrom .web_urldispatcher import PrefixedSubAppResource\nfrom .web_ws import * # noqa\n\n__all__ = (web_protocol.__all__ +\n web_fileresponse.__all__ +\n web_request.__all__ +\n web_response.__all__ +\n web_exceptions.__all__ +\n web_urldispatcher.__all__ +\n web_ws.__all__ +\n web_server.__all__ +\n web_middlewares.__all__ +\n ('Application', 'HttpVersion', 'MsgType'))\n\n\nclass Application(MutableMapping):\n def __init__(self, *,\n logger=web_logger,\n router=None,\n middlewares=(),\n handler_args=None,\n client_max_size=1024**2,\n secure_proxy_ssl_header=None,\n loop=None,\n debug=...):\n if router is None:\n router = web_urldispatcher.UrlDispatcher()\n assert isinstance(router, AbstractRouter), router\n\n if loop is not None:\n warnings.warn(\"loop argument is deprecated\", ResourceWarning)\n\n self._debug = debug\n self._router = router\n self._secure_proxy_ssl_header = secure_proxy_ssl_header\n self._loop = loop\n self._handler_args = handler_args\n self.logger = logger\n\n self._middlewares = FrozenList(middlewares)\n self._state = {}\n self._frozen = False\n self._subapps = []\n\n self._on_pre_signal = PreSignal()\n self._on_post_signal = PostSignal()\n self._on_loop_available = FuncSignal(self)\n self._on_response_prepare = Signal(self)\n self._on_startup = Signal(self)\n self._on_shutdown = Signal(self)\n self._on_cleanup = Signal(self)\n self._client_max_size = client_max_size\n\n # MutableMapping API\n\n def __eq__(self, other):\n return self is other\n\n def __getitem__(self, key):\n return self._state[key]\n\n def _check_frozen(self):\n if self._frozen:\n warnings.warn(\"Changing state of started or joined \"\n \"application is deprecated\",\n DeprecationWarning,\n stacklevel=3)\n\n def __setitem__(self, key, value):\n self._check_frozen()\n self._state[key] = value\n\n def __delitem__(self, key):\n self._check_frozen()\n del self._state[key]\n\n def __len__(self):\n return len(self._state)\n\n def __iter__(self):\n return iter(self._state)\n\n ########\n @property\n def loop(self):\n return self._loop\n\n def _set_loop(self, loop):\n if loop is None:\n loop = asyncio.get_event_loop()\n if self._loop is not None and self._loop is not loop:\n raise RuntimeError(\n \"web.Application instance initialized with different loop\")\n\n self._loop = loop\n self._on_loop_available.send(self)\n\n # set loop debug\n if self._debug is ...:\n self._debug = loop.get_debug()\n\n # set loop to sub applications\n for subapp in self._subapps:\n subapp._set_loop(loop)\n\n @property\n def frozen(self):\n return self._frozen\n\n def freeze(self):\n if self._frozen:\n return\n\n 
self._frozen = True\n self._middlewares = tuple(reversed(self._middlewares))\n self._router.freeze()\n self._on_loop_available.freeze()\n self._on_pre_signal.freeze()\n self._on_post_signal.freeze()\n self._on_response_prepare.freeze()\n self._on_startup.freeze()\n self._on_shutdown.freeze()\n self._on_cleanup.freeze()\n\n for subapp in self._subapps:\n subapp.freeze()\n\n @property\n def debug(self):\n return self._debug\n\n def _reg_subapp_signals(self, subapp):\n\n def reg_handler(signame):\n subsig = getattr(subapp, signame)\n\n @asyncio.coroutine\n def handler(app):\n yield from subsig.send(subapp)\n appsig = getattr(self, signame)\n appsig.append(handler)\n\n reg_handler('on_startup')\n reg_handler('on_shutdown')\n reg_handler('on_cleanup')\n\n def add_subapp(self, prefix, subapp):\n if self.frozen:\n raise RuntimeError(\n \"Cannot add sub application to frozen application\")\n if subapp.frozen:\n raise RuntimeError(\"Cannot add frozen application\")\n if prefix.endswith('/'):\n prefix = prefix[:-1]\n if prefix in ('', '/'):\n raise ValueError(\"Prefix cannot be empty\")\n\n resource = PrefixedSubAppResource(prefix, subapp)\n self.router.register_resource(resource)\n self._reg_subapp_signals(subapp)\n self._subapps.append(subapp)\n if self._loop is not None:\n subapp._set_loop(self._loop)\n return resource\n\n @property\n def on_loop_available(self):\n return self._on_loop_available\n\n @property\n def on_response_prepare(self):\n return self._on_response_prepare\n\n @property\n def on_pre_signal(self):\n return self._on_pre_signal\n\n @property\n def on_post_signal(self):\n return self._on_post_signal\n\n @property\n def on_startup(self):\n return self._on_startup\n\n @property\n def on_shutdown(self):\n return self._on_shutdown\n\n @property\n def on_cleanup(self):\n return self._on_cleanup\n\n @property\n def router(self):\n return self._router\n\n @property\n def middlewares(self):\n return self._middlewares\n\n def make_handler(self, *, loop=None,\n secure_proxy_ssl_header=None, **kwargs):\n self._set_loop(loop)\n self.freeze()\n\n kwargs['debug'] = self.debug\n if self._handler_args:\n for k, v in self._handler_args.items():\n kwargs[k] = v\n\n if secure_proxy_ssl_header:\n self._secure_proxy_ssl_header = secure_proxy_ssl_header\n return Server(self._handle, request_factory=self._make_request,\n loop=self.loop, **kwargs)\n\n @asyncio.coroutine\n def startup(self):\n \"\"\"Causes on_startup signal\n\n Should be called in the event loop along with the request handler.\n \"\"\"\n yield from self.on_startup.send(self)\n\n @asyncio.coroutine\n def shutdown(self):\n \"\"\"Causes on_shutdown signal\n\n Should be called before cleanup()\n \"\"\"\n yield from self.on_shutdown.send(self)\n\n @asyncio.coroutine\n def cleanup(self):\n \"\"\"Causes on_cleanup signal\n\n Should be called after shutdown()\n \"\"\"\n yield from self.on_cleanup.send(self)\n\n def _make_request(self, message, payload, protocol, writer, task,\n _cls=web_request.Request):\n return _cls(\n message, payload, protocol, writer, protocol._time_service, task,\n secure_proxy_ssl_header=self._secure_proxy_ssl_header,\n client_max_size=self._client_max_size)\n\n @asyncio.coroutine\n def _handle(self, request):\n match_info = yield from self._router.resolve(request)\n assert isinstance(match_info, AbstractMatchInfo), match_info\n match_info.add_app(self)\n\n if __debug__:\n match_info.freeze()\n\n resp = None\n request._match_info = match_info\n expect = request.headers.get(hdrs.EXPECT)\n if expect:\n resp = yield from 
match_info.expect_handler(request)\n yield from request.writer.drain()\n\n if resp is None:\n handler = match_info.handler\n for app in match_info.apps[::-1]:\n for factory in app._middlewares:\n handler = yield from factory(app, handler)\n\n resp = yield from handler(request)\n\n assert isinstance(resp, web_response.StreamResponse), \\\n (\"Handler {!r} should return response instance, \"\n \"got {!r} [middlewares {!r}]\").format(\n match_info.handler, type(resp),\n [middleware for middleware in app.middlewares\n for app in match_info.apps])\n return resp\n\n def __call__(self):\n \"\"\"gunicorn compatibility\"\"\"\n return self\n\n def __repr__(self):\n return \"<Application 0x{:x}>\".format(id(self))\n\n\ndef run_app(app, *, host=None, port=None, path=None, sock=None,\n shutdown_timeout=60.0, ssl_context=None,\n print=print, backlog=128, access_log_format=None,\n access_log=access_logger, loop=None):\n \"\"\"Run an app locally\"\"\"\n user_supplied_loop = loop is not None\n if loop is None:\n loop = asyncio.get_event_loop()\n\n make_handler_kwargs = dict()\n if access_log_format is not None:\n make_handler_kwargs['access_log_format'] = access_log_format\n handler = app.make_handler(loop=loop, access_log=access_log,\n **make_handler_kwargs)\n\n loop.run_until_complete(app.startup())\n\n scheme = 'https' if ssl_context else 'http'\n base_url = URL('{}://localhost'.format(scheme)).with_port(port)\n\n if path is None:\n paths = ()\n elif isinstance(path, (str, bytes, bytearray, memoryview))\\\n or not isinstance(path, Iterable):\n paths = (path,)\n else:\n paths = path\n\n if sock is None:\n socks = ()\n elif not isinstance(sock, Iterable):\n socks = (sock,)\n else:\n socks = sock\n\n if host is None:\n if (paths or socks) and not port:\n hosts = ()\n else:\n hosts = (\"0.0.0.0\",)\n elif isinstance(host, (str, bytes, bytearray, memoryview))\\\n or not isinstance(host, Iterable):\n hosts = (host,)\n else:\n hosts = host\n\n if hosts and port is None:\n port = 8443 if ssl_context else 8080\n\n server_creations = []\n uris = [str(base_url.with_host(host)) for host in hosts]\n if hosts:\n # Multiple hosts bound to same server is available in most loop\n # implementations, but only send multiple if we have multiple.\n host_binding = hosts[0] if len(hosts) == 1 else hosts\n server_creations.append(\n loop.create_server(\n handler, host_binding, port, ssl=ssl_context, backlog=backlog\n )\n )\n for path in paths:\n # Most loop implementations don't support multiple paths bound in same\n # server, so create a server for each.\n server_creations.append(\n loop.create_unix_server(\n handler, path, ssl=ssl_context, backlog=backlog\n )\n )\n uris.append('{}://unix:{}:'.format(scheme, path))\n\n # Clean up prior socket path if stale and not abstract.\n # CPython 3.5.3+'s event loop already does this. 
See\n # https://github.com/python/asyncio/issues/425\n if path[0] not in (0, '\\x00'): # pragma: no branch\n try:\n if stat.S_ISSOCK(os.stat(path).st_mode):\n os.remove(path)\n except FileNotFoundError:\n pass\n for sock in socks:\n server_creations.append(\n loop.create_server(\n handler, sock=sock, ssl=ssl_context, backlog=backlog\n )\n )\n\n if hasattr(socket, 'AF_UNIX') and sock.family == socket.AF_UNIX:\n uris.append('{}://unix:{}:'.format(scheme, sock.getsockname()))\n else:\n host, port = sock.getsockname()\n uris.append(str(base_url.with_host(host).with_port(port)))\n\n servers = loop.run_until_complete(\n asyncio.gather(*server_creations, loop=loop)\n )\n\n print(\"======== Running on {} ========\\n\"\n \"(Press CTRL+C to quit)\".format(', '.join(uris)))\n\n try:\n loop.run_forever()\n except KeyboardInterrupt: # pragma: no cover\n pass\n finally:\n server_closures = []\n for srv in servers:\n srv.close()\n server_closures.append(srv.wait_closed())\n loop.run_until_complete(asyncio.gather(*server_closures, loop=loop))\n loop.run_until_complete(app.shutdown())\n loop.run_until_complete(handler.shutdown(shutdown_timeout))\n loop.run_until_complete(app.cleanup())\n if not user_supplied_loop:\n loop.close()\n\n\ndef main(argv):\n arg_parser = ArgumentParser(\n description=\"aiohttp.web Application server\",\n prog=\"aiohttp.web\"\n )\n arg_parser.add_argument(\n \"entry_func\",\n help=(\"Callable returning the `aiohttp.web.Application` instance to \"\n \"run. Should be specified in the 'module:function' syntax.\"),\n metavar=\"entry-func\"\n )\n arg_parser.add_argument(\n \"-H\", \"--hostname\",\n help=\"TCP/IP hostname to serve on (default: %(default)r)\",\n default=\"localhost\"\n )\n arg_parser.add_argument(\n \"-P\", \"--port\",\n help=\"TCP/IP port to serve on (default: %(default)r)\",\n type=int,\n default=\"8080\"\n )\n arg_parser.add_argument(\n \"-U\", \"--path\",\n help=\"Unix file system path to serve on. Specifying a path will cause \"\n \"hostname and port arguments to be ignored.\",\n )\n args, extra_argv = arg_parser.parse_known_args(argv)\n\n # Import logic\n mod_str, _, func_str = args.entry_func.partition(\":\")\n if not func_str or not mod_str:\n arg_parser.error(\n \"'entry-func' not in 'module:function' syntax\"\n )\n if mod_str.startswith(\".\"):\n arg_parser.error(\"relative module names not supported\")\n try:\n module = import_module(mod_str)\n except ImportError as ex:\n arg_parser.error(\"unable to import %s: %s\" % (mod_str, ex))\n try:\n func = getattr(module, func_str)\n except AttributeError:\n arg_parser.error(\"module %r has no attribute %r\" % (mod_str, func_str))\n\n # Compatibility logic\n if args.path is not None and not hasattr(socket, 'AF_UNIX'):\n arg_parser.error(\"file system paths not supported by your operating\"\n \" environment\")\n\n app = func(extra_argv)\n run_app(app, host=args.hostname, port=args.port, path=args.path)\n arg_parser.exit(message=\"Stopped\\n\")\n\n\nif __name__ == \"__main__\": # pragma: no branch\n main(sys.argv[1:]) # pragma: no cover\n",
"path": "aiohttp/web.py"
}
] | diff --git a/CHANGES.rst b/CHANGES.rst
index 9330a3c7287..9f89a9ee3eb 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -43,6 +43,8 @@ Changes
- Fix sub-application middlewares resolution order #1853
+- Fix applications comparison #1866
+
2.0.7 (2017-04-12)
------------------
diff --git a/aiohttp/web.py b/aiohttp/web.py
index f117fbc4cea..e1f3d115efd 100644
--- a/aiohttp/web.py
+++ b/aiohttp/web.py
@@ -81,6 +81,9 @@ def __init__(self, *,
# MutableMapping API
+ def __eq__(self, other):
+ return self is other
+
def __getitem__(self, key):
return self._state[key]
diff --git a/tests/test_web_application.py b/tests/test_web_application.py
index 5b65ac7e2ad..0276147f999 100644
--- a/tests/test_web_application.py
+++ b/tests/test_web_application.py
@@ -219,3 +219,11 @@ def test_secure_proxy_ssl_header_init(loop):
assert app._secure_proxy_ssl_header is hdr
app.make_handler(loop=loop)
assert app._secure_proxy_ssl_header is hdr
+
+
+def test_equality():
+ app1 = web.Application()
+ app2 = web.Application()
+
+ assert app1 == app1
+ assert app1 != app2
|
Mailu__Mailu-2049 | Fetchmail: /var/lib/fetchmail needs persistence
According to the [fetchmail documentation](https://www.fetchmail.info/fetchmail-man.html#12), an `.idfile` is used to keep track of previously downloaded messages. Shouldn't that file be persistent across container restarts?
I'm not a Fetchmail user; perhaps somebody can shed some light on how this currently works?
cc: @Nebukadneza, @hoellen, @kaiyou
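
For context, a minimal sketch adapted from the fixed `optional/fetchmail/fetchmail.py` shown below: the invocation points fetchmail at an ID file under `/data`, which is expected to be backed by a persistent volume (the exact paths follow the fix in this record and are shown here only for illustration):

```python
import shlex
import subprocess
import tempfile

# Keep fetchmail's record of already-seen message IDs under /data so it
# survives container recreation; /data must be backed by a persistent volume.
FETCHMAIL = ("fetchmail -N --idfile /data/fetchids --uidl "
             "--sslcertck --sslcertpath /etc/ssl/certs -f {}")


def fetchmail(fetchmailrc):
    # Write the generated rc file to a temporary path and run fetchmail on it.
    with tempfile.NamedTemporaryFile() as handler:
        handler.write(fetchmailrc.encode("utf8"))
        handler.flush()
        command = FETCHMAIL.format(shlex.quote(handler.name))
        return subprocess.check_output(command, shell=True)
```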
| [
{
"content": "#!/usr/bin/python3\n\nimport time\nimport os\nimport tempfile\nimport shlex\nimport subprocess\nimport re\nimport requests\nimport sys\nimport traceback\n\n\nFETCHMAIL = \"\"\"\nfetchmail -N \\\n --sslcertck --sslcertpath /etc/ssl/certs \\\n -f {}\n\"\"\"\n\n\nRC_LINE = \"\"\"\npoll \"{host}\" proto {protocol} port {port}\n user \"{username}\" password \"{password}\"\n is \"{user_email}\"\n smtphost \"{smtphost}\"\n {options}\n\"\"\"\n\n\ndef extract_host_port(host_and_port, default_port):\n host, _, port = re.match('^(.*?)(:([0-9]*))?$', host_and_port).groups()\n return host, int(port) if port else default_port\n\n\ndef escape_rc_string(arg):\n return \"\".join(\"\\\\x%2x\" % ord(char) for char in arg)\n\n\ndef fetchmail(fetchmailrc):\n with tempfile.NamedTemporaryFile() as handler:\n handler.write(fetchmailrc.encode(\"utf8\"))\n handler.flush()\n command = FETCHMAIL.format(shlex.quote(handler.name))\n output = subprocess.check_output(command, shell=True)\n return output\n\n\ndef run(debug):\n try:\n fetches = requests.get(\"http://\" + os.environ.get(\"HOST_ADMIN\", \"admin\") + \"/internal/fetch\").json()\n smtphost, smtpport = extract_host_port(os.environ.get(\"HOST_SMTP\", \"smtp\"), None)\n if smtpport is None:\n smtphostport = smtphost\n else:\n smtphostport = \"%s/%d\" % (smtphost, smtpport)\n for fetch in fetches:\n fetchmailrc = \"\"\n options = \"options antispam 501, 504, 550, 553, 554\"\n options += \" ssl\" if fetch[\"tls\"] else \"\"\n options += \" keep\" if fetch[\"keep\"] else \" fetchall\"\n fetchmailrc += RC_LINE.format(\n user_email=escape_rc_string(fetch[\"user_email\"]),\n protocol=fetch[\"protocol\"],\n host=escape_rc_string(fetch[\"host\"]),\n port=fetch[\"port\"],\n smtphost=smtphostport,\n username=escape_rc_string(fetch[\"username\"]),\n password=escape_rc_string(fetch[\"password\"]),\n options=options\n )\n if debug:\n print(fetchmailrc)\n try:\n print(fetchmail(fetchmailrc))\n error_message = \"\"\n except subprocess.CalledProcessError as error:\n error_message = error.output.decode(\"utf8\")\n # No mail is not an error\n if not error_message.startswith(\"fetchmail: No mail\"):\n print(error_message)\n user_info = \"for %s at %s\" % (fetch[\"user_email\"], fetch[\"host\"])\n # Number of messages seen is not a error as well\n if (\"messages\" in error_message and\n \"(seen \" in error_message and\n user_info in error_message):\n print(error_message)\n finally:\n requests.post(\"http://\" + os.environ.get(\"HOST_ADMIN\", \"admin\") + \"/internal/fetch/{}\".format(fetch[\"id\"]),\n json=error_message.split(\"\\n\")[0]\n )\n except Exception:\n traceback.print_exc()\n\n\nif __name__ == \"__main__\":\n while True:\n time.sleep(int(os.environ.get(\"FETCHMAIL_DELAY\", 60)))\n run(os.environ.get(\"DEBUG\", None) == \"True\")\n sys.stdout.flush()\n",
"path": "optional/fetchmail/fetchmail.py"
}
] | [
{
"content": "#!/usr/bin/python3\n\nimport time\nimport os\nimport tempfile\nimport shlex\nimport subprocess\nimport re\nimport requests\nimport sys\nimport traceback\n\n\nFETCHMAIL = \"\"\"\nfetchmail -N \\\n --idfile /data/fetchids --uidl \\\n --sslcertck --sslcertpath /etc/ssl/certs \\\n -f {}\n\"\"\"\n\n\nRC_LINE = \"\"\"\npoll \"{host}\" proto {protocol} port {port}\n user \"{username}\" password \"{password}\"\n is \"{user_email}\"\n smtphost \"{smtphost}\"\n {options}\n\"\"\"\n\n\ndef extract_host_port(host_and_port, default_port):\n host, _, port = re.match('^(.*?)(:([0-9]*))?$', host_and_port).groups()\n return host, int(port) if port else default_port\n\n\ndef escape_rc_string(arg):\n return \"\".join(\"\\\\x%2x\" % ord(char) for char in arg)\n\n\ndef fetchmail(fetchmailrc):\n with tempfile.NamedTemporaryFile() as handler:\n handler.write(fetchmailrc.encode(\"utf8\"))\n handler.flush()\n command = FETCHMAIL.format(shlex.quote(handler.name))\n output = subprocess.check_output(command, shell=True)\n return output\n\n\ndef run(debug):\n try:\n fetches = requests.get(\"http://\" + os.environ.get(\"HOST_ADMIN\", \"admin\") + \"/internal/fetch\").json()\n smtphost, smtpport = extract_host_port(os.environ.get(\"HOST_SMTP\", \"smtp\"), None)\n if smtpport is None:\n smtphostport = smtphost\n else:\n smtphostport = \"%s/%d\" % (smtphost, smtpport)\n for fetch in fetches:\n fetchmailrc = \"\"\n options = \"options antispam 501, 504, 550, 553, 554\"\n options += \" ssl\" if fetch[\"tls\"] else \"\"\n options += \" keep\" if fetch[\"keep\"] else \" fetchall\"\n fetchmailrc += RC_LINE.format(\n user_email=escape_rc_string(fetch[\"user_email\"]),\n protocol=fetch[\"protocol\"],\n host=escape_rc_string(fetch[\"host\"]),\n port=fetch[\"port\"],\n smtphost=smtphostport,\n username=escape_rc_string(fetch[\"username\"]),\n password=escape_rc_string(fetch[\"password\"]),\n options=options\n )\n if debug:\n print(fetchmailrc)\n try:\n print(fetchmail(fetchmailrc))\n error_message = \"\"\n except subprocess.CalledProcessError as error:\n error_message = error.output.decode(\"utf8\")\n # No mail is not an error\n if not error_message.startswith(\"fetchmail: No mail\"):\n print(error_message)\n user_info = \"for %s at %s\" % (fetch[\"user_email\"], fetch[\"host\"])\n # Number of messages seen is not a error as well\n if (\"messages\" in error_message and\n \"(seen \" in error_message and\n user_info in error_message):\n print(error_message)\n finally:\n requests.post(\"http://\" + os.environ.get(\"HOST_ADMIN\", \"admin\") + \"/internal/fetch/{}\".format(fetch[\"id\"]),\n json=error_message.split(\"\\n\")[0]\n )\n except Exception:\n traceback.print_exc()\n\n\nif __name__ == \"__main__\":\n while True:\n time.sleep(int(os.environ.get(\"FETCHMAIL_DELAY\", 60)))\n run(os.environ.get(\"DEBUG\", None) == \"True\")\n sys.stdout.flush()\n",
"path": "optional/fetchmail/fetchmail.py"
}
] | diff --git a/optional/fetchmail/Dockerfile b/optional/fetchmail/Dockerfile
index 995ec48f9..068a5dcec 100644
--- a/optional/fetchmail/Dockerfile
+++ b/optional/fetchmail/Dockerfile
@@ -12,8 +12,8 @@ RUN apk add --no-cache \
RUN apk add --no-cache fetchmail ca-certificates openssl \
&& pip3 install requests
-COPY fetchmail.py /fetchmail.py
+RUN mkdir -p /data
-USER fetchmail
+COPY fetchmail.py /fetchmail.py
-CMD ["/fetchmail.py"]
+CMD ["/fetchmail.py"]
\ No newline at end of file
diff --git a/optional/fetchmail/fetchmail.py b/optional/fetchmail/fetchmail.py
index 4be3c2bdc..5459de59c 100755
--- a/optional/fetchmail/fetchmail.py
+++ b/optional/fetchmail/fetchmail.py
@@ -13,6 +13,7 @@
FETCHMAIL = """
fetchmail -N \
+ --idfile /data/fetchids --uidl \
--sslcertck --sslcertpath /etc/ssl/certs \
-f {}
"""
diff --git a/setup/flavors/compose/docker-compose.yml b/setup/flavors/compose/docker-compose.yml
index 2675a2ab2..18a881b8b 100644
--- a/setup/flavors/compose/docker-compose.yml
+++ b/setup/flavors/compose/docker-compose.yml
@@ -129,6 +129,8 @@ services:
image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}fetchmail:${MAILU_VERSION:-{{ version }}}
restart: always
env_file: {{ env }}
+ volumes:
+ - "{{ root }}/data/fetchmail:/data"
{% if resolver_enabled %}
depends_on:
- resolver
diff --git a/setup/flavors/stack/docker-compose.yml b/setup/flavors/stack/docker-compose.yml
index 24afa9f33..0c744d7ec 100644
--- a/setup/flavors/stack/docker-compose.yml
+++ b/setup/flavors/stack/docker-compose.yml
@@ -110,7 +110,7 @@ services:
image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}fetchmail:${MAILU_VERSION:-{{ version }}}
env_file: {{ env }}
volumes:
- - "{{ root }}/data:/data"
+ - "{{ root }}/data/fetchmail:/data"
deploy:
replicas: 1
healthcheck:
diff --git a/towncrier/newsfragments/1223.bugfix b/towncrier/newsfragments/1223.bugfix
new file mode 100644
index 000000000..3c23d1a4d
--- /dev/null
+++ b/towncrier/newsfragments/1223.bugfix
@@ -0,0 +1,4 @@
+Fixed fetchmail losing track of fetched emails upon container recreation.
+The relevant fetchmail files are now retained in the /data folder (in the fetchmail image).
+See the docker-compose.yml file for the relevant volume mapping.
+If you already had your own mapping, you must double check the volume mapping and take action.
|
rotki__rotki-1873 | SOL token is Solana, not Sola
## Problem Definition
The SOL token on the exchanges is reported with the correct value but the wrong name. It's Solana, not Sola.
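
For illustration, a minimal sketch of the exchange-symbol mapping involved (the `WORLD_TO_BINANCE` entries and the `SOL-2` identifier follow the rotki code shown below; the standalone `to_binance` helper is a simplification of the `Asset.to_binance` method):

```python
# rotki knows Solana as SOL-2 (plain SOL is the asset "Sola"), so the
# Binance ticker SOL must be tied to SOL-2 explicitly via this mapping.
WORLD_TO_BINANCE = {
    'LUNA-2': 'LUNA',  # Terra Luna
    'SOL-2': 'SOL',    # Solana
}


def to_binance(identifier):
    # Fall back to the rotki identifier when no special mapping exists.
    return WORLD_TO_BINANCE.get(identifier, identifier)


assert to_binance('SOL-2') == 'SOL'
assert to_binance('BTC') == 'BTC'
```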
| [
{
"content": "from dataclasses import dataclass, field\nfrom functools import total_ordering\nfrom typing import Any, Optional, Type, TypeVar\n\nfrom rotkehlchen.assets.resolver import AssetResolver\nfrom rotkehlchen.errors import DeserializationError, UnknownAsset, UnsupportedAsset\nfrom rotkehlchen.typing import AssetType, ChecksumEthAddress, EthTokenInfo, Timestamp\n\nWORLD_TO_BITTREX = {\n # In Rotkehlchen Bitswift is BITS-2 but in Bittrex it's BITS\n 'BITS-2': 'BITS',\n # In Rotkehlchen NuBits is USNBT but in Bittrex it's NBT\n 'USNBT': 'NBT',\n # In Rotkehlchen BTM-2 is Bytom but in Bittrex it's BTM\n 'BTM-2': 'BTM',\n # In Rotkehlchen PAI-2 is PCHAIN token but in Bittrex it's PI\n 'PAI-2': 'PI',\n # In Rotkehlchen PLA-2 is Playchip but in Bittrex is PLA\n 'PLA-2': 'PLA',\n # In Rotkehlchen sUSD is Synt USD but in Bittrex it's SUSD\n 'sUSD': 'SUSD',\n # In Rotkehlchen LUNA-2 is Terra Luna but in Bittrex it's LUNA\n 'LUNA-2': 'LUNA',\n # In Rotkehlchen WorldWideAssetExchange is WAX but in Bittrex it's WASP\n 'WAX': 'WAXP',\n}\n\nWORLD_TO_POLONIEX = {\n # AIR-2 is aircoin for us and AIR is airtoken. Poloniex has only aircoin\n 'AIR-2': 'AIR',\n # Decentr is DEC-2 for us but DEC in Poloniex\n 'DEC-2': 'DEC',\n # Poloniex delisted BCH and listed it as BCHABC after the Bitcoin Cash\n # ABC / SV fork. In Rotkehlchen we consider BCH to be the same as BCHABC\n 'BCH': 'BCHABC',\n # Poloniex has the BCH Fork, Bitcoin Satoshi's vision listed as BCHSV.\n # We know it as BSV\n 'BSV': 'BCHSV',\n # Caishen is known as CAI in Poloniex. This is before the swap to CAIX\n 'CAIX': 'CAI',\n # CCN is Cannacoin in Poloniex but in Rotkehlchen we know it as CCN-2\n 'CCN-2': 'CCN',\n # CCN is CustomContractNetwork in Rotkehlchen but does not exist in Cryptocompare\n # Putting it as conversion to make sure we don't accidentally ask for wrong price\n 'CCN': '',\n 'cUSDT': 'CUSDT',\n # Faircoin is known as FAIR outside of Poloniex. 
Seems to be the same as the\n # now delisted Poloniex's FAC if you look at the bitcointalk announcement\n # https://bitcointalk.org/index.php?topic=702675.0\n 'FAIR': 'FAC',\n # KeyCoin in Poloniex is KEY but in Rotkehlchen it's KEY-3\n 'KEY-3': 'KEY',\n # Mazacoin in Poloniex is MZC but in Rotkehlchen it's MAZA\n 'MAZA': 'MZC',\n # Myriadcoin in Poloniex is MYR but in Rotkehlchen it's XMY\n 'XMY': 'MYR',\n # NuBits in Poloniex is NBT but in Rotkehlchen it's USNBT\n 'USNBT': 'NBT',\n # Stellar is XLM everywhere, apart from Poloniex\n 'XLM': 'STR',\n # Poloniex still has the old name WC for WhiteCoin\n 'XWC': 'WC',\n}\n\nWORLD_TO_KRAKEN = {\n 'ATOM': 'ATOM',\n 'ALGO': 'ALGO',\n 'AUD': 'ZAUD',\n 'BAT': 'BAT',\n 'COMP': 'COMP',\n 'DOT': 'DOT',\n 'KAVA': 'KAVA',\n 'KNC': 'KNC',\n 'LINK': 'LINK',\n 'BSV': 'BSV',\n 'ETC': 'XETC',\n 'ETH': 'XETH',\n 'LTC': 'XLTC',\n 'REP': 'XREP',\n 'BTC': 'XXBT',\n 'XMR': 'XXMR',\n 'XRP': 'XXRP',\n 'ZEC': 'XZEC',\n 'EUR': 'ZEUR',\n 'USD': 'ZUSD',\n 'GBP': 'ZGBP',\n 'CAD': 'ZCAD',\n 'JPY': 'ZJPY',\n 'CHF': 'CHF',\n 'KRW': 'ZKRW',\n 'REPV2': 'REPV2',\n 'DAO': 'XDAO',\n 'MLN': 'XMLN',\n 'ICN': 'XICN',\n 'GNO': 'GNO',\n 'BCH': 'BCH',\n 'XLM': 'XXLM',\n 'DASH': 'DASH',\n 'EOS': 'EOS',\n 'USDC': 'USDC',\n 'USDT': 'USDT',\n 'KFEE': 'KFEE',\n 'ADA': 'ADA',\n 'QTUM': 'QTUM',\n 'NMC': 'XNMC',\n 'VEN': 'XXVN',\n 'DOGE': 'XXDG',\n 'DAI': 'DAI',\n 'XTZ': 'XTZ',\n 'WAVES': 'WAVES',\n 'ICX': 'ICX',\n 'NANO': 'NANO',\n 'OMG': 'OMG',\n 'SC': 'SC',\n 'PAXG': 'PAXG',\n 'LSK': 'LSK',\n 'TRX': 'TRX',\n 'OXT': 'OXT',\n 'STORJ': 'STORJ',\n 'BAL': 'BAL',\n 'KSM': 'KSM',\n 'CRV': 'CRV',\n 'SNX': 'SNX',\n 'FIL': 'FIL',\n 'UNI': 'UNI',\n 'YFI': 'YFI',\n 'ANT': 'ANT',\n 'KEEP': 'KEEP',\n 'TBTC': 'TBTC',\n}\n\nWORLD_TO_BINANCE = {\n # When BCH forked to BCHABC and BCHSV, binance renamed the original to ABC\n 'BCH': 'BCHABC',\n 'BSV': 'BCHSV',\n # ETHOS is known as BQX in Binance\n 'ETHOS': 'BQX',\n # GXChain is GXS in Binance but GXC in Rotkehlchen\n 'GXC': 'GXS',\n # Luna Terra is LUNA-2 in rotki\n 'LUNA-2': 'LUNA',\n # YOYOW is known as YOYO in Binance\n 'YOYOW': 'YOYO',\n}\n\n\n@total_ordering\n@dataclass(init=True, repr=True, eq=False, order=False, unsafe_hash=False, frozen=True)\nclass Asset():\n identifier: str\n name: str = field(init=False)\n symbol: str = field(init=False)\n active: bool = field(init=False)\n asset_type: AssetType = field(init=False)\n started: Timestamp = field(init=False)\n ended: Optional[Timestamp] = field(init=False)\n forked: Optional[str] = field(init=False)\n swapped_for: Optional[str] = field(init=False)\n # None means no special mapping. 
'' means not supported\n cryptocompare: Optional[str] = field(init=False)\n coingecko: Optional[str] = field(init=False)\n\n def __post_init__(self) -> None:\n \"\"\"\n Asset post initialization\n\n The only thing that is given to initialize an asset is a string.\n\n If a non string is given then it's probably a deserialization error or\n invalid data were given to us by the server if an API was queried.\n \"\"\"\n if not isinstance(self.identifier, str):\n raise DeserializationError(\n 'Tried to initialize an asset out of a non-string identifier',\n )\n\n canonical_id = AssetResolver().is_identifier_canonical(self.identifier)\n if canonical_id is None:\n raise UnknownAsset(self.identifier)\n # else let's make sure we got the canonical id in our data struct\n object.__setattr__(self, 'identifier', canonical_id)\n\n data = AssetResolver().get_asset_data(self.identifier)\n # Ugly hack to set attributes of a frozen data class as post init\n # https://docs.python.org/3/library/dataclasses.html#frozen-instances\n object.__setattr__(self, 'name', data.name)\n object.__setattr__(self, 'symbol', data.symbol)\n object.__setattr__(self, 'active', data.active)\n object.__setattr__(self, 'asset_type', data.asset_type)\n object.__setattr__(self, 'started', data.started)\n object.__setattr__(self, 'ended', data.ended)\n object.__setattr__(self, 'forked', data.forked)\n object.__setattr__(self, 'swapped_for', data.swapped_for)\n object.__setattr__(self, 'cryptocompare', data.cryptocompare)\n object.__setattr__(self, 'coingecko', data.coingecko)\n\n def serialize(self) -> str:\n return self.identifier\n\n def is_fiat(self) -> bool:\n return self.asset_type == AssetType.FIAT\n\n def is_eth_token(self) -> bool:\n return self.asset_type in (AssetType.ETH_TOKEN, AssetType.ETH_TOKEN_AND_MORE)\n\n def __str__(self) -> str:\n return self.name\n\n def __repr__(self) -> str:\n return f'<Asset identifier:{self.identifier} name:{self.name} symbol:{self.symbol}>'\n\n def to_kraken(self) -> str:\n return WORLD_TO_KRAKEN[self.identifier]\n\n def to_bittrex(self) -> str:\n return WORLD_TO_BITTREX.get(self.identifier, self.identifier)\n\n def to_binance(self) -> str:\n return WORLD_TO_BINANCE.get(self.identifier, self.identifier)\n\n def to_cryptocompare(self) -> str:\n \"\"\"Returns the symbol with which to query cryptocompare for the asset\n\n May raise:\n - UnsupportedAsset() if the asset is not supported by cryptocompare\n \"\"\"\n cryptocompare_str = self.identifier if self.cryptocompare is None else self.cryptocompare\n # There is an asset which should not be queried in cryptocompare\n if cryptocompare_str == '':\n raise UnsupportedAsset(f'{self.identifier} is not supported by cryptocompare')\n\n # Seems cryptocompare capitalizes everything. 
So cDAI -> CDAI\n return cryptocompare_str.upper()\n\n def to_coingecko(self) -> str:\n \"\"\"Returns the symbol with which to query coingecko for the asset\n\n May raise:\n - UnsupportedAsset() if the asset is not supported by coingecko\n \"\"\"\n coingecko_str = self.identifier if self.coingecko is None else self.coingecko\n # There is an asset which should not be queried in cryptocompare\n if coingecko_str == '':\n raise UnsupportedAsset(f'{self.identifier} is not supported by coingecko')\n return coingecko_str\n\n def has_coingecko(self) -> bool:\n return self.coingecko is not None and self.coingecko != ''\n\n def __hash__(self) -> int:\n return hash(self.identifier)\n\n def __eq__(self, other: Any) -> bool:\n if other is None:\n return False\n\n if isinstance(other, Asset):\n return self.identifier == other.identifier\n elif isinstance(other, str):\n return self.identifier == other\n else:\n raise ValueError(f'Invalid comparison of asset with {type(other)}')\n\n def __ne__(self, other: Any) -> bool:\n return not self.__eq__(other)\n\n def __lt__(self, other: Any) -> bool:\n if isinstance(other, Asset):\n return self.identifier < other.identifier\n elif isinstance(other, str):\n return self.identifier < other\n else:\n raise ValueError(f'Invalid comparison of asset with {type(other)}')\n\n\n@dataclass(init=True, repr=True, eq=False, order=False, unsafe_hash=False, frozen=True)\nclass HasEthereumToken(Asset):\n \"\"\" Marker to denote assets having an Ethereum token address \"\"\"\n ethereum_address: ChecksumEthAddress = field(init=False)\n decimals: int = field(init=False)\n\n def __post_init__(self) -> None:\n super().__post_init__()\n data = AssetResolver().get_asset_data(self.identifier) # pylint: disable=no-member\n\n if not data.ethereum_address:\n raise DeserializationError(\n 'Tried to initialize a non Ethereum asset as Ethereum Token',\n )\n\n object.__setattr__(self, 'ethereum_address', data.ethereum_address)\n object.__setattr__(self, 'decimals', data.decimals)\n\n\n# Create a generic variable that can be 'EthereumToken', or any subclass.\nT = TypeVar('T', bound='EthereumToken')\n\n\n@dataclass(init=True, repr=True, eq=False, order=False, unsafe_hash=False, frozen=True)\nclass EthereumToken(HasEthereumToken):\n\n def token_info(self) -> EthTokenInfo:\n return EthTokenInfo(\n identifier=self.identifier,\n address=self.ethereum_address,\n symbol=self.symbol,\n name=self.name,\n decimals=self.decimals,\n )\n\n @classmethod\n def from_asset(cls: Type[T], asset: Asset) -> Optional[T]:\n \"\"\"Attempts to turn an asset into an EthereumToken. If it fails returns None\"\"\"\n try:\n return cls(asset.identifier)\n except DeserializationError:\n return None\n",
"path": "rotkehlchen/assets/asset.py"
}
] | [
{
"content": "from dataclasses import dataclass, field\nfrom functools import total_ordering\nfrom typing import Any, Optional, Type, TypeVar\n\nfrom rotkehlchen.assets.resolver import AssetResolver\nfrom rotkehlchen.errors import DeserializationError, UnknownAsset, UnsupportedAsset\nfrom rotkehlchen.typing import AssetType, ChecksumEthAddress, EthTokenInfo, Timestamp\n\nWORLD_TO_BITTREX = {\n # In Rotkehlchen Bitswift is BITS-2 but in Bittrex it's BITS\n 'BITS-2': 'BITS',\n # In Rotkehlchen NuBits is USNBT but in Bittrex it's NBT\n 'USNBT': 'NBT',\n # In Rotkehlchen BTM-2 is Bytom but in Bittrex it's BTM\n 'BTM-2': 'BTM',\n # In Rotkehlchen PAI-2 is PCHAIN token but in Bittrex it's PI\n 'PAI-2': 'PI',\n # In Rotkehlchen PLA-2 is Playchip but in Bittrex is PLA\n 'PLA-2': 'PLA',\n # In Rotkehlchen sUSD is Synt USD but in Bittrex it's SUSD\n 'sUSD': 'SUSD',\n # In Rotkehlchen LUNA-2 is Terra Luna but in Bittrex it's LUNA\n 'LUNA-2': 'LUNA',\n # In Rotkehlchen WorldWideAssetExchange is WAX but in Bittrex it's WASP\n 'WAX': 'WAXP',\n}\n\nWORLD_TO_POLONIEX = {\n # AIR-2 is aircoin for us and AIR is airtoken. Poloniex has only aircoin\n 'AIR-2': 'AIR',\n # Decentr is DEC-2 for us but DEC in Poloniex\n 'DEC-2': 'DEC',\n # Poloniex delisted BCH and listed it as BCHABC after the Bitcoin Cash\n # ABC / SV fork. In Rotkehlchen we consider BCH to be the same as BCHABC\n 'BCH': 'BCHABC',\n # Poloniex has the BCH Fork, Bitcoin Satoshi's vision listed as BCHSV.\n # We know it as BSV\n 'BSV': 'BCHSV',\n # Caishen is known as CAI in Poloniex. This is before the swap to CAIX\n 'CAIX': 'CAI',\n # CCN is Cannacoin in Poloniex but in Rotkehlchen we know it as CCN-2\n 'CCN-2': 'CCN',\n # CCN is CustomContractNetwork in Rotkehlchen but does not exist in Cryptocompare\n # Putting it as conversion to make sure we don't accidentally ask for wrong price\n 'CCN': '',\n 'cUSDT': 'CUSDT',\n # Faircoin is known as FAIR outside of Poloniex. 
Seems to be the same as the\n # now delisted Poloniex's FAC if you look at the bitcointalk announcement\n # https://bitcointalk.org/index.php?topic=702675.0\n 'FAIR': 'FAC',\n # KeyCoin in Poloniex is KEY but in Rotkehlchen it's KEY-3\n 'KEY-3': 'KEY',\n # Mazacoin in Poloniex is MZC but in Rotkehlchen it's MAZA\n 'MAZA': 'MZC',\n # Myriadcoin in Poloniex is MYR but in Rotkehlchen it's XMY\n 'XMY': 'MYR',\n # NuBits in Poloniex is NBT but in Rotkehlchen it's USNBT\n 'USNBT': 'NBT',\n # Stellar is XLM everywhere, apart from Poloniex\n 'XLM': 'STR',\n # Poloniex still has the old name WC for WhiteCoin\n 'XWC': 'WC',\n}\n\nWORLD_TO_KRAKEN = {\n 'ATOM': 'ATOM',\n 'ALGO': 'ALGO',\n 'AUD': 'ZAUD',\n 'BAT': 'BAT',\n 'COMP': 'COMP',\n 'DOT': 'DOT',\n 'KAVA': 'KAVA',\n 'KNC': 'KNC',\n 'LINK': 'LINK',\n 'BSV': 'BSV',\n 'ETC': 'XETC',\n 'ETH': 'XETH',\n 'LTC': 'XLTC',\n 'REP': 'XREP',\n 'BTC': 'XXBT',\n 'XMR': 'XXMR',\n 'XRP': 'XXRP',\n 'ZEC': 'XZEC',\n 'EUR': 'ZEUR',\n 'USD': 'ZUSD',\n 'GBP': 'ZGBP',\n 'CAD': 'ZCAD',\n 'JPY': 'ZJPY',\n 'CHF': 'CHF',\n 'KRW': 'ZKRW',\n 'REPV2': 'REPV2',\n 'DAO': 'XDAO',\n 'MLN': 'XMLN',\n 'ICN': 'XICN',\n 'GNO': 'GNO',\n 'BCH': 'BCH',\n 'XLM': 'XXLM',\n 'DASH': 'DASH',\n 'EOS': 'EOS',\n 'USDC': 'USDC',\n 'USDT': 'USDT',\n 'KFEE': 'KFEE',\n 'ADA': 'ADA',\n 'QTUM': 'QTUM',\n 'NMC': 'XNMC',\n 'VEN': 'XXVN',\n 'DOGE': 'XXDG',\n 'DAI': 'DAI',\n 'XTZ': 'XTZ',\n 'WAVES': 'WAVES',\n 'ICX': 'ICX',\n 'NANO': 'NANO',\n 'OMG': 'OMG',\n 'SC': 'SC',\n 'PAXG': 'PAXG',\n 'LSK': 'LSK',\n 'TRX': 'TRX',\n 'OXT': 'OXT',\n 'STORJ': 'STORJ',\n 'BAL': 'BAL',\n 'KSM': 'KSM',\n 'CRV': 'CRV',\n 'SNX': 'SNX',\n 'FIL': 'FIL',\n 'UNI': 'UNI',\n 'YFI': 'YFI',\n 'ANT': 'ANT',\n 'KEEP': 'KEEP',\n 'TBTC': 'TBTC',\n}\n\nWORLD_TO_BINANCE = {\n # When BCH forked to BCHABC and BCHSV, binance renamed the original to ABC\n 'BCH': 'BCHABC',\n 'BSV': 'BCHSV',\n # ETHOS is known as BQX in Binance\n 'ETHOS': 'BQX',\n # GXChain is GXS in Binance but GXC in Rotkehlchen\n 'GXC': 'GXS',\n # Luna Terra is LUNA-2 in rotki\n 'LUNA-2': 'LUNA',\n # YOYOW is known as YOYO in Binance\n 'YOYOW': 'YOYO',\n # Solana is SOL-2 in rotki\n 'SOL-2': 'SOL',\n}\n\n\n@total_ordering\n@dataclass(init=True, repr=True, eq=False, order=False, unsafe_hash=False, frozen=True)\nclass Asset():\n identifier: str\n name: str = field(init=False)\n symbol: str = field(init=False)\n active: bool = field(init=False)\n asset_type: AssetType = field(init=False)\n started: Timestamp = field(init=False)\n ended: Optional[Timestamp] = field(init=False)\n forked: Optional[str] = field(init=False)\n swapped_for: Optional[str] = field(init=False)\n # None means no special mapping. 
'' means not supported\n cryptocompare: Optional[str] = field(init=False)\n coingecko: Optional[str] = field(init=False)\n\n def __post_init__(self) -> None:\n \"\"\"\n Asset post initialization\n\n The only thing that is given to initialize an asset is a string.\n\n If a non string is given then it's probably a deserialization error or\n invalid data were given to us by the server if an API was queried.\n \"\"\"\n if not isinstance(self.identifier, str):\n raise DeserializationError(\n 'Tried to initialize an asset out of a non-string identifier',\n )\n\n canonical_id = AssetResolver().is_identifier_canonical(self.identifier)\n if canonical_id is None:\n raise UnknownAsset(self.identifier)\n # else let's make sure we got the canonical id in our data struct\n object.__setattr__(self, 'identifier', canonical_id)\n\n data = AssetResolver().get_asset_data(self.identifier)\n # Ugly hack to set attributes of a frozen data class as post init\n # https://docs.python.org/3/library/dataclasses.html#frozen-instances\n object.__setattr__(self, 'name', data.name)\n object.__setattr__(self, 'symbol', data.symbol)\n object.__setattr__(self, 'active', data.active)\n object.__setattr__(self, 'asset_type', data.asset_type)\n object.__setattr__(self, 'started', data.started)\n object.__setattr__(self, 'ended', data.ended)\n object.__setattr__(self, 'forked', data.forked)\n object.__setattr__(self, 'swapped_for', data.swapped_for)\n object.__setattr__(self, 'cryptocompare', data.cryptocompare)\n object.__setattr__(self, 'coingecko', data.coingecko)\n\n def serialize(self) -> str:\n return self.identifier\n\n def is_fiat(self) -> bool:\n return self.asset_type == AssetType.FIAT\n\n def is_eth_token(self) -> bool:\n return self.asset_type in (AssetType.ETH_TOKEN, AssetType.ETH_TOKEN_AND_MORE)\n\n def __str__(self) -> str:\n return self.name\n\n def __repr__(self) -> str:\n return f'<Asset identifier:{self.identifier} name:{self.name} symbol:{self.symbol}>'\n\n def to_kraken(self) -> str:\n return WORLD_TO_KRAKEN[self.identifier]\n\n def to_bittrex(self) -> str:\n return WORLD_TO_BITTREX.get(self.identifier, self.identifier)\n\n def to_binance(self) -> str:\n return WORLD_TO_BINANCE.get(self.identifier, self.identifier)\n\n def to_cryptocompare(self) -> str:\n \"\"\"Returns the symbol with which to query cryptocompare for the asset\n\n May raise:\n - UnsupportedAsset() if the asset is not supported by cryptocompare\n \"\"\"\n cryptocompare_str = self.identifier if self.cryptocompare is None else self.cryptocompare\n # There is an asset which should not be queried in cryptocompare\n if cryptocompare_str == '':\n raise UnsupportedAsset(f'{self.identifier} is not supported by cryptocompare')\n\n # Seems cryptocompare capitalizes everything. 
So cDAI -> CDAI\n return cryptocompare_str.upper()\n\n def to_coingecko(self) -> str:\n \"\"\"Returns the symbol with which to query coingecko for the asset\n\n May raise:\n - UnsupportedAsset() if the asset is not supported by coingecko\n \"\"\"\n coingecko_str = self.identifier if self.coingecko is None else self.coingecko\n # There is an asset which should not be queried in cryptocompare\n if coingecko_str == '':\n raise UnsupportedAsset(f'{self.identifier} is not supported by coingecko')\n return coingecko_str\n\n def has_coingecko(self) -> bool:\n return self.coingecko is not None and self.coingecko != ''\n\n def __hash__(self) -> int:\n return hash(self.identifier)\n\n def __eq__(self, other: Any) -> bool:\n if other is None:\n return False\n\n if isinstance(other, Asset):\n return self.identifier == other.identifier\n elif isinstance(other, str):\n return self.identifier == other\n else:\n raise ValueError(f'Invalid comparison of asset with {type(other)}')\n\n def __ne__(self, other: Any) -> bool:\n return not self.__eq__(other)\n\n def __lt__(self, other: Any) -> bool:\n if isinstance(other, Asset):\n return self.identifier < other.identifier\n elif isinstance(other, str):\n return self.identifier < other\n else:\n raise ValueError(f'Invalid comparison of asset with {type(other)}')\n\n\n@dataclass(init=True, repr=True, eq=False, order=False, unsafe_hash=False, frozen=True)\nclass HasEthereumToken(Asset):\n \"\"\" Marker to denote assets having an Ethereum token address \"\"\"\n ethereum_address: ChecksumEthAddress = field(init=False)\n decimals: int = field(init=False)\n\n def __post_init__(self) -> None:\n super().__post_init__()\n data = AssetResolver().get_asset_data(self.identifier) # pylint: disable=no-member\n\n if not data.ethereum_address:\n raise DeserializationError(\n 'Tried to initialize a non Ethereum asset as Ethereum Token',\n )\n\n object.__setattr__(self, 'ethereum_address', data.ethereum_address)\n object.__setattr__(self, 'decimals', data.decimals)\n\n\n# Create a generic variable that can be 'EthereumToken', or any subclass.\nT = TypeVar('T', bound='EthereumToken')\n\n\n@dataclass(init=True, repr=True, eq=False, order=False, unsafe_hash=False, frozen=True)\nclass EthereumToken(HasEthereumToken):\n\n def token_info(self) -> EthTokenInfo:\n return EthTokenInfo(\n identifier=self.identifier,\n address=self.ethereum_address,\n symbol=self.symbol,\n name=self.name,\n decimals=self.decimals,\n )\n\n @classmethod\n def from_asset(cls: Type[T], asset: Asset) -> Optional[T]:\n \"\"\"Attempts to turn an asset into an EthereumToken. If it fails returns None\"\"\"\n try:\n return cls(asset.identifier)\n except DeserializationError:\n return None\n",
"path": "rotkehlchen/assets/asset.py"
}
] | diff --git a/docs/changelog.rst b/docs/changelog.rst
index 1b3713bb6a..0542817bbb 100755
--- a/docs/changelog.rst
+++ b/docs/changelog.rst
@@ -2,6 +2,7 @@
Changelog
=========
+* :bug:`1868` Binance SOL token is now properly mapped to Solana.
* :bug:`1849` Binance queries should no longer randomly fail with invalid signature.
* :bug:`1846` AMPL token balance should no longer be double counted.
diff --git a/rotkehlchen/assets/asset.py b/rotkehlchen/assets/asset.py
index 7f868ae1ae..58c37e0b83 100644
--- a/rotkehlchen/assets/asset.py
+++ b/rotkehlchen/assets/asset.py
@@ -141,6 +141,8 @@
'LUNA-2': 'LUNA',
# YOYOW is known as YOYO in Binance
'YOYOW': 'YOYO',
+ # Solana is SOL-2 in rotki
+ 'SOL-2': 'SOL',
}
diff --git a/rotkehlchen/tests/api/test_aave.py b/rotkehlchen/tests/api/test_aave.py
index 5f0aa485e4..98fe22ba34 100644
--- a/rotkehlchen/tests/api/test_aave.py
+++ b/rotkehlchen/tests/api/test_aave.py
@@ -206,7 +206,7 @@ def _query_borrowing_aave_history_test(setup: BalancesTestSetup, server: APIServ
total_lost = result[AAVE_TEST_ACC_3]['total_lost']
total_earned_liquidations = result[AAVE_TEST_ACC_3]['total_earned_liquidations']
- assert len(total_earned_interest) == 1
+ assert len(total_earned_interest) >= 1
assert len(total_earned_interest['aWBTC']) == 2
assert FVal(total_earned_interest['aWBTC']['amount']) >= FVal('0.00000833')
assert FVal(total_earned_interest['aWBTC']['usd_value']) >= ZERO
diff --git a/rotkehlchen/tests/api/test_balances.py b/rotkehlchen/tests/api/test_balances.py
index dc398bfdfb..7b714ebb7e 100644
--- a/rotkehlchen/tests/api/test_balances.py
+++ b/rotkehlchen/tests/api/test_balances.py
@@ -13,6 +13,7 @@
from rotkehlchen.constants.misc import ZERO
from rotkehlchen.fval import FVal
from rotkehlchen.tests.utils.api import (
+ ASYNC_TASK_WAIT_TIMEOUT,
api_url_for,
assert_error_response,
assert_ok_async_response,
@@ -580,8 +581,9 @@ def test_balances_caching_mixup(
task_id_btc,
)
result_eth = wait_for_async_task_with_result(
- rotkehlchen_api_server,
- task_id_eth,
+ server=rotkehlchen_api_server,
+ task_id=task_id_eth,
+ timeout=ASYNC_TASK_WAIT_TIMEOUT * 2,
)
assert result_eth['per_account']['ETH'][ethereum_accounts[0]]['assets']['ETH']['amount'] == '1' # noqa: E501
assert result_eth['per_account']['ETH'][ethereum_accounts[0]]['assets']['RDN']['amount'] == '2' # noqa: E501
diff --git a/rotkehlchen/tests/api/test_blockchain.py b/rotkehlchen/tests/api/test_blockchain.py
index 9db8805804..0a66a26cc3 100644
--- a/rotkehlchen/tests/api/test_blockchain.py
+++ b/rotkehlchen/tests/api/test_blockchain.py
@@ -346,7 +346,7 @@ def _add_blockchain_accounts_test_start(
result = wait_for_async_task_with_result(
api_server,
task_id,
- timeout=ASYNC_TASK_WAIT_TIMEOUT * 2,
+ timeout=ASYNC_TASK_WAIT_TIMEOUT * 4,
)
else:
result = assert_proper_response_with_result(response)
|
ivy-llc__ivy-14488 | conj
| [
{
"content": "# global\nfrom typing import Any\nimport itertools\nimport string\nfrom builtins import slice as py_slice\n\n# local\nimport ivy\nfrom ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back\n\n\n@to_ivy_arrays_and_back\ndef abs(x):\n return ivy.abs(x)\n\n\n@to_ivy_arrays_and_back\ndef acos(x):\n return ivy.acos(x)\n\n\n@to_ivy_arrays_and_back\ndef add(x, y):\n return ivy.add(x, y)\n\n\n@to_ivy_arrays_and_back\ndef argmax(operand, axis, index_dtype):\n return ivy.astype(ivy.argmax(operand, axis=axis), index_dtype)\n\n\n@to_ivy_arrays_and_back\ndef argmin(operand, axis, index_dtype):\n return ivy.astype(ivy.argmin(operand, axis=axis), index_dtype)\n\n\n@to_ivy_arrays_and_back\ndef asin(x):\n return ivy.asin(x)\n\n\n@to_ivy_arrays_and_back\ndef atan(x):\n return ivy.atan(x)\n\n\n@to_ivy_arrays_and_back\ndef atan2(x, y):\n return ivy.atan2(x, y)\n\n\n@to_ivy_arrays_and_back\ndef bitwise_and(x, y):\n return ivy.bitwise_and(x, y)\n\n\n@to_ivy_arrays_and_back\ndef bitwise_not(x):\n return ivy.bitwise_invert(x)\n\n\n@to_ivy_arrays_and_back\ndef bitwise_or(x, y):\n return ivy.bitwise_or(x, y)\n\n\n@to_ivy_arrays_and_back\ndef bitwise_xor(x, y):\n return ivy.bitwise_xor(x, y)\n\n\n@to_ivy_arrays_and_back\ndef broadcast(operand, sizes):\n ret = ivy.zeros(tuple(sizes) + tuple(ivy.shape(operand)), dtype=ivy.dtype(operand))\n return ret + operand\n\n\n@to_ivy_arrays_and_back\ndef ceil(x):\n return ivy.ceil(x)\n\n\n@to_ivy_arrays_and_back\ndef clamp(min, x, max):\n return ivy.clip(x, min, max)\n\n\n@to_ivy_arrays_and_back\ndef concatenate(operands, dimension):\n return ivy.concat(operands, axis=dimension)\n\n\n@to_ivy_arrays_and_back\ndef conv(\n lhs, rhs, window_strides, padding, precision=None, preferred_element_type=None\n):\n if preferred_element_type:\n lhs = ivy.astype(lhs, preferred_element_type)\n rhs = ivy.astype(rhs, preferred_element_type)\n dims = len(lhs.shape) - 2\n rhs = ivy.permute_dims(rhs, axes=(*range(2, dims + 2), 1, 0))\n return ivy.conv_general_dilated(\n lhs,\n rhs,\n window_strides,\n padding,\n dims=dims,\n data_format=\"channel_first\",\n )\n\n\ndef _dimension_numbers(dimension_numbers, lhs_len, transp=False):\n if dimension_numbers is None:\n if transp:\n iota = (0, lhs_len - 1, *range(1, lhs_len - 1))\n iotb = (lhs_len - 1, lhs_len - 2, *range(0, lhs_len - 2))\n return iota, iotb, iota\n else:\n iota = tuple(range(lhs_len))\n return iota, iota, iota\n elif isinstance(dimension_numbers[0], (tuple, list)):\n return dimension_numbers\n else:\n lhs_spec, rhs_spec, out_spec = dimension_numbers\n\n def getperm(spec, charpair):\n spatial = (i for i, c in enumerate(spec) if c not in charpair)\n if spec is not rhs_spec:\n spatial = sorted(spatial, key=lambda i: rhs_spec.index(spec[i]))\n return (spec.index(charpair[0]), spec.index(charpair[1])) + tuple(spatial)\n\n charpairs = (\"N\", \"C\"), (\"O\", \"I\"), (\"N\", \"C\")\n lhs_spec, rhs_spec, out_spec = map(getperm, dimension_numbers, charpairs)\n return lhs_spec, rhs_spec, out_spec\n\n\ndef _argsort_tuple(the_tuple):\n return tuple([i for i, _ in sorted(enumerate(the_tuple), key=lambda x: x[1])])\n\n\ndef _conv_transpose_padding(k, s, padding):\n if padding == \"SAME\":\n pad_len = k + s - 2\n if s > k - 1:\n pad_a = k - 1\n else:\n pad_a = int(ivy.to_scalar(ivy.ceil(pad_len / 2)))\n elif padding == \"VALID\":\n pad_len = k + s - 2 + ivy.to_scalar(ivy.maximum(k - s, 0))\n pad_a = k - 1\n else:\n raise ValueError(\"Padding mode must be `SAME` or `VALID`.\")\n pad_b = pad_len - pad_a\n return pad_a, 
pad_b\n\n\n@to_ivy_arrays_and_back\ndef conv_transpose(\n lhs,\n rhs,\n strides,\n padding,\n rhs_dilation=None,\n dimension_numbers=None,\n transpose_kernel=False,\n precision=None,\n preferred_element_type=None,\n):\n # TODO: add support for transpose_kernel\n if preferred_element_type:\n lhs = ivy.astype(lhs, preferred_element_type)\n rhs = ivy.astype(rhs, preferred_element_type)\n dims = len(lhs.shape) - 2\n dim_nums = _dimension_numbers(dimension_numbers, dims + 2, transp=True)\n rhs_spec = tuple([dim_nums[1][i] for i in (*range(2, dims + 2), 1, 0)])\n rhs_dilation = 1 if rhs_dilation is None else rhs_dilation\n if isinstance(padding, str):\n k_sdims = [rhs.shape[i] for i in rhs_spec[:-2]]\n effective_k_size = map(lambda k, r: (k - 1) * r + 1, k_sdims, rhs_dilation)\n padding = [\n _conv_transpose_padding(k, s, padding)\n for k, s in zip(effective_k_size, strides)\n ]\n return ivy.permute_dims(\n ivy.conv_general_dilated(\n ivy.permute_dims(lhs, axes=dim_nums[0]),\n ivy.permute_dims(rhs, axes=rhs_spec),\n 1,\n padding,\n dilations=rhs_dilation,\n x_dilations=strides,\n dims=dims,\n data_format=\"channel_first\",\n ),\n axes=_argsort_tuple(dim_nums[2]),\n )\n\n\n@to_ivy_arrays_and_back\ndef conv_general_dilated(\n lhs,\n rhs,\n window_strides,\n padding,\n lhs_dilation=None,\n rhs_dilation=None,\n dimension_numbers=None,\n feature_group_count=1,\n batch_group_count=1,\n precision=None,\n preferred_element_type=None,\n):\n # TODO: add support for batch_group_count\n if preferred_element_type:\n lhs = ivy.astype(lhs, preferred_element_type)\n rhs = ivy.astype(rhs, preferred_element_type)\n dims = len(lhs.shape) - 2\n dim_nums = _dimension_numbers(dimension_numbers, dims + 2)\n rhs_spec = tuple([dim_nums[1][i] for i in (*range(2, dims + 2), 1, 0)])\n return ivy.permute_dims(\n ivy.conv_general_dilated(\n ivy.permute_dims(lhs, axes=dim_nums[0]),\n ivy.permute_dims(rhs, axes=rhs_spec),\n window_strides,\n padding,\n dims=dims,\n data_format=\"channel_first\",\n x_dilations=1 if lhs_dilation is None else lhs_dilation,\n dilations=1 if rhs_dilation is None else rhs_dilation,\n feature_group_count=feature_group_count,\n ),\n axes=_argsort_tuple(dim_nums[2]),\n )\n\n\n@to_ivy_arrays_and_back\ndef convert_element_type(operand, new_dtype):\n return ivy.astype(operand, new_dtype, copy=False)\n\n\n@to_ivy_arrays_and_back\ndef cos(x):\n return ivy.cos(x)\n\n\n@to_ivy_arrays_and_back\ndef cosh(x):\n return ivy.cosh(x)\n\n\n@to_ivy_arrays_and_back\ndef cumprod(operand, axis=None, reverse=False):\n dtype = ivy.dtype(operand)\n return ivy.cumprod(operand, axis=axis, reverse=reverse).astype(dtype)\n\n\n@to_ivy_arrays_and_back\ndef cumsum(operand, axis=None, reverse=False):\n if reverse:\n return ivy.flip(ivy.cumsum(ivy.flip(operand), axis=axis, dtype=operand.dtype))\n return ivy.cumsum(operand, axis=axis, dtype=operand.dtype)\n\n\n@to_ivy_arrays_and_back\ndef div(x, y):\n return ivy.astype(ivy.divide(x, y), x.dtype)\n\n\n@to_ivy_arrays_and_back\ndef dot(lhs, rhs, precision=None, preferred_element_type=None):\n ret = ivy.matmul(lhs, rhs)\n if preferred_element_type:\n ret = ivy.astype(ret, preferred_element_type, copy=False)\n return ret\n\n\n@to_ivy_arrays_and_back\ndef dot_general(\n lhs, rhs, dimension_numbers, precision=None, preferred_element_type=None\n):\n (lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch) = dimension_numbers\n ivy.utils.assertions.check_less(\n len(lhs.shape), 52, \"number of dimensions greater than 52 is not supported\"\n )\n new_id = itertools.count()\n lhs_axis_ids = 
[next(new_id) for _ in lhs.shape]\n rhs_axis_ids = [next(new_id) for _ in rhs.shape]\n lhs_out_axis_ids = lhs_axis_ids[:]\n rhs_out_axis_ids = rhs_axis_ids[:]\n for lhs_axis, rhs_axis in zip(lhs_contracting, rhs_contracting):\n shared_id = next(new_id)\n lhs_axis_ids[lhs_axis] = shared_id\n rhs_axis_ids[rhs_axis] = shared_id\n lhs_out_axis_ids[lhs_axis] = None\n rhs_out_axis_ids[rhs_axis] = None\n batch_ids = []\n for lhs_axis, rhs_axis in zip(lhs_batch, rhs_batch):\n shared_id = next(new_id)\n lhs_axis_ids[lhs_axis] = shared_id\n rhs_axis_ids[rhs_axis] = shared_id\n lhs_out_axis_ids[lhs_axis] = None\n rhs_out_axis_ids[rhs_axis] = None\n batch_ids.append(shared_id)\n out_axis_ids = list(\n filter(lambda x: x is not None, batch_ids + lhs_out_axis_ids + rhs_out_axis_ids)\n )\n char_list = [*string.ascii_letters]\n lhs_axis_ids = \"\".join(str(char_list[i]) for i in lhs_axis_ids)\n rhs_axis_ids = \"\".join(str(char_list[i]) for i in rhs_axis_ids)\n out_axis_ids = \"\".join(str(char_list[i]) for i in out_axis_ids)\n equ_str = f\"{lhs_axis_ids},{rhs_axis_ids}->{out_axis_ids}\"\n ret = ivy.einsum(equ_str, lhs, rhs)\n if preferred_element_type:\n ret = ivy.astype(ret, preferred_element_type, copy=False)\n return ret\n\n\n@to_ivy_arrays_and_back\ndef eq(x, y):\n return ivy.equal(x, y)\n\n\n@to_ivy_arrays_and_back\ndef erf(x):\n return ivy.erf(x)\n\n\n@to_ivy_arrays_and_back\ndef exp(x):\n return ivy.exp(x)\n\n\n@to_ivy_arrays_and_back\ndef expand_dims(array, dimensions):\n return ivy.expand_dims(array, axis=dimensions)\n\n\n@to_ivy_arrays_and_back\ndef expm1(x):\n return ivy.expm1(x)\n\n\n@to_ivy_arrays_and_back\ndef full(shape, fill_value, dtype=None):\n return ivy.full(shape, fill_value, dtype=dtype)\n\n\n@to_ivy_arrays_and_back\ndef full_like(x, fill_value, dtype=None, shape=None):\n if shape is None:\n return ivy.full_like(x, fill_value, dtype=dtype)\n return ivy.full(shape, fill_value, dtype=dtype)\n\n\n@to_ivy_arrays_and_back\ndef ge(x, y):\n return ivy.greater_equal(x, y)\n\n\n@to_ivy_arrays_and_back\ndef gt(x, y):\n return ivy.greater(x, y)\n\n\n@to_ivy_arrays_and_back\ndef le(x, y):\n return ivy.less_equal(x, y)\n\n\n@to_ivy_arrays_and_back\ndef log(x):\n return ivy.log(x)\n\n\n@to_ivy_arrays_and_back\ndef log1p(x):\n return ivy.log1p(x)\n\n\n@to_ivy_arrays_and_back\ndef lt(x, y):\n return ivy.less(x, y)\n\n\n@to_ivy_arrays_and_back\ndef max(x: Any, y: Any):\n return ivy.maximum(x, y)\n\n\n@to_ivy_arrays_and_back\ndef min(x, y):\n return ivy.minimum(x, y)\n\n\n@to_ivy_arrays_and_back\ndef mul(x, y):\n return ivy.multiply(x, y)\n\n\n@to_ivy_arrays_and_back\ndef ne(x, y):\n return ivy.not_equal(x, y)\n\n\n@to_ivy_arrays_and_back\ndef neg(x):\n return ivy.negative(x)\n\n\n@to_ivy_arrays_and_back\ndef pow(x, y):\n return ivy.pow(x, y)\n\n\n@to_ivy_arrays_and_back\ndef reciprocal(x):\n return ivy.reciprocal(x)\n\n\n@to_ivy_arrays_and_back\ndef rem(x, y):\n return ivy.remainder(ivy.abs(x), ivy.abs(y)) * ivy.sign(x)\n\n\n@to_ivy_arrays_and_back\ndef reshape(operand, new_sizes, dimensions=None):\n if dimensions:\n operand = ivy.permute_dims(operand, dimensions)\n return ivy.reshape(operand, new_sizes)\n\n\n@to_ivy_arrays_and_back\ndef rev(operand, dimensions):\n return ivy.flip(operand, axis=dimensions)\n\n\n@to_ivy_arrays_and_back\ndef round(x, rounding_method=1):\n if rounding_method == 0:\n ret = ivy.where(\n ivy.less(x, 0),\n ivy.ceil(x) - (ivy.ceil(x) - ivy.floor(x)),\n ivy.ceil(x),\n )\n elif rounding_method == 1:\n ret = ivy.ceil(x)\n ret = ivy.where(ivy.remainder(ret, 2) == 0, ret, ret - 
1)\n return ivy.where(ivy.abs(x - ivy.floor(x) - 0.5) < 1e-7, ret, ivy.round(x))\n\n\n@to_ivy_arrays_and_back\ndef rsqrt(x):\n return ivy.reciprocal(ivy.sqrt(x))\n\n\n@to_ivy_arrays_and_back\ndef shift_left(x, y):\n return ivy.bitwise_left_shift(x, y)\n\n\n@to_ivy_arrays_and_back\ndef sign(x):\n return ivy.sign(x)\n\n\n@to_ivy_arrays_and_back\ndef sin(x):\n return ivy.sin(x)\n\n\n@to_ivy_arrays_and_back\ndef sinh(x):\n return ivy.sinh(x)\n\n\n@to_ivy_arrays_and_back\ndef slice(operand, start_indices, limit_indices, strides=None):\n if operand.ndim != len(start_indices):\n msg = (\n \"slice start_indices must have length equal to the number of \"\n \"dimensions of the operand, got indices {} for operand shape {}.\"\n )\n raise TypeError(msg.format(start_indices, operand.shape))\n\n if len(start_indices) != len(limit_indices):\n msg = (\n \"slice limit_indices must have the same length as start_indices, \"\n \"got start_indices {} and limit_indices {}.\"\n )\n raise TypeError(msg.format(start_indices, limit_indices))\n\n if not tuple(limit_indices) <= operand.shape:\n msg = (\n \"slice limit_indices must be less than or equal to operand shape, \"\n \"got limit_indices {} for operand shape {}.\"\n )\n raise TypeError(msg.format(limit_indices, operand.shape))\n\n if not all(si >= 0 for si in start_indices):\n msg = (\n \"slice start_indices must be greater than or equal to zero, \"\n \"got start_indices of {}.\"\n )\n raise TypeError(msg.format(start_indices))\n\n if not limit_indices >= start_indices:\n msg = (\n \"slice limit_indices must be greater than or equal to start_indices,\"\n \" got start_indices {} and limit_indices {}.\"\n )\n raise TypeError(msg.format(start_indices, limit_indices))\n\n start_indices, limit_indices = map(\n lambda x: ivy.array(x) if isinstance(x, int) else x,\n [start_indices, limit_indices],\n )\n strides = [1] * len(operand.shape) if strides is None else strides\n\n full_slice = ()\n for i, _ in enumerate(operand.shape):\n strides_i = int(strides[i])\n start_i = int(start_indices[i])\n limit_i = int(limit_indices[i])\n full_slice += (py_slice(start_i, limit_i, strides_i),)\n ret = operand[full_slice] if full_slice else operand\n\n return ivy.expand_dims(ret)\n\n\n@to_ivy_arrays_and_back\ndef slice_in_dim(operand, start_index, limit_index, stride=1, axis=0):\n start_indices = [0] * operand.ndim\n limit_indices = list(operand.shape)\n strides = [1] * operand.ndim\n\n len_axis = operand.shape[axis]\n start_index_int = start_index if start_index is not None else 0\n limit_index_int = limit_index if limit_index is not None else len_axis\n\n if start_index_int < 0:\n start_index_int = start_index_int + len_axis\n if limit_index_int < 0:\n limit_index_int = limit_index_int + len_axis\n\n axis = int(axis)\n start_indices[axis] = start_index_int\n limit_indices[axis] = limit_index_int\n strides[axis] = int(stride)\n return slice(operand, start_indices, limit_indices, strides)\n\n\n@to_ivy_arrays_and_back\ndef sort(operand, dimension=-1, is_stable=True, num_keys=1):\n return ivy.sort(operand, axis=dimension, stable=is_stable)\n\n\n@to_ivy_arrays_and_back\ndef sqrt(x):\n return ivy.sqrt(x)\n\n\n@to_ivy_arrays_and_back\ndef square(x):\n return ivy.square(x)\n\n\n@to_ivy_arrays_and_back\ndef sub(x, y):\n return ivy.subtract(x, y)\n\n\n@to_ivy_arrays_and_back\ndef tan(x):\n return ivy.tan(x)\n\n\n@to_ivy_arrays_and_back\ndef transpose(operand, permutation):\n return ivy.permute_dims(operand, permutation)\n\n\n@to_ivy_arrays_and_back\ndef shift_right_logical(x, y):\n return 
ivy.bitwise_right_shift(x, y)\n\n\n@to_ivy_arrays_and_back\ndef asinh(x):\n return ivy.asinh(x)\n\n\n@to_ivy_arrays_and_back\ndef atanh(x):\n return ivy.atanh(x)\n\n\n@to_ivy_arrays_and_back\ndef select(pred, on_true, on_false):\n return ivy.where(pred, on_true, on_false)\n\n\n# top_k\n@to_ivy_arrays_and_back\ndef top_k(operand, k):\n values, indices = ivy.top_k(operand, k, axis=-1)\n indices = ivy.astype(indices, ivy.int32, copy=False)\n return [values, indices]\n\n\n@to_ivy_arrays_and_back\ndef squeeze(array, dimensions):\n return ivy.squeeze(array, dimensions)\n\n\n@to_ivy_arrays_and_back\ndef real(x):\n return ivy.real(x)\n\n\n@to_ivy_arrays_and_back\ndef nextafter(x1, x2):\n return ivy.nextafter(x1, x2)\n",
"path": "ivy/functional/frontends/jax/lax/operators.py"
}
] | [
{
"content": "# global\nfrom typing import Any\nimport itertools\nimport string\nfrom builtins import slice as py_slice\n\n# local\nimport ivy\nfrom ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back\n\n\n@to_ivy_arrays_and_back\ndef abs(x):\n return ivy.abs(x)\n\n\n@to_ivy_arrays_and_back\ndef acos(x):\n return ivy.acos(x)\n\n\n@to_ivy_arrays_and_back\ndef add(x, y):\n return ivy.add(x, y)\n\n\n@to_ivy_arrays_and_back\ndef argmax(operand, axis, index_dtype):\n return ivy.astype(ivy.argmax(operand, axis=axis), index_dtype)\n\n\n@to_ivy_arrays_and_back\ndef argmin(operand, axis, index_dtype):\n return ivy.astype(ivy.argmin(operand, axis=axis), index_dtype)\n\n\n@to_ivy_arrays_and_back\ndef asin(x):\n return ivy.asin(x)\n\n\n@to_ivy_arrays_and_back\ndef atan(x):\n return ivy.atan(x)\n\n\n@to_ivy_arrays_and_back\ndef atan2(x, y):\n return ivy.atan2(x, y)\n\n\n@to_ivy_arrays_and_back\ndef bitwise_and(x, y):\n return ivy.bitwise_and(x, y)\n\n\n@to_ivy_arrays_and_back\ndef bitwise_not(x):\n return ivy.bitwise_invert(x)\n\n\n@to_ivy_arrays_and_back\ndef bitwise_or(x, y):\n return ivy.bitwise_or(x, y)\n\n\n@to_ivy_arrays_and_back\ndef bitwise_xor(x, y):\n return ivy.bitwise_xor(x, y)\n\n\n@to_ivy_arrays_and_back\ndef broadcast(operand, sizes):\n ret = ivy.zeros(tuple(sizes) + tuple(ivy.shape(operand)), dtype=ivy.dtype(operand))\n return ret + operand\n\n\n@to_ivy_arrays_and_back\ndef ceil(x):\n return ivy.ceil(x)\n\n\n@to_ivy_arrays_and_back\ndef clamp(min, x, max):\n return ivy.clip(x, min, max)\n\n\n@to_ivy_arrays_and_back\ndef concatenate(operands, dimension):\n return ivy.concat(operands, axis=dimension)\n\n\n@to_ivy_arrays_and_back\ndef conv(\n lhs, rhs, window_strides, padding, precision=None, preferred_element_type=None\n):\n if preferred_element_type:\n lhs = ivy.astype(lhs, preferred_element_type)\n rhs = ivy.astype(rhs, preferred_element_type)\n dims = len(lhs.shape) - 2\n rhs = ivy.permute_dims(rhs, axes=(*range(2, dims + 2), 1, 0))\n return ivy.conv_general_dilated(\n lhs,\n rhs,\n window_strides,\n padding,\n dims=dims,\n data_format=\"channel_first\",\n )\n\n\ndef _dimension_numbers(dimension_numbers, lhs_len, transp=False):\n if dimension_numbers is None:\n if transp:\n iota = (0, lhs_len - 1, *range(1, lhs_len - 1))\n iotb = (lhs_len - 1, lhs_len - 2, *range(0, lhs_len - 2))\n return iota, iotb, iota\n else:\n iota = tuple(range(lhs_len))\n return iota, iota, iota\n elif isinstance(dimension_numbers[0], (tuple, list)):\n return dimension_numbers\n else:\n lhs_spec, rhs_spec, out_spec = dimension_numbers\n\n def getperm(spec, charpair):\n spatial = (i for i, c in enumerate(spec) if c not in charpair)\n if spec is not rhs_spec:\n spatial = sorted(spatial, key=lambda i: rhs_spec.index(spec[i]))\n return (spec.index(charpair[0]), spec.index(charpair[1])) + tuple(spatial)\n\n charpairs = (\"N\", \"C\"), (\"O\", \"I\"), (\"N\", \"C\")\n lhs_spec, rhs_spec, out_spec = map(getperm, dimension_numbers, charpairs)\n return lhs_spec, rhs_spec, out_spec\n\n\ndef _argsort_tuple(the_tuple):\n return tuple([i for i, _ in sorted(enumerate(the_tuple), key=lambda x: x[1])])\n\n\ndef _conv_transpose_padding(k, s, padding):\n if padding == \"SAME\":\n pad_len = k + s - 2\n if s > k - 1:\n pad_a = k - 1\n else:\n pad_a = int(ivy.to_scalar(ivy.ceil(pad_len / 2)))\n elif padding == \"VALID\":\n pad_len = k + s - 2 + ivy.to_scalar(ivy.maximum(k - s, 0))\n pad_a = k - 1\n else:\n raise ValueError(\"Padding mode must be `SAME` or `VALID`.\")\n pad_b = pad_len - pad_a\n return pad_a, 
pad_b\n\n\n@to_ivy_arrays_and_back\ndef conv_transpose(\n lhs,\n rhs,\n strides,\n padding,\n rhs_dilation=None,\n dimension_numbers=None,\n transpose_kernel=False,\n precision=None,\n preferred_element_type=None,\n):\n # TODO: add support for transpose_kernel\n if preferred_element_type:\n lhs = ivy.astype(lhs, preferred_element_type)\n rhs = ivy.astype(rhs, preferred_element_type)\n dims = len(lhs.shape) - 2\n dim_nums = _dimension_numbers(dimension_numbers, dims + 2, transp=True)\n rhs_spec = tuple([dim_nums[1][i] for i in (*range(2, dims + 2), 1, 0)])\n rhs_dilation = 1 if rhs_dilation is None else rhs_dilation\n if isinstance(padding, str):\n k_sdims = [rhs.shape[i] for i in rhs_spec[:-2]]\n effective_k_size = map(lambda k, r: (k - 1) * r + 1, k_sdims, rhs_dilation)\n padding = [\n _conv_transpose_padding(k, s, padding)\n for k, s in zip(effective_k_size, strides)\n ]\n return ivy.permute_dims(\n ivy.conv_general_dilated(\n ivy.permute_dims(lhs, axes=dim_nums[0]),\n ivy.permute_dims(rhs, axes=rhs_spec),\n 1,\n padding,\n dilations=rhs_dilation,\n x_dilations=strides,\n dims=dims,\n data_format=\"channel_first\",\n ),\n axes=_argsort_tuple(dim_nums[2]),\n )\n\n\n@to_ivy_arrays_and_back\ndef conv_general_dilated(\n lhs,\n rhs,\n window_strides,\n padding,\n lhs_dilation=None,\n rhs_dilation=None,\n dimension_numbers=None,\n feature_group_count=1,\n batch_group_count=1,\n precision=None,\n preferred_element_type=None,\n):\n # TODO: add support for batch_group_count\n if preferred_element_type:\n lhs = ivy.astype(lhs, preferred_element_type)\n rhs = ivy.astype(rhs, preferred_element_type)\n dims = len(lhs.shape) - 2\n dim_nums = _dimension_numbers(dimension_numbers, dims + 2)\n rhs_spec = tuple([dim_nums[1][i] for i in (*range(2, dims + 2), 1, 0)])\n return ivy.permute_dims(\n ivy.conv_general_dilated(\n ivy.permute_dims(lhs, axes=dim_nums[0]),\n ivy.permute_dims(rhs, axes=rhs_spec),\n window_strides,\n padding,\n dims=dims,\n data_format=\"channel_first\",\n x_dilations=1 if lhs_dilation is None else lhs_dilation,\n dilations=1 if rhs_dilation is None else rhs_dilation,\n feature_group_count=feature_group_count,\n ),\n axes=_argsort_tuple(dim_nums[2]),\n )\n\n\n@to_ivy_arrays_and_back\ndef convert_element_type(operand, new_dtype):\n return ivy.astype(operand, new_dtype, copy=False)\n\n\n@to_ivy_arrays_and_back\ndef cos(x):\n return ivy.cos(x)\n\n\n@to_ivy_arrays_and_back\ndef cosh(x):\n return ivy.cosh(x)\n\n\n@to_ivy_arrays_and_back\ndef cumprod(operand, axis=None, reverse=False):\n dtype = ivy.dtype(operand)\n return ivy.cumprod(operand, axis=axis, reverse=reverse).astype(dtype)\n\n\n@to_ivy_arrays_and_back\ndef cumsum(operand, axis=None, reverse=False):\n if reverse:\n return ivy.flip(ivy.cumsum(ivy.flip(operand), axis=axis, dtype=operand.dtype))\n return ivy.cumsum(operand, axis=axis, dtype=operand.dtype)\n\n\n@to_ivy_arrays_and_back\ndef div(x, y):\n return ivy.astype(ivy.divide(x, y), x.dtype)\n\n\n@to_ivy_arrays_and_back\ndef dot(lhs, rhs, precision=None, preferred_element_type=None):\n ret = ivy.matmul(lhs, rhs)\n if preferred_element_type:\n ret = ivy.astype(ret, preferred_element_type, copy=False)\n return ret\n\n\n@to_ivy_arrays_and_back\ndef dot_general(\n lhs, rhs, dimension_numbers, precision=None, preferred_element_type=None\n):\n (lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch) = dimension_numbers\n ivy.utils.assertions.check_less(\n len(lhs.shape), 52, \"number of dimensions greater than 52 is not supported\"\n )\n new_id = itertools.count()\n lhs_axis_ids = 
[next(new_id) for _ in lhs.shape]\n rhs_axis_ids = [next(new_id) for _ in rhs.shape]\n lhs_out_axis_ids = lhs_axis_ids[:]\n rhs_out_axis_ids = rhs_axis_ids[:]\n for lhs_axis, rhs_axis in zip(lhs_contracting, rhs_contracting):\n shared_id = next(new_id)\n lhs_axis_ids[lhs_axis] = shared_id\n rhs_axis_ids[rhs_axis] = shared_id\n lhs_out_axis_ids[lhs_axis] = None\n rhs_out_axis_ids[rhs_axis] = None\n batch_ids = []\n for lhs_axis, rhs_axis in zip(lhs_batch, rhs_batch):\n shared_id = next(new_id)\n lhs_axis_ids[lhs_axis] = shared_id\n rhs_axis_ids[rhs_axis] = shared_id\n lhs_out_axis_ids[lhs_axis] = None\n rhs_out_axis_ids[rhs_axis] = None\n batch_ids.append(shared_id)\n out_axis_ids = list(\n filter(lambda x: x is not None, batch_ids + lhs_out_axis_ids + rhs_out_axis_ids)\n )\n char_list = [*string.ascii_letters]\n lhs_axis_ids = \"\".join(str(char_list[i]) for i in lhs_axis_ids)\n rhs_axis_ids = \"\".join(str(char_list[i]) for i in rhs_axis_ids)\n out_axis_ids = \"\".join(str(char_list[i]) for i in out_axis_ids)\n equ_str = f\"{lhs_axis_ids},{rhs_axis_ids}->{out_axis_ids}\"\n ret = ivy.einsum(equ_str, lhs, rhs)\n if preferred_element_type:\n ret = ivy.astype(ret, preferred_element_type, copy=False)\n return ret\n\n\n@to_ivy_arrays_and_back\ndef eq(x, y):\n return ivy.equal(x, y)\n\n\n@to_ivy_arrays_and_back\ndef erf(x):\n return ivy.erf(x)\n\n\n@to_ivy_arrays_and_back\ndef exp(x):\n return ivy.exp(x)\n\n\n@to_ivy_arrays_and_back\ndef expand_dims(array, dimensions):\n return ivy.expand_dims(array, axis=dimensions)\n\n\n@to_ivy_arrays_and_back\ndef expm1(x):\n return ivy.expm1(x)\n\n\n@to_ivy_arrays_and_back\ndef full(shape, fill_value, dtype=None):\n return ivy.full(shape, fill_value, dtype=dtype)\n\n\n@to_ivy_arrays_and_back\ndef full_like(x, fill_value, dtype=None, shape=None):\n if shape is None:\n return ivy.full_like(x, fill_value, dtype=dtype)\n return ivy.full(shape, fill_value, dtype=dtype)\n\n\n@to_ivy_arrays_and_back\ndef ge(x, y):\n return ivy.greater_equal(x, y)\n\n\n@to_ivy_arrays_and_back\ndef gt(x, y):\n return ivy.greater(x, y)\n\n\n@to_ivy_arrays_and_back\ndef le(x, y):\n return ivy.less_equal(x, y)\n\n\n@to_ivy_arrays_and_back\ndef log(x):\n return ivy.log(x)\n\n\n@to_ivy_arrays_and_back\ndef log1p(x):\n return ivy.log1p(x)\n\n\n@to_ivy_arrays_and_back\ndef lt(x, y):\n return ivy.less(x, y)\n\n\n@to_ivy_arrays_and_back\ndef max(x: Any, y: Any):\n return ivy.maximum(x, y)\n\n\n@to_ivy_arrays_and_back\ndef min(x, y):\n return ivy.minimum(x, y)\n\n\n@to_ivy_arrays_and_back\ndef mul(x, y):\n return ivy.multiply(x, y)\n\n\n@to_ivy_arrays_and_back\ndef ne(x, y):\n return ivy.not_equal(x, y)\n\n\n@to_ivy_arrays_and_back\ndef neg(x):\n return ivy.negative(x)\n\n\n@to_ivy_arrays_and_back\ndef pow(x, y):\n return ivy.pow(x, y)\n\n\n@to_ivy_arrays_and_back\ndef reciprocal(x):\n return ivy.reciprocal(x)\n\n\n@to_ivy_arrays_and_back\ndef rem(x, y):\n return ivy.remainder(ivy.abs(x), ivy.abs(y)) * ivy.sign(x)\n\n\n@to_ivy_arrays_and_back\ndef reshape(operand, new_sizes, dimensions=None):\n if dimensions:\n operand = ivy.permute_dims(operand, dimensions)\n return ivy.reshape(operand, new_sizes)\n\n\n@to_ivy_arrays_and_back\ndef rev(operand, dimensions):\n return ivy.flip(operand, axis=dimensions)\n\n\n@to_ivy_arrays_and_back\ndef round(x, rounding_method=1):\n if rounding_method == 0:\n ret = ivy.where(\n ivy.less(x, 0),\n ivy.ceil(x) - (ivy.ceil(x) - ivy.floor(x)),\n ivy.ceil(x),\n )\n elif rounding_method == 1:\n ret = ivy.ceil(x)\n ret = ivy.where(ivy.remainder(ret, 2) == 0, ret, ret - 
1)\n return ivy.where(ivy.abs(x - ivy.floor(x) - 0.5) < 1e-7, ret, ivy.round(x))\n\n\n@to_ivy_arrays_and_back\ndef rsqrt(x):\n return ivy.reciprocal(ivy.sqrt(x))\n\n\n@to_ivy_arrays_and_back\ndef shift_left(x, y):\n return ivy.bitwise_left_shift(x, y)\n\n\n@to_ivy_arrays_and_back\ndef sign(x):\n return ivy.sign(x)\n\n\n@to_ivy_arrays_and_back\ndef sin(x):\n return ivy.sin(x)\n\n\n@to_ivy_arrays_and_back\ndef sinh(x):\n return ivy.sinh(x)\n\n\n@to_ivy_arrays_and_back\ndef slice(operand, start_indices, limit_indices, strides=None):\n if operand.ndim != len(start_indices):\n msg = (\n \"slice start_indices must have length equal to the number of \"\n \"dimensions of the operand, got indices {} for operand shape {}.\"\n )\n raise TypeError(msg.format(start_indices, operand.shape))\n\n if len(start_indices) != len(limit_indices):\n msg = (\n \"slice limit_indices must have the same length as start_indices, \"\n \"got start_indices {} and limit_indices {}.\"\n )\n raise TypeError(msg.format(start_indices, limit_indices))\n\n if not tuple(limit_indices) <= operand.shape:\n msg = (\n \"slice limit_indices must be less than or equal to operand shape, \"\n \"got limit_indices {} for operand shape {}.\"\n )\n raise TypeError(msg.format(limit_indices, operand.shape))\n\n if not all(si >= 0 for si in start_indices):\n msg = (\n \"slice start_indices must be greater than or equal to zero, \"\n \"got start_indices of {}.\"\n )\n raise TypeError(msg.format(start_indices))\n\n if not limit_indices >= start_indices:\n msg = (\n \"slice limit_indices must be greater than or equal to start_indices,\"\n \" got start_indices {} and limit_indices {}.\"\n )\n raise TypeError(msg.format(start_indices, limit_indices))\n\n start_indices, limit_indices = map(\n lambda x: ivy.array(x) if isinstance(x, int) else x,\n [start_indices, limit_indices],\n )\n strides = [1] * len(operand.shape) if strides is None else strides\n\n full_slice = ()\n for i, _ in enumerate(operand.shape):\n strides_i = int(strides[i])\n start_i = int(start_indices[i])\n limit_i = int(limit_indices[i])\n full_slice += (py_slice(start_i, limit_i, strides_i),)\n ret = operand[full_slice] if full_slice else operand\n\n return ivy.expand_dims(ret)\n\n\n@to_ivy_arrays_and_back\ndef slice_in_dim(operand, start_index, limit_index, stride=1, axis=0):\n start_indices = [0] * operand.ndim\n limit_indices = list(operand.shape)\n strides = [1] * operand.ndim\n\n len_axis = operand.shape[axis]\n start_index_int = start_index if start_index is not None else 0\n limit_index_int = limit_index if limit_index is not None else len_axis\n\n if start_index_int < 0:\n start_index_int = start_index_int + len_axis\n if limit_index_int < 0:\n limit_index_int = limit_index_int + len_axis\n\n axis = int(axis)\n start_indices[axis] = start_index_int\n limit_indices[axis] = limit_index_int\n strides[axis] = int(stride)\n return slice(operand, start_indices, limit_indices, strides)\n\n\n@to_ivy_arrays_and_back\ndef sort(operand, dimension=-1, is_stable=True, num_keys=1):\n return ivy.sort(operand, axis=dimension, stable=is_stable)\n\n\n@to_ivy_arrays_and_back\ndef sqrt(x):\n return ivy.sqrt(x)\n\n\n@to_ivy_arrays_and_back\ndef square(x):\n return ivy.square(x)\n\n\n@to_ivy_arrays_and_back\ndef sub(x, y):\n return ivy.subtract(x, y)\n\n\n@to_ivy_arrays_and_back\ndef tan(x):\n return ivy.tan(x)\n\n\n@to_ivy_arrays_and_back\ndef transpose(operand, permutation):\n return ivy.permute_dims(operand, permutation)\n\n\n@to_ivy_arrays_and_back\ndef shift_right_logical(x, y):\n return 
ivy.bitwise_right_shift(x, y)\n\n\n@to_ivy_arrays_and_back\ndef asinh(x):\n return ivy.asinh(x)\n\n\n@to_ivy_arrays_and_back\ndef atanh(x):\n return ivy.atanh(x)\n\n\n@to_ivy_arrays_and_back\ndef select(pred, on_true, on_false):\n return ivy.where(pred, on_true, on_false)\n\n\n# top_k\n@to_ivy_arrays_and_back\ndef top_k(operand, k):\n values, indices = ivy.top_k(operand, k, axis=-1)\n indices = ivy.astype(indices, ivy.int32, copy=False)\n return [values, indices]\n\n\n@to_ivy_arrays_and_back\ndef squeeze(array, dimensions):\n return ivy.squeeze(array, dimensions)\n\n\n@to_ivy_arrays_and_back\ndef real(x):\n return ivy.real(x)\n\n\n@to_ivy_arrays_and_back\ndef nextafter(x1, x2):\n return ivy.nextafter(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef conj(x):\n return ivy.conj(x)\n",
"path": "ivy/functional/frontends/jax/lax/operators.py"
}
] | diff --git a/ivy/functional/frontends/jax/lax/operators.py b/ivy/functional/frontends/jax/lax/operators.py
index 0acc4a4ddaf2b..3709196dcbf10 100644
--- a/ivy/functional/frontends/jax/lax/operators.py
+++ b/ivy/functional/frontends/jax/lax/operators.py
@@ -620,3 +620,8 @@ def real(x):
@to_ivy_arrays_and_back
def nextafter(x1, x2):
return ivy.nextafter(x1, x2)
+
+
+@to_ivy_arrays_and_back
+def conj(x):
+ return ivy.conj(x)
diff --git a/ivy_tests/test_ivy/test_frontends/test_jax/test_jax_lax_operators.py b/ivy_tests/test_ivy/test_frontends/test_jax/test_jax_lax_operators.py
index 12ba1d34e59ea..78ffccd2561ec 100644
--- a/ivy_tests/test_ivy/test_frontends/test_jax/test_jax_lax_operators.py
+++ b/ivy_tests/test_ivy/test_frontends/test_jax/test_jax_lax_operators.py
@@ -2553,3 +2553,29 @@ def test_jax_lax_nextafter(
x1=x[0],
x2=x[0],
)
+
+
+# conj
+@handle_frontend_test(
+ fn_tree="jax.lax.conj",
+ dtype_and_x=helpers.dtype_and_values(
+ available_dtypes=["complex64"],
+ ),
+)
+def test_jax_lax_conj(
+ *,
+ dtype_and_x,
+ test_flags,
+ on_device,
+ fn_tree,
+ frontend,
+):
+ input_dtype, x = dtype_and_x
+ helpers.test_frontend_function(
+ input_dtypes=input_dtype,
+ test_flags=test_flags,
+ frontend=frontend,
+ fn_tree=fn_tree,
+ on_device=on_device,
+ x=x[0],
+ )
|
iterative__dvc-2282 | test: s3: use moto to test multipart objects
Currently, we are unable to use it because of [this bug](https://github.com/spulec/moto/issues/2154). Once that bug is fixed, we should switch to moto instead of hitting an actual S3 bucket in the unit tests. Related to https://github.com/iterative/dvc/pull/1867
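For reference, a minimal sketch of what a moto-backed unit test could look like once that bug is resolved (the bucket and key names below are illustrative, not taken from the DVC test suite):

```python
import boto3
from moto import mock_s3


@mock_s3
def test_s3_roundtrip_without_real_bucket():
    # moto intercepts boto3's S3 calls, so no credentials or real bucket are needed.
    s3 = boto3.client("s3", region_name="us-east-1")
    s3.create_bucket(Bucket="dvc-test-bucket")
    s3.put_object(Bucket="dvc-test-bucket", Key="data/file", Body=b"hello")
    assert s3.head_object(Bucket="dvc-test-bucket", Key="data/file")["ETag"]
```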
| [
{
"content": "from setuptools import setup, find_packages\nfrom setuptools.command.build_py import build_py as _build_py\nimport os\nimport sys\n\n\n# https://packaging.python.org/guides/single-sourcing-package-version/\npkg_dir = os.path.dirname(__file__)\n\n# This will define __version__ implicitly\nwith open(os.path.join(pkg_dir, \"dvc\", \"version.py\")) as fobj:\n exec(fobj.read())\n\nversion = __version__ # noqa: F821\n\n\n# To achieve consistency between the build version and the one provided\n# by your package during runtime, you need to **pin** the build version.\n#\n# This custom class will replace the version.py module with a **static**\n# `__version__` that your package can read at runtime, assuring consistancy.\n#\n# References:\n# - https://docs.python.org/3.7/distutils/extending.html\n# - https://github.com/python/mypy\nclass build_py(_build_py):\n def pin_version(self):\n path = os.path.join(self.build_lib, \"dvc\")\n self.mkpath(path)\n with open(os.path.join(path, \"version.py\"), \"w\") as fobj:\n fobj.write(\"# AUTOGENERATED at build time by setup.py\\n\")\n fobj.write('__version__ = \"{}\"\\n'.format(version))\n\n def run(self):\n self.execute(self.pin_version, ())\n _build_py.run(self)\n\n\ninstall_requires = [\n \"ply>=3.9\", # See https://github.com/pyinstaller/pyinstaller/issues/1945\n \"configparser>=3.5.0\",\n \"zc.lockfile>=1.2.1\",\n \"future>=0.16.0\",\n \"colorama>=0.3.9\",\n \"configobj>=5.0.6\",\n \"gitpython>=2.1.8\",\n \"setuptools>=34.0.0\",\n \"nanotime>=0.5.2\",\n \"pyasn1>=0.4.1\",\n \"schema>=0.6.7\",\n \"jsonpath-ng>=1.4.3\",\n \"requests>=2.22.0\",\n \"grandalf==0.6\",\n \"asciimatics>=1.10.0\",\n \"distro>=1.3.0\",\n \"appdirs>=1.4.3\",\n \"treelib>=1.5.5\",\n \"inflect>=2.1.0\",\n \"humanize>=0.5.1\",\n \"ruamel.yaml>=0.15.91\",\n \"psutil==5.6.2\",\n \"funcy>=1.12\",\n \"pathspec>=0.5.9\",\n \"shortuuid>=0.5.0\",\n \"win-unicode-console>=0.5; sys_platform == 'win32'\",\n]\n\nif sys.version_info[0] == 2:\n install_requires.append(\"networkx>=2.1,<2.3\")\nelse:\n install_requires.append(\"networkx>=2.1\")\n\n# Extra dependencies for remote integrations\ngs = [\"google-cloud-storage==1.13.0\"]\ns3 = [\"boto3==1.9.115\"]\nazure = [\"azure-storage-blob==2.0.1\"]\noss = [\"oss2==2.6.1\"]\nssh = [\"paramiko>=2.5.0\"]\nall_remotes = gs + s3 + azure + ssh + oss\n\n# Extra dependecies to run tests\ntests_requirements = [\n \"PyInstaller==3.4\",\n \"wheel>=0.31.1\",\n \"pydot>=1.2.4\",\n # Test requirements:\n \"pytest>=4.6.0\",\n \"pytest-timeout>=1.3.3\",\n \"pytest-cov>=2.6.1\",\n \"pytest-xdist>=1.26.1\",\n \"pytest-mock>=1.10.4\",\n \"flaky>=3.5.3\",\n \"mock>=3.0.0\",\n \"xmltodict>=0.11.0\",\n \"awscli>=1.16.125\",\n \"google-compute-engine\",\n \"pywin32; sys_platform == 'win32'\",\n \"Pygments\", # required by collective.checkdocs,\n \"collective.checkdocs\",\n \"flake8\",\n \"flake8-docstrings\",\n \"pydocstyle<4.0\",\n \"jaraco.windows==3.9.2\",\n \"mock-ssh-server>=0.5.0\",\n]\n\nif (sys.version_info) >= (3, 6):\n tests_requirements.append(\"black==19.3b0\")\n\nsetup(\n name=\"dvc\",\n version=version,\n description=\"Git for data scientists - manage your code and data together\",\n long_description=open(\"README.rst\", \"r\").read(),\n author=\"Dmitry Petrov\",\n author_email=\"[email protected]\",\n download_url=\"https://github.com/iterative/dvc\",\n license=\"Apache License 2.0\",\n install_requires=install_requires,\n extras_require={\n \"all\": all_remotes,\n \"gs\": gs,\n \"s3\": s3,\n \"azure\": azure,\n \"oss\": oss,\n \"ssh\": ssh,\n # 
NOTE: https://github.com/inveniosoftware/troubleshooting/issues/1\n \":python_version=='2.7'\": [\"futures\", \"pathlib2\"],\n \"tests\": tests_requirements,\n },\n keywords=\"data science, data version control, machine learning\",\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*\",\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n packages=find_packages(exclude=[\"tests\"]),\n include_package_data=True,\n url=\"http://dataversioncontrol.com\",\n entry_points={\"console_scripts\": [\"dvc = dvc.main:main\"]},\n cmdclass={\"build_py\": build_py},\n zip_safe=False,\n)\n",
"path": "setup.py"
}
] | [
{
"content": "from setuptools import setup, find_packages\nfrom setuptools.command.build_py import build_py as _build_py\nimport os\nimport sys\n\n\n# https://packaging.python.org/guides/single-sourcing-package-version/\npkg_dir = os.path.dirname(__file__)\n\n# This will define __version__ implicitly\nwith open(os.path.join(pkg_dir, \"dvc\", \"version.py\")) as fobj:\n exec(fobj.read())\n\nversion = __version__ # noqa: F821\n\n\n# To achieve consistency between the build version and the one provided\n# by your package during runtime, you need to **pin** the build version.\n#\n# This custom class will replace the version.py module with a **static**\n# `__version__` that your package can read at runtime, assuring consistancy.\n#\n# References:\n# - https://docs.python.org/3.7/distutils/extending.html\n# - https://github.com/python/mypy\nclass build_py(_build_py):\n def pin_version(self):\n path = os.path.join(self.build_lib, \"dvc\")\n self.mkpath(path)\n with open(os.path.join(path, \"version.py\"), \"w\") as fobj:\n fobj.write(\"# AUTOGENERATED at build time by setup.py\\n\")\n fobj.write('__version__ = \"{}\"\\n'.format(version))\n\n def run(self):\n self.execute(self.pin_version, ())\n _build_py.run(self)\n\n\ninstall_requires = [\n \"ply>=3.9\", # See https://github.com/pyinstaller/pyinstaller/issues/1945\n \"configparser>=3.5.0\",\n \"zc.lockfile>=1.2.1\",\n \"future>=0.16.0\",\n \"colorama>=0.3.9\",\n \"configobj>=5.0.6\",\n \"gitpython>=2.1.8\",\n \"setuptools>=34.0.0\",\n \"nanotime>=0.5.2\",\n \"pyasn1>=0.4.1\",\n \"schema>=0.6.7\",\n \"jsonpath-ng>=1.4.3\",\n \"requests>=2.22.0\",\n \"grandalf==0.6\",\n \"asciimatics>=1.10.0\",\n \"distro>=1.3.0\",\n \"appdirs>=1.4.3\",\n \"treelib>=1.5.5\",\n \"inflect>=2.1.0\",\n \"humanize>=0.5.1\",\n \"ruamel.yaml>=0.15.91\",\n \"psutil==5.6.2\",\n \"funcy>=1.12\",\n \"pathspec>=0.5.9\",\n \"shortuuid>=0.5.0\",\n \"win-unicode-console>=0.5; sys_platform == 'win32'\",\n]\n\nif sys.version_info[0] == 2:\n install_requires.append(\"networkx>=2.1,<2.3\")\nelse:\n install_requires.append(\"networkx>=2.1\")\n\n# Extra dependencies for remote integrations\ngs = [\"google-cloud-storage==1.13.0\"]\ns3 = [\"boto3==1.9.115\"]\nazure = [\"azure-storage-blob==2.0.1\"]\noss = [\"oss2==2.6.1\"]\nssh = [\"paramiko>=2.5.0\"]\nall_remotes = gs + s3 + azure + ssh + oss\n\n# Extra dependecies to run tests\ntests_requirements = [\n \"PyInstaller==3.4\",\n \"wheel>=0.31.1\",\n \"pydot>=1.2.4\",\n # Test requirements:\n \"pytest>=4.6.0\",\n \"pytest-timeout>=1.3.3\",\n \"pytest-cov>=2.6.1\",\n \"pytest-xdist>=1.26.1\",\n \"pytest-mock>=1.10.4\",\n \"flaky>=3.5.3\",\n \"mock>=3.0.0\",\n \"xmltodict>=0.11.0\",\n \"awscli>=1.16.125\",\n \"google-compute-engine\",\n \"pywin32; sys_platform == 'win32'\",\n \"Pygments\", # required by collective.checkdocs,\n \"collective.checkdocs\",\n \"flake8\",\n \"flake8-docstrings\",\n \"pydocstyle<4.0\",\n \"jaraco.windows==3.9.2\",\n \"mock-ssh-server>=0.5.0\",\n \"moto\",\n]\n\nif (sys.version_info) >= (3, 6):\n tests_requirements.append(\"black==19.3b0\")\n\nsetup(\n name=\"dvc\",\n version=version,\n description=\"Git for data scientists - manage your code and data together\",\n long_description=open(\"README.rst\", \"r\").read(),\n author=\"Dmitry Petrov\",\n author_email=\"[email protected]\",\n download_url=\"https://github.com/iterative/dvc\",\n license=\"Apache License 2.0\",\n install_requires=install_requires,\n extras_require={\n \"all\": all_remotes,\n \"gs\": gs,\n \"s3\": s3,\n \"azure\": azure,\n \"oss\": oss,\n 
\"ssh\": ssh,\n # NOTE: https://github.com/inveniosoftware/troubleshooting/issues/1\n \":python_version=='2.7'\": [\"futures\", \"pathlib2\"],\n \"tests\": tests_requirements,\n },\n keywords=\"data science, data version control, machine learning\",\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*\",\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n packages=find_packages(exclude=[\"tests\"]),\n include_package_data=True,\n url=\"http://dataversioncontrol.com\",\n entry_points={\"console_scripts\": [\"dvc = dvc.main:main\"]},\n cmdclass={\"build_py\": build_py},\n zip_safe=False,\n)\n",
"path": "setup.py"
}
] | diff --git a/scripts/ci/install.sh b/scripts/ci/install.sh
index 40a73a133d..dd69f6caee 100644
--- a/scripts/ci/install.sh
+++ b/scripts/ci/install.sh
@@ -17,6 +17,10 @@ function retry {
retry pip install --upgrade pip setuptools wheel
retry pip install .[all,tests]
+# NOTE: waiting for https://github.com/spulec/moto/issues/2172
+pip uninstall -y moto
+retry pip install git+https://github.com/efiop/moto.git@move-env-mocking
+
git config --global user.email "[email protected]"
git config --global user.name "DVC Tester"
diff --git a/setup.py b/setup.py
index da01f7fb96..28d85847ab 100644
--- a/setup.py
+++ b/setup.py
@@ -102,6 +102,7 @@ def run(self):
"pydocstyle<4.0",
"jaraco.windows==3.9.2",
"mock-ssh-server>=0.5.0",
+ "moto",
]
if (sys.version_info) >= (3, 6):
diff --git a/tests/func/test_s3.py b/tests/func/test_s3.py
index 3bcb64ba3a..9cead965fe 100644
--- a/tests/func/test_s3.py
+++ b/tests/func/test_s3.py
@@ -1,8 +1,32 @@
import boto3
-import pytest
+
+from moto import mock_s3
+from functools import wraps
+import moto.s3.models as s3model
from dvc.remote.s3 import RemoteS3
-from tests.func.test_data_cloud import _should_test_aws, get_aws_url
+from tests.func.test_data_cloud import get_aws_url
+
+
+# from https://github.com/spulec/moto/blob/v1.3.5/tests/test_s3/test_s3.py#L40
+REDUCED_PART_SIZE = 256
+
+
+def reduced_min_part_size(f):
+ """ speed up tests by temporarily making the multipart minimum part size
+ small
+ """
+ orig_size = s3model.UPLOAD_PART_MIN_SIZE
+
+ @wraps(f)
+ def wrapped(*args, **kwargs):
+ try:
+ s3model.UPLOAD_PART_MIN_SIZE = REDUCED_PART_SIZE
+ return f(*args, **kwargs)
+ finally:
+ s3model.UPLOAD_PART_MIN_SIZE = orig_size
+
+ return wrapped
def _get_src_dst():
@@ -10,13 +34,12 @@ def _get_src_dst():
return base_info / "from", base_info / "to"
+@mock_s3
def test_copy_singlepart_preserve_etag():
from_info, to_info = _get_src_dst()
- if not _should_test_aws():
- pytest.skip()
-
s3 = boto3.client("s3")
+ s3.create_bucket(Bucket=from_info.bucket)
s3.put_object(Bucket=from_info.bucket, Key=from_info.path, Body="data")
RemoteS3._copy(s3, from_info, to_info, {})
@@ -32,8 +55,8 @@ def _upload_multipart(s3, Bucket, Key):
# NOTE: Generation parts of variable size. Part size should be at
# least 5MB:
# https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadComplete.html
- part_size = 10 * 1024 * 1024 + 2 ** i
- body = str(i) * part_size
+ part_size = REDUCED_PART_SIZE + i
+ body = b"1" * part_size
part = s3.upload_part(
Bucket=Bucket,
Key=Key,
@@ -52,12 +75,12 @@ def _upload_multipart(s3, Bucket, Key):
)
+@mock_s3
+@reduced_min_part_size
def test_copy_multipart_preserve_etag():
from_info, to_info = _get_src_dst()
- if not _should_test_aws():
- pytest.skip()
-
s3 = boto3.client("s3")
+ s3.create_bucket(Bucket=from_info.bucket)
_upload_multipart(s3, from_info.bucket, from_info.path)
RemoteS3._copy(s3, from_info, to_info, {})
|
xonsh__xonsh-4511 | "Little Bobby Colors": $PROMPT evaluates {colors} *after* substitution from external input
## xonfig
<details>
```
+------------------+----------------------+
| xonsh | 0.9.27 |
| Git SHA | 71fe9014 |
| Commit Date | Jan 29 08:58:58 2021 |
| Python | 3.9.5 |
| PLY | 3.11 |
| have readline | True |
| prompt toolkit | 3.0.19 |
| shell type | prompt_toolkit |
| pygments | 2.9.0 |
| on posix | True |
| on linux | True |
| distro | ubuntu |
| on darwin | False |
| on windows | False |
| on cygwin | False |
| on msys2 | False |
| is superuser | False |
| default encoding | utf-8 |
| xonsh encoding | utf-8 |
| encoding errors | surrogateescape |
| on jupyter | False |
| jupyter kernel | None |
| xontrib 1 | apt_tabcomplete |
| xontrib 2 | direnv |
| xontrib 3 | kitty |
| xontrib 4 | prompt_ret_code |
+------------------+----------------------+
```
</details>
## Expected Behavior
When a prompt includes shell-external names (like working directory, running job name, etc), any "meta" command directives should be escaped, lest unusual names trigger surprising (and perhaps even harmful) behavior.
## Current Behavior
```
$ xonsh --no-rc # just to demo with the default prompt
... default xonsh message ...
egnor@ostrich ~ $ mkdir '{' # slightly odd directory name
egnor@ostrich ~ $ cd '{' # here we go!
{BOLD_GREEN}egnor@ostrich{BOLD_BLUE} ~/{{BOLD_INTENSE_YELLOW}{RESET} {BOLD_BLUE}
${RESET}
```
Suddenly the prompt is barfing: the curly braces are no longer balanced, because the `{` directory name was substituted straight into the prompt template. This is also fun:
```
egnor@ostrich ~ $ mkdir '{BACKGROUND_RED} ALERT'
egnor@ostrich ~ $ cd '{BACKGROUND_RED} ALERT'
egnor@ostrich ~/ ALERT $
```
...and "ALERT" gets a bright red background color in the prompt. As far as I know, nothing in curly braces will do anything particularly terrible (none of the color/field names take arguments), so I don't _think_ this is a security issue, but it sure doesn't feel right.
## Steps to Reproduce
1. Have a prompt that shows the current directory (e.g. the default prompt)
2. Create a directory with `{` / `}` characters in it, perhaps even color tags like `{RED}`
3. Enter that directory
## VERY VERY HUMBLE editorializing 🙇
<sup>(Please take this commentary with a HUGE grain of salt :salt: because I am a super newcomer to xonsh and not a contributor (yet?), though I'd be happy to help improve this... I _love_ xonsh's overall structure and direction!)</sup>
<sup>This problem could be fixed by somehow "escaping" the output from `{cwd}` and the like, OR by doing color substitution before/while doing other expansions (rather than expanding `{cwd}` in one pass, then expanding colors like `{RED}` in a separate pass)...</sup>
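<sup>As a rough sketch of the first option (plain Python, not the actual xonsh implementation): doubling any braces that come from the filesystem makes the later color/field expansion pass treat them as literal characters.</sup>

```python
def escape_prompt_literal(text: str) -> str:
    # Double the braces so a later str.format-style expansion pass
    # renders "{" and "}" literally instead of parsing them as fields.
    return text.replace("{", "{{").replace("}", "}}")


assert escape_prompt_literal("~/{BACKGROUND_RED} ALERT") == "~/{{BACKGROUND_RED}} ALERT"
```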
<sup>BUT I don't love the little mini language used in `$PROMPT` and friends (`$RIGHT_PROMPT`, `$TITLE`, `$BOTTOM_TOOLBAR`, etc). It's conventional to have such a little mini language in shells but I think xonsh can do better _and_ be simpler. Fundamentally this is yet another little string interpolation mini template language, with the usual problems of escaping "text" vs "markup".</sup>
<sup>But since this is all Python we don't really need "markup" and its attendant escaping problems. $PROMPT could just be a function that returns the prompt to show. That function can then call whatever handy dandy utility functions it wants to (to get the cwd formatted various ways, hostname, git status, etc) and assemble it using ordinary Python string manipulation (f-strings or `string.Template` or just the `+` operator), no fuss, no muss, no weird little special curly-brackets-with-colons things to learn. Colors and similar text formatting could be supported with a `ColoredStr` class which could be constructed and concatenated (with other `ColoredStr` and/or regular `str`) and sliced much like `str`. Then everything would be clean and easy and Pythonic without curly braces flopping about.</sup>
<sup>(End humble editorializing!)</sup>
## For community
⬇️ **Please click the 👍 reaction instead of leaving a `+1` or 👍 comment**
| [
{
"content": "# -*- coding: utf-8 -*-\n\"\"\"CWD related prompt formatter\"\"\"\n\nimport os\nimport shutil\n\nimport xonsh.tools as xt\nimport xonsh.platform as xp\nfrom xonsh.built_ins import XSH\n\n\ndef _replace_home(x):\n if xp.ON_WINDOWS:\n home = XSH.env[\"HOMEDRIVE\"] + XSH.env[\"HOMEPATH\"][0]\n if x.startswith(home):\n x = x.replace(home, \"~\", 1)\n\n if XSH.env.get(\"FORCE_POSIX_PATHS\"):\n x = x.replace(os.sep, os.altsep)\n\n return x\n else:\n home = XSH.env[\"HOME\"]\n if x.startswith(home):\n x = x.replace(home, \"~\", 1)\n return x\n\n\ndef _replace_home_cwd():\n return _replace_home(XSH.env[\"PWD\"])\n\n\ndef _collapsed_pwd():\n sep = xt.get_sep()\n pwd = _replace_home_cwd().split(sep)\n size = len(pwd)\n leader = sep if size > 0 and len(pwd[0]) == 0 else \"\"\n base = [\n i[0] if ix != size - 1 and i[0] != \".\" else i[0:2] if ix != size - 1 else i\n for ix, i in enumerate(pwd)\n if len(i) > 0\n ]\n return leader + sep.join(base)\n\n\ndef _dynamically_collapsed_pwd():\n \"\"\"Return the compact current working directory. It respects the\n environment variable DYNAMIC_CWD_WIDTH.\n \"\"\"\n original_path = _replace_home_cwd()\n target_width, units = XSH.env[\"DYNAMIC_CWD_WIDTH\"]\n elision_char = XSH.env[\"DYNAMIC_CWD_ELISION_CHAR\"]\n if target_width == float(\"inf\"):\n return original_path\n if units == \"%\":\n cols, _ = shutil.get_terminal_size()\n target_width = (cols * target_width) // 100\n sep = xt.get_sep()\n pwd = original_path.split(sep)\n last = pwd.pop()\n remaining_space = target_width - len(last)\n # Reserve space for separators\n remaining_space_for_text = remaining_space - len(pwd)\n parts = []\n for i in range(len(pwd)):\n part = pwd[i]\n part_len = int(\n min(len(part), max(1, remaining_space_for_text // (len(pwd) - i)))\n )\n remaining_space_for_text -= part_len\n if len(part) > part_len:\n reduced_part = part[0 : part_len - len(elision_char)] + elision_char\n parts.append(reduced_part)\n else:\n parts.append(part)\n parts.append(last)\n full = sep.join(parts)\n truncature_char = elision_char if elision_char else \"...\"\n # If even if displaying one letter per dir we are too long\n if len(full) > target_width:\n # We truncate the left most part\n full = truncature_char + full[int(-target_width) + len(truncature_char) :]\n # if there is not even a single separator we still\n # want to display at least the beginning of the directory\n if full.find(sep) == -1:\n full = (truncature_char + sep + last)[\n 0 : int(target_width) - len(truncature_char)\n ] + truncature_char\n return full\n",
"path": "xonsh/prompt/cwd.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n\"\"\"CWD related prompt formatter\"\"\"\n\nimport os\nimport shutil\n\nimport xonsh.tools as xt\nimport xonsh.platform as xp\nfrom xonsh.built_ins import XSH\n\n\ndef _replace_home(x):\n if xp.ON_WINDOWS:\n home = XSH.env[\"HOMEDRIVE\"] + XSH.env[\"HOMEPATH\"][0]\n if x.startswith(home):\n x = x.replace(home, \"~\", 1)\n\n if XSH.env.get(\"FORCE_POSIX_PATHS\"):\n x = x.replace(os.sep, os.altsep)\n\n return x\n else:\n home = XSH.env[\"HOME\"]\n if x.startswith(home):\n x = x.replace(home, \"~\", 1)\n return x\n\n\ndef _replace_home_cwd():\n pwd = XSH.env[\"PWD\"].replace(\"{\", \"{{\").replace(\"}\", \"}}\")\n return _replace_home(pwd)\n\n\ndef _collapsed_pwd():\n sep = xt.get_sep()\n pwd = _replace_home_cwd().split(sep)\n size = len(pwd)\n leader = sep if size > 0 and len(pwd[0]) == 0 else \"\"\n base = [\n i[0] if ix != size - 1 and i[0] != \".\" else i[0:2] if ix != size - 1 else i\n for ix, i in enumerate(pwd)\n if len(i) > 0\n ]\n return leader + sep.join(base)\n\n\ndef _dynamically_collapsed_pwd():\n \"\"\"Return the compact current working directory. It respects the\n environment variable DYNAMIC_CWD_WIDTH.\n \"\"\"\n original_path = _replace_home_cwd()\n target_width, units = XSH.env[\"DYNAMIC_CWD_WIDTH\"]\n elision_char = XSH.env[\"DYNAMIC_CWD_ELISION_CHAR\"]\n if target_width == float(\"inf\"):\n return original_path\n if units == \"%\":\n cols, _ = shutil.get_terminal_size()\n target_width = (cols * target_width) // 100\n sep = xt.get_sep()\n pwd = original_path.split(sep)\n last = pwd.pop()\n remaining_space = target_width - len(last)\n # Reserve space for separators\n remaining_space_for_text = remaining_space - len(pwd)\n parts = []\n for i in range(len(pwd)):\n part = pwd[i]\n part_len = int(\n min(len(part), max(1, remaining_space_for_text // (len(pwd) - i)))\n )\n remaining_space_for_text -= part_len\n if len(part) > part_len:\n reduced_part = part[0 : part_len - len(elision_char)] + elision_char\n parts.append(reduced_part)\n else:\n parts.append(part)\n parts.append(last)\n full = sep.join(parts)\n truncature_char = elision_char if elision_char else \"...\"\n # If even if displaying one letter per dir we are too long\n if len(full) > target_width:\n # We truncate the left most part\n full = truncature_char + full[int(-target_width) + len(truncature_char) :]\n # if there is not even a single separator we still\n # want to display at least the beginning of the directory\n if full.find(sep) == -1:\n full = (truncature_char + sep + last)[\n 0 : int(target_width) - len(truncature_char)\n ] + truncature_char\n return full\n",
"path": "xonsh/prompt/cwd.py"
}
] | diff --git a/news/pwd-curly-escape.rst b/news/pwd-curly-escape.rst
new file mode 100644
index 0000000000..4a069b1eba
--- /dev/null
+++ b/news/pwd-curly-escape.rst
@@ -0,0 +1,23 @@
+**Added:**
+
+* <news item>
+
+**Changed:**
+
+* Curly braces { } in directory names are now escaped in the prompt
+
+**Deprecated:**
+
+* <news item>
+
+**Removed:**
+
+* <news item>
+
+**Fixed:**
+
+* <news item>
+
+**Security:**
+
+* <news item>
diff --git a/tests/prompt/test_cwd.py b/tests/prompt/test_cwd.py
new file mode 100644
index 0000000000..977a64b094
--- /dev/null
+++ b/tests/prompt/test_cwd.py
@@ -0,0 +1,16 @@
+from xonsh.prompt.cwd import _replace_home_cwd
+from xonsh.built_ins import XSH
+
+
+def test_cwd_escapes_curly_brackets_with_more_curly_brackets():
+ XSH.env["PWD"] = "{foo}"
+ assert _replace_home_cwd() == "{{foo}}"
+
+ XSH.env["PWD"] = "{{foo}}"
+ assert _replace_home_cwd() == "{{{{foo}}}}"
+
+ XSH.env["PWD"] = "{"
+ assert _replace_home_cwd() == "{{"
+
+ XSH.env["PWD"] = "}}"
+ assert _replace_home_cwd() == "}}}}"
diff --git a/xonsh/prompt/cwd.py b/xonsh/prompt/cwd.py
index 032b0e9755..32d1685bb1 100644
--- a/xonsh/prompt/cwd.py
+++ b/xonsh/prompt/cwd.py
@@ -27,7 +27,8 @@ def _replace_home(x):
def _replace_home_cwd():
- return _replace_home(XSH.env["PWD"])
+ pwd = XSH.env["PWD"].replace("{", "{{").replace("}", "}}")
+ return _replace_home(pwd)
def _collapsed_pwd():
|
doccano__doccano-1842 | Doccano is not importing any text data
Hello,
Doccano is not importing any text data. When I import text data, the browser stays stuck in the loading state shown below:

The command-line terminal shows the following:
```
Starting server with port 8000.
WARNING:waitress.queue:Task queue depth is 1
WARNING:waitress.queue:Task queue depth is 2
Bad Request: /v1/auth/login/
WARNING:django.request:Bad Request: /v1/auth/login/
WARNING:waitress.queue:Task queue depth is 1
WARNING:waitress.queue:Task queue depth is 2
WARNING:waitress.queue:Task queue depth is 1
WARNING:waitress.queue:Task queue depth is 1
WARNING:waitress.queue:Task queue depth is 1
```
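The repeated `Task queue depth` warnings mean waitress has more queued requests than worker threads, so a slow import request can leave the page spinning. As a hedged sketch only (the thread count is illustrative; `config.wsgi` is the module the standalone CLI already serves), raising waitress's thread pool looks like this:

```python
from waitress import serve

from config.wsgi import application

# waitress uses a small fixed thread pool by default; slow upload/import
# requests can exhaust it and queue everything else behind them.
serve(application, port=8000, threads=8)
```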
Your Environment
---------
* Operating System: Windows 10
* Python Version Used: 3.10
* When you install doccano: Few days back
* How did you install doccano (Heroku button etc): Command Line
| [
{
"content": "import argparse\nimport multiprocessing\nimport os\nimport platform\nimport sys\nfrom pathlib import Path\n\nimport django\nfrom django.core import management\nfrom environs import Env\n\nfrom .config.celery import app\n\nenv = Env()\nDOCCANO_HOME = os.path.expanduser(os.environ.get(\"DOCCANO_HOME\", \"~/doccano\"))\nPath(DOCCANO_HOME).mkdir(parents=True, exist_ok=True)\nenv.bool(\"DEBUG\", False)\nos.environ[\"STANDALONE\"] = \"True\"\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"config.settings.production\")\nos.environ.setdefault(\"DATABASE_URL\", os.path.join(f\"sqlite:///{DOCCANO_HOME}\", \"db.sqlite3\"))\nos.environ.setdefault(\"MEDIA_ROOT\", os.path.join(DOCCANO_HOME, \"media\"))\nbase = os.path.abspath(os.path.dirname(__file__))\nsys.path.append(base)\nparser = argparse.ArgumentParser(description=\"doccano, text annotation for machine learning practitioners.\")\n\n\ndef number_of_workers():\n return (multiprocessing.cpu_count() * 2) + 1\n\n\ndef is_windows():\n return platform.system() == \"Windows\"\n\n\ndef run_on_nix(args):\n import gunicorn.app.base\n import gunicorn.util\n\n class StandaloneApplication(gunicorn.app.base.BaseApplication):\n def __init__(self, options=None):\n self.options = options or {}\n super().__init__()\n\n def load_config(self):\n config = {\n key: value for key, value in self.options.items() if key in self.cfg.settings and value is not None\n }\n for key, value in config.items():\n self.cfg.set(key.lower(), value)\n\n def load(self):\n return gunicorn.util.import_app(\"config.wsgi\")\n\n options = {\n \"bind\": \"%s:%s\" % (\"0.0.0.0\", args.port),\n \"workers\": args.workers,\n \"chdir\": base,\n \"capture_output\": True,\n \"loglevel\": \"debug\",\n }\n StandaloneApplication(options).run()\n\n\ndef run_on_windows(args):\n from waitress import serve\n\n from config.wsgi import application\n\n serve(application, port=args.port)\n\n\ndef command_db_init(args):\n print(\"Setup Database.\")\n management.call_command(\"wait_for_db\")\n management.call_command(\"migrate\")\n management.call_command(\"create_roles\")\n\n\ndef command_user_create(args):\n print(\"Create admin user.\")\n management.call_command(\n \"create_admin\", \"--noinput\", username=args.username, password=args.password, email=args.email\n )\n\n\ndef command_migrate(args):\n print(\"Start migration.\")\n management.call_command(\"migrate\")\n\n\ndef command_run_webserver(args):\n print(f\"Starting server with port {args.port}.\")\n if is_windows():\n run_on_windows(args)\n else:\n run_on_nix(args)\n\n\ndef command_run_task_queue(args):\n print(\"Starting task queue.\")\n argv = [\n \"--app=config\",\n \"--workdir={}\".format(base),\n \"worker\",\n \"--loglevel=info\",\n \"--concurrency={}\".format(args.concurrency),\n ]\n if is_windows():\n argv.append(\"--pool=solo\")\n app.worker_main(argv=argv)\n\n\ndef command_help(args):\n print(parser.parse_args([args.command, \"--help\"]))\n\n\ndef main():\n # Create a command line parser.\n subparsers = parser.add_subparsers()\n\n # Create a parser for db initialization.\n parser_init = subparsers.add_parser(\"init\", help=\"see `init -h`\")\n parser_init.set_defaults(handler=command_db_init)\n\n # Create a parser for migration.\n parser_migration = subparsers.add_parser(\"migrate\", help=\"Updates database schema.\")\n parser_migration.set_defaults(handler=command_migrate)\n\n # Create a parser for user creation.\n parser_create_user = subparsers.add_parser(\"createuser\", help=\"see `createuser -h`\")\n 
parser_create_user.add_argument(\"--username\", type=str, default=\"admin\", help=\"admin username\")\n parser_create_user.add_argument(\"--password\", type=str, default=\"password\", help=\"admin password\")\n parser_create_user.add_argument(\"--email\", type=str, default=\"[email protected]\", help=\"admin email\")\n parser_create_user.set_defaults(handler=command_user_create)\n\n # Create a parser for web server.\n parser_server = subparsers.add_parser(\"webserver\", help=\"see `webserver -h`\")\n parser_server.add_argument(\"--port\", type=int, default=8000, help=\"port number\")\n parser_server.add_argument(\"--workers\", type=int, default=number_of_workers(), help=\"the number of workers\")\n parser_server.add_argument(\"--env_file\", type=str, help=\"read in a file of environment variables\")\n parser_server.set_defaults(handler=command_run_webserver)\n\n # Create a parser for task queue.\n parser_queue = subparsers.add_parser(\"task\", help=\"see `task -h`\")\n parser_queue.add_argument(\"--concurrency\", type=int, default=2, help=\"concurrency\")\n parser_queue.add_argument(\"--env_file\", type=str, help=\"read in a file of environment variables\")\n parser_queue.set_defaults(handler=command_run_task_queue)\n\n # Create a parser for help.\n parser_help = subparsers.add_parser(\"help\", help=\"see `help -h`\")\n parser_help.add_argument(\"command\", help=\"command name which help is shown\")\n parser_help.set_defaults(handler=command_help)\n\n # Dispatch handler.\n args = parser.parse_args()\n if hasattr(args, \"env_file\"):\n env.read_env(args.env_file, recurse=False, override=True)\n if hasattr(args, \"handler\"):\n django.setup()\n args.handler(args)\n else:\n # If specified unknown command, show help.\n parser.print_help()\n\n\nif __name__ == \"__main__\":\n main()\n",
"path": "backend/cli.py"
}
] | [
{
"content": "import argparse\nimport multiprocessing\nimport os\nimport platform\nimport sys\nfrom pathlib import Path\n\nimport django\nfrom django.core import management\nfrom environs import Env\n\nfrom .config.celery import app\n\nenv = Env()\nDOCCANO_HOME = os.path.expanduser(os.environ.get(\"DOCCANO_HOME\", \"~/doccano\"))\nPath(DOCCANO_HOME).mkdir(parents=True, exist_ok=True)\nenv.bool(\"DEBUG\", False)\nos.environ[\"STANDALONE\"] = \"True\"\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"config.settings.production\")\nos.environ.setdefault(\"DATABASE_URL\", os.path.join(f\"sqlite:///{DOCCANO_HOME}\", \"db.sqlite3\"))\nos.environ.setdefault(\"MEDIA_ROOT\", os.path.join(DOCCANO_HOME, \"media\"))\nbase = os.path.abspath(os.path.dirname(__file__))\nsys.path.append(base)\nparser = argparse.ArgumentParser(description=\"doccano, text annotation for machine learning practitioners.\")\n\n\ndef number_of_workers():\n return (multiprocessing.cpu_count() * 2) + 1\n\n\ndef is_windows():\n return platform.system() == \"Windows\"\n\n\ndef run_on_nix(args):\n import gunicorn.app.base\n import gunicorn.util\n\n class StandaloneApplication(gunicorn.app.base.BaseApplication):\n def __init__(self, options=None):\n self.options = options or {}\n super().__init__()\n\n def load_config(self):\n config = {\n key: value for key, value in self.options.items() if key in self.cfg.settings and value is not None\n }\n for key, value in config.items():\n self.cfg.set(key.lower(), value)\n\n def load(self):\n return gunicorn.util.import_app(\"config.wsgi\")\n\n options = {\n \"bind\": \"%s:%s\" % (\"0.0.0.0\", args.port),\n \"workers\": args.workers,\n \"chdir\": base,\n \"capture_output\": True,\n \"loglevel\": \"debug\",\n }\n StandaloneApplication(options).run()\n\n\ndef run_on_windows(args):\n from waitress import serve\n\n from config.wsgi import application\n\n serve(application, port=args.port, threads=args.workers)\n\n\ndef command_db_init(args):\n print(\"Setup Database.\")\n management.call_command(\"wait_for_db\")\n management.call_command(\"migrate\")\n management.call_command(\"create_roles\")\n\n\ndef command_user_create(args):\n print(\"Create admin user.\")\n management.call_command(\n \"create_admin\", \"--noinput\", username=args.username, password=args.password, email=args.email\n )\n\n\ndef command_migrate(args):\n print(\"Start migration.\")\n management.call_command(\"migrate\")\n\n\ndef command_run_webserver(args):\n print(f\"Starting server with port {args.port}.\")\n if is_windows():\n run_on_windows(args)\n else:\n run_on_nix(args)\n\n\ndef command_run_task_queue(args):\n print(\"Starting task queue.\")\n argv = [\n \"--app=config\",\n \"--workdir={}\".format(base),\n \"worker\",\n \"--loglevel=info\",\n \"--concurrency={}\".format(args.concurrency),\n ]\n if is_windows():\n argv.append(\"--pool=solo\")\n app.worker_main(argv=argv)\n\n\ndef command_help(args):\n print(parser.parse_args([args.command, \"--help\"]))\n\n\ndef main():\n # Create a command line parser.\n subparsers = parser.add_subparsers()\n\n # Create a parser for db initialization.\n parser_init = subparsers.add_parser(\"init\", help=\"see `init -h`\")\n parser_init.set_defaults(handler=command_db_init)\n\n # Create a parser for migration.\n parser_migration = subparsers.add_parser(\"migrate\", help=\"Updates database schema.\")\n parser_migration.set_defaults(handler=command_migrate)\n\n # Create a parser for user creation.\n parser_create_user = subparsers.add_parser(\"createuser\", help=\"see `createuser -h`\")\n 
parser_create_user.add_argument(\"--username\", type=str, default=\"admin\", help=\"admin username\")\n parser_create_user.add_argument(\"--password\", type=str, default=\"password\", help=\"admin password\")\n parser_create_user.add_argument(\"--email\", type=str, default=\"[email protected]\", help=\"admin email\")\n parser_create_user.set_defaults(handler=command_user_create)\n\n # Create a parser for web server.\n parser_server = subparsers.add_parser(\"webserver\", help=\"see `webserver -h`\")\n parser_server.add_argument(\"--port\", type=int, default=8000, help=\"port number\")\n parser_server.add_argument(\"--workers\", type=int, default=number_of_workers(), help=\"the number of workers\")\n parser_server.add_argument(\"--env_file\", type=str, help=\"read in a file of environment variables\")\n parser_server.set_defaults(handler=command_run_webserver)\n\n # Create a parser for task queue.\n parser_queue = subparsers.add_parser(\"task\", help=\"see `task -h`\")\n parser_queue.add_argument(\"--concurrency\", type=int, default=2, help=\"concurrency\")\n parser_queue.add_argument(\"--env_file\", type=str, help=\"read in a file of environment variables\")\n parser_queue.set_defaults(handler=command_run_task_queue)\n\n # Create a parser for help.\n parser_help = subparsers.add_parser(\"help\", help=\"see `help -h`\")\n parser_help.add_argument(\"command\", help=\"command name which help is shown\")\n parser_help.set_defaults(handler=command_help)\n\n # Dispatch handler.\n args = parser.parse_args()\n if hasattr(args, \"env_file\"):\n env.read_env(args.env_file, recurse=False, override=True)\n if hasattr(args, \"handler\"):\n django.setup()\n args.handler(args)\n else:\n # If specified unknown command, show help.\n parser.print_help()\n\n\nif __name__ == \"__main__\":\n main()\n",
"path": "backend/cli.py"
}
]
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 9872731980..04a7192419 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -9,35 +9,35 @@ jobs:
run:
working-directory: ./backend
steps:
- - uses: actions/checkout@v2
- - name: Set up Python 3.8
- uses: actions/setup-python@v2
- with:
- python-version: 3.8
- - name: Install dependencies
- run: |
- python -m pip install --upgrade pip
- pip install poetry
- poetry install
- - name: Run migrations
- run: |
- poetry run task wait_for_db
- poetry run task migrate
- - name: Lint with flake8
- run: |
- poetry run task flake8
- - name: Lint with isort
- run: |
- poetry run task isort
- - name: Black
- run: |
- poetry run task black
- - name: mypy
- run: |
- poetry run task mypy
- - name: Run tests
- run: |
- poetry run task test
+ - uses: actions/checkout@v2
+ - name: Set up Python 3.8
+ uses: actions/setup-python@v2
+ with:
+ python-version: 3.8
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install poetry
+ poetry install
+ - name: Run migrations
+ run: |
+ poetry run task wait_for_db
+ poetry run task migrate
+ - name: Lint with flake8
+ run: |
+ poetry run task flake8
+ - name: Lint with isort
+ run: |
+ poetry run task isort
+ - name: Black
+ run: |
+ poetry run task black
+ - name: mypy
+ run: |
+ poetry run task mypy
+ - name: Run tests
+ run: |
+ poetry run task test
frontend:
runs-on: ubuntu-latest
@@ -48,13 +48,15 @@ jobs:
- uses: actions/checkout@v2
- uses: actions/setup-node@v2
with:
- node-version: '16'
+ node-version: "16"
- name: Install Yarn
run: npm install -g yarn
- name: Install npm modules
run: yarn install
- name: Lint
run: yarn lint
+ - name: Prettier
+ run: yarn lint:prettier
docker-lint:
runs-on: ubuntu-latest
diff --git a/backend/cli.py b/backend/cli.py
index 51c0f6c9e7..cdbfc7c67a 100644
--- a/backend/cli.py
+++ b/backend/cli.py
@@ -66,7 +66,7 @@ def run_on_windows(args):
from config.wsgi import application
- serve(application, port=args.port)
+ serve(application, port=args.port, threads=args.workers)
def command_db_init(args):
diff --git a/frontend/.eslintrc.js b/frontend/.eslintrc.js
index 96da87adb2..a1e0ca0d14 100644
--- a/frontend/.eslintrc.js
+++ b/frontend/.eslintrc.js
@@ -4,27 +4,23 @@ module.exports = {
browser: true,
node: true
},
- extends: [
- '@nuxtjs',
- 'plugin:nuxt/recommended',
- '@nuxtjs/eslint-config-typescript',
- 'prettier'
- ],
+ extends: ['@nuxtjs/eslint-config-typescript', 'plugin:nuxt/recommended', 'prettier'],
rules: {
- 'no-console': 'off',
- 'no-restricted-syntax': [
+ 'no-console': process.env.NODE_ENV === 'production' ? 'warn' : 'off',
+ 'no-debugger': process.env.NODE_ENV === 'production' ? 'warn' : 'off',
+ 'no-useless-constructor': 'off',
+ camelcase: 'off',
+ 'max-len': [
'error',
+ 100,
+ 2,
{
- selector: "CallExpression[callee.object.name='console'][callee.property.name!=/^(log|warn|error|info|trace)$/]",
- message: 'Unexpected property on console object was called'
+ ignoreUrls: true,
+ ignoreComments: false,
+ ignoreRegExpLiterals: true,
+ ignoreStrings: true,
+ ignoreTemplateLiterals: true
}
- ],
- // 'vue/valid-template-root': 'off',
- // 'space-before-function-paren': ['error', 'never'],
- 'no-useless-constructor': 'off',
- // '@typescript-eslint/no-useless-constructor': 'off',
- // 'no-unused-vars': 'off',
- // '@typescript-eslint/no-unused-vars': 'off',
- 'camelcase': 'off'
+ ]
}
}
diff --git a/frontend/.prettierignore b/frontend/.prettierignore
new file mode 100644
index 0000000000..8d9ce052a1
--- /dev/null
+++ b/frontend/.prettierignore
@@ -0,0 +1,8 @@
+.nuxt/
+assets/
+coverage/
+dist/
+node_modules/
+static/
+*.html
+*.md
diff --git a/frontend/.prettierrc b/frontend/.prettierrc
new file mode 100644
index 0000000000..a65b64adeb
--- /dev/null
+++ b/frontend/.prettierrc
@@ -0,0 +1,6 @@
+{
+ "printWidth": 100,
+ "semi": false,
+ "singleQuote": true,
+ "trailingComma": "none"
+}
diff --git a/frontend/components/auth/FormLogin.vue b/frontend/components/auth/FormLogin.vue
index c561926c68..0b5c68b3e4 100644
--- a/frontend/components/auth/FormLogin.vue
+++ b/frontend/components/auth/FormLogin.vue
@@ -7,12 +7,7 @@
>
<template #content>
<v-form v-model="valid">
- <v-alert
- v-show="showError"
- v-model="showError"
- type="error"
- dismissible
- >
+ <v-alert v-show="showError" v-model="showError" type="error" dismissible>
{{ $t('errors.invalidUserOrPass') }}
</v-alert>
<v-text-field
diff --git a/frontend/components/comment/Comment.vue b/frontend/components/comment/Comment.vue
index 7dd7b1780c..66d9441697 100644
--- a/frontend/components/comment/Comment.vue
+++ b/frontend/components/comment/Comment.vue
@@ -11,41 +11,26 @@
<v-list-item-content>
<v-list-item-title>{{ comment.username }}</v-list-item-title>
<v-list-item-subtitle>
- {{ comment.createdAt | dateParse('YYYY-MM-DDTHH:mm:ss') | dateFormat('DD/MM/YYYY HH:mm') }}
+ {{
+ comment.createdAt | dateParse('YYYY-MM-DDTHH:mm:ss') | dateFormat('DD/MM/YYYY HH:mm')
+ }}
</v-list-item-subtitle>
</v-list-item-content>
- <v-row
- align="center"
- justify="end"
- >
- <v-menu
- v-if="comment.user == userId"
- bottom
- left
- >
+ <v-row align="center" justify="end">
+ <v-menu v-if="comment.user == userId" bottom left>
<template #activator="{ on, attrs }">
- <v-btn
- icon
- v-bind="attrs"
- v-on="on"
- >
+ <v-btn icon v-bind="attrs" v-on="on">
<v-icon>{{ mdiDotsVertical }}</v-icon>
</v-btn>
</template>
<v-list>
<v-list-item>
- <v-list-item-title
- @click="showEdit=true"
- >
- Edit
- </v-list-item-title>
+ <v-list-item-title @click="showEdit = true"> Edit </v-list-item-title>
</v-list-item>
<v-list-item>
- <v-list-item-title
- @click="$emit('delete-comment', comment)"
- >
+ <v-list-item-title @click="$emit('delete-comment', comment)">
Delete
</v-list-item-title>
</v-list-item>
@@ -59,27 +44,12 @@
<span v-if="!showEdit">
{{ comment.text }}
</span>
- <v-form
- v-else
- v-model="valid"
- >
+ <v-form v-else v-model="valid">
<v-row>
- <v-textarea
- v-model="editText"
- auto-grow
- rows="1"
- solo
- :rules="commentRules"
- />
+ <v-textarea v-model="editText" auto-grow rows="1" solo :rules="commentRules" />
</v-row>
<v-row justify="end">
- <v-btn
- text
- class="text-capitalize"
- @click="cancel"
- >
- Cancel
- </v-btn>
+ <v-btn text class="text-capitalize" @click="cancel"> Cancel </v-btn>
<v-btn
:disabled="!valid"
color="primary"
@@ -119,9 +89,7 @@ export default Vue.extend({
return {
showEdit: false,
editText: this.comment.text,
- commentRules: [
- (v: string) => !!v.trim() || 'Comment is required'
- ],
+ commentRules: [(v: string) => !!v.trim() || 'Comment is required'],
valid: false,
mdiAccountCircle,
mdiDotsVertical
@@ -131,10 +99,10 @@ export default Vue.extend({
methods: {
updateComment(newText: string) {
this.showEdit = false
- const comment = {...this.comment, text:newText }
+ const comment = { ...this.comment, text: newText }
this.$emit('update-comment', comment)
},
-
+
cancel() {
this.showEdit = false
this.editText = this.comment.text
diff --git a/frontend/components/comment/CommentList.vue b/frontend/components/comment/CommentList.vue
index a847d94165..92e3f7b2d8 100644
--- a/frontend/components/comment/CommentList.vue
+++ b/frontend/components/comment/CommentList.vue
@@ -10,7 +10,7 @@
:loading-text="$t('generic.loading')"
:no-data-text="$t('vuetify.noDataAvailable')"
:footer-props="{
- 'showFirstLastPage': true,
+ showFirstLastPage: true,
'items-per-page-options': [10, 50, 100],
'items-per-page-text': $t('vuetify.itemsPerPageText'),
'page-text': $t('dataset.pageText')
@@ -20,7 +20,9 @@
@input="$emit('input', $event)"
>
<template #[`item.createdAt`]="{ item }">
- <span>{{ item.createdAt | dateParse('YYYY-MM-DDTHH:mm:ss') | dateFormat('DD/MM/YYYY HH:mm') }}</span>
+ <span>{{
+ item.createdAt | dateParse('YYYY-MM-DDTHH:mm:ss') | dateFormat('DD/MM/YYYY HH:mm')
+ }}</span>
</template>
<template #top>
<v-text-field
@@ -88,7 +90,7 @@ export default Vue.extend({
{ text: this.$t('dataset.text'), value: 'text' },
{ text: this.$t('user.username'), value: 'username' },
{ text: this.$t('comments.created_at'), value: 'createdAt' },
- { text: this.$t('dataset.action'), value: 'action' },
+ { text: this.$t('dataset.action'), value: 'action' }
],
mdiMagnify
}
@@ -117,7 +119,7 @@ export default Vue.extend({
})
this.options.page = 1
}
- },
+ }
// methods: {
// toLabeling(item: CommentReadDTO) {
diff --git a/frontend/components/comment/FormCreate.vue b/frontend/components/comment/FormCreate.vue
index b15863ee26..96d6f87cdc 100644
--- a/frontend/components/comment/FormCreate.vue
+++ b/frontend/components/comment/FormCreate.vue
@@ -28,9 +28,7 @@ import Vue from 'vue'
export default Vue.extend({
data() {
return {
- commentRules: [
- (v: string) => !!v.trim() || 'Comment is required'
- ],
+ commentRules: [(v: string) => !!v.trim() || 'Comment is required'],
message: '',
valid: false
}
diff --git a/frontend/components/configAutoLabeling/ConfigCreationForm.vue b/frontend/components/configAutoLabeling/ConfigCreationForm.vue
index af594dd021..7ae36e9b53 100644
--- a/frontend/components/configAutoLabeling/ConfigCreationForm.vue
+++ b/frontend/components/configAutoLabeling/ConfigCreationForm.vue
@@ -1,15 +1,10 @@
<template>
- <v-stepper
- v-model="step.count"
- >
+ <v-stepper v-model="step.count">
<v-overlay :value="isLoading">
<v-progress-circular indeterminate size="64" />
</v-overlay>
<config-header :step="step.count" />
- <config-template-name
- v-model="fields"
- @next="step.next()"
- />
+ <config-template-name v-model="fields" @next="step.next()" />
<config-parameters
v-if="fields.modelAttrs !== undefined"
v-model="fields.modelAttrs"
@@ -86,37 +81,44 @@ export default Vue.extend({
watch: {
'fields.modelName'() {
- this.passTesting = {parameter: false, template: false, mapping: false}
- },
- 'fields.modelAttrs': {
- handler() {
- this.passTesting = {parameter: false, template: false, mapping: false}
- },
- deep: true
- },
+ this.passTesting = { parameter: false, template: false, mapping: false }
+ },
+ 'fields.modelAttrs': {
+ handler() {
+ this.passTesting = {
+ parameter: false,
+ template: false,
+ mapping: false
+ }
+ },
+ deep: true
+ },
'fields.template'() {
- this.passTesting = {parameter: true, template: false, mapping: false}
+ this.passTesting = { parameter: true, template: false, mapping: false }
},
'fields.labelMapping': {
- handler() {
- this.passTesting = {parameter: true, template: true, mapping: false}
- },
- deep: true
- },
+ handler() {
+ this.passTesting = { parameter: true, template: true, mapping: false }
+ },
+ deep: true
+ }
},
methods: {
- testConfig(promise: Promise<any>, key: 'parameter'|'template'|'mapping') {
+ testConfig(promise: Promise<any>, key: 'parameter' | 'template' | 'mapping') {
this.isLoading = true
- promise.then((value) => {
- this.response[key] = value
- this.passTesting[key] = true
- this.errors = []
- }).catch((error) => {
- this.errors = [error.message]
- }).finally(() => {
- this.isLoading = false
- })
+ promise
+ .then((value) => {
+ this.response[key] = value
+ this.passTesting[key] = true
+ this.errors = []
+ })
+ .catch((error) => {
+ this.errors = [error.message]
+ })
+ .finally(() => {
+ this.isLoading = false
+ })
},
testParameters(text: string) {
const projectId = this.$route.params.id
@@ -140,7 +142,8 @@ export default Vue.extend({
const projectId = this.$route.params.id
const item = ConfigItem.parseFromUI(this.fields)
this.isLoading = true
- this.$services.config.save(projectId, item)
+ this.$services.config
+ .save(projectId, item)
.then(() => {
this.step.first()
this.$emit('onCreate')
diff --git a/frontend/components/configAutoLabeling/ConfigList.vue b/frontend/components/configAutoLabeling/ConfigList.vue
index 0136cdd18c..b3940d7690 100644
--- a/frontend/components/configAutoLabeling/ConfigList.vue
+++ b/frontend/components/configAutoLabeling/ConfigList.vue
@@ -11,23 +11,23 @@
>
<template #top>
<div class="ma-4">
- <v-btn
- class="primary text-capitalize"
- @click="dialogCreate=true"
- >
+ <v-btn class="primary text-capitalize" @click="dialogCreate = true">
{{ $t('generic.create') }}
</v-btn>
<v-btn
class="text-capitalize ms-2"
:disabled="!isDeletable()"
outlined
- @click="dialogDelete=true"
+ @click="dialogDelete = true"
>
{{ $t('generic.delete') }}
</v-btn>
<v-dialog v-model="dialogCreate">
<config-creation-form
- @onCreate="onCreate();dialogCreate=false"
+ @onCreate="
+ onCreate()
+ dialogCreate = false
+ "
/>
</v-dialog>
<v-dialog v-model="dialogDelete">
@@ -36,8 +36,11 @@
title="Delete Config"
message="Are you sure you want to delete these configs?"
item-key="modelName"
- @ok="remove();dialogDelete=false"
- @cancel="dialogDelete=false"
+ @ok="
+ remove()
+ dialogDelete = false
+ "
+ @cancel="dialogDelete = false"
/>
</v-dialog>
</div>
diff --git a/frontend/components/configAutoLabeling/form/ConfigHeader.vue b/frontend/components/configAutoLabeling/form/ConfigHeader.vue
index 4d909550bd..6ccf117310 100644
--- a/frontend/components/configAutoLabeling/form/ConfigHeader.vue
+++ b/frontend/components/configAutoLabeling/form/ConfigHeader.vue
@@ -1,32 +1,12 @@
<template>
<v-stepper-header>
- <v-stepper-step
- :complete="step > 1"
- step="1"
- >
- Select a template
- </v-stepper-step>
+ <v-stepper-step :complete="step > 1" step="1"> Select a template </v-stepper-step>
<v-divider />
- <v-stepper-step
- :complete="step > 2"
- step="2"
- >
- Set parameters
- </v-stepper-step>
+ <v-stepper-step :complete="step > 2" step="2"> Set parameters </v-stepper-step>
<v-divider />
- <v-stepper-step
- :complete="step > 3"
- step="3"
- >
- Set a template
- </v-stepper-step>
+ <v-stepper-step :complete="step > 3" step="3"> Set a template </v-stepper-step>
<v-divider />
- <v-stepper-step
- :complete="step > 4"
- step="4"
- >
- Set mappings
- </v-stepper-step>
+ <v-stepper-step :complete="step > 4" step="4"> Set mappings </v-stepper-step>
</v-stepper-header>
</template>
diff --git a/frontend/components/configAutoLabeling/form/ConfigLabelMapping.vue b/frontend/components/configAutoLabeling/form/ConfigLabelMapping.vue
index 3950256e11..f18ebcb170 100644
--- a/frontend/components/configAutoLabeling/form/ConfigLabelMapping.vue
+++ b/frontend/components/configAutoLabeling/form/ConfigLabelMapping.vue
@@ -4,65 +4,33 @@
<v-card-text class="pa-0">
<h4 class="text-h6">Configure label mappings</h4>
<p class="font-weight-regular body-1">
- Once you fetch the API response, you need to convert the label in the response into the one which you defined at the label page.
+ Once you fetch the API response, you need to convert the label in the response into the
+ one which you defined at the label page.
</p>
- <h4 class="text-h6">
- Response
- </h4>
- <v-sheet
- :dark="!$vuetify.theme.dark"
- :light="$vuetify.theme.dark"
- class="mb-5 pa-5"
- >
+ <h4 class="text-h6">Response</h4>
+ <v-sheet :dark="!$vuetify.theme.dark" :light="$vuetify.theme.dark" class="mb-5 pa-5">
<pre>{{ JSON.stringify(response, null, 4) }}</pre>
</v-sheet>
<label-mapping v-model="mapping" />
- <v-alert
- v-for="(error, index) in errorMessages"
- :key="index"
- prominent
- type="error"
- >
+ <v-alert v-for="(error, index) in errorMessages" :key="index" prominent type="error">
<v-row align="center">
<v-col class="grow">
{{ error }}
</v-col>
</v-row>
</v-alert>
- <h4 class="text-h6">
- Result
- </h4>
- <v-sheet
- :dark="!$vuetify.theme.dark"
- :light="$vuetify.theme.dark"
- class="mb-5 pa-5"
- >
+ <h4 class="text-h6">Result</h4>
+ <v-sheet :dark="!$vuetify.theme.dark" :light="$vuetify.theme.dark" class="mb-5 pa-5">
<pre>{{ JSON.stringify(result, null, 4) }}</pre>
</v-sheet>
</v-card-text>
<v-card-actions class="pa-0">
<v-spacer />
- <v-btn
- text
- class="text-capitalize"
- @click="$emit('prev')"
- >
- Prev
- </v-btn>
- <v-btn
- v-show="!isPassed"
- color="primary"
- class="text-capitalize"
- @click="$emit('onTest')"
- >
+ <v-btn text class="text-capitalize" @click="$emit('prev')"> Prev </v-btn>
+ <v-btn v-show="!isPassed" color="primary" class="text-capitalize" @click="$emit('onTest')">
Test
</v-btn>
- <v-btn
- v-show="isPassed"
- color="success"
- class="text-capitalize"
- @click="$emit('next')"
- >
+ <v-btn v-show="isPassed" color="success" class="text-capitalize" @click="$emit('next')">
Finish
</v-btn>
</v-card-actions>
diff --git a/frontend/components/configAutoLabeling/form/ConfigParameters.vue b/frontend/components/configAutoLabeling/form/ConfigParameters.vue
index e779327b85..e216c124b9 100644
--- a/frontend/components/configAutoLabeling/form/ConfigParameters.vue
+++ b/frontend/components/configAutoLabeling/form/ConfigParameters.vue
@@ -4,9 +4,7 @@
<v-card-text class="pa-0">
<v-form>
<h4 class="text-h6">Set parameters</h4>
- <p class="font-weight-regular body-1">
- You can set parameters to fetch API response.
- </p>
+ <p class="font-weight-regular body-1">You can set parameters to fetch API response.</p>
<template v-for="item in value">
<v-text-field
v-if="item.type === 'textField'"
@@ -33,7 +31,8 @@
<h4 class="text-h6">Test the parameters</h4>
<p class="font-weight-regular body-1">
Before proceeding, you need to test the parameters whether they can fetch API response.
- Please input sample text and press the <strong>Test</strong> button.
+ Please input sample text and press the
+ <strong>Test</strong> button.
</p>
<v-text-field
v-if="project.isTextProject"
@@ -41,43 +40,23 @@
outlined
label="Sample Text"
/>
- <file-field
- v-else
- v-model="payload"
- />
- <v-alert
- v-for="(error, index) in errorMessages"
- :key="index"
- prominent
- type="error"
- >
+ <file-field v-else v-model="payload" />
+ <v-alert v-for="(error, index) in errorMessages" :key="index" prominent type="error">
<v-row align="center">
<v-col class="grow">
{{ error }}
</v-col>
</v-row>
</v-alert>
- <h4 class="text-h6">
- Response
- </h4>
- <v-sheet
- :dark="!$vuetify.theme.dark"
- :light="$vuetify.theme.dark"
- class="mb-5 pa-5"
- >
+ <h4 class="text-h6">Response</h4>
+ <v-sheet :dark="!$vuetify.theme.dark" :light="$vuetify.theme.dark" class="mb-5 pa-5">
<pre>{{ JSON.stringify(response, null, 4) }}</pre>
</v-sheet>
</v-form>
</v-card-text>
<v-card-actions class="pa-0">
<v-spacer />
- <v-btn
- text
- class="text-capitalize"
- @click="$emit('prev')"
- >
- Prev
- </v-btn>
+ <v-btn text class="text-capitalize" @click="$emit('prev')"> Prev </v-btn>
<v-btn
v-show="!isPassed"
color="primary"
@@ -86,12 +65,7 @@
>
Test
</v-btn>
- <v-btn
- v-show="isPassed"
- color="primary"
- class="text-capitalize"
- @click="$emit('next')"
- >
+ <v-btn v-show="isPassed" color="primary" class="text-capitalize" @click="$emit('next')">
Next
</v-btn>
</v-card-actions>
@@ -107,7 +81,7 @@ import FileField from './FileField.vue'
export default Vue.extend({
components: {
ObjectField,
- FileField,
+ FileField
},
props: {
diff --git a/frontend/components/configAutoLabeling/form/ConfigTemplate.vue b/frontend/components/configAutoLabeling/form/ConfigTemplate.vue
index bb4833a47c..99432a870d 100644
--- a/frontend/components/configAutoLabeling/form/ConfigTemplate.vue
+++ b/frontend/components/configAutoLabeling/form/ConfigTemplate.vue
@@ -4,41 +4,33 @@
<v-card-text class="pa-0">
<h4 class="text-h6">Set mapping template</h4>
<p class="font-weight-regular body-1">
- Now, you can successfuly fetch the API response.
- Next, you need to convert API response to doccano format with the mapping template.
+ Now, you can successfuly fetch the API response. Next, you need to convert API response to
+ doccano format with the mapping template.
</p>
- <h4 class="text-h6">
- Response
- </h4>
- <v-sheet
- :dark="!$vuetify.theme.dark"
- :light="$vuetify.theme.dark"
- class="mb-5 pa-5"
- >
+ <h4 class="text-h6">Response</h4>
+ <v-sheet :dark="!$vuetify.theme.dark" :light="$vuetify.theme.dark" class="mb-5 pa-5">
<pre>{{ JSON.stringify(response, null, 4) }}</pre>
</v-sheet>
- <h4 class="text-h6">
- doccano format
- </h4>
- <v-sheet
- :dark="!$vuetify.theme.dark"
- :light="$vuetify.theme.dark"
- class="mb-5 pa-5"
- >
+ <h4 class="text-h6">doccano format</h4>
+ <v-sheet :dark="!$vuetify.theme.dark" :light="$vuetify.theme.dark" class="mb-5 pa-5">
<pre>Text Classification</pre>
<pre>[{ "label": "Cat" }, ...]</pre>
- <br>
+ <br />
<pre>Sequence Labeling</pre>
<pre>[{ "label": "Cat", "start_offset": 0, "end_offset": 5 }, ...]</pre>
- <br>
+ <br />
<pre>Sequence to sequence</pre>
<pre>[{ "text": "Cat" }, ...]</pre>
</v-sheet>
<h4 class="text-h6">Mapping template</h4>
<p class="font-weight-regular body-1">
- You can set mapping template(<a href="https://jinja.palletsprojects.com/en/2.11.x/">Jinja2</a> format) to convert API response to doccano format.
- In the template, you can refer to the API response by the <strong>input</strong> variable.
- If you want to know the Jinja2 notation, please refer to the site.
+ You can set mapping template(<a href="https://jinja.palletsprojects.com/en/2.11.x/"
+ >Jinja2</a
+ >
+ format) to convert API response to doccano format. In the template, you can refer to the
+ API response by the
+ <strong>input</strong> variable. If you want to know the Jinja2 notation, please refer to
+ the site.
</p>
<v-textarea
:value="value"
@@ -46,52 +38,25 @@
label="Mapping Template"
@change="$emit('input', $event)"
/>
- <v-alert
- v-for="(error, index) in errorMessages"
- :key="index"
- prominent
- type="error"
- >
+ <v-alert v-for="(error, index) in errorMessages" :key="index" prominent type="error">
<v-row align="center">
<v-col class="grow">
{{ error }}
</v-col>
</v-row>
</v-alert>
- <h4 class="text-h6">
- Result
- </h4>
- <v-sheet
- :dark="!$vuetify.theme.dark"
- :light="$vuetify.theme.dark"
- class="mb-5 pa-5"
- >
+ <h4 class="text-h6">Result</h4>
+ <v-sheet :dark="!$vuetify.theme.dark" :light="$vuetify.theme.dark" class="mb-5 pa-5">
<pre>{{ JSON.stringify(result, null, 4) }}</pre>
</v-sheet>
</v-card-text>
<v-card-actions class="pa-0">
<v-spacer />
- <v-btn
- text
- class="text-capitalize"
- @click="$emit('prev')"
- >
- Prev
- </v-btn>
- <v-btn
- v-show="!isPassed"
- color="primary"
- class="text-capitalize"
- @click="$emit('onTest')"
- >
+ <v-btn text class="text-capitalize" @click="$emit('prev')"> Prev </v-btn>
+ <v-btn v-show="!isPassed" color="primary" class="text-capitalize" @click="$emit('onTest')">
Test
</v-btn>
- <v-btn
- v-show="isPassed"
- color="primary"
- class="text-capitalize"
- @click="$emit('next')"
- >
+ <v-btn v-show="isPassed" color="primary" class="text-capitalize" @click="$emit('next')">
Next
</v-btn>
</v-card-actions>
diff --git a/frontend/components/configAutoLabeling/form/ConfigTemplateName.vue b/frontend/components/configAutoLabeling/form/ConfigTemplateName.vue
index dad2bf0e02..ceb4f909c6 100644
--- a/frontend/components/configAutoLabeling/form/ConfigTemplateName.vue
+++ b/frontend/components/configAutoLabeling/form/ConfigTemplateName.vue
@@ -7,12 +7,7 @@
<p class="font-weight-regular body-1">
You can select the template to create the auto-labeling configuration.{{ valid }}
</p>
- <v-select
- v-model="selectedTask"
- :items="taskNames"
- label="Select a task name"
- outlined
- />
+ <v-select v-model="selectedTask" :items="taskNames" label="Select a task name" outlined />
<v-select
v-model="templateName"
:items="templateNames"
@@ -24,12 +19,7 @@
</v-card-text>
<v-card-actions class="pa-0">
<v-spacer />
- <v-btn
- :disabled="!valid"
- color="primary"
- class="text-capitalize"
- @click="$emit('next')"
- >
+ <v-btn :disabled="!valid" color="primary" class="text-capitalize" @click="$emit('next')">
Next
</v-btn>
</v-card-actions>
@@ -66,12 +56,12 @@ export default Vue.extend({
taskType(): string {
return {
DocumentClassification: 'Category',
- SequenceLabeling : 'Span',
- Seq2seq : 'Text',
- ImageClassification : 'Category',
- Speech2text : 'Text',
+ SequenceLabeling: 'Span',
+ Seq2seq: 'Text',
+ ImageClassification: 'Category',
+ Speech2text: 'Text'
}[this.selectedTask]!
- }
+ }
},
watch: {
diff --git a/frontend/components/configAutoLabeling/form/FileField.vue b/frontend/components/configAutoLabeling/form/FileField.vue
index 30a74e0f33..bb348504a0 100644
--- a/frontend/components/configAutoLabeling/form/FileField.vue
+++ b/frontend/components/configAutoLabeling/form/FileField.vue
@@ -13,16 +13,14 @@
<script>
import Cookies from 'js-cookie'
-import vueFilePond from "vue-filepond"
-import "filepond/dist/filepond.min.css"
-import FilePondPluginFileValidateType from "filepond-plugin-file-validate-type"
-const FilePond = vueFilePond(
- FilePondPluginFileValidateType,
-)
+import vueFilePond from 'vue-filepond'
+import 'filepond/dist/filepond.min.css'
+import FilePondPluginFileValidateType from 'filepond-plugin-file-validate-type'
+const FilePond = vueFilePond(FilePondPluginFileValidateType)
export default {
components: {
- FilePond,
+ FilePond
},
props: {
@@ -32,18 +30,18 @@ export default {
required: true
}
},
-
+
data() {
return {
myFiles: [],
server: {
url: '/v1/fp',
headers: {
- 'X-CSRFToken': Cookies.get('csrftoken'),
+ 'X-CSRFToken': Cookies.get('csrftoken')
},
process: {
url: '/process/',
- method: 'POST',
+ method: 'POST'
},
patch: '/patch/',
revert: '/revert/',
@@ -62,6 +60,6 @@ export default {
handleFilePondRemovefile() {
this.$emit('input', '')
}
- },
-};
+ }
+}
</script>
diff --git a/frontend/components/configAutoLabeling/form/LabelMapping.vue b/frontend/components/configAutoLabeling/form/LabelMapping.vue
index c3b08509d9..d6c48005fe 100644
--- a/frontend/components/configAutoLabeling/form/LabelMapping.vue
+++ b/frontend/components/configAutoLabeling/form/LabelMapping.vue
@@ -1,23 +1,9 @@
<template>
- <v-data-table
- :headers="headers"
- :items="value"
- >
+ <v-data-table :headers="headers" :items="value">
<template #top>
- <v-dialog
- v-model="dialog"
- max-width="800px"
- >
+ <v-dialog v-model="dialog" max-width="800px">
<template #activator="{ on, attrs }">
- <v-btn
- color="primary"
- dark
- class="text-none"
- v-bind="attrs"
- v-on="on"
- >
- Add
- </v-btn>
+ <v-btn color="primary" dark class="text-none" v-bind="attrs" v-on="on"> Add </v-btn>
</template>
<v-card>
<v-card-title>
@@ -26,16 +12,9 @@
<v-card-text>
<v-container>
- <v-form
- ref="form"
- v-model="valid"
- >
+ <v-form ref="form" v-model="valid">
<v-row>
- <v-col
- cols="12"
- sm="12"
- class="pa-0"
- >
+ <v-col cols="12" sm="12" class="pa-0">
<v-text-field
v-model="editedItem.from"
label="From"
@@ -43,11 +22,7 @@
outlined
/>
</v-col>
- <v-col
- cols="12"
- sm="12"
- class="pa-0"
- >
+ <v-col cols="12" sm="12" class="pa-0">
<v-select
v-model="editedItem.to"
:items="items"
@@ -63,12 +38,7 @@
<v-card-actions>
<v-spacer />
- <v-btn
- color="blue darken-1"
- class="text-capitalize"
- text
- @click="close"
- >
+ <v-btn color="blue darken-1" class="text-capitalize" text @click="close">
Cancel
</v-btn>
<v-btn
@@ -85,17 +55,10 @@
</v-dialog>
</template>
<template #[`item.actions`]="{ item }">
- <v-icon
- small
- class="mr-2"
- @click="editItem(item)"
- >
+ <v-icon small class="mr-2" @click="editItem(item)">
{{ mdiPencil }}
</v-icon>
- <v-icon
- small
- @click="deleteItem(item)"
- >
+ <v-icon small @click="deleteItem(item)">
{{ mdiDelete }}
</v-icon>
</template>
@@ -140,12 +103,12 @@ export default Vue.extend({
valid: false,
editedIndex: -1,
editedItem: {
- 'from': '',
- 'to': ''
+ from: '',
+ to: ''
},
defaultItem: {
- 'from': '',
- 'to': ''
+ from: '',
+ to: ''
},
items: [] as string[],
labelNameRules,
@@ -158,21 +121,21 @@ export default Vue.extend({
const project = await this.$services.project.findById(this.$route.params.id)
if (project.projectType.endsWith('Classification')) {
const labels = await this.$services.categoryType.list(this.$route.params.id)
- this.items = labels.map(item => item.text)
+ this.items = labels.map((item) => item.text)
} else {
const labels = await this.$services.spanType.list(this.$route.params.id)
- this.items = labels.map(item => item.text)
+ this.items = labels.map((item) => item.text)
}
},
methods: {
- editItem(item: {'from': string, 'to': string}) {
+ editItem(item: { from: string; to: string }) {
this.editedIndex = this.value.indexOf(item)
this.editedItem = Object.assign({}, item)
this.dialog = true
},
- deleteItem(item: {'from': string, 'to': string}) {
+ deleteItem(item: { from: string; to: string }) {
this.editedIndex = this.value.indexOf(item)
this.editedItem = Object.assign({}, item)
const items = Object.assign([], this.value)
diff --git a/frontend/components/configAutoLabeling/form/ObjectField.vue b/frontend/components/configAutoLabeling/form/ObjectField.vue
index f1c4c225b1..bce8f1c586 100644
--- a/frontend/components/configAutoLabeling/form/ObjectField.vue
+++ b/frontend/components/configAutoLabeling/form/ObjectField.vue
@@ -1,31 +1,14 @@
<template>
- <v-data-table
- :headers="headers"
- :items="value"
- >
+ <v-data-table :headers="headers" :items="value">
<template #top>
- <v-toolbar
- class="toolbar-control"
- flat
- >
+ <v-toolbar class="toolbar-control" flat>
<v-toolbar-title class="text-capitalize">
{{ title }}
</v-toolbar-title>
<v-spacer />
- <v-dialog
- v-model="dialog"
- max-width="800px"
- >
+ <v-dialog v-model="dialog" max-width="800px">
<template #activator="{ on, attrs }">
- <v-btn
- color="primary"
- dark
- class="text-none"
- v-bind="attrs"
- v-on="on"
- >
- Add
- </v-btn>
+ <v-btn color="primary" dark class="text-none" v-bind="attrs" v-on="on"> Add </v-btn>
</template>
<v-card>
<v-card-title>
@@ -34,32 +17,13 @@
<v-card-text>
<v-container>
- <v-form
- ref="form"
- v-model="valid"
- >
+ <v-form ref="form" v-model="valid">
<v-row>
- <v-col
- cols="12"
- sm="12"
- class="pa-0"
- >
- <v-text-field
- v-model="editedItem.key"
- label="Key"
- outlined
- />
+ <v-col cols="12" sm="12" class="pa-0">
+ <v-text-field v-model="editedItem.key" label="Key" outlined />
</v-col>
- <v-col
- cols="12"
- sm="12"
- class="pa-0"
- >
- <v-text-field
- v-model="editedItem.value"
- label="Value"
- outlined
- />
+ <v-col cols="12" sm="12" class="pa-0">
+ <v-text-field v-model="editedItem.value" label="Value" outlined />
</v-col>
</v-row>
</v-form>
@@ -68,12 +32,7 @@
<v-card-actions>
<v-spacer />
- <v-btn
- color="blue darken-1"
- class="text-capitalize"
- text
- @click="close"
- >
+ <v-btn color="blue darken-1" class="text-capitalize" text @click="close">
Cancel
</v-btn>
<v-btn
@@ -91,17 +50,10 @@
</v-toolbar>
</template>
<template #[`item.actions`]="{ item }">
- <v-icon
- small
- class="mr-2"
- @click="editItem(item)"
- >
+ <v-icon small class="mr-2" @click="editItem(item)">
{{ mdiPencil }}
</v-icon>
- <v-icon
- small
- @click="deleteItem(item)"
- >
+ <v-icon small @click="deleteItem(item)">
{{ mdiDelete }}
</v-icon>
</template>
@@ -113,7 +65,6 @@ import Vue from 'vue'
import { mdiPencil, mdiDelete } from '@mdi/js'
export default Vue.extend({
-
props: {
value: {
type: Array,
@@ -151,12 +102,12 @@ export default Vue.extend({
valid: false,
editedIndex: -1,
editedItem: {
- 'key': '',
- 'value': ''
+ key: '',
+ value: ''
},
defaultItem: {
- 'key': '',
- 'value': ''
+ key: '',
+ value: ''
},
items: [] as string[],
mdiPencil,
@@ -165,13 +116,13 @@ export default Vue.extend({
},
methods: {
- editItem(item: {'key': string, 'value': string}) {
+ editItem(item: { key: string; value: string }) {
this.editedIndex = this.value.indexOf(item)
this.editedItem = Object.assign({}, item)
this.dialog = true
},
- deleteItem(item: {'key': string, 'value': string}) {
+ deleteItem(item: { key: string; value: string }) {
this.editedIndex = this.value.indexOf(item)
this.editedItem = Object.assign({}, item)
const items = Object.assign([], this.value)
@@ -207,4 +158,4 @@ export default Vue.extend({
.toolbar-control >>> .v-toolbar__content {
padding: 0px !important;
}
-</style>
\ No newline at end of file
+</style>
diff --git a/frontend/components/example/ActionMenu.vue b/frontend/components/example/ActionMenu.vue
index d4df5a8636..33b88cc512 100644
--- a/frontend/components/example/ActionMenu.vue
+++ b/frontend/components/example/ActionMenu.vue
@@ -20,17 +20,17 @@ export default Vue.extend({
computed: {
items() {
- return [
- {
- title: this.$t('dataset.importDataset'),
- icon: mdiUpload,
- event: 'upload'
- },
- {
- title: this.$t('dataset.exportDataset'),
- icon: mdiDownload,
- event: 'download'
- }
+ return [
+ {
+ title: this.$t('dataset.importDataset'),
+ icon: mdiUpload,
+ event: 'upload'
+ },
+ {
+ title: this.$t('dataset.exportDataset'),
+ icon: mdiDownload,
+ event: 'download'
+ }
]
}
}
diff --git a/frontend/components/example/AudioList.vue b/frontend/components/example/AudioList.vue
index e81df83f87..332df0c196 100644
--- a/frontend/components/example/AudioList.vue
+++ b/frontend/components/example/AudioList.vue
@@ -10,7 +10,7 @@
:loading-text="$t('generic.loading')"
:no-data-text="$t('vuetify.noDataAvailable')"
:footer-props="{
- 'showFirstLastPage': true,
+ showFirstLastPage: true,
'items-per-page-options': [10, 50, 100],
'items-per-page-text': $t('vuetify.itemsPerPageText'),
'page-text': $t('dataset.pageText')
@@ -30,11 +30,7 @@
/>
</template>
<template #[`item.fileUrl`]="{ item }">
- <audio
- controls
- :src="item.fileUrl"
- class="mt-2"
- >
+ <audio controls :src="item.fileUrl" class="mt-2">
Your browser does not support the
<code>audio</code> element.
</audio>
@@ -46,11 +42,7 @@
<span> {{ item.commentCount }} </span>
</template>
<template #[`item.action`]="{ item }">
- <v-btn
- small
- color="primary text-capitalize"
- @click="toLabeling(item)"
- >
+ <v-btn small color="primary text-capitalize" @click="toLabeling(item)">
{{ $t('dataset.annotate') }}
</v-btn>
</template>
diff --git a/frontend/components/example/DocumentList.vue b/frontend/components/example/DocumentList.vue
index fd7c3aff1b..e4424b2923 100644
--- a/frontend/components/example/DocumentList.vue
+++ b/frontend/components/example/DocumentList.vue
@@ -10,7 +10,7 @@
:loading-text="$t('generic.loading')"
:no-data-text="$t('vuetify.noDataAvailable')"
:footer-props="{
- 'showFirstLastPage': true,
+ showFirstLastPage: true,
'items-per-page-options': [10, 50, 100],
'items-per-page-text': $t('vuetify.itemsPerPageText'),
'page-text': $t('dataset.pageText')
@@ -40,11 +40,7 @@
<span> {{ item.commentCount }} </span>
</template>
<template #[`item.action`]="{ item }">
- <v-btn
- small
- color="primary text-capitalize"
- @click="toLabeling(item)"
- >
+ <v-btn small color="primary text-capitalize" @click="toLabeling(item)">
{{ $t('dataset.annotate') }}
</v-btn>
</template>
diff --git a/frontend/components/example/FormDelete.vue b/frontend/components/example/FormDelete.vue
index 52e4219c85..5f5488d3ff 100644
--- a/frontend/components/example/FormDelete.vue
+++ b/frontend/components/example/FormDelete.vue
@@ -1,7 +1,7 @@
<template>
<confirm-form
:title="$t('dataset.deleteDocumentsTitle')"
- :message="$t('dataset.deleteDocumentsMessage', { 'number': selected.length })"
+ :message="$t('dataset.deleteDocumentsMessage', { number: selected.length })"
@ok="$emit('remove')"
@cancel="$emit('cancel')"
/>
diff --git a/frontend/components/example/ImageList.vue b/frontend/components/example/ImageList.vue
index 39afa3cc9c..b85e369da2 100644
--- a/frontend/components/example/ImageList.vue
+++ b/frontend/components/example/ImageList.vue
@@ -10,7 +10,7 @@
:loading-text="$t('generic.loading')"
:no-data-text="$t('vuetify.noDataAvailable')"
:footer-props="{
- 'showFirstLastPage': true,
+ showFirstLastPage: true,
'items-per-page-options': [10, 50, 100],
'items-per-page-text': $t('vuetify.itemsPerPageText'),
'page-text': $t('dataset.pageText')
@@ -46,11 +46,7 @@
<span> {{ item.commentCount }} </span>
</template>
<template #[`item.action`]="{ item }">
- <v-btn
- small
- color="primary text-capitalize"
- @click="toLabeling(item)"
- >
+ <v-btn small color="primary text-capitalize" @click="toLabeling(item)">
{{ $t('dataset.annotate') }}
</v-btn>
</template>
diff --git a/frontend/components/label/ActionMenu.vue b/frontend/components/label/ActionMenu.vue
index 008b512fb3..229ce73ffa 100644
--- a/frontend/components/label/ActionMenu.vue
+++ b/frontend/components/label/ActionMenu.vue
@@ -20,7 +20,7 @@ export default Vue.extend({
computed: {
items() {
- return [
+ return [
{
title: this.$t('labels.createLabel'),
icon: mdiPencil,
diff --git a/frontend/components/label/FormCreate.vue b/frontend/components/label/FormCreate.vue
index 69f216d0d6..5421ee8331 100644
--- a/frontend/components/label/FormCreate.vue
+++ b/frontend/components/label/FormCreate.vue
@@ -2,10 +2,7 @@
<v-card>
<v-card-title>Create a Label Type</v-card-title>
<v-card-text>
- <v-form
- ref="form"
- v-model="valid"
- >
+ <v-form ref="form" v-model="valid">
<v-row>
<v-col cols="12" sm="6">
<v-text-field
@@ -41,27 +38,18 @@
required
@input="$emit('update:backgroundColor', $event)"
/>
- <v-chip-group
- v-model="selectedColorIndex"
- column
- mandatory
- >
+ <v-chip-group v-model="selectedColorIndex" column mandatory>
<v-chip
v-for="color in predefinedColors"
:key="color"
:color="color"
filter
label
- style="height: 32px; width: 32px;"
+ style="height: 32px; width: 32px"
/>
<v-tooltip bottom>
<template #activator="{ on, attrs }">
- <v-chip
- label
- v-bind="attrs"
- v-on="on"
- @click="setRandomColor"
- >
+ <v-chip label v-bind="attrs" v-on="on" @click="setRandomColor">
<v-icon>{{ mdiReload }}</v-icon>
</v-chip>
</template>
@@ -74,17 +62,9 @@
<v-row>
<v-col>
<div class="title black--text mb-2">Preview</div>
- <v-chip
- :color="backgroundColor"
- :text-color="textColor"
- >
+ <v-chip :color="backgroundColor" :text-color="textColor">
{{ text }}
- <v-avatar
- v-if="suffixKey"
- right
- color="white"
- class="black--text font-weight-bold"
- >
+ <v-avatar v-if="suffixKey" right color="white" class="black--text font-weight-bold">
{{ suffixKey }}
</v-avatar>
</v-chip>
@@ -103,7 +83,7 @@
<script lang="ts">
import Vue, { PropType } from 'vue'
-import { mdiReload } from '@mdi/js';
+import { mdiReload } from '@mdi/js'
import { LabelDTO } from '~/services/application/label/labelData'
export default Vue.extend({
@@ -119,11 +99,11 @@ export default Vue.extend({
},
text: {
type: String,
- required: true,
+ required: true
},
backgroundColor: {
type: String,
- required: true,
+ required: true
},
suffixKey: {
type: String as () => string | null,
@@ -137,13 +117,17 @@ export default Vue.extend({
valid: false,
rules: {
required: (v: string) => !!v || 'Required',
- // @ts-ignore
- counter: (v: string) => (v && v.length <= 100) || this.$t('rules.labelNameRules').labelLessThan100Chars,
- // @ts-ignore
- nameDuplicated: (v: string) => (!this.isUsedName(v)) || this.$t('rules.labelNameRules').duplicated,
- // @ts-ignore
- keyDuplicated: (v: string) => (!this.isUsedSuffixKey(v)) || this.$t('rules.keyNameRules').duplicated,
- validColor: (v: string) => (/^#[0-9A-F]{6}$/i.test(v)) || 'This string is NOT a valid hex color.'
+ counter: (
+ v: string // @ts-ignore
+ ) => (v && v.length <= 100) || this.$t('rules.labelNameRules').labelLessThan100Chars,
+ nameDuplicated: (
+ v: string // @ts-ignore
+ ) => !this.isUsedName(v) || this.$t('rules.labelNameRules').duplicated,
+ keyDuplicated: (
+ v: string // @ts-ignore
+ ) => !this.isUsedSuffixKey(v) || this.$t('rules.keyNameRules').duplicated,
+ validColor: (v: string) =>
+ /^#[0-9A-F]{6}$/i.test(v) || 'This string is NOT a valid hex color.'
},
mdiReload
}
@@ -151,20 +135,36 @@ export default Vue.extend({
computed: {
availableSuffixKeys(): string[] {
- const usedSuffixKeys = this.items.map(item => item.suffixKey).filter(item => item !== this.suffixKey)
+ const usedSuffixKeys = this.items
+ .map((item) => item.suffixKey)
+ .filter((item) => item !== this.suffixKey)
const allSuffixKeys = '0123456789abcdefghijklmnopqrstuvwxyz'.split('')
- return allSuffixKeys.filter(item => !usedSuffixKeys.includes(item))
+ return allSuffixKeys.filter((item) => !usedSuffixKeys.includes(item))
},
predefinedColors(): string[] {
return [
- '#73D8FF', '#009CE0', '#0062B1',
- '#AEA1FF', '#7B64FF', '#653294',
- '#FDA1FF', '#FA28FF', '#AB149E',
- '#68CCCA', '#16A5A5', '#0C797D',
- '#A4DD00', '#68BC00', '#194D33',
- '#FCDC00', '#FCC400', '#FB9E00',
- '#F44E3B', '#D33115', '#9F0500'
+ '#73D8FF',
+ '#009CE0',
+ '#0062B1',
+ '#AEA1FF',
+ '#7B64FF',
+ '#653294',
+ '#FDA1FF',
+ '#FA28FF',
+ '#AB149E',
+ '#68CCCA',
+ '#16A5A5',
+ '#0C797D',
+ '#A4DD00',
+ '#68BC00',
+ '#194D33',
+ '#FCDC00',
+ '#FCC400',
+ '#FB9E00',
+ '#F44E3B',
+ '#D33115',
+ '#9F0500'
]
},
@@ -183,18 +183,18 @@ export default Vue.extend({
methods: {
isUsedName(text: string): boolean {
- return this.items.filter(item => item.id !== this.id && item.text === text).length > 0
+ return this.items.filter((item) => item.id !== this.id && item.text === text).length > 0
},
isUsedSuffixKey(key: string) {
if (key === null) {
return false
}
- return this.items.filter(item => item.id !== this.id && item.suffixKey === key).length > 0
+ return this.items.filter((item) => item.id !== this.id && item.suffixKey === key).length > 0
},
setRandomColor() {
- const maxVal = 0xFFFFFF
+ const maxVal = 0xffffff
const randomNumber = Math.floor(Math.random() * maxVal)
const randomString = randomNumber.toString(16)
const randColor = randomString.padStart(6, '0')
diff --git a/frontend/components/label/FormImport.vue b/frontend/components/label/FormImport.vue
index aa1d2e5a0d..d80f870d6d 100644
--- a/frontend/components/label/FormImport.vue
+++ b/frontend/components/label/FormImport.vue
@@ -2,10 +2,7 @@
<v-card>
<v-card-title v-text="$t('labels.importLabels')" />
<v-card-text>
- <v-form
- ref="form"
- v-model="valid"
- >
+ <v-form ref="form" v-model="valid">
<h3>{{ $t('labels.importMessage1') }}</h3>
<v-sheet
v-if="exampleFormat"
@@ -55,7 +52,7 @@ export default Vue.extend({
return {
file: null,
valid: false,
- uploadSingleFileRules,
+ uploadSingleFileRules
}
},
diff --git a/frontend/components/label/LabelList.vue b/frontend/components/label/LabelList.vue
index 74d568b2ce..e89419ea22 100644
--- a/frontend/components/label/LabelList.vue
+++ b/frontend/components/label/LabelList.vue
@@ -8,7 +8,7 @@
:loading-text="$t('generic.loading')"
:no-data-text="$t('vuetify.noDataAvailable')"
:footer-props="{
- 'showFirstLastPage': true,
+ showFirstLastPage: true,
'items-per-page-text': $t('vuetify.itemsPerPageText'),
'page-text': $t('dataset.pageText')
}"
@@ -35,10 +35,7 @@
</v-chip>
</template>
<template #[`item.actions`]="{ item }">
- <v-icon
- small
- @click="$emit('edit', item)"
- >
+ <v-icon small @click="$emit('edit', item)">
{{ mdiPencil }}
</v-icon>
</template>
@@ -80,10 +77,10 @@ export default Vue.extend({
computed: {
headers() {
return [
- { text: this.$t('generic.name'), value: 'text' },
+ { text: this.$t('generic.name'), value: 'text' },
{ text: this.$t('labels.shortkey'), value: 'suffixKey' },
- { text: this.$t('labels.color'), value: 'backgroundColor' },
- { text: 'Actions', value: 'actions', sortable: false },
+ { text: this.$t('labels.color'), value: 'backgroundColor' },
+ { text: 'Actions', value: 'actions', sortable: false }
]
}
}
diff --git a/frontend/components/layout/FeatureCard.vue b/frontend/components/layout/FeatureCard.vue
index 8c3515fa95..98b5e37b61 100644
--- a/frontend/components/layout/FeatureCard.vue
+++ b/frontend/components/layout/FeatureCard.vue
@@ -1,9 +1,6 @@
<template>
<v-card>
- <v-img
- :src="imageSrc"
- height="200px"
- />
+ <v-img :src="imageSrc" height="200px" />
<v-card-title primary-title class="layout justify-center">
<div class="headline text-xs-center font-weight-bold mb-2">
{{ title }}
diff --git a/frontend/components/layout/FeatureCards.vue b/frontend/components/layout/FeatureCards.vue
index 316f4e3b02..715cdb1da1 100644
--- a/frontend/components/layout/FeatureCards.vue
+++ b/frontend/components/layout/FeatureCards.vue
@@ -1,11 +1,6 @@
<template>
<v-container>
- <v-layout
- column
- wrap
- class="my-5"
- align-center
- >
+ <v-layout column wrap class="my-5" align-center>
<v-flex xs12 sm4 class="my-3">
<div class="text-xs-center">
<h2 class="headline">
@@ -16,12 +11,7 @@
<v-flex xs12>
<v-container grid-list-xl>
<v-layout wrap align-center>
- <v-flex
- v-for="(item, index) in featureCards"
- :key="index"
- xs12
- md4
- >
+ <v-flex v-for="(item, index) in featureCards" :key="index" xs12 md4>
<feature-card
:image-src="require(`~/assets/${item.imageSrc}`)"
:title="item.title"
diff --git a/frontend/components/layout/LocaleMenu.vue b/frontend/components/layout/LocaleMenu.vue
index c7d93a5207..0eba373591 100644
--- a/frontend/components/layout/LocaleMenu.vue
+++ b/frontend/components/layout/LocaleMenu.vue
@@ -1,21 +1,12 @@
<template>
- <v-menu
- open-on-hover
- offset-y
- >
+ <v-menu open-on-hover offset-y>
<template #activator="{ on }">
- <v-btn
- text
- v-on="on"
- >
+ <v-btn text v-on="on">
{{ $i18n.locale }}
<v-icon>{{ mdiMenuDown }}</v-icon>
</v-btn>
</template>
- <v-list
- v-for="locale in $i18n.locales"
- :key="locale.code"
- >
+ <v-list v-for="locale in $i18n.locales" :key="locale.code">
<nuxt-link
class="v-list-item v-list-item--link"
:class="$vuetify.theme.dark ? 'theme--dark' : 'theme--light'"
diff --git a/frontend/components/layout/TheBottomBanner.vue b/frontend/components/layout/TheBottomBanner.vue
index 6c39d53d8e..7da1136667 100644
--- a/frontend/components/layout/TheBottomBanner.vue
+++ b/frontend/components/layout/TheBottomBanner.vue
@@ -1,44 +1,19 @@
<template>
<section>
- <v-parallax
- :src="require(`~/assets/vbanner.jpg`)"
- height="400"
- dark
- >
+ <v-parallax :src="require(`~/assets/vbanner.jpg`)" height="400" dark>
<v-container>
- <v-layout
- wrap
- align-center
- justify-center
- class="white--text"
- >
- <v-flex
- xs12
- md7
- >
- <v-img
- :src="require(`~/assets/ner_demo.png`)"
- max-height="380"
- contain
- class="ma-5"
- />
+ <v-layout wrap align-center justify-center class="white--text">
+ <v-flex xs12 md7>
+ <v-img :src="require(`~/assets/ner_demo.png`)" max-height="380" contain class="ma-5" />
</v-flex>
- <v-flex
- xs12
- md5
- >
+ <v-flex xs12 md5>
<h1 class="mb-2 display-1 text-xs-center">
{{ $t('home.footerTitle') }}
</h1>
<div class="mt-4">
<v-menu open-on-hover offset-y>
<template #activator="{ on }">
- <v-btn
- class="blue lighten-2"
- dark
- large
- v-on="on"
- >
+ <v-btn class="blue lighten-2" dark large v-on="on">
{{ $t('home.demoDropDown') }}
<v-icon>{{ mdiMenuDown }}</v-icon>
</v-btn>
diff --git a/frontend/components/layout/TheColorModeSwitcher.vue b/frontend/components/layout/TheColorModeSwitcher.vue
index b9e4440e8f..cef3b17b89 100644
--- a/frontend/components/layout/TheColorModeSwitcher.vue
+++ b/frontend/components/layout/TheColorModeSwitcher.vue
@@ -1,9 +1,5 @@
<template>
- <v-btn
- icon
- fab
- @click="isDark=!isDark"
- >
+ <v-btn icon fab @click="isDark = !isDark">
<v-icon v-if="isDark">
{{ mdiMoonWaxingCrescent }}
</v-icon>
diff --git a/frontend/components/layout/TheFooter.vue b/frontend/components/layout/TheFooter.vue
index 9e2923e624..fa307fc849 100644
--- a/frontend/components/layout/TheFooter.vue
+++ b/frontend/components/layout/TheFooter.vue
@@ -1,20 +1,7 @@
<template>
- <v-footer
- color="primary lighten-1"
- padless
- >
- <v-layout
- justify-center
- wrap
- >
- <v-flex
- black
- lighten-2
- py-4
- text-center
- white--text
- xs12
- >
+ <v-footer color="primary lighten-1" padless>
+ <v-layout justify-center wrap>
+ <v-flex black lighten-2 py-4 text-center white--text xs12>
© {{ new Date().getFullYear() }} doccano
</v-flex>
</v-layout>
diff --git a/frontend/components/layout/TheHeader.vue b/frontend/components/layout/TheHeader.vue
index 84d6bb711d..9c67745844 100644
--- a/frontend/components/layout/TheHeader.vue
+++ b/frontend/components/layout/TheHeader.vue
@@ -1,27 +1,17 @@
<template>
- <v-app-bar
- app
- clipped-left
- >
+ <v-app-bar app clipped-left>
<slot name="leftDrawerIcon" />
- <nuxt-link
- v-if="!isAuthenticated"
- to="/"
- style="line-height:0;"
- >
- <img src="~/assets/icon.png" height="48">
+ <nuxt-link v-if="!isAuthenticated" to="/" style="line-height: 0">
+ <img src="~/assets/icon.png" height="48" />
</nuxt-link>
- <v-toolbar-title
- v-if="!isAuthenticated"
- class="ml-2 d-none d-sm-flex"
- >
+ <v-toolbar-title v-if="!isAuthenticated" class="ml-2 d-none d-sm-flex">
doccano
</v-toolbar-title>
<v-btn
v-if="isAuthenticated && isIndividualProject"
text
class="d-none d-sm-flex"
- style="text-transform:none"
+ style="text-transform: none"
>
<v-icon small class="mr-1">
{{ mdiHexagonMultiple }}
@@ -39,16 +29,9 @@
>
{{ $t('header.projects') }}
</v-btn>
- <v-menu
- v-if="!isAuthenticated"
- open-on-hover
- offset-y
- >
+ <v-menu v-if="!isAuthenticated" open-on-hover offset-y>
<template #activator="{ on }">
- <v-btn
- text
- v-on="on"
- >
+ <v-btn text v-on="on">
{{ $t('home.demoDropDown') }}
<v-icon>{{ mdiMenuDown }}</v-icon>
</v-btn>
@@ -63,17 +46,10 @@
</v-list-item>
</v-list>
</v-menu>
- <v-btn
- v-if="!isAuthenticated"
- outlined
- @click="$router.push(localePath('/auth'))"
- >
+ <v-btn v-if="!isAuthenticated" outlined @click="$router.push(localePath('/auth'))">
{{ $t('user.login') }}
</v-btn>
- <v-menu
- v-if="isAuthenticated"
- offset-y
- >
+ <v-menu v-if="isAuthenticated" offset-y>
<template #activator="{ on }">
<v-btn on icon v-on="on">
<v-icon>{{ mdiDotsVertical }}</v-icon>
@@ -83,12 +59,7 @@
<v-subheader>{{ getUsername }}</v-subheader>
<v-list-item>
<v-list-item-content>
- <v-switch
- :input-value="isRTL"
- :label="direction"
- class="ms-1"
- @change="toggleRTL"
- />
+ <v-switch :input-value="isRTL" :label="direction" class="ms-1" @change="toggleRTL" />
</v-list-item-content>
</v-list-item>
<v-list-item @click="signout">
@@ -124,10 +95,13 @@ export default {
{ title: this.$t('home.demoNER'), link: 'named-entity-recognition' },
{ title: this.$t('home.demoSent'), link: 'sentiment-analysis' },
{ title: this.$t('home.demoTranslation'), link: 'translation' },
- { title: 'Intent Detection and Slot Filling', link: 'intent-detection-and-slot-filling' },
+ {
+ title: 'Intent Detection and Slot Filling',
+ link: 'intent-detection-and-slot-filling'
+ },
{ title: this.$t('home.demoTextToSQL'), link: 'text-to-sql' },
{ title: 'Image Classification', link: 'image-classification' },
- { title: 'Speech to Text', link: 'speech-to-text' },
+ { title: 'Speech to Text', link: 'speech-to-text' }
],
mdiLogout,
mdiDotsVertical,
diff --git a/frontend/components/layout/TheSideBar.vue b/frontend/components/layout/TheSideBar.vue
index 3ea649d1b7..1890f615a2 100644
--- a/frontend/components/layout/TheSideBar.vue
+++ b/frontend/components/layout/TheSideBar.vue
@@ -1,19 +1,12 @@
<template>
<v-list dense>
- <v-btn
- color="ms-4 my-1 mb-2 primary text-capitalize"
- nuxt
- @click="toLabeling"
- >
+ <v-btn color="ms-4 my-1 mb-2 primary text-capitalize" nuxt @click="toLabeling">
<v-icon left>
{{ mdiPlayCircleOutline }}
</v-icon>
{{ $t('home.startAnnotation') }}
</v-btn>
- <v-list-item-group
- v-model="selected"
- mandatory
- >
+ <v-list-item-group v-model="selected" mandatory>
<v-list-item
v-for="(item, i) in filteredItems"
:key="i"
@@ -35,7 +28,17 @@
</template>
<script>
-import { mdiHome, mdiDatabase, mdiCog, mdiChartBar, mdiBookOpenOutline, mdiCommentAccountOutline, mdiLabel, mdiAccount, mdiPlayCircleOutline } from '@mdi/js'
+import {
+ mdiHome,
+ mdiDatabase,
+ mdiCog,
+ mdiChartBar,
+ mdiBookOpenOutline,
+ mdiCommentAccountOutline,
+ mdiLabel,
+ mdiAccount,
+ mdiPlayCircleOutline
+} from '@mdi/js'
export default {
props: {
@@ -121,7 +124,7 @@ export default {
isVisible: this.isProjectAdmin
}
]
- return items.filter(item => item.isVisible)
+ return items.filter((item) => item.isVisible)
}
},
diff --git a/frontend/components/layout/TheTopBanner.vue b/frontend/components/layout/TheTopBanner.vue
index c993e48121..9f2507e2b5 100644
--- a/frontend/components/layout/TheTopBanner.vue
+++ b/frontend/components/layout/TheTopBanner.vue
@@ -1,41 +1,22 @@
<template>
<section>
- <v-parallax
- :src="require(`~/assets/vbanner.jpg`)"
- height="400"
- dark
- >
- <v-layout
- wrap
- align-center
- justify-center
- class="white--text"
- >
+ <v-parallax :src="require(`~/assets/vbanner.jpg`)" height="400" dark>
+ <v-layout wrap align-center justify-center class="white--text">
<v-flex text-right class="mr-5">
- <img src="~/assets/icon.png" alt="doccano" height="200">
+ <img src="~/assets/icon.png" alt="doccano" height="200" />
</v-flex>
<v-flex>
<h1 class="mb-2 display-1 text-xs-center">
{{ $t('home.mainTitle') }}
</h1>
<div class="mt-4">
- <v-btn
- large
- outlined
- color="white"
- href="https://github.com/doccano/doccano"
- >
+ <v-btn large outlined color="white" href="https://github.com/doccano/doccano">
<v-icon left>
{{ mdiGithub }}
</v-icon>
GitHub
</v-btn>
- <v-btn
- class="blue lighten-2 ml-5"
- dark
- large
- :href="localePath('/auth')"
- >
+ <v-btn class="blue lighten-2 ml-5" dark large :href="localePath('/auth')">
{{ $t('home.getStarted') }}
</v-btn>
</div>
diff --git a/frontend/components/member/FormCreate.vue b/frontend/components/member/FormCreate.vue
index b3e9268d2b..7bd7378c5f 100644
--- a/frontend/components/member/FormCreate.vue
+++ b/frontend/components/member/FormCreate.vue
@@ -39,11 +39,7 @@
{{ $translateRole(props.item.rolename, $t('members.roles')) }}
</template>
</v-select>
- <v-alert
- v-show="errorMessage"
- prominent
- type="error"
- >
+ <v-alert v-show="errorMessage" prominent type="error">
<v-row align="center">
<v-col class="grow">
{{ errorMessage }}
@@ -67,7 +63,7 @@ export default Vue.extend({
components: {
BaseCard
},
-
+
props: {
value: {
type: Object as PropType<MemberDTO>,
@@ -87,8 +83,8 @@ export default Vue.extend({
roles: [] as RoleDTO[],
username: '',
rules: {
- userRequired: (v: UserDTO) => !!v && !!v.username || 'Required',
- roleRequired: (v: RoleDTO) => !!v && !!v.rolename || 'Required'
+ userRequired: (v: UserDTO) => (!!v && !!v.username) || 'Required',
+ roleRequired: (v: RoleDTO) => (!!v && !!v.rolename) || 'Required'
},
mdiAccount,
mdiCreditCardOutline
diff --git a/frontend/components/member/MemberList.vue b/frontend/components/member/MemberList.vue
index 67201275be..dee4733ec2 100644
--- a/frontend/components/member/MemberList.vue
+++ b/frontend/components/member/MemberList.vue
@@ -8,7 +8,7 @@
:loading-text="$t('generic.loading')"
:no-data-text="$t('vuetify.noDataAvailable')"
:footer-props="{
- 'showFirstLastPage': true,
+ showFirstLastPage: true,
'items-per-page-text': $t('vuetify.itemsPerPageText'),
'page-text': $t('dataset.pageText')
}"
@@ -30,10 +30,7 @@
{{ $translateRole(item.rolename, $t('members.roles')) }}
</template>
<template #[`item.actions`]="{ item }">
- <v-icon
- small
- @click="$emit('edit', item)"
- >
+ <v-icon small @click="$emit('edit', item)">
{{ mdiPencil }}
</v-icon>
</template>
@@ -78,6 +75,6 @@ export default Vue.extend({
{ text: 'Actions', value: 'actions', sortable: false }
]
}
- }
+ }
})
</script>
diff --git a/frontend/components/metrics/LabelDistribution.vue b/frontend/components/metrics/LabelDistribution.vue
index 965a7217a9..05184d0ce3 100644
--- a/frontend/components/metrics/LabelDistribution.vue
+++ b/frontend/components/metrics/LabelDistribution.vue
@@ -3,21 +3,12 @@
<v-card-title v-text="title" />
<v-divider />
<v-tabs show-arrows>
- <v-tab
- v-for="(value, user) in chartJSFormat"
- :key="user"
- class="text-capitalize"
- >
+ <v-tab v-for="(value, user) in chartJSFormat" :key="user" class="text-capitalize">
{{ user }}
</v-tab>
- <v-tab-item
- v-for="(value, user) in chartJSFormat"
- :key="user"
- >
+ <v-tab-item v-for="(value, user) in chartJSFormat" :key="user">
<v-card-text>
- <bar-chart
- :chart-data="value"
- />
+ <bar-chart :chart-data="value" />
</v-card-text>
</v-tab-item>
</v-tabs>
@@ -48,29 +39,35 @@ export default Vue.extend({
labelTypes: {
type: Array as PropType<LabelDTO[]>,
default: () => [],
- required: true,
- },
+ required: true
+ }
},
computed: {
- colorMapping(): {[text: string]: string} {
- return Object.fromEntries(this.labelTypes.map((labelType) => [labelType.text, labelType.backgroundColor]))
+ colorMapping(): { [text: string]: string } {
+ return Object.fromEntries(
+ this.labelTypes.map((labelType) => [labelType.text, labelType.backgroundColor])
+ )
},
chartJSFormat(): any {
- const data: {[user: string]: {labels: string[], datasets: any[]}} = {}
+ const data: { [user: string]: { labels: string[]; datasets: any[] } } = {}
for (const user in this.distribution) {
const labels = Object.keys(this.distribution[user])
labels.sort()
const counts = labels.map((label) => this.distribution[user][label])
- const colors = labels.map((label) => label in this.colorMapping ? this.colorMapping[label] : '#00d1b2')
+ const colors = labels.map((label) =>
+ label in this.colorMapping ? this.colorMapping[label] : '#00d1b2'
+ )
data[user] = {
labels,
- datasets: [{
- title: this.title,
- backgroundColor: colors,
- data: counts
- }]
+ datasets: [
+ {
+ title: this.title,
+ backgroundColor: colors,
+ data: counts
+ }
+ ]
}
}
return data
diff --git a/frontend/components/metrics/MemberProgress.vue b/frontend/components/metrics/MemberProgress.vue
index 8b5263e49d..04d23297d6 100644
--- a/frontend/components/metrics/MemberProgress.vue
+++ b/frontend/components/metrics/MemberProgress.vue
@@ -3,11 +3,7 @@
<v-card-title>Member's Progress</v-card-title>
<v-divider />
<v-card-text>
- <div
- v-for="(item, index) in stats.progress"
- :key="index"
- class="mb-2"
- >
+ <div v-for="(item, index) in stats.progress" :key="index" class="mb-2">
<span class="font-weight-medium">{{ item.user }}</span>
<span class="font-weight-medium">{{ item.done }} / {{ stats.total }}</span>
<v-progress-linear :value="rate(item.done, stats.total)" />
@@ -31,7 +27,7 @@ export default Vue.extend({
methods: {
rate(done: number, total: number) {
- return done / total * 100
+ return (done / total) * 100
}
}
})
diff --git a/frontend/components/project/FormCreate.vue b/frontend/components/project/FormCreate.vue
index c722ce3100..9014a86d32 100644
--- a/frontend/components/project/FormCreate.vue
+++ b/frontend/components/project/FormCreate.vue
@@ -11,11 +11,7 @@
<v-row no-gutters>
<v-col v-for="(item, i) in projectTypes" :key="i">
<v-item v-slot="{ active, toggle }">
- <v-card
- class="mb-6 me-6"
- max-width="350"
- outlined
- >
+ <v-card class="mb-6 me-6" max-width="350" outlined>
<v-img
:src="require(`~/assets/images/tasks/${images[i]}`)"
height="200"
@@ -33,7 +29,7 @@
</v-col>
</v-row>
</v-item-group>
-
+
<v-text-field
:value="name"
:rules="projectNameRules($t('rules.projectNameRules'))"
@@ -98,12 +94,7 @@
Count
<v-tooltip bottom>
<template #activator="{ on }">
- <a
- target="_blank"
- href="https://unicode.org/reports/tr29/"
- @click.stop
- v-on="on"
- >
+ <a target="_blank" href="https://unicode.org/reports/tr29/" @click.stop v-on="on">
grapheme clusters
</a>
</template>
@@ -190,7 +181,7 @@ export default Vue.extend({
},
tags: {
type: Array,
- default: () => [],
+ default: () => []
}
},
@@ -201,7 +192,7 @@ export default Vue.extend({
projectTypeRules,
descriptionRules,
mdiCheckBold,
- selected: 0,
+ selected: 0
}
},
@@ -213,7 +204,7 @@ export default Vue.extend({
'Seq2seq',
'IntentDetectionAndSlotFilling',
'ImageClassification',
- 'Speech2text',
+ 'Speech2text'
]
},
images() {
@@ -227,10 +218,7 @@ export default Vue.extend({
]
},
hasSingleLabelOption() {
- return [
- 'DocumentClassification',
- 'ImageClassification',
- ].includes(this.projectType)
+ return ['DocumentClassification', 'ImageClassification'].includes(this.projectType)
},
isSequenceLabelingProject() {
return this.projectType === 'SequenceLabeling'
@@ -239,7 +227,7 @@ export default Vue.extend({
methods: {
updateValue(key: string, value: string) {
- this.$emit(`update:${key}`, value);
+ this.$emit(`update:${key}`, value)
},
translateTypeName(type: string, types: string[]): string {
const index = this.projectTypes.indexOf(type)
diff --git a/frontend/components/project/FormUpdate.vue b/frontend/components/project/FormUpdate.vue
index 1a87d05765..72d003a575 100644
--- a/frontend/components/project/FormUpdate.vue
+++ b/frontend/components/project/FormUpdate.vue
@@ -1,15 +1,9 @@
<template>
<v-card>
<v-card-text v-if="isReady">
- <v-form
- ref="form"
- v-model="valid"
- >
+ <v-form ref="form" v-model="valid">
<v-row>
- <v-col
- cols="12"
- sm="6"
- >
+ <v-col cols="12" sm="6">
<h3>Name</h3>
<v-text-field
v-model="project.name"
@@ -19,10 +13,7 @@
single-line
/>
</v-col>
- <v-col
- cols="12"
- sm="6"
- >
+ <v-col cols="12" sm="6">
<v-btn
v-if="!edit.name"
outlined
@@ -53,10 +44,7 @@
</v-col>
</v-row>
<v-row>
- <v-col
- cols="12"
- sm="6"
- >
+ <v-col cols="12" sm="6">
<h3>Description</h3>
<v-text-field
v-model="project.description"
@@ -66,10 +54,7 @@
single-line
/>
</v-col>
- <v-col
- cols="12"
- sm="6"
- >
+ <v-col cols="12" sm="6">
<v-btn
v-if="!edit.desc"
outlined
@@ -100,32 +85,28 @@
</v-col>
</v-row>
<v-row>
- <v-col
- cols="12"
- sm="6"
- >
+ <v-col cols="12" sm="6">
<h3>Tags</h3>
<v-chip
v-for="tag in tags"
:key="tag.id"
close
outlined
- @click:close="removeTag(tag.id)">{{tag.text}}
+ @click:close="removeTag(tag.id)"
+ >{{ tag.text }}
</v-chip>
<v-text-field
v-model="tagInput"
clearable
:prepend-icon="mdiPlusCircle"
@keyup.enter="addTag()"
- @click:prepend="addTag()">
+ @click:prepend="addTag()"
+ >
</v-text-field>
</v-col>
</v-row>
<v-row>
- <v-col
- cols="12"
- sm="6"
- >
+ <v-col cols="12" sm="6">
<h3>Shuffle</h3>
<v-checkbox
v-model="project.enableRandomOrder"
@@ -134,10 +115,7 @@
</v-col>
</v-row>
<v-row>
- <v-col
- cols="12"
- sm="6"
- >
+ <v-col cols="12" sm="6">
<h3>Collaboration</h3>
<v-checkbox
v-model="project.enableShareAnnotation"
@@ -155,7 +133,6 @@ import { mdiPlusCircle } from '@mdi/js'
import { projectNameRules, descriptionRules } from '@/rules/index'
export default {
-
data() {
return {
project: {},
@@ -197,7 +174,9 @@ export default {
methods: {
initEdit() {
- Object.keys(this.edit).forEach((v) => { this.edit[v] = false })
+ Object.keys(this.edit).forEach((v) => {
+ this.edit[v] = false
+ })
},
editProject(name) {
@@ -229,19 +208,19 @@ export default {
return this.$refs.form.validate()
},
- async getTags(){
- this.tags = await this.$services.tag.list(this.projectId)
+ async getTags() {
+ this.tags = await this.$services.tag.list(this.projectId)
},
- addTag(){
+ addTag() {
this.$services.tag.create(this.projectId, this.tagInput)
this.tagInput = ''
this.getTags()
},
- removeTag(id){
+ removeTag(id) {
this.$services.tag.delete(this.projectId, id)
- this.tags = this.tags.filter(tag => tag.id !== id)
+ this.tags = this.tags.filter((tag) => tag.id !== id)
}
}
}
diff --git a/frontend/components/project/ProjectList.vue b/frontend/components/project/ProjectList.vue
index 7e129cbdb9..07379e2179 100644
--- a/frontend/components/project/ProjectList.vue
+++ b/frontend/components/project/ProjectList.vue
@@ -10,7 +10,7 @@
:loading-text="$t('generic.loading')"
:no-data-text="$t('vuetify.noDataAvailable')"
:footer-props="{
- 'showFirstLastPage': true,
+ showFirstLastPage: true,
'items-per-page-options': [10, 50, 100],
'items-per-page-text': $t('vuetify.itemsPerPageText'),
'page-text': $t('dataset.pageText')
@@ -35,14 +35,12 @@
</nuxt-link>
</template>
<template #[`item.updatedAt`]="{ item }">
- <span>{{ item.updatedAt | dateParse('YYYY-MM-DDTHH:mm:ss') | dateFormat('DD/MM/YYYY HH:mm') }}</span>
+ <span>{{
+ item.updatedAt | dateParse('YYYY-MM-DDTHH:mm:ss') | dateFormat('DD/MM/YYYY HH:mm')
+ }}</span>
</template>
<template #[`item.tags`]="{ item }">
- <v-chip
- v-for="tag in item.tags"
- :key="tag.id"
- outlined v-text="tag.text"
- />
+ <v-chip v-for="tag in item.tags" :key="tag.id" outlined v-text="tag.text" />
</template>
</v-data-table>
</template>
@@ -96,7 +94,7 @@ export default Vue.extend({
{ text: this.$t('generic.description'), value: 'description' },
{ text: this.$t('generic.type'), value: 'projectType' },
{ text: 'Updated', value: 'updatedAt' },
- { text: 'Tags', value: 'tags'}
+ { text: 'Tags', value: 'tags' }
]
}
},
diff --git a/frontend/components/tasks/audio/AudioViewer.vue b/frontend/components/tasks/audio/AudioViewer.vue
index 240d8b47d8..2920679d55 100644
--- a/frontend/components/tasks/audio/AudioViewer.vue
+++ b/frontend/components/tasks/audio/AudioViewer.vue
@@ -1,11 +1,7 @@
<template>
<div>
<div id="waveform" />
- <v-row
- no-gutters
- align="center"
- class="mb-3 mt-1"
- >
+ <v-row no-gutters align="center" class="mb-3 mt-1">
<v-col md="8">
<v-slider
v-model="zoom"
@@ -43,21 +39,11 @@
/>
</v-col>
</v-row>
- <v-btn
- color="primary"
- class="text-capitalize"
- @click="play"
- >
- <v-icon
- v-if="!isPlaying"
- left
- >
+ <v-btn color="primary" class="text-capitalize" @click="play">
+ <v-icon v-if="!isPlaying" left>
{{ mdiPlayCircleOutline }}
</v-icon>
- <v-icon
- v-else
- left
- >
+ <v-icon v-else left>
{{ mdiPauseCircleOutline }}
</v-icon>
<span v-if="!isPlaying">Play</span>
@@ -69,7 +55,13 @@
<script>
import Vue from 'vue'
import WaveSurfer from 'wavesurfer.js'
-import { mdiPlayCircleOutline, mdiPauseCircleOutline, mdiVolumeHigh, mdiMagnifyPlusOutline, mdiMagnifyMinusOutline } from '@mdi/js'
+import {
+ mdiPlayCircleOutline,
+ mdiPauseCircleOutline,
+ mdiVolumeHigh,
+ mdiMagnifyPlusOutline,
+ mdiMagnifyMinusOutline
+} from '@mdi/js'
export default Vue.extend({
props: {
@@ -105,26 +97,26 @@ export default Vue.extend({
mounted() {
this.wavesurfer = WaveSurfer.create({
- container: '#waveform',
- backend: "MediaElement"
+ container: '#waveform',
+ backend: 'MediaElement'
})
this.load()
},
methods: {
load() {
- this.wavesurfer.load(this.source)
+ this.wavesurfer.load(this.source)
},
play() {
this.isPlaying = !this.isPlaying
- this.wavesurfer.playPause()
+ this.wavesurfer.playPause()
},
zoomOut() {
- this.zoom = (this.zoom - 10) || 0
+ this.zoom = this.zoom - 10 || 0
this.onChangeZoom(this.zoom)
},
zoomIn() {
- this.zoom = (this.zoom + 10) || 500
+ this.zoom = this.zoom + 10 || 500
this.onChangeZoom(this.zoom)
},
onChangeVolume(value) {
diff --git a/frontend/components/tasks/seq2seq/Seq2seqBox.vue b/frontend/components/tasks/seq2seq/Seq2seqBox.vue
index 7c3c2dd84e..5b374bc526 100644
--- a/frontend/components/tasks/seq2seq/Seq2seqBox.vue
+++ b/frontend/components/tasks/seq2seq/Seq2seqBox.vue
@@ -26,7 +26,7 @@
</template>
<template #[`item.text`]="{ item }">
<v-edit-dialog>
- <span class="title" style="font-weight:400">
+ <span class="title" style="font-weight: 400">
{{ item.text }}
</span>
<template #input>
@@ -40,10 +40,7 @@
</v-edit-dialog>
</template>
<template #[`item.action`]="{ item }">
- <v-icon
- small
- @click="remove(item.id)"
- >
+ <v-icon small @click="remove(item.id)">
{{ mdiDeleteOutline }}
</v-icon>
</template>
@@ -59,7 +56,7 @@ export default Vue.extend({
props: {
annotations: {
type: Array,
- default: () => ([]),
+ default: () => [],
required: true
}
},
diff --git a/frontend/components/tasks/sequenceLabeling/EntityEditor.vue b/frontend/components/tasks/sequenceLabeling/EntityEditor.vue
index 092d45d574..0b709b20ec 100644
--- a/frontend/components/tasks/sequenceLabeling/EntityEditor.vue
+++ b/frontend/components/tasks/sequenceLabeling/EntityEditor.vue
@@ -48,59 +48,59 @@ import 'vue-virtual-scroller/dist/vue-virtual-scroller.css'
export default Vue.extend({
components: {
VAnnotator,
- LabelingMenu,
+ LabelingMenu
},
props: {
dark: {
type: Boolean,
- default: false,
+ default: false
},
rtl: {
type: Boolean,
- default: false,
+ default: false
},
text: {
type: String,
- default: "",
- required: true,
+ default: '',
+ required: true
},
entities: {
type: Array as PropType<SpanDTO[]>,
default: () => [],
- required: true,
+ required: true
},
entityLabels: {
type: Array,
default: () => [],
- required: true,
+ required: true
},
relations: {
type: Array,
- default: () => [],
+ default: () => []
},
relationLabels: {
type: Array,
- default: () => [],
+ default: () => []
},
allowOverlapping: {
type: Boolean,
default: false,
- required: false,
+ required: false
},
graphemeMode: {
type: Boolean,
- default: false,
+ default: false
},
selectedLabel: {
type: Object,
default: null,
- required: false,
+ required: false
},
relationMode: {
type: Boolean,
- default: false,
- },
+ default: false
+ }
},
data() {
@@ -113,8 +113,8 @@ export default Vue.extend({
endOffset: 0,
entity: null as any,
relation: null as any,
- selectedEntities: [] as SpanDTO[],
- };
+ selectedEntities: [] as SpanDTO[]
+ }
},
computed: {
@@ -267,12 +267,12 @@ export default Vue.extend({
},
updateRelation(labelId: number) {
- this.$emit("click:relation", this.relation.id, labelId)
+ this.$emit('click:relation', this.relation.id, labelId)
},
deleteRelation(relation: any) {
this.$emit('contextmenu:relation', relation.id)
}
- },
-});
+ }
+})
</script>
diff --git a/frontend/components/tasks/sequenceLabeling/LabelingMenu.vue b/frontend/components/tasks/sequenceLabeling/LabelingMenu.vue
index 850cfef21f..7aba88c3b8 100644
--- a/frontend/components/tasks/sequenceLabeling/LabelingMenu.vue
+++ b/frontend/components/tasks/sequenceLabeling/LabelingMenu.vue
@@ -1,18 +1,6 @@
<template>
- <v-menu
- :value="opened"
- :position-x="x"
- :position-y="y"
- absolute
- offset-y
- @input="close"
- >
- <v-list
- dense
- min-width="150"
- max-height="400"
- class="overflow-y-auto"
- >
+ <v-menu :value="opened" :position-x="x" :position-y="y" absolute offset-y @input="close">
+ <v-list dense min-width="150" max-height="400" class="overflow-y-auto">
<v-list-item>
<v-autocomplete
ref="autocomplete"
@@ -28,14 +16,8 @@
small-chips
/>
</v-list-item>
- <v-list-item
- v-for="(label, i) in labels"
- :key="i"
- @click="onLabelSelected(label.id)"
- >
- <v-list-item-action
- v-if="hasAnySuffixKey"
- >
+ <v-list-item v-for="(label, i) in labels" :key="i" @click="onLabelSelected(label.id)">
+ <v-list-item-action v-if="hasAnySuffixKey">
<v-chip
v-if="label.suffixKey"
:color="label.backgroundColor"
@@ -46,7 +28,7 @@
<span v-else class="mr-8" />
</v-list-item-action>
<v-list-item-content>
- <v-list-item-title v-text="label.text"/>
+ <v-list-item-title v-text="label.text" />
</v-list-item-content>
</v-list-item>
</v-list>
@@ -60,27 +42,27 @@ export default Vue.extend({
labels: {
type: Array,
default: () => [],
- required: true,
+ required: true
},
opened: {
type: Boolean,
default: false,
- required: true,
+ required: true
},
selectedLabel: {
type: Object,
default: null,
- required: false,
+ required: false
},
x: {
type: Number,
default: 0,
- required: true,
+ required: true
},
y: {
type: Number,
default: 0,
- required: true,
+ required: true
}
},
@@ -90,8 +72,8 @@ export default Vue.extend({
endOffset: 0,
entity: null as any,
fromEntity: null as any,
- toEntity: null as any,
- };
+ toEntity: null as any
+ }
},
computed: {
@@ -115,7 +97,7 @@ export default Vue.extend({
// https://github.com/vuetifyjs/vuetify/issues/10765
this.$nextTick(() => {
if (this.$refs.autocomplete) {
- (this.$refs.autocomplete as any).selectedItems = []
+ ;(this.$refs.autocomplete as any).selectedItems = []
}
})
this.$emit('close')
diff --git a/frontend/components/tasks/sidebar/AnnotationProgress.vue b/frontend/components/tasks/sidebar/AnnotationProgress.vue
index 6fb8f848d1..11bd4b7df2 100644
--- a/frontend/components/tasks/sidebar/AnnotationProgress.vue
+++ b/frontend/components/tasks/sidebar/AnnotationProgress.vue
@@ -12,11 +12,7 @@
<v-list-item-subtitle class="text-right" v-text="progress.complete" />
</v-list-item>
</v-list>
- <v-progress-linear
- :value="percentage"
- color="success"
- height="25"
- >
+ <v-progress-linear :value="percentage" color="success" height="25">
<template #default="{ value }">
<strong>{{ value }}%</strong>
</template>
@@ -34,12 +30,12 @@ export default Vue.extend({
progress: {
type: Object as PropType<MyProgress>,
required: true
- },
+ }
},
computed: {
percentage(): number {
- return Math.ceil(this.progress.complete / this.progress.total * 100)
+ return Math.ceil((this.progress.complete / this.progress.total) * 100)
}
}
})
diff --git a/frontend/components/tasks/textClassification/LabelGroup.vue b/frontend/components/tasks/textClassification/LabelGroup.vue
index cc6dbbee0f..b7a504bfc1 100644
--- a/frontend/components/tasks/textClassification/LabelGroup.vue
+++ b/frontend/components/tasks/textClassification/LabelGroup.vue
@@ -33,7 +33,7 @@ export default {
},
annotations: {
type: Array,
- default: () => ([]),
+ default: () => [],
required: true
},
singleLabel: {
diff --git a/frontend/components/tasks/textClassification/LabelSelect.vue b/frontend/components/tasks/textClassification/LabelSelect.vue
index 0a70cda113..cb7620fcbd 100644
--- a/frontend/components/tasks/textClassification/LabelSelect.vue
+++ b/frontend/components/tasks/textClassification/LabelSelect.vue
@@ -33,7 +33,7 @@ export default {
},
annotations: {
type: Array,
- default: () => ([]),
+ default: () => [],
required: true
},
singleLabel: {
diff --git a/frontend/components/tasks/textClassification/multiLabel/LabelGroup.vue b/frontend/components/tasks/textClassification/multiLabel/LabelGroup.vue
index a28f118e6a..b3e637a443 100644
--- a/frontend/components/tasks/textClassification/multiLabel/LabelGroup.vue
+++ b/frontend/components/tasks/textClassification/multiLabel/LabelGroup.vue
@@ -1,10 +1,5 @@
<template>
- <v-chip-group
- :value="annotatedLabel"
- column
- multiple
- @change="addOrRemove"
- >
+ <v-chip-group :value="annotatedLabel" column multiple @change="addOrRemove">
<v-chip
v-for="item in labels"
:key="item.id"
@@ -13,12 +8,7 @@
:text-color="$contrastColor(item.backgroundColor)"
>
{{ item.text }}
- <v-avatar
- v-if="item.suffixKey"
- right
- color="white"
- class="black--text font-weight-bold"
- >
+ <v-avatar v-if="item.suffixKey" right color="white" class="black--text font-weight-bold">
{{ item.suffixKey }}
</v-avatar>
</v-chip>
@@ -37,15 +27,15 @@ export default {
},
annotations: {
type: Array,
- default: () => ([]),
+ default: () => [],
required: true
}
},
computed: {
annotatedLabel() {
- const labelIds = this.annotations.map(item => item.label)
- return labelIds.map(id => this.labels.findIndex(item => item.id === id))
+ const labelIds = this.annotations.map((item) => item.label)
+ return labelIds.map((id) => this.labels.findIndex((item) => item.id === id))
}
},
@@ -67,7 +57,7 @@ export default {
},
remove(label) {
- const annotation = this.annotations.find(item => item.label === label.id)
+ const annotation = this.annotations.find((item) => item.label === label.id)
this.$emit('remove', annotation.id)
}
}
diff --git a/frontend/components/tasks/textClassification/multiLabel/LabelSelect.vue b/frontend/components/tasks/textClassification/multiLabel/LabelSelect.vue
index 55a08e6eea..6842174b8a 100644
--- a/frontend/components/tasks/textClassification/multiLabel/LabelSelect.vue
+++ b/frontend/components/tasks/textClassification/multiLabel/LabelSelect.vue
@@ -9,7 +9,7 @@
multiple
class="pt-0"
:search-input.sync="search"
- @change="search=''"
+ @change="search = ''"
>
<template #selection="{ attrs, item, select, selected }">
<v-chip
@@ -21,28 +21,15 @@
@click="select"
@click:close="remove(item)"
>
- <v-avatar
- v-if="item.suffixKey"
- left
- color="white"
- class="black--text font-weight-bold"
- >
+ <v-avatar v-if="item.suffixKey" left color="white" class="black--text font-weight-bold">
{{ item.suffixKey }}
</v-avatar>
{{ item.text }}
</v-chip>
</template>
<template #item="{ item }">
- <v-chip
- :color="item.backgroundColor"
- :text-color="$contrastColor(item.backgroundColor)"
- >
- <v-avatar
- v-if="item.suffixKey"
- left
- color="white"
- class="black--text font-weight-bold"
- >
+ <v-chip :color="item.backgroundColor" :text-color="$contrastColor(item.backgroundColor)">
+ <v-avatar v-if="item.suffixKey" left color="white" class="black--text font-weight-bold">
{{ item.suffixKey }}
</v-avatar>
{{ item.text }}
@@ -61,7 +48,7 @@ export default {
},
annotations: {
type: Array,
- default: () => ([]),
+ default: () => [],
required: true
}
},
@@ -75,8 +62,8 @@ export default {
computed: {
annotatedLabels: {
get() {
- const labelIds = this.annotations.map(item => item.label)
- return this.labels.filter(item => labelIds.includes(item.id))
+ const labelIds = this.annotations.map((item) => item.label)
+ return this.labels.filter((item) => labelIds.includes(item.id))
},
set(newValue) {
if (newValue.length > this.annotations.length) {
@@ -87,7 +74,7 @@ export default {
newValue.pop()
}
} else {
- const label = this.annotatedLabels.find(x => !newValue.some(y => y.id === x.id))
+ const label = this.annotatedLabels.find((x) => !newValue.some((y) => y.id === x.id))
if (typeof label === 'object') {
this.remove(label)
}
@@ -102,7 +89,7 @@ export default {
},
remove(label) {
- const annotation = this.annotations.find(item => item.label === label.id)
+ const annotation = this.annotations.find((item) => item.label === label.id)
this.$emit('remove', annotation.id)
}
}
diff --git a/frontend/components/tasks/textClassification/singleLabel/LabelGroup.vue b/frontend/components/tasks/textClassification/singleLabel/LabelGroup.vue
index dbe45d688f..74a30acf4b 100644
--- a/frontend/components/tasks/textClassification/singleLabel/LabelGroup.vue
+++ b/frontend/components/tasks/textClassification/singleLabel/LabelGroup.vue
@@ -1,9 +1,5 @@
<template>
- <v-chip-group
- :value="annotatedLabel"
- column
- @change="addOrRemove"
- >
+ <v-chip-group :value="annotatedLabel" column @change="addOrRemove">
<v-chip
v-for="item in labels"
:key="item.id"
@@ -12,12 +8,7 @@
:text-color="$contrastColor(item.backgroundColor)"
>
{{ item.text }}
- <v-avatar
- v-if="item.suffixKey"
- right
- color="white"
- class="black--text font-weight-bold"
- >
+ <v-avatar v-if="item.suffixKey" right color="white" class="black--text font-weight-bold">
{{ item.suffixKey }}
</v-avatar>
</v-chip>
@@ -34,15 +25,15 @@ export default {
},
annotations: {
type: Array,
- default: () => ([]),
+ default: () => [],
required: true
}
},
computed: {
annotatedLabel() {
- const labelIds = this.annotations.map(item => item.label)
- return this.labels.findIndex(item => labelIds.includes(item.id))
+ const labelIds = this.annotations.map((item) => item.label)
+ return this.labels.findIndex((item) => labelIds.includes(item.id))
}
},
@@ -62,7 +53,7 @@ export default {
},
remove(label) {
- const annotation = this.annotations.find(item => item.label === label.id)
+ const annotation = this.annotations.find((item) => item.label === label.id)
this.$emit('remove', annotation.id)
}
}
diff --git a/frontend/components/tasks/textClassification/singleLabel/LabelSelect.vue b/frontend/components/tasks/textClassification/singleLabel/LabelSelect.vue
index 83ee95167d..506794786a 100644
--- a/frontend/components/tasks/textClassification/singleLabel/LabelSelect.vue
+++ b/frontend/components/tasks/textClassification/singleLabel/LabelSelect.vue
@@ -21,28 +21,15 @@
@click="select"
@click:close="remove(item)"
>
- <v-avatar
- v-if="item.suffixKey"
- left
- color="white"
- class="black--text font-weight-bold"
- >
+ <v-avatar v-if="item.suffixKey" left color="white" class="black--text font-weight-bold">
{{ item.suffixKey }}
</v-avatar>
{{ item.text }}
</v-chip>
</template>
<template #item="{ item }">
- <v-chip
- :color="item.backgroundColor"
- :text-color="$contrastColor(item.backgroundColor)"
- >
- <v-avatar
- v-if="item.suffixKey"
- left
- color="white"
- class="black--text font-weight-bold"
- >
+ <v-chip :color="item.backgroundColor" :text-color="$contrastColor(item.backgroundColor)">
+ <v-avatar v-if="item.suffixKey" left color="white" class="black--text font-weight-bold">
{{ item.suffixKey }}
</v-avatar>
{{ item.text }}
@@ -61,15 +48,15 @@ export default {
},
annotations: {
type: Array,
- default: () => ([]),
+ default: () => [],
required: true
}
},
computed: {
annotatedLabel() {
- const labelIds = this.annotations.map(item => item.label)
- return this.labels.find(item => labelIds.includes(item.id))
+ const labelIds = this.annotations.map((item) => item.label)
+ return this.labels.find((item) => labelIds.includes(item.id))
}
},
@@ -87,7 +74,7 @@ export default {
},
remove(label) {
- const annotation = this.annotations.find(item => item.label === label.id)
+ const annotation = this.annotations.find((item) => item.label === label.id)
this.$emit('remove', annotation.id)
}
}
diff --git a/frontend/components/tasks/toolbar/ToolbarLaptop.vue b/frontend/components/tasks/toolbar/ToolbarLaptop.vue
index cdc8a42de9..ebc2c42dfb 100644
--- a/frontend/components/tasks/toolbar/ToolbarLaptop.vue
+++ b/frontend/components/tasks/toolbar/ToolbarLaptop.vue
@@ -1,60 +1,39 @@
<template>
- <v-toolbar
- class="toolbar-control"
- dense
- flat
- >
+ <v-toolbar class="toolbar-control" dense flat>
<v-row no-gutters>
<v-btn-toggle>
- <button-review
- :is-reviewd="isReviewd"
- @click:review="$emit('click:review')"
- />
+ <button-review :is-reviewd="isReviewd" @click:review="$emit('click:review')" />
- <button-filter
- :value="filterOption"
- @click:filter="changeFilter"
- />
+ <button-filter :value="filterOption" @click:filter="changeFilter" />
- <button-guideline
- @click:guideline="dialogGuideline=true"
- />
+ <button-guideline @click:guideline="dialogGuideline = true" />
<v-dialog v-model="dialogGuideline">
- <form-guideline
- :guideline-text="guidelineText"
- @click:close="dialogGuideline=false"
- />
+ <form-guideline :guideline-text="guidelineText" @click:close="dialogGuideline = false" />
</v-dialog>
- <button-comment
- @click:comment="dialogComment=true"
- />
+ <button-comment @click:comment="dialogComment = true" />
<v-dialog v-model="dialogComment">
- <form-comment
- :example-id="docId"
- @click:cancel="dialogComment=false"
- />
+ <form-comment :example-id="docId" @click:cancel="dialogComment = false" />
</v-dialog>
- <button-auto-labeling
- @click:auto="dialogAutoLabeling=true"
- />
+ <button-auto-labeling @click:auto="dialogAutoLabeling = true" />
<v-dialog v-model="dialogAutoLabeling">
<form-auto-labeling
:is-enabled="enableAutoLabeling"
:error-message="errorMessage"
- @click:cancel="dialogAutoLabeling=false"
+ @click:cancel="dialogAutoLabeling = false"
@input="updateAutoLabeling"
/>
</v-dialog>
- <button-clear
- @click:clear="dialogClear=true"
- />
+ <button-clear @click:clear="dialogClear = true" />
<v-dialog v-model="dialogClear">
<form-clear-label
- @click:ok="$emit('click:clear-label');dialogClear=false"
- @click:cancel="dialogClear=false"
+ @click:ok="
+ $emit('click:clear-label')
+ dialogClear = false
+ "
+ @click:cancel="dialogClear = false"
/>
</v-dialog>
</v-btn-toggle>
@@ -150,21 +129,25 @@ export default Vue.extend({
methods: {
updatePage(page: number) {
- this.$router.push({ query: {
- page: page.toString(),
- isChecked: this.filterOption,
- q: this.$route.query.q
- }})
+ this.$router.push({
+ query: {
+ page: page.toString(),
+ isChecked: this.filterOption,
+ q: this.$route.query.q
+ }
+ })
},
changeFilter(isChecked: string) {
- this.$router.push({ query: {
- page: '1',
- isChecked,
- q: this.$route.query.q
- }})
+ this.$router.push({
+ query: {
+ page: '1',
+ isChecked,
+ q: this.$route.query.q
+ }
+ })
},
-
+
updateAutoLabeling(isEnable: boolean) {
if (isEnable) {
this.$emit('update:enable-auto-labeling', true)
@@ -184,4 +167,4 @@ export default Vue.extend({
::v-deep .v-dialog {
width: 800px;
}
-</style>
\ No newline at end of file
+</style>
diff --git a/frontend/components/tasks/toolbar/ToolbarMobile.vue b/frontend/components/tasks/toolbar/ToolbarMobile.vue
index 734111d49f..6009c1a1ec 100644
--- a/frontend/components/tasks/toolbar/ToolbarMobile.vue
+++ b/frontend/components/tasks/toolbar/ToolbarMobile.vue
@@ -1,21 +1,11 @@
<template>
- <v-bottom-navigation
- app
- absolute
- hide-on-scroll
- >
- <v-btn
- :disabled="isFirstPage"
- @click="updatePage(page - 1)"
- >
+ <v-bottom-navigation app absolute hide-on-scroll>
+ <v-btn :disabled="isFirstPage" @click="updatePage(page - 1)">
<span>Prev</span>
<v-icon>{{ mdiChevronLeft }}</v-icon>
</v-btn>
- <v-btn
- :disabled="isLastPage"
- @click="updatePage(page + 1)"
- >
+ <v-btn :disabled="isLastPage" @click="updatePage(page + 1)">
<span>Next</span>
<v-icon>{{ mdiChevronRight }}</v-icon>
</v-btn>
@@ -57,7 +47,7 @@ export default Vue.extend({
methods: {
updatePage(page: number) {
- this.$router.push({ query: { page: page.toString() }})
+ this.$router.push({ query: { page: page.toString() } })
}
}
})
diff --git a/frontend/components/tasks/toolbar/buttons/ButtonAutoLabeling.vue b/frontend/components/tasks/toolbar/buttons/ButtonAutoLabeling.vue
index 2052c6a33d..d68431431b 100644
--- a/frontend/components/tasks/toolbar/buttons/ButtonAutoLabeling.vue
+++ b/frontend/components/tasks/toolbar/buttons/ButtonAutoLabeling.vue
@@ -1,11 +1,7 @@
<template>
<v-tooltip bottom>
<template #activator="{ on }">
- <v-btn
- icon
- v-on="on"
- @click="$emit('click:auto')"
- >
+ <v-btn icon v-on="on" @click="$emit('click:auto')">
<v-icon>
{{ mdiAutoFix }}
</v-icon>
diff --git a/frontend/components/tasks/toolbar/buttons/ButtonClear.vue b/frontend/components/tasks/toolbar/buttons/ButtonClear.vue
index 08cb8dcf39..d963b7be53 100644
--- a/frontend/components/tasks/toolbar/buttons/ButtonClear.vue
+++ b/frontend/components/tasks/toolbar/buttons/ButtonClear.vue
@@ -1,11 +1,7 @@
<template>
<v-tooltip bottom>
<template #activator="{ on }">
- <v-btn
- icon
- v-on="on"
- @click="$emit('click:clear')"
- >
+ <v-btn icon v-on="on" @click="$emit('click:clear')">
<v-icon>
{{ mdiDeleteOutline }}
</v-icon>
@@ -23,6 +19,6 @@ export default {
return {
mdiDeleteOutline
}
- },
+ }
}
</script>
diff --git a/frontend/components/tasks/toolbar/buttons/ButtonComment.vue b/frontend/components/tasks/toolbar/buttons/ButtonComment.vue
index 1521a3361c..9f3d5737a0 100644
--- a/frontend/components/tasks/toolbar/buttons/ButtonComment.vue
+++ b/frontend/components/tasks/toolbar/buttons/ButtonComment.vue
@@ -1,11 +1,7 @@
<template>
<v-tooltip bottom>
<template #activator="{ on }">
- <v-btn
- icon
- v-on="on"
- @click="$emit('click:comment')"
- >
+ <v-btn icon v-on="on" @click="$emit('click:comment')">
<v-icon>
{{ mdiMessageText }}
</v-icon>
diff --git a/frontend/components/tasks/toolbar/buttons/ButtonFilter.vue b/frontend/components/tasks/toolbar/buttons/ButtonFilter.vue
index 37dc84128d..05ff609992 100644
--- a/frontend/components/tasks/toolbar/buttons/ButtonFilter.vue
+++ b/frontend/components/tasks/toolbar/buttons/ButtonFilter.vue
@@ -3,10 +3,7 @@
<template #activator="{ on: menu }">
<v-tooltip bottom>
<template #activator="{ on: tooltip }">
- <v-btn
- icon
- v-on="{ ...tooltip, ...menu }"
- >
+ <v-btn icon v-on="{ ...tooltip, ...menu }">
<v-icon>
{{ mdiFilter }}
</v-icon>
@@ -17,10 +14,7 @@
</template>
<v-list>
<v-list-item-group v-model="selected" mandatory>
- <v-list-item
- v-for="(item, i) in items"
- :key="i"
- >
+ <v-list-item v-for="(item, i) in items" :key="i">
<v-list-item-icon>
<v-icon v-if="selected === i">
{{ mdiCheck }}
@@ -64,7 +58,7 @@ export default {
computed: {
selected: {
get() {
- const index = this.items.findIndex(item => item.param === this.value)
+ const index = this.items.findIndex((item) => item.param === this.value)
return index === -1 ? 0 : index
},
set(value) {
diff --git a/frontend/components/tasks/toolbar/buttons/ButtonGuideline.vue b/frontend/components/tasks/toolbar/buttons/ButtonGuideline.vue
index 6cdccf91da..03cf207461 100644
--- a/frontend/components/tasks/toolbar/buttons/ButtonGuideline.vue
+++ b/frontend/components/tasks/toolbar/buttons/ButtonGuideline.vue
@@ -1,11 +1,7 @@
<template>
<v-tooltip bottom>
<template #activator="{ on }">
- <v-btn
- icon
- v-on="on"
- @click="$emit('click:guideline')"
- >
+ <v-btn icon v-on="on" @click="$emit('click:guideline')">
<v-icon>
{{ mdiBookOpenOutline }}
</v-icon>
@@ -23,6 +19,6 @@ export default {
return {
mdiBookOpenOutline
}
- },
+ }
}
</script>
diff --git a/frontend/components/tasks/toolbar/buttons/ButtonLabelSwitch.vue b/frontend/components/tasks/toolbar/buttons/ButtonLabelSwitch.vue
index d2a62bf315..9622242ea6 100644
--- a/frontend/components/tasks/toolbar/buttons/ButtonLabelSwitch.vue
+++ b/frontend/components/tasks/toolbar/buttons/ButtonLabelSwitch.vue
@@ -1,8 +1,5 @@
<template>
- <v-btn-toggle
- v-model="option"
- mandatory
- >
+ <v-btn-toggle v-model="option" mandatory>
<v-btn icon>
<v-icon>{{ mdiFormatListBulleted }}</v-icon>
</v-btn>
diff --git a/frontend/components/tasks/toolbar/buttons/ButtonPagination.vue b/frontend/components/tasks/toolbar/buttons/ButtonPagination.vue
index 65c3996bbe..11caf161bc 100644
--- a/frontend/components/tasks/toolbar/buttons/ButtonPagination.vue
+++ b/frontend/components/tasks/toolbar/buttons/ButtonPagination.vue
@@ -1,15 +1,9 @@
<template>
<div class="v-data-footer">
- <v-edit-dialog
- large
- persistent
- @save="changePageNumber"
- >
+ <v-edit-dialog large persistent @save="changePageNumber">
<span>{{ value }} of {{ total }}</span>
<template #input>
- <div class="mt-4 title">
- Move Page
- </div>
+ <div class="mt-4 title">Move Page</div>
<v-text-field
v-model="editedPage"
:rules="rules"
@@ -89,7 +83,8 @@ export default Vue.extend({
return {
editedPage: '1',
rules: [
- (v: string) => (v && parseInt(v, 10) > 0 && parseInt(v, 10) <= this.total) || 'Invalid page number!'
+ (v: string) =>
+ (v && parseInt(v, 10) > 0 && parseInt(v, 10) <= this.total) || 'Invalid page number!'
],
mdiPageFirst,
mdiPageLast,
diff --git a/frontend/components/tasks/toolbar/forms/FormAutoLabeling.vue b/frontend/components/tasks/toolbar/forms/FormAutoLabeling.vue
index 07654aa143..e0393813f1 100644
--- a/frontend/components/tasks/toolbar/forms/FormAutoLabeling.vue
+++ b/frontend/components/tasks/toolbar/forms/FormAutoLabeling.vue
@@ -1,19 +1,16 @@
<template>
- <base-card
- title="Settings"
- :cancel-text="$t('generic.close')"
- @cancel="$emit('click:cancel')"
- >
+ <base-card title="Settings" :cancel-text="$t('generic.close')" @cancel="$emit('click:cancel')">
<template #content>
<h3>Auto Labeling</h3>
<p>
- The auto labeling allows users to annotate data automatically.
- It enables them to speed up annotating data.
- You only have to correct labels which are mislabeled by the system and annotate labels which aren’t labeled by it.
+ The auto labeling allows users to annotate data automatically. It enables them to speed up
+ annotating data. You only have to correct labels which are mislabeled by the system and
+ annotate labels which aren’t labeled by it.
</p>
<p>
- Notice that you can't use this feature unless the project administrators configure the auto labeling.
- Also, depending on the configuration, it will take some cost for the administrators(e.g. In the case of configuring some paid service like AWS or GCP).
+ Notice that you can't use this feature unless the project administrators configure the auto
+ labeling. Also, depending on the configuration, it will take some cost for the
+ administrators(e.g. In the case of configuring some paid service like AWS or GCP).
</p>
<v-switch
:value="isEnabled"
diff --git a/frontend/components/tasks/toolbar/forms/FormComment.vue b/frontend/components/tasks/toolbar/forms/FormComment.vue
index 3abb79e57e..dfd3af6df7 100644
--- a/frontend/components/tasks/toolbar/forms/FormComment.vue
+++ b/frontend/components/tasks/toolbar/forms/FormComment.vue
@@ -5,9 +5,7 @@
@cancel="$emit('click:cancel')"
>
<template v-if="user.id" #content>
- <form-create
- @add-comment="add"
- />
+ <form-create @add-comment="add" />
<comment
v-for="comment in comments"
:key="comment.id"
@@ -44,7 +42,7 @@ export default Vue.extend({
data() {
return {
user: {},
- comments: [] as CommentReadDTO[],
+ comments: [] as CommentReadDTO[]
}
},
diff --git a/frontend/components/tasks/toolbar/forms/FormGuideline.vue b/frontend/components/tasks/toolbar/forms/FormGuideline.vue
index 175883fe5d..d01c1ab7df 100644
--- a/frontend/components/tasks/toolbar/forms/FormGuideline.vue
+++ b/frontend/components/tasks/toolbar/forms/FormGuideline.vue
@@ -5,9 +5,7 @@
@cancel="close"
>
<template #content>
- <viewer
- :initial-value="guidelineText"
- />
+ <viewer :initial-value="guidelineText" />
</template>
</base-card>
</template>
diff --git a/frontend/components/utils/ActionMenu.vue b/frontend/components/utils/ActionMenu.vue
index f94e1f26c5..454f79baa6 100644
--- a/frontend/components/utils/ActionMenu.vue
+++ b/frontend/components/utils/ActionMenu.vue
@@ -1,23 +1,13 @@
<template>
- <v-menu
- offset-y
- open-on-hover
- >
+ <v-menu offset-y open-on-hover>
<template #activator="{ on }">
- <v-btn
- color="primary text-capitalize"
- v-on="on"
- >
+ <v-btn color="primary text-capitalize" v-on="on">
{{ text }}
<v-icon>{{ mdiMenuDown }}</v-icon>
</v-btn>
</template>
<v-list>
- <v-list-item
- v-for="(item, index) in items"
- :key="index"
- @click="$emit(item.event)"
- >
+ <v-list-item v-for="(item, index) in items" :key="index" @click="$emit(item.event)">
<v-list-item-icon>
<v-icon>{{ item.icon }}</v-icon>
</v-list-item-icon>
@@ -50,6 +40,6 @@ export default Vue.extend({
return {
mdiMenuDown
}
- },
+ }
})
</script>
diff --git a/frontend/components/utils/BaseCard.vue b/frontend/components/utils/BaseCard.vue
index d928fe3e72..20600750b6 100644
--- a/frontend/components/utils/BaseCard.vue
+++ b/frontend/components/utils/BaseCard.vue
@@ -1,9 +1,6 @@
<template>
<v-card>
- <v-toolbar
- color="primary white--text"
- flat
- >
+ <v-toolbar color="primary white--text" flat>
<v-toolbar-title>{{ title }}</v-toolbar-title>
</v-toolbar>
<v-card-text class="text--primary mt-3 pl-4">
diff --git a/frontend/composables/useExampleItem.ts b/frontend/composables/useExampleItem.ts
index 718382b609..d616850ba3 100644
--- a/frontend/composables/useExampleItem.ts
+++ b/frontend/composables/useExampleItem.ts
@@ -12,9 +12,9 @@ export const useExampleItem = () => {
const { app } = useContext()
const exampleService = app.$services.example
- const getExample = async(
+ const getExample = async (
projectId: string,
- { page, q, isChecked }: { page: string, q: string, isChecked: string}
+ { page, q, isChecked }: { page: string; q: string; isChecked: string }
) => {
const examples = await exampleService.fetchOne(projectId, page, q, isChecked)
state.totalExample = examples.count
@@ -23,19 +23,15 @@ export const useExampleItem = () => {
}
}
- const getExampleById = async(
- projectId: string
- ) => {
+ const getExampleById = async (projectId: string) => {
state.example = await exampleService.findById(projectId, state.example.id)
}
- const updateProgress = async(projectId: string) => {
+ const updateProgress = async (projectId: string) => {
state.progress = await app.$services.metrics.fetchMyProgress(projectId)
}
- const confirm = async(
- projectId: string,
- ) => {
+ const confirm = async (projectId: string) => {
await exampleService.confirm(projectId, state.example.id)
await getExampleById(projectId)
updateProgress(projectId)
diff --git a/frontend/composables/useLabelList.ts b/frontend/composables/useLabelList.ts
index 1bc025b763..59bf4603cb 100644
--- a/frontend/composables/useLabelList.ts
+++ b/frontend/composables/useLabelList.ts
@@ -1,6 +1,6 @@
import { computed, reactive } from '@nuxtjs/composition-api'
import { LabelDTO } from '@/services/application/label/labelData'
-import { CreateLabelCommand , UpdateLabelCommand } from '@/services/application/label/labelCommand'
+import { CreateLabelCommand, UpdateLabelCommand } from '@/services/application/label/labelCommand'
import { LabelApplicationService } from '@/services/application/label/labelApplicationService'
export const useLabelList = (service: LabelApplicationService) => {
@@ -8,41 +8,30 @@ export const useLabelList = (service: LabelApplicationService) => {
labels: [] as LabelDTO[]
})
- const getLabelList = async(
- projectId: string
- ) => {
+ const getLabelList = async (projectId: string) => {
state.labels = await service.list(projectId)
}
- const createLabel = async(
- projectId: string,
- command: CreateLabelCommand
- ) => {
+ const createLabel = async (projectId: string, command: CreateLabelCommand) => {
await service.create(projectId, command)
await getLabelList(projectId)
}
- const updateLabel = async(
- projectId: string,
- command: UpdateLabelCommand
- ) => {
+ const updateLabel = async (projectId: string, command: UpdateLabelCommand) => {
await service.update(projectId, command)
}
- const deleteLabelList = async(
- projectId: string,
- items: LabelDTO[]
- ) => {
+ const deleteLabelList = async (projectId: string, items: LabelDTO[]) => {
await service.bulkDelete(projectId, items)
await getLabelList(projectId)
}
const findLabelById = (labelId: number) => {
- return state.labels.find(item => item.id === labelId)
+ return state.labels.find((item) => item.id === labelId)
}
const shortKeys = computed(() => {
- return Object.fromEntries(state.labels.map(item => [item.id, [item.suffixKey]]))
+ return Object.fromEntries(state.labels.map((item) => [item.id, [item.suffixKey]]))
})
return {
@@ -52,6 +41,6 @@ export const useLabelList = (service: LabelApplicationService) => {
createLabel,
updateLabel,
deleteLabelList,
- shortKeys,
+ shortKeys
}
}
diff --git a/frontend/composables/useProjectItem.ts b/frontend/composables/useProjectItem.ts
index 16065d0d8a..a086ce8f36 100644
--- a/frontend/composables/useProjectItem.ts
+++ b/frontend/composables/useProjectItem.ts
@@ -9,9 +9,7 @@ export const useProjectItem = () => {
const { app } = useContext()
const projectService = app.$services.project
- const getProjectById = async(
- projectId: string
- ) => {
+ const getProjectById = async (projectId: string) => {
state.project = await projectService.findById(projectId)
}
diff --git a/frontend/composables/useTeacherList.ts b/frontend/composables/useTeacherList.ts
index aae1b88bd5..bdc38522e2 100644
--- a/frontend/composables/useTeacherList.ts
+++ b/frontend/composables/useTeacherList.ts
@@ -5,57 +5,36 @@ export const useTeacherList = (service: any) => {
teacherList: []
})
- const getTeacherList = async(
- projectId: string,
- exampleId: number
- ) => {
+ const getTeacherList = async (projectId: string, exampleId: number) => {
state.teacherList = await service.list(projectId, exampleId)
}
- const removeTeacher = async(
- projectId: string,
- exampleId: number,
- teacherId: number
- ) => {
+ const removeTeacher = async (projectId: string, exampleId: number, teacherId: number) => {
await service.delete(projectId, exampleId, teacherId)
await getTeacherList(projectId, exampleId)
}
- const annotateLabel = async(
- projectId: string,
- exampleId: number,
- labelId: number
- ) => {
+ const annotateLabel = async (projectId: string, exampleId: number, labelId: number) => {
await service.create(projectId, exampleId, labelId)
await getTeacherList(projectId, exampleId)
}
- const clearTeacherList = async(
- projectId: string,
- exampleId: number
- ) => {
+ const clearTeacherList = async (projectId: string, exampleId: number) => {
await service.clear(projectId, exampleId)
await getTeacherList(projectId, exampleId)
}
- const autoLabel = async(
- projectId: string,
- exampleId: number
- ) => {
+ const autoLabel = async (projectId: string, exampleId: number) => {
await service.autoLabel(projectId, exampleId)
await getTeacherList(projectId, exampleId)
}
- const annotateOrRemoveLabel = async(
- projectId: string,
- exampleId: number,
- srcKey: string
- ) => {
+ const annotateOrRemoveLabel = async (projectId: string, exampleId: number, srcKey: string) => {
const labelId = parseInt(srcKey, 10)
// @ts-ignore
- const annotation = state.teacherList.find(item => item.label === labelId)
+ const annotation = state.teacherList.find((item) => item.label === labelId)
if (annotation) {
- // @ts-ignore
+ // @ts-ignore
await removeTeacher(projectId, exampleId, annotation.id)
} else {
await annotateLabel(projectId, exampleId, labelId)
@@ -69,6 +48,6 @@ export const useTeacherList = (service: any) => {
annotateOrRemoveLabel,
removeTeacher,
clearTeacherList,
- autoLabel,
+ autoLabel
}
-}
\ No newline at end of file
+}
diff --git a/frontend/domain/models/autoLabeling/config.ts b/frontend/domain/models/autoLabeling/config.ts
index 119c534414..89fd0d9f6b 100644
--- a/frontend/domain/models/autoLabeling/config.ts
+++ b/frontend/domain/models/autoLabeling/config.ts
@@ -6,27 +6,27 @@ export class ConfigItemList {
}
toArray(): Object[] {
- return this.configItems.map(item => item.toObject())
+ return this.configItems.map((item) => item.toObject())
}
}
interface LabelMappingForUI {
- from: string,
+ from: string
to: string
}
export interface ParametersForUI {
- name: string,
- value: string | object[],
- type?: string,
+ name: string
+ value: string | object[]
+ type?: string
items?: string[]
}
export interface Fields {
- modelName: string,
- modelAttrs: ParametersForUI[],
- template: string,
- labelMapping: LabelMappingForUI[],
+ modelName: string
+ modelAttrs: ParametersForUI[]
+ template: string
+ labelMapping: LabelMappingForUI[]
taskType: string
}
@@ -37,23 +37,42 @@ export class ConfigItem {
public modelAttrs: object,
public template: string,
public labelMapping: object,
- public taskType: string,
+ public taskType: string
) {}
- static valueOf(
- { id, model_name, model_attrs, template, label_mapping, task_type }:
- { id: number, model_name: string, model_attrs: object, template: string, label_mapping: object, task_type: string }
- ): ConfigItem {
+ static valueOf({
+ id,
+ model_name,
+ model_attrs,
+ template,
+ label_mapping,
+ task_type
+ }: {
+ id: number
+ model_name: string
+ model_attrs: object
+ template: string
+ label_mapping: object
+ task_type: string
+ }): ConfigItem {
return new ConfigItem(id, model_name, model_attrs, template, label_mapping, task_type)
}
- static parseFromUI(
- { modelName, modelAttrs, template, labelMapping, taskType }: Fields): ConfigItem {
- const mapping = labelMapping.reduce((a, x) => ({...a, [x.from]: x.to}), {})
- const attributes: {[key: string]: any} = modelAttrs.reduce((a, x) => ({...a, [x.name]: x.value}), {})
+ static parseFromUI({
+ modelName,
+ modelAttrs,
+ template,
+ labelMapping,
+ taskType
+ }: Fields): ConfigItem {
+ const mapping = labelMapping.reduce((a, x) => ({ ...a, [x.from]: x.to }), {})
+ const attributes: { [key: string]: any } = modelAttrs.reduce(
+ (a, x) => ({ ...a, [x.name]: x.value }),
+ {}
+ )
for (const [key, value] of Object.entries(attributes)) {
if (Array.isArray(value)) {
- attributes[key] = value.reduce((a, x) => ({...a, [x.key]: x.value}), {})
+ attributes[key] = value.reduce((a, x) => ({ ...a, [x.key]: x.value }), {})
}
}
return new ConfigItem(99999, modelName, attributes, template, mapping, taskType)
diff --git a/frontend/domain/models/autoLabeling/configRepository.ts b/frontend/domain/models/autoLabeling/configRepository.ts
index 205747a686..c3c5ab482a 100644
--- a/frontend/domain/models/autoLabeling/configRepository.ts
+++ b/frontend/domain/models/autoLabeling/configRepository.ts
@@ -1,7 +1,7 @@
import { ConfigItem, ConfigItemList } from '~/domain/models/autoLabeling/config'
export interface ConfigTestResponse {
- valid: boolean,
+ valid: boolean
labels: object[]
}
diff --git a/frontend/domain/models/autoLabeling/template.ts b/frontend/domain/models/autoLabeling/template.ts
index 7276e1e3eb..4d56bd5b8c 100644
--- a/frontend/domain/models/autoLabeling/template.ts
+++ b/frontend/domain/models/autoLabeling/template.ts
@@ -1,27 +1,21 @@
import { Fields, ParametersForUI } from '~/domain/models/autoLabeling/config'
export interface Schema {
- title: string,
- type: string,
+ title: string
+ type: string
properties: object
}
export interface ConfigResponse {
- name: string,
- schema: Schema,
+ name: string
+ schema: Schema
template: string
}
export class ConfigTemplateItem {
- constructor(
- private schema: Schema,
- public template: string
- ) {}
+ constructor(private schema: Schema, public template: string) {}
- static valueOf(
- { schema, template }:
- { schema: Schema, template: string }
- ): ConfigTemplateItem {
+ static valueOf({ schema, template }: { schema: Schema; template: string }): ConfigTemplateItem {
return new ConfigTemplateItem(schema, template)
}
@@ -33,26 +27,20 @@ export class ConfigTemplateItem {
const response: ParametersForUI[] = []
for (const [key, value] of Object.entries(this.schema.properties)) {
if ('type' in value && value.type === 'string') {
- response.push({name: key, type: 'textField', value: ''})
+ response.push({ name: key, type: 'textField', value: '' })
} else if ('anyOf' in value) {
- response.push(
- {
- name: key,
- type: 'selectField',
- value: '',
- items: value.anyOf.map(
- (item: {'const': string, 'type': string}) => item.const
- )
- }
- )
+ response.push({
+ name: key,
+ type: 'selectField',
+ value: '',
+ items: value.anyOf.map((item: { const: string; type: string }) => item.const)
+ })
} else if ('type' in value && value.type === 'object') {
- response.push(
- {
- name: key,
- type: 'objectField',
- value: []
- }
- )
+ response.push({
+ name: key,
+ type: 'objectField',
+ value: []
+ })
}
}
return response
diff --git a/frontend/domain/models/celery/status.ts b/frontend/domain/models/celery/status.ts
index 24fb766f8f..4f1cc36188 100644
--- a/frontend/domain/models/celery/status.ts
+++ b/frontend/domain/models/celery/status.ts
@@ -1,5 +1,5 @@
export class Status {
- ready: boolean;
- result: object;
- error: any;
+ ready: boolean
+ result: object
+ error: any
}
diff --git a/frontend/domain/models/celery/taskStatusRepository.ts b/frontend/domain/models/celery/taskStatusRepository.ts
index 413314e24f..24f3928856 100644
--- a/frontend/domain/models/celery/taskStatusRepository.ts
+++ b/frontend/domain/models/celery/taskStatusRepository.ts
@@ -1,6 +1,5 @@
import { Status } from './status'
-
export interface TaskStatusRepository {
get(taskId: string): Promise<Status>
}
diff --git a/frontend/domain/models/comment/comment.ts b/frontend/domain/models/comment/comment.ts
index 0dc161889c..48c99bf99e 100644
--- a/frontend/domain/models/comment/comment.ts
+++ b/frontend/domain/models/comment/comment.ts
@@ -1,15 +1,15 @@
-import "reflect-metadata"
+import 'reflect-metadata'
import { Expose, Type } from 'class-transformer'
export class CommentItem {
- id: number;
- user: number;
- username: string;
- example: number;
- text: string;
+ id: number
+ user: number
+ username: string
+ example: number
+ text: string
@Expose({ name: 'created_at' })
- createdAt: string;
+ createdAt: string
by(userId: number) {
return this.user === userId
@@ -28,11 +28,11 @@ export class CommentItem {
}
export class CommentItemList {
- count: number;
- next: string | null;
- prev: string | null;
+ count: number
+ next: string | null
+ prev: string | null
@Type(() => CommentItem)
@Expose({ name: 'results' })
- items: CommentItem[];
+ items: CommentItem[]
}
diff --git a/frontend/domain/models/comment/commentRepository.ts b/frontend/domain/models/comment/commentRepository.ts
index a5a02529a9..0ce92efb42 100644
--- a/frontend/domain/models/comment/commentRepository.ts
+++ b/frontend/domain/models/comment/commentRepository.ts
@@ -1,6 +1,6 @@
import { CommentItem, CommentItemList } from '~/domain/models/comment/comment'
-export type SearchOption = {[key: string]: string | (string | null)[]}
+export type SearchOption = { [key: string]: string | (string | null)[] }
export interface CommentRepository {
listAll(projectId: string, { limit, offset, q }: SearchOption): Promise<CommentItemList>
diff --git a/frontend/domain/models/download/downloadFormatRepository.ts b/frontend/domain/models/download/downloadFormatRepository.ts
index 5b70240fc3..e1d833c5f4 100644
--- a/frontend/domain/models/download/downloadFormatRepository.ts
+++ b/frontend/domain/models/download/downloadFormatRepository.ts
@@ -1,6 +1,5 @@
import { Format } from './format'
-
export interface DownloadFormatRepository {
list(projectId: string): Promise<Format[]>
}
diff --git a/frontend/domain/models/download/format.ts b/frontend/domain/models/download/format.ts
index 5f78d5e20c..514dd3063c 100644
--- a/frontend/domain/models/download/format.ts
+++ b/frontend/domain/models/download/format.ts
@@ -1,5 +1,5 @@
export class Format {
- name: string;
- example: string;
- properties: object;
+ name: string
+ example: string
+ properties: object
}
diff --git a/frontend/domain/models/example/example.ts b/frontend/domain/models/example/example.ts
index ae01de1b45..91f937c939 100644
--- a/frontend/domain/models/example/example.ts
+++ b/frontend/domain/models/example/example.ts
@@ -1,25 +1,25 @@
-import "reflect-metadata"
+import 'reflect-metadata'
import { Expose, Type } from 'class-transformer'
export class ExampleItem {
- id: number;
- text: string;
- meta: object;
+ id: number
+ text: string
+ meta: object
@Expose({ name: 'annotation_approver' })
- annotationApprover: boolean | null;
+ annotationApprover: boolean | null
@Expose({ name: 'comment_count' })
- commentCount: number;
+ commentCount: number
@Expose({ name: 'filename' })
- fileUrl: string;
+ fileUrl: string
@Expose({ name: 'is_confirmed' })
- isConfirmed: boolean;
+ isConfirmed: boolean
@Expose({ name: 'upload_name' })
- filename: string;
+ filename: string
get url() {
const l = this.fileUrl.indexOf('media/')
@@ -39,11 +39,11 @@ export class ExampleItem {
}
export class ExampleItemList {
- count: number;
- next: string | null;
- prev: string | null;
+ count: number
+ next: string | null
+ prev: string | null
@Type(() => ExampleItem)
@Expose({ name: 'results' })
- items: ExampleItem[];
+ items: ExampleItem[]
}
diff --git a/frontend/domain/models/example/exampleRepository.ts b/frontend/domain/models/example/exampleRepository.ts
index 9991558e2d..06e417897b 100644
--- a/frontend/domain/models/example/exampleRepository.ts
+++ b/frontend/domain/models/example/exampleRepository.ts
@@ -1,6 +1,6 @@
import { ExampleItem, ExampleItemList } from '~/domain/models/example/example'
-export type SearchOption = {[key: string]: string | (string | null)[]}
+export type SearchOption = { [key: string]: string | (string | null)[] }
export interface ExampleRepository {
list(projectId: string, { limit, offset, q, isChecked }: SearchOption): Promise<ExampleItemList>
diff --git a/frontend/domain/models/label/label.ts b/frontend/domain/models/label/label.ts
index f702684405..75aa766273 100644
--- a/frontend/domain/models/label/label.ts
+++ b/frontend/domain/models/label/label.ts
@@ -1,20 +1,20 @@
import { Expose } from 'class-transformer'
export class LabelItem {
- id: number;
- text: string;
+ id: number
+ text: string
@Expose({ name: 'prefix_key' })
- prefixKey: string | null;
+ prefixKey: string | null
@Expose({ name: 'suffix_key' })
- suffixKey: string | null;
+ suffixKey: string | null
@Expose({ name: 'background_color' })
- backgroundColor: string;
+ backgroundColor: string
@Expose({ name: 'text_color' })
- textColor: string = '#ffffff';
+ textColor: string = '#ffffff'
toObject() {
return {
diff --git a/frontend/domain/models/label/labelRepository.ts b/frontend/domain/models/label/labelRepository.ts
index a38178a969..90ace8c36a 100644
--- a/frontend/domain/models/label/labelRepository.ts
+++ b/frontend/domain/models/label/labelRepository.ts
@@ -11,5 +11,5 @@ export interface LabelRepository {
bulkDelete(projectId: string, labelIds: number[]): Promise<void>
- uploadFile(projectId: string, payload: FormData): Promise<void>
+ uploadFile(projectId: string, payload: FormData): Promise<void>
}
diff --git a/frontend/domain/models/member/member.ts b/frontend/domain/models/member/member.ts
index 99b7bb041c..d7c060f168 100644
--- a/frontend/domain/models/member/member.ts
+++ b/frontend/domain/models/member/member.ts
@@ -1,9 +1,9 @@
export class MemberItem {
- id: number;
- user: number;
- role: number;
- username: string;
- rolename: string;
+ id: number
+ user: number
+ role: number
+ username: string
+ rolename: string
get isProjectAdmin(): boolean {
return this.rolename === 'project_admin'
diff --git a/frontend/domain/models/metrics/metrics.ts b/frontend/domain/models/metrics/metrics.ts
index 0a4c27478f..aa3df6626a 100644
--- a/frontend/domain/models/metrics/metrics.ts
+++ b/frontend/domain/models/metrics/metrics.ts
@@ -1,10 +1,10 @@
-export type Label = {[key: string]: number}
-export type User = {[key: string]: number}
-export type ConfirmedCount = {[key: string]: number}
-export type Distribution = {[user: string]: {[label: string]: number}}
+export type Label = { [key: string]: number }
+export type User = { [key: string]: number }
+export type ConfirmedCount = { [key: string]: number }
+export type Distribution = { [user: string]: { [label: string]: number } }
export interface Progress {
total: number
- progress: {user: string, done: number}[]
+ progress: { user: string; done: number }[]
}
export interface MyProgress {
diff --git a/frontend/domain/models/option/option.ts b/frontend/domain/models/option/option.ts
index 0f453f72bf..8fc046e758 100644
--- a/frontend/domain/models/option/option.ts
+++ b/frontend/domain/models/option/option.ts
@@ -1,9 +1,7 @@
export class PageNumber {
num: number
- constructor(
- public page: number
- ) {
+ constructor(public page: number) {
if (typeof page === 'string' && /^\d+$/.test(page)) {
this.num = parseInt(page, 10)
}
@@ -15,16 +13,17 @@ export class PageNumber {
}
export class OptionItem {
- constructor(
- public page : number,
- public q? : string,
- public isChecked?: string
- ) {}
+ constructor(public page: number, public q?: string, public isChecked?: string) {}
- static valueOf(
- { page, q = '', isChecked = '' }:
- { page: number, q?: string, isChecked?: string }
- ): OptionItem {
+ static valueOf({
+ page,
+ q = '',
+ isChecked = ''
+ }: {
+ page: number
+ q?: string
+ isChecked?: string
+ }): OptionItem {
return new OptionItem(page, q, isChecked)
}
diff --git a/frontend/domain/models/project/project.ts b/frontend/domain/models/project/project.ts
index c564c90253..f47273dd17 100644
--- a/frontend/domain/models/project/project.ts
+++ b/frontend/domain/models/project/project.ts
@@ -1,67 +1,72 @@
-import "reflect-metadata"
+import 'reflect-metadata'
import { Expose, Type } from 'class-transformer'
-export type ProjectType = 'DocumentClassification' | 'SequenceLabeling' | 'Seq2seq' | 'IntentDetectionAndSlotFilling' | 'ImageClassification' | 'Speech2text'
-
+export type ProjectType =
+ | 'DocumentClassification'
+ | 'SequenceLabeling'
+ | 'Seq2seq'
+ | 'IntentDetectionAndSlotFilling'
+ | 'ImageClassification'
+ | 'Speech2text'
export class ProjectReadItem {
- id: number;
- name: string;
- description: string;
- guideline: string;
- users: number[];
- tags: Object[];
+ id: number
+ name: string
+ description: string
+ guideline: string
+ users: number[]
+ tags: Object[]
@Expose({ name: 'project_type' })
- projectType: ProjectType;
+ projectType: ProjectType
@Expose({ name: 'updated_at' })
- updatedAt: string;
+ updatedAt: string
@Expose({ name: 'random_order' })
- randomOrder: boolean;
+ randomOrder: boolean
@Expose({ name: 'collaborative_annotation' })
- collaborative_annotation: boolean;
+ collaborative_annotation: boolean
@Expose({ name: 'single_class_classification' })
- exclusiveCategories: boolean;
+ exclusiveCategories: boolean
@Expose({ name: 'resourcetype' })
- resourceType: string;
+ resourceType: string
@Expose({ name: 'allow_overlapping' })
- allowOverlapping: boolean;
+ allowOverlapping: boolean
@Expose({ name: 'grapheme_mode' })
- graphemeMode: boolean;
+ graphemeMode: boolean
@Expose({ name: 'use_relation' })
- useRelation: boolean;
+ useRelation: boolean
- @Expose({ name: 'is_text_project'})
- isTextProject: boolean;
+ @Expose({ name: 'is_text_project' })
+ isTextProject: boolean
@Expose({ name: 'can_define_label' })
- canDefineLabel: boolean;
+ canDefineLabel: boolean
@Expose({ name: 'can_define_relation' })
- canDefineRelation: boolean;
+ canDefineRelation: boolean
- @Expose({ name: 'can_define_span'})
- canDefineSpan: boolean;
+ @Expose({ name: 'can_define_span' })
+ canDefineSpan: boolean
@Expose({ name: 'can_define_category' })
- canDefineCategory: boolean;
+ canDefineCategory: boolean
get annotationPageLink(): string {
const mapping = {
DocumentClassification: 'text-classification',
- SequenceLabeling : 'sequence-labeling',
- Seq2seq : 'sequence-to-sequence',
+ SequenceLabeling: 'sequence-labeling',
+ Seq2seq: 'sequence-to-sequence',
IntentDetectionAndSlotFilling: 'intent-detection-and-slot-filling',
- ImageClassification : 'image-classification',
- Speech2text : 'speech-to-text',
+ ImageClassification: 'image-classification',
+ Speech2text: 'speech-to-text'
}
const url = `/projects/${this.id}/${mapping[this.projectType]}`
return url
@@ -69,49 +74,46 @@ export class ProjectReadItem {
get taskNames(): string[] {
if (this.projectType === 'IntentDetectionAndSlotFilling') {
- return [
- 'DocumentClassification',
- 'SequenceLabeling',
- ]
+ return ['DocumentClassification', 'SequenceLabeling']
}
return [this.projectType]
}
}
export class ProjectItemList {
- count: number;
- next: string | null;
- prev: string | null;
+ count: number
+ next: string | null
+ prev: string | null
@Type(() => ProjectReadItem)
@Expose({ name: 'results' })
- items: ProjectReadItem[];
+ items: ProjectReadItem[]
}
export class ProjectWriteItem {
constructor(
- public id: number,
- public name: string,
- public description: string,
- public guideline: string,
- public project_type: ProjectType,
- public random_order: boolean,
- public collaborative_annotation: boolean,
+ public id: number,
+ public name: string,
+ public description: string,
+ public guideline: string,
+ public project_type: ProjectType,
+ public random_order: boolean,
+ public collaborative_annotation: boolean,
public single_class_classification: boolean,
- public allow_overlapping: boolean,
- public grapheme_mode: boolean,
- public use_relation: boolean,
- public tags: string[],
+ public allow_overlapping: boolean,
+ public grapheme_mode: boolean,
+ public use_relation: boolean,
+ public tags: string[]
) {}
get resourceType(): string {
const mapping = {
DocumentClassification: 'TextClassificationProject',
- SequenceLabeling : 'SequenceLabelingProject',
- Seq2seq : 'Seq2seqProject',
+ SequenceLabeling: 'SequenceLabelingProject',
+ Seq2seq: 'Seq2seqProject',
IntentDetectionAndSlotFilling: 'IntentDetectionAndSlotFillingProject',
- ImageClassification : 'ImageClassificationProject',
- Speech2text : 'Speech2textProject',
+ ImageClassification: 'ImageClassificationProject',
+ Speech2text: 'Speech2textProject'
}
return mapping[this.project_type]
}
@@ -129,7 +131,7 @@ export class ProjectWriteItem {
allow_overlapping: this.allow_overlapping,
grapheme_mode: this.grapheme_mode,
use_relation: this.use_relation,
- tags: this.tags.map(tag => ({text: tag})),
+ tags: this.tags.map((tag) => ({ text: tag })),
resourcetype: this.resourceType
}
}
diff --git a/frontend/domain/models/project/projectRepository.ts b/frontend/domain/models/project/projectRepository.ts
index 223ea0175d..874579f6a8 100644
--- a/frontend/domain/models/project/projectRepository.ts
+++ b/frontend/domain/models/project/projectRepository.ts
@@ -1,6 +1,6 @@
import { ProjectReadItem, ProjectWriteItem, ProjectItemList } from '~/domain/models/project/project'
-export type SearchOption = {[key: string]: string | (string | null)[]}
+export type SearchOption = { [key: string]: string | (string | null)[] }
export interface ProjectRepository {
list({ limit, offset, q }: SearchOption): Promise<ProjectItemList>
diff --git a/frontend/domain/models/role/role.ts b/frontend/domain/models/role/role.ts
index 36895d4689..48ab4fafd5 100644
--- a/frontend/domain/models/role/role.ts
+++ b/frontend/domain/models/role/role.ts
@@ -1,6 +1,6 @@
export class RoleItem {
- id: number;
- name: string;
+ id: number
+ name: string
toObject(): Object {
return {
diff --git a/frontend/domain/models/tag/tag.ts b/frontend/domain/models/tag/tag.ts
index 4d7887e0d1..2d2a5863b3 100644
--- a/frontend/domain/models/tag/tag.ts
+++ b/frontend/domain/models/tag/tag.ts
@@ -1,7 +1,7 @@
export class TagItem {
- id: number;
- text: string;
- project: string;
+ id: number
+ text: string
+ project: string
toObject(): Object {
return {
diff --git a/frontend/domain/models/tasks/annotationRepository.ts b/frontend/domain/models/tasks/annotationRepository.ts
index 0f39657761..881d84af92 100644
--- a/frontend/domain/models/tasks/annotationRepository.ts
+++ b/frontend/domain/models/tasks/annotationRepository.ts
@@ -1,41 +1,37 @@
import ApiService from '@/services/api.service'
import { AnnotationModel } from './interface'
-
export abstract class AnnotationRepository<T extends AnnotationModel> {
- constructor(
- private readonly model: any,
- readonly request = ApiService
- ) {}
-
- public async list(projectId: string, docId: number): Promise<T[]> {
- const url = this.baseUrl(projectId, docId)
- const response = await this.request.get(url)
- const items: T[] = response.data
- return items.map(item => this.model.valueOf(item))
- }
-
- public async create(projectId: string, docId: number, item: T): Promise<void> {
- const url = this.baseUrl(projectId, docId)
- await this.request.post(url, item.toObject())
- }
-
- public async delete(projectId: string, docId: number, annotationId: number): Promise<void> {
- const url = this.baseUrl(projectId, docId) + `/${annotationId}`
- await this.request.delete(url)
- }
-
- public async clear(projectId: string, docId: number): Promise<void> {
- const url = this.baseUrl(projectId, docId)
- await this.request.delete(url)
- }
-
- public async autoLabel(projectId: string, docId: number): Promise<void> {
- const url = `/projects/${projectId}/auto-labeling?example=${docId}`
- await this.request.post(url, {})
- }
+ constructor(private readonly model: any, readonly request = ApiService) {}
+
+ public async list(projectId: string, docId: number): Promise<T[]> {
+ const url = this.baseUrl(projectId, docId)
+ const response = await this.request.get(url)
+ const items: T[] = response.data
+ return items.map((item) => this.model.valueOf(item))
+ }
+
+ public async create(projectId: string, docId: number, item: T): Promise<void> {
+ const url = this.baseUrl(projectId, docId)
+ await this.request.post(url, item.toObject())
+ }
+
+ public async delete(projectId: string, docId: number, annotationId: number): Promise<void> {
+ const url = this.baseUrl(projectId, docId) + `/${annotationId}`
+ await this.request.delete(url)
+ }
+
+ public async clear(projectId: string, docId: number): Promise<void> {
+ const url = this.baseUrl(projectId, docId)
+ await this.request.delete(url)
+ }
+
+ public async autoLabel(projectId: string, docId: number): Promise<void> {
+ const url = `/projects/${projectId}/auto-labeling?example=${docId}`
+ await this.request.post(url, {})
+ }
- protected baseUrl(projectId: string, docId: number): string {
- return `/projects/${projectId}/examples/${docId}/annotations`
- }
+ protected baseUrl(projectId: string, docId: number): string {
+ return `/projects/${projectId}/examples/${docId}/annotations`
+ }
}
diff --git a/frontend/domain/models/tasks/interface.ts b/frontend/domain/models/tasks/interface.ts
index 065bdae10a..4781f3b56d 100644
--- a/frontend/domain/models/tasks/interface.ts
+++ b/frontend/domain/models/tasks/interface.ts
@@ -1,4 +1,3 @@
-
export interface AnnotationModel {
valueOf(values: object): any
toObject(): object
diff --git a/frontend/domain/models/tasks/relation.ts b/frontend/domain/models/tasks/relation.ts
index a8282f1000..420ab06d59 100644
--- a/frontend/domain/models/tasks/relation.ts
+++ b/frontend/domain/models/tasks/relation.ts
@@ -1,24 +1,26 @@
export class RelationItem {
- constructor(
- public id: number,
- public fromId: number,
- public toId: number,
- public type: number,
- ) {
- }
+ constructor(public id: number, public fromId: number, public toId: number, public type: number) {}
- static valueOf(
- {id, from_id, to_id, type}: { id: number, from_id: number, to_id: number, type: number }
- ): RelationItem {
- return new RelationItem(id, from_id, to_id, type)
- }
+ static valueOf({
+ id,
+ from_id,
+ to_id,
+ type
+ }: {
+ id: number
+ from_id: number
+ to_id: number
+ type: number
+ }): RelationItem {
+ return new RelationItem(id, from_id, to_id, type)
+ }
- toObject(): Object {
- return {
- id: this.id,
- from_id: this.fromId,
- to_id: this.toId,
- type: this.type,
- }
+ toObject(): Object {
+ return {
+ id: this.id,
+ from_id: this.fromId,
+ to_id: this.toId,
+ type: this.type
}
+ }
}
diff --git a/frontend/domain/models/tasks/relationRepository.ts b/frontend/domain/models/tasks/relationRepository.ts
index dd7bbe456e..a454539925 100644
--- a/frontend/domain/models/tasks/relationRepository.ts
+++ b/frontend/domain/models/tasks/relationRepository.ts
@@ -1,13 +1,18 @@
import { RelationItem } from '~/domain/models/tasks/relation'
export interface RelationRepository {
- list(projectId: string, exampleId: number): Promise<RelationItem[]>
+ list(projectId: string, exampleId: number): Promise<RelationItem[]>
- create(projectId: string, exampleId: number, relation: RelationItem): Promise<RelationItem>
+ create(projectId: string, exampleId: number, relation: RelationItem): Promise<RelationItem>
- update(projectId: string, exampleId: number, relationId: number, relationType: number): Promise<RelationItem>
+ update(
+ projectId: string,
+ exampleId: number,
+ relationId: number,
+ relationType: number
+ ): Promise<RelationItem>
- delete(projectId: string, exampleId: number, relationId: number): Promise<void>
+ delete(projectId: string, exampleId: number, relationId: number): Promise<void>
- bulkDelete(projectId: string, exampleId: number, relationIds: number[]): Promise<void>
+ bulkDelete(projectId: string, exampleId: number, relationIds: number[]): Promise<void>
}
diff --git a/frontend/domain/models/tasks/seq2seq.ts b/frontend/domain/models/tasks/seq2seq.ts
index bc86ef7690..273d76d41c 100644
--- a/frontend/domain/models/tasks/seq2seq.ts
+++ b/frontend/domain/models/tasks/seq2seq.ts
@@ -1,16 +1,9 @@
import { AnnotationModel } from './interface'
-export class Seq2seqLabel implements AnnotationModel{
- constructor(
- public id: number,
- public text: string,
- public user: number,
- ) {}
+export class Seq2seqLabel implements AnnotationModel {
+ constructor(public id: number, public text: string, public user: number) {}
- static valueOf(
- { id, text, user }:
- { id: number, text: string, user: number }
- ) {
+ static valueOf({ id, text, user }: { id: number; text: string; user: number }) {
return new Seq2seqLabel(id, text, user)
}
diff --git a/frontend/domain/models/tasks/sequenceLabeling.ts b/frontend/domain/models/tasks/sequenceLabeling.ts
index 64e009d712..37a7a84f67 100644
--- a/frontend/domain/models/tasks/sequenceLabeling.ts
+++ b/frontend/domain/models/tasks/sequenceLabeling.ts
@@ -9,10 +9,19 @@ export class Span implements AnnotationModel {
public endOffset: number
) {}
- static valueOf(
- { id, label, user, start_offset, end_offset }:
- { id: number, label: number, user: number, start_offset: number, end_offset: number }
- ) {
+ static valueOf({
+ id,
+ label,
+ user,
+ start_offset,
+ end_offset
+ }: {
+ id: number
+ label: number
+ user: number
+ start_offset: number
+ end_offset: number
+ }) {
return new Span(id, label, user, start_offset, end_offset)
}
diff --git a/frontend/domain/models/tasks/textClassification.ts b/frontend/domain/models/tasks/textClassification.ts
index 0c3c73b687..721c89d206 100644
--- a/frontend/domain/models/tasks/textClassification.ts
+++ b/frontend/domain/models/tasks/textClassification.ts
@@ -1,17 +1,10 @@
import { AnnotationModel } from './interface'
-export class TextClassificationItem implements AnnotationModel{
- constructor(
- public id: number,
- public label: number,
- public user: number,
- ) {}
+export class CategoryItem implements AnnotationModel {
+ constructor(public id: number, public label: number, public user: number) {}
- static valueOf(
- { id, label, user }:
- { id: number, label: number, user: number }
- ) {
- return new TextClassificationItem(id, label, user)
+ static valueOf({ id, label, user }: { id: number; label: number; user: number }) {
+ return new CategoryItem(id, label, user)
}
toObject() {
diff --git a/frontend/domain/models/upload/catalog.ts b/frontend/domain/models/upload/catalog.ts
index f6497bfa83..e13c713227 100644
--- a/frontend/domain/models/upload/catalog.ts
+++ b/frontend/domain/models/upload/catalog.ts
@@ -1,16 +1,16 @@
import { Expose } from 'class-transformer'
export class Catalog {
- name: string;
- example: string;
- properties: object;
+ name: string
+ example: string
+ properties: object
@Expose({ name: 'task_id' })
- taskId: string;
+ taskId: string
@Expose({ name: 'display_name' })
- displayName: string;
+ displayName: string
@Expose({ name: 'accept_types' })
- acceptTypes: string;
+ acceptTypes: string
}
diff --git a/frontend/domain/models/upload/catalogRepository.ts b/frontend/domain/models/upload/catalogRepository.ts
index 42c01f60e3..c39724d542 100644
--- a/frontend/domain/models/upload/catalogRepository.ts
+++ b/frontend/domain/models/upload/catalogRepository.ts
@@ -1,6 +1,5 @@
import { Catalog } from './catalog'
-
export interface CatalogRepository {
list(projectId: string): Promise<Catalog[]>
}
diff --git a/frontend/domain/models/upload/parseRepository.ts b/frontend/domain/models/upload/parseRepository.ts
index fcd5d94a3a..d8b2590744 100644
--- a/frontend/domain/models/upload/parseRepository.ts
+++ b/frontend/domain/models/upload/parseRepository.ts
@@ -1,5 +1,11 @@
export interface ParseRepository {
- analyze(projectId: string, format: string, task: string, uploadIds: number[], option: object): Promise<string>
+ analyze(
+ projectId: string,
+ format: string,
+ task: string,
+ uploadIds: number[],
+ option: object
+ ): Promise<string>
revert(serverId: string): void
}
diff --git a/frontend/domain/models/user/user.ts b/frontend/domain/models/user/user.ts
index 62c3e99021..bc88f68c46 100644
--- a/frontend/domain/models/user/user.ts
+++ b/frontend/domain/models/user/user.ts
@@ -1,14 +1,14 @@
import { Expose } from 'class-transformer'
export class UserItem {
-  id: number;
-  username: string;
-
+ id: number
+ username: string
+
@Expose({ name: 'is_superuser' })
- isSuperuser: boolean;
+ isSuperuser: boolean
@Expose({ name: 'is_staff' })
- isStaff: boolean;
+ isStaff: boolean
toObject(): Object {
return {
diff --git a/frontend/domain/models/utils/stepper.ts b/frontend/domain/models/utils/stepper.ts
index 5e169f6f58..a7e5a8c15c 100644
--- a/frontend/domain/models/utils/stepper.ts
+++ b/frontend/domain/models/utils/stepper.ts
@@ -1,16 +1,11 @@
export class StepCounter {
private step: number
- constructor(
- private readonly minStep: number = 1,
- private readonly maxStep: number = 10
- ) {
+ constructor(private readonly minStep: number = 1, private readonly maxStep: number = 10) {
this.step = 1
}
- static valueOf(
- minStep: number = 1, maxStep: number = 10
- ): StepCounter {
+ static valueOf(minStep: number = 1, maxStep: number = 10): StepCounter {
return new StepCounter(minStep, maxStep)
}
@@ -53,4 +48,4 @@ export class StepCounter {
isLast(): boolean {
return this.step === this.maxStep
}
-}
\ No newline at end of file
+}
diff --git a/frontend/i18n/de/projects/dataset.js b/frontend/i18n/de/projects/dataset.js
index edec027252..272ffc1a39 100644
--- a/frontend/i18n/de/projects/dataset.js
+++ b/frontend/i18n/de/projects/dataset.js
@@ -14,6 +14,7 @@ export default {
exportDataTitle: 'Exportiere Daten',
exportDataMessage: 'Wähle ein Dateiformat',
deleteDocumentsTitle: 'Dokument löschen',
- deleteDocumentsMessage: 'Bist du dir sicher, dass du die Dokumente aus dem Projekt löschen willst?',
+ deleteDocumentsMessage:
+ 'Bist du dir sicher, dass du die Dokumente aus dem Projekt löschen willst?',
pageText: '{0}-{1} von {2}'
}
diff --git a/frontend/i18n/de/projects/errors.js b/frontend/i18n/de/projects/errors.js
index c9ead2c6ab..cf96ab8196 100644
--- a/frontend/i18n/de/projects/errors.js
+++ b/frontend/i18n/de/projects/errors.js
@@ -1,5 +1,7 @@
export default {
- fileCannotUpload: 'Die Datei(en) konnten nicht hochgeladen werden. Vielleicht ungültiges Format.\n Bitte prüfe die verfügbaren Dateiformate und folgende Datei(en): ',
- labelCannotCreate: 'Das Label konnte nicht erstellt werden.\n Jeder Labelname und jedes Tastenkürzel kann nur einmal vergeben werden.',
+ fileCannotUpload:
+ 'Die Datei(en) konnten nicht hochgeladen werden. Vielleicht ungültiges Format.\n Bitte prüfe die verfügbaren Dateiformate und folgende Datei(en): ',
+ labelCannotCreate:
+ 'Das Label konnte nicht erstellt werden.\n Jeder Labelname und jedes Tastenkürzel kann nur einmal vergeben werden.',
invalidUserOrPass: 'Falscher Benutername oder falsches Passwort, oder etwas ist schief gelaufen.'
}
diff --git a/frontend/i18n/de/projects/overview.js b/frontend/i18n/de/projects/overview.js
index e7cbd754c9..b856fbbcb8 100644
--- a/frontend/i18n/de/projects/overview.js
+++ b/frontend/i18n/de/projects/overview.js
@@ -17,6 +17,6 @@ export default {
'Sequenz zu Sequenz',
'Intent-Erkennung und Slot-Füllung',
'Bildklassifikation',
- 'Sprache zu Text',
+ 'Sprache zu Text'
]
}
diff --git a/frontend/i18n/de/projects/statistics.js b/frontend/i18n/de/projects/statistics.js
index 57053ca1fa..b8f07d0726 100644
--- a/frontend/i18n/de/projects/statistics.js
+++ b/frontend/i18n/de/projects/statistics.js
@@ -1,9 +1,6 @@
export default {
statistics: 'Statistiken',
- progress: [
- 'Abgeschlossen',
- 'Unvollständig'
- ],
+ progress: ['Abgeschlossen', 'Unvollständig'],
labelStats: 'Labelstatistiken',
userStats: 'Nutzerstatistiken'
}
diff --git a/frontend/i18n/en/projects/errors.js b/frontend/i18n/en/projects/errors.js
index fcb938469b..568932c4de 100644
--- a/frontend/i18n/en/projects/errors.js
+++ b/frontend/i18n/en/projects/errors.js
@@ -1,5 +1,7 @@
export default {
- fileCannotUpload: 'The file(s) could not be uploaded. Maybe invalid format.\n Please check available formats and the following file(s): ',
- labelCannotCreate: 'The label could not be created.\n You cannot use the same label name or shortcut key.',
+ fileCannotUpload:
+ 'The file(s) could not be uploaded. Maybe invalid format.\n Please check available formats and the following file(s): ',
+ labelCannotCreate:
+ 'The label could not be created.\n You cannot use the same label name or shortcut key.',
invalidUserOrPass: 'Incorrect username or password, or something went wrong.'
}
diff --git a/frontend/i18n/en/projects/overview.js b/frontend/i18n/en/projects/overview.js
index 6be96ed1fe..22a78dc8e9 100644
--- a/frontend/i18n/en/projects/overview.js
+++ b/frontend/i18n/en/projects/overview.js
@@ -17,6 +17,6 @@ export default {
'Sequence to sequence',
'Intent Detection and Slot Filling',
'Image Classification',
- 'Speech to Text',
+ 'Speech to Text'
]
}
diff --git a/frontend/i18n/en/projects/statistics.js b/frontend/i18n/en/projects/statistics.js
index f19665917b..da646fe48d 100644
--- a/frontend/i18n/en/projects/statistics.js
+++ b/frontend/i18n/en/projects/statistics.js
@@ -1,9 +1,6 @@
export default {
statistics: 'Metrics',
- progress: [
- 'Completed',
- 'Incomplete'
- ],
+ progress: ['Completed', 'Incomplete'],
labelStats: 'Label stats',
userStats: 'User stats'
}
diff --git a/frontend/i18n/fr/home.js b/frontend/i18n/fr/home.js
index 3f6b188428..b77bce87bf 100644
--- a/frontend/i18n/fr/home.js
+++ b/frontend/i18n/fr/home.js
@@ -6,12 +6,12 @@ export default {
featuresTitle1: 'Collaboration des équipes',
featuresText1: 'Annotation avec vos coéquipiers',
featuresTitle2: 'Toute langue',
- featuresText2: 'Annotation dans n\'importe quelle langue',
+ featuresText2: "Annotation dans n'importe quelle langue",
featuresTitle3: 'Source ouverte',
featuresText3: 'Annotation gratuite et personnalisable',
footerTitle: 'Réalisez vos idées rapidement',
demoDropDown: 'Essayer la démo',
- demoNER: 'Reconnaissance de l\'entité désignée',
+ demoNER: "Reconnaissance de l'entité désignée",
demoSent: 'Analyse du sentiment',
demoTranslation: 'Traduction',
demoTextToSQL: 'Texte à SQL'
diff --git a/frontend/i18n/fr/projects/dataset.js b/frontend/i18n/fr/projects/dataset.js
index a28c22ff46..e88fa49d44 100644
--- a/frontend/i18n/fr/projects/dataset.js
+++ b/frontend/i18n/fr/projects/dataset.js
@@ -1,8 +1,8 @@
export default {
dataset: 'Ensemble de données',
actions: 'Actions',
- importDataset: 'Importer l\'ensemble de données',
- exportDataset: 'Exporter l\'ensemble de données',
+ importDataset: "Importer l'ensemble de données",
+ exportDataset: "Exporter l'ensemble de données",
text: 'Texte',
metadata: 'Métadonnées',
action: 'Action',
diff --git a/frontend/i18n/fr/projects/errors.js b/frontend/i18n/fr/projects/errors.js
index ff83e635a0..4b2308bea6 100644
--- a/frontend/i18n/fr/projects/errors.js
+++ b/frontend/i18n/fr/projects/errors.js
@@ -1,5 +1,7 @@
export default {
- fileCannotUpload: 'Le fichier n\'a pas pu être téléchargé. Peut-être un format non valide.\n Veuillez vérifier attentivement les formats disponibles.',
- labelCannotCreate: 'L\'étiquette n\'a pas pu être créé.\n Vous ne pouvez pas utiliser le même nom d\'étiquette ou la même raccourci clavier.',
- invalidUserOrPass: 'Nom d\'utilisateur ou mot de passe incorrect, ou quelque chose a mal tourné.'
+ fileCannotUpload:
+ "Le fichier n'a pas pu être téléchargé. Peut-être un format non valide.\n Veuillez vérifier attentivement les formats disponibles.",
+ labelCannotCreate:
+ "L'étiquette n'a pas pu être créé.\n Vous ne pouvez pas utiliser le même nom d'étiquette ou la même raccourci clavier.",
+ invalidUserOrPass: "Nom d'utilisateur ou mot de passe incorrect, ou quelque chose a mal tourné."
}
diff --git a/frontend/i18n/fr/projects/guideline.js b/frontend/i18n/fr/projects/guideline.js
index 75faf5a448..28da0e326a 100644
--- a/frontend/i18n/fr/projects/guideline.js
+++ b/frontend/i18n/fr/projects/guideline.js
@@ -1,4 +1,4 @@
export default {
guideline: 'Ligne directrice',
- writeGuidelinePrompt: 'Veuillez rédiger le guide d\'annotation.'
+ writeGuidelinePrompt: "Veuillez rédiger le guide d'annotation."
}
diff --git a/frontend/i18n/fr/projects/home.js b/frontend/i18n/fr/projects/home.js
index ed010b4294..7584ce7fe9 100644
--- a/frontend/i18n/fr/projects/home.js
+++ b/frontend/i18n/fr/projects/home.js
@@ -7,5 +7,5 @@ export default {
defineGuideline: 'Définir une ligne directrice pour le travail',
annotateDataset: 'Annoter un ensemble de données',
viewStatistics: 'Voir les statistiques',
- exportDataset: 'Exporter l\'ensemble de données'
+ exportDataset: "Exporter l'ensemble de données"
}
diff --git a/frontend/i18n/fr/projects/labels.js b/frontend/i18n/fr/projects/labels.js
index b98d5ba39b..ce3885fa76 100644
--- a/frontend/i18n/fr/projects/labels.js
+++ b/frontend/i18n/fr/projects/labels.js
@@ -5,15 +5,15 @@ export default {
createLabel: 'Créer un label',
importLabels: 'Importer des étiquettes',
exportLabels: 'Exporter des étiquettes',
- labelName: 'Nom de l\'étiquette',
- labelMessage: 'Le nom de l\'étiquette est obligatoire',
+ labelName: "Nom de l'étiquette",
+ labelMessage: "Le nom de l'étiquette est obligatoire",
createLink: 'Create Link',
linkName: 'Link name',
linkMessage: 'Link name is required',
key: 'Clé',
- deleteLabel: 'Supprimer l\'étiquette',
+ deleteLabel: "Supprimer l'étiquette",
deleteMessage: 'Êtes-vous sûr de vouloir supprimer ces étiquettes de ce projet ?',
- importTitle: 'Télécharger l\'étiquette',
+ importTitle: "Télécharger l'étiquette",
importMessage1: 'Exemple de format',
importMessage2: 'Sélectionnez un fichier',
filePlaceholder: 'Saisir un fichier'
diff --git a/frontend/i18n/fr/projects/members.js b/frontend/i18n/fr/projects/members.js
index e82845dff9..5c74de1d53 100644
--- a/frontend/i18n/fr/projects/members.js
+++ b/frontend/i18n/fr/projects/members.js
@@ -3,14 +3,14 @@ export default {
role: 'Rôle',
updateRole: 'Mettre à jour le rôle',
addMember: 'Ajouter un membre',
- userSearchAPIs: 'Rechercher des utilisateurs (avec l\'IPA)',
+ userSearchAPIs: "Rechercher des utilisateurs (avec l'IPA)",
userSearchPrompt: 'Commencez à taper pour rechercher',
removeMember: 'Supprimer un membre',
removePrompt: 'Êtes-vous sûr de vouloir supprimer ces membres ?',
roles: {
- projectAdmin: 'L\'administrateur du projet',
+ projectAdmin: "L'administrateur du projet",
annotator: 'Étiqueteuse',
- annotationApprover: 'Approbateur d\'étiquettes',
+ annotationApprover: "Approbateur d'étiquettes",
undefined: 'Aucun rôle défini'
}
}
diff --git a/frontend/i18n/fr/projects/overview.js b/frontend/i18n/fr/projects/overview.js
index a3179ad8ef..23b8555053 100644
--- a/frontend/i18n/fr/projects/overview.js
+++ b/frontend/i18n/fr/projects/overview.js
@@ -5,7 +5,7 @@ export default {
textClassification: 'Classification des textes',
sequenceLabeling: 'Étiquetage des séquences',
sequenceToSequence: 'Séquence à séquence',
- randomizeDocOrder: 'Randomiser l\'ordre des documents',
+ randomizeDocOrder: "Randomiser l'ordre des documents",
shareAnnotations: 'Partager les annotations entre tous les utilisateurs',
deleteProjectTitle: 'Supprimer le projet',
deleteProjectMessage: 'Êtes-vous sûr de vouloir supprimer ces projets ?',
@@ -13,8 +13,8 @@ export default {
'Classification des textes',
'Étiquetage des séquences',
'Séquence à séquence',
- 'Détection d\'intention et remplissage d\'emplacements',
- 'Classification d\'images',
- 'De la parole au texte',
+ "Détection d'intention et remplissage d'emplacements",
+ "Classification d'images",
+ 'De la parole au texte'
]
}
diff --git a/frontend/i18n/fr/projects/statistics.js b/frontend/i18n/fr/projects/statistics.js
index 83f0e99bac..c685efc0c0 100644
--- a/frontend/i18n/fr/projects/statistics.js
+++ b/frontend/i18n/fr/projects/statistics.js
@@ -1,9 +1,6 @@
export default {
statistics: 'Statistiques',
- progress: [
- 'Complété',
- 'Incomplet'
- ],
+ progress: ['Complété', 'Incomplet'],
labelStats: 'Étiqueter les stats',
userStats: 'Stats des utilisateurs'
}
diff --git a/frontend/i18n/fr/rules.js b/frontend/i18n/fr/rules.js
index 861b135bfc..23a23004aa 100644
--- a/frontend/i18n/fr/rules.js
+++ b/frontend/i18n/fr/rules.js
@@ -3,12 +3,12 @@ export default {
colorRequired: 'La couleur est obligatoire'
},
labelNameRules: {
- labelRequired: 'Le nom de l\'étiquette est obligatoire',
- labelLessThan100Chars: 'Le nom de l\'étiquette doit comporter moins de 100 caractères'
+ labelRequired: "Le nom de l'étiquette est obligatoire",
+ labelLessThan100Chars: "Le nom de l'étiquette doit comporter moins de 100 caractères"
},
userNameRules: {
- userNameRequired: 'Le nom d\'utilisateur est requis',
- userNameLessThan30Chars: 'Le nom d\'utilisateur doit comporter moins de 30 caractères'
+ userNameRequired: "Le nom d'utilisateur est requis",
+ userNameLessThan30Chars: "Le nom d'utilisateur doit comporter moins de 30 caractères"
},
roleRules: {
roleRequired: 'Rôle est obligatoire'
diff --git a/frontend/i18n/fr/user.js b/frontend/i18n/fr/user.js
index 47c729a76c..06f4474784 100644
--- a/frontend/i18n/fr/user.js
+++ b/frontend/i18n/fr/user.js
@@ -1,6 +1,6 @@
export default {
login: 'Connexion',
signOut: 'Déconnexion',
- username: 'Nom d\'utilisateur',
+ username: "Nom d'utilisateur",
password: 'Mot de passe'
}
diff --git a/frontend/i18n/zh/projects/overview.js b/frontend/i18n/zh/projects/overview.js
index f8c5878d59..9ae79f040a 100644
--- a/frontend/i18n/zh/projects/overview.js
+++ b/frontend/i18n/zh/projects/overview.js
@@ -11,12 +11,5 @@ export default {
deleteProjectTitle: '删除项目',
deleteProjectMessage: '你确定要删除这些项目吗?',
search: '搜索',
- projectTypes: [
- '文本分类',
- '序列标注',
- '序列到序列',
- '意图检测和槽填充',
- '图像分类',
- '文字转语音'
- ]
+ projectTypes: ['文本分类', '序列标注', '序列到序列', '意图检测和槽填充', '图像分类', '文字转语音']
}
diff --git a/frontend/i18n/zh/projects/statistics.js b/frontend/i18n/zh/projects/statistics.js
index 602c70ce1f..180cf7cb24 100644
--- a/frontend/i18n/zh/projects/statistics.js
+++ b/frontend/i18n/zh/projects/statistics.js
@@ -1,9 +1,6 @@
export default {
statistics: '统计',
- progress: [
- '已完成',
- '未完成'
- ],
+ progress: ['已完成', '未完成'],
labelStats: '标签统计',
userStats: '用户统计'
}
diff --git a/frontend/jest.config.js b/frontend/jest.config.js
index 9c167fabb5..c39293f3d4 100644
--- a/frontend/jest.config.js
+++ b/frontend/jest.config.js
@@ -3,7 +3,7 @@ module.exports = {
'^@/(.*)$': '<rootDir>/$1',
'^~/(.*)$': '<rootDir>/$1',
'^vue$': 'vue/dist/vue.common.js',
- "^.+\\.(css|styl|less|sass|scss|png|jpg|ttf|woff|woff2)$": "jest-transform-stub"
+ '^.+\\.(css|styl|less|sass|scss|png|jpg|ttf|woff|woff2)$': 'jest-transform-stub'
},
moduleFileExtensions: ['js', 'vue', 'json'],
transform: {
@@ -12,8 +12,5 @@ module.exports = {
'^.+\\.(css|styl|less|sass|scss|png|jpg|ttf|woff|woff2)$': 'jest-transform-stub'
},
collectCoverage: true,
- collectCoverageFrom: [
- '<rootDir>/components/**/*.vue',
- '<rootDir>/pages/**/*.vue'
- ]
+ collectCoverageFrom: ['<rootDir>/components/**/*.vue', '<rootDir>/pages/**/*.vue']
}
diff --git a/frontend/layouts/default.vue b/frontend/layouts/default.vue
index a700aa297d..f55dd13c6e 100644
--- a/frontend/layouts/default.vue
+++ b/frontend/layouts/default.vue
@@ -14,6 +14,6 @@ export default {
components: {
TheFooter,
TheHeader
- },
+ }
}
</script>
diff --git a/frontend/layouts/demo.vue b/frontend/layouts/demo.vue
index 2bf94d4497..3ed281accc 100644
--- a/frontend/layouts/demo.vue
+++ b/frontend/layouts/demo.vue
@@ -11,6 +11,6 @@ import TheHeader from '~/components/layout/TheHeader'
export default {
components: {
TheHeader
- },
+ }
}
</script>
diff --git a/frontend/layouts/error.vue b/frontend/layouts/error.vue
index dda5a2bf2c..c8c8b18cc3 100644
--- a/frontend/layouts/error.vue
+++ b/frontend/layouts/error.vue
@@ -1,22 +1,11 @@
<template>
<v-app dark>
- <v-container
- fill-height
- style="height: calc(100vh - 58px);"
- >
+ <v-container fill-height style="height: calc(100vh - 58px)">
<v-layout align-center>
<v-flex text-center>
- <h1 class="display-2 primary--text">
- Whoops, 404
- </h1>
+ <h1 class="display-2 primary--text">Whoops, 404</h1>
<p>The page you were looking for does not exist</p>
- <v-btn
- to="/"
- outlined
- color="primary"
- >
- Get me out of here!
- </v-btn>
+ <v-btn to="/" outlined color="primary"> Get me out of here! </v-btn>
</v-flex>
</v-layout>
</v-container>
@@ -35,12 +24,11 @@ export default {
data() {
return {
pageNotFound: '404 Not Found',
- otherError: 'The page you were looking for wasn\'t allowed to access.'
+ otherError: "The page you were looking for wasn't allowed to access."
}
},
head() {
- const title =
- this.error.statusCode === 404 ? this.pageNotFound : this.otherError
+ const title = this.error.statusCode === 404 ? this.pageNotFound : this.otherError
return {
title
}
diff --git a/frontend/layouts/project.vue b/frontend/layouts/project.vue
index 1b8b497912..d4b66e23bb 100644
--- a/frontend/layouts/project.vue
+++ b/frontend/layouts/project.vue
@@ -6,27 +6,13 @@
</template>
</the-header>
- <v-navigation-drawer
- v-model="drawerLeft"
- app
- clipped
- color=""
- >
- <the-side-bar
- :link="getLink"
- :is-project-admin="isProjectAdmin"
- :project="currentProject"
- />
+ <v-navigation-drawer v-model="drawerLeft" app clipped color="">
+ <the-side-bar :link="getLink" :is-project-admin="isProjectAdmin" :project="currentProject" />
</v-navigation-drawer>
<v-main>
- <v-container
- fluid
- fill-height
- >
- <v-layout
- justify-center
- >
+ <v-container fluid fill-height>
+ <v-layout justify-center>
<v-flex fill-height>
<nuxt />
</v-flex>
@@ -42,7 +28,6 @@ import TheSideBar from '~/components/layout/TheSideBar'
import TheHeader from '~/components/layout/TheHeader'
export default {
-
components: {
TheSideBar,
TheHeader
@@ -52,12 +37,12 @@ export default {
data() {
return {
drawerLeft: null,
- isProjectAdmin: false,
+ isProjectAdmin: false
}
},
computed: {
- ...mapGetters('projects', ['getLink', 'currentProject']),
+ ...mapGetters('projects', ['getLink', 'currentProject'])
},
async created() {
diff --git a/frontend/layouts/projects.vue b/frontend/layouts/projects.vue
index 2987d50cb8..56056101f3 100644
--- a/frontend/layouts/projects.vue
+++ b/frontend/layouts/projects.vue
@@ -2,13 +2,8 @@
<v-app>
<the-header />
<v-main>
- <v-container
- fluid
- fill-height
- >
- <v-layout
- justify-center
- >
+ <v-container fluid fill-height>
+ <v-layout justify-center>
<v-flex>
<nuxt />
</v-flex>
diff --git a/frontend/layouts/workspace.vue b/frontend/layouts/workspace.vue
index 6777dd6e2a..5bab94662b 100644
--- a/frontend/layouts/workspace.vue
+++ b/frontend/layouts/workspace.vue
@@ -6,16 +6,8 @@
</template>
</the-header>
- <v-navigation-drawer
- v-model="drawerLeft"
- app
- clipped
- >
- <the-side-bar
- :link="getLink"
- :is-project-admin="isProjectAdmin"
- :project="currentProject"
- />
+ <v-navigation-drawer v-model="drawerLeft" app clipped>
+ <the-side-bar :link="getLink" :is-project-admin="isProjectAdmin" :project="currentProject" />
</v-navigation-drawer>
<v-main class="pb-0">
@@ -30,7 +22,6 @@ import TheHeader from '~/components/layout/TheHeader'
import TheSideBar from '~/components/layout/TheSideBar'
export default {
-
components: {
TheSideBar,
TheHeader
@@ -45,9 +36,9 @@ export default {
},
computed: {
- ...mapGetters('projects', ['getLink', 'currentProject']),
+ ...mapGetters('projects', ['getLink', 'currentProject'])
},
-
+
watch: {
'$route.query'() {
this.$services.option.save(this.$route.params.id, this.$route.query)
diff --git a/frontend/middleware/auth.js b/frontend/middleware/auth.js
index 9f4ede2c07..8957e44964 100644
--- a/frontend/middleware/auth.js
+++ b/frontend/middleware/auth.js
@@ -1,4 +1,4 @@
-export default function({ store, redirect }) {
+export default function ({ store, redirect }) {
if (!store.getters['auth/isAuthenticated']) {
redirect('/auth')
}
diff --git a/frontend/middleware/check-admin.js b/frontend/middleware/check-admin.js
index f6028c0ee4..29b647e528 100644
--- a/frontend/middleware/check-admin.js
+++ b/frontend/middleware/check-admin.js
@@ -1,9 +1,9 @@
import _ from 'lodash'
-export default _.debounce(async function({ app, store, route, redirect }) {
+export default _.debounce(async function ({ app, store, route, redirect }) {
try {
await store.dispatch('projects/setCurrentProject', route.params.id)
- } catch(e) {
+ } catch (e) {
redirect('/projects')
}
const isProjectAdmin = await app.$services.member.isProjectAdmin(route.params.id)
diff --git a/frontend/middleware/check-auth.js b/frontend/middleware/check-auth.js
index 358f7abd74..1d205554d5 100644
--- a/frontend/middleware/check-auth.js
+++ b/frontend/middleware/check-auth.js
@@ -1,4 +1,4 @@
-export default async function({ store }) {
+export default async function ({ store }) {
if (!store.getters['auth/isAuthenticated'] || !store.getters['auth/getUsername']) {
await store.dispatch('auth/initAuth')
}
diff --git a/frontend/middleware/set-project.js b/frontend/middleware/set-project.js
index 5f46022946..935eaf1bf6 100644
--- a/frontend/middleware/set-project.js
+++ b/frontend/middleware/set-project.js
@@ -1,4 +1,4 @@
-export default async function({ store, route }) {
+export default async function ({ store, route }) {
const project = store.getters['projects/currentProject']
const isEmpty = Object.keys(project).length === 0 && project.constructor === Object
if (isEmpty) {
diff --git a/frontend/nuxt.config.js b/frontend/nuxt.config.js
index 53c8832c9f..b398810b85 100644
--- a/frontend/nuxt.config.js
+++ b/frontend/nuxt.config.js
@@ -4,19 +4,21 @@ import i18n from './i18n'
export default {
ssr: false,
/*
- ** Headers of the page
- */
+ ** Headers of the page
+ */
head: {
titleTemplate: '%s - ' + process.env.npm_package_name,
title: process.env.npm_package_name || '',
meta: [
{ charset: 'utf-8' },
{ name: 'viewport', content: 'width=device-width, initial-scale=1' },
- { hid: 'description', name: 'description', content: process.env.npm_package_description || '' }
+ {
+ hid: 'description',
+ name: 'description',
+ content: process.env.npm_package_description || ''
+ }
],
- link: [
- { rel: 'icon', type: 'image/x-icon', href: '/favicon.ico' },
- ]
+ link: [{ rel: 'icon', type: 'image/x-icon', href: '/favicon.ico' }]
},
server: {
@@ -28,17 +30,16 @@ export default {
},
/*
- ** Customize the progress-bar color
- */
+ ** Customize the progress-bar color
+ */
loading: { color: '#fff' },
/*
- ** Global CSS
- */
- css: [
- ],
+ ** Global CSS
+ */
+ css: [],
/*
- ** Plugins to load before mounting the App
- */
+ ** Plugins to load before mounting the App
+ */
plugins: [
'~/plugins/filters.js',
'~/plugins/vue-youtube.js',
@@ -48,8 +49,8 @@ export default {
'~/plugins/role.ts'
],
/*
- ** Nuxt.js modules
- */
+ ** Nuxt.js modules
+ */
modules: [
['nuxt-i18n', i18n],
// Doc: https://axios.nuxtjs.org/usage
@@ -60,9 +61,12 @@ export default {
buildModules: [
'@nuxt/typescript-build',
'@nuxtjs/composition-api/module',
- ['@nuxtjs/google-analytics', {
- id: process.env.GOOGLE_TRACKING_ID
- }],
+ [
+ '@nuxtjs/google-analytics',
+ {
+ id: process.env.GOOGLE_TRACKING_ID
+ }
+ ],
[
'@nuxtjs/vuetify',
{
@@ -70,9 +74,9 @@ export default {
treeShake: true,
defaultAssets: {
font: false,
- icons: ['mdiSvg'],
- },
- },
+ icons: ['mdiSvg']
+ }
+ }
],
[
'@nuxtjs/google-fonts',
@@ -83,14 +87,14 @@ export default {
display: 'swap',
download: true,
overwriting: true,
- inject: true,
+ inject: true
}
]
],
/*
- ** Axios module configuration
- ** See https://axios.nuxtjs.org/options
- */
+ ** Axios module configuration
+ ** See https://axios.nuxtjs.org/options
+ */
axios: {
proxy: true
},
@@ -102,9 +106,9 @@ export default {
}
},
/*
- ** vuetify module configuration
- ** https://github.com/nuxt-community/vuetify-module
- */
+ ** vuetify module configuration
+ ** https://github.com/nuxt-community/vuetify-module
+ */
vuetify: {
theme: {
primary: colors.blue.darken2,
@@ -137,12 +141,12 @@ export default {
}
},
/*
- ** Build configuration
- */
+ ** Build configuration
+ */
build: {
/*
- ** You can extend webpack config here
- */
+ ** You can extend webpack config here
+ */
publicPath: process.env.PUBLIC_PATH || '/_nuxt/',
extend(config, _) {
// config.module.rules.push({
@@ -165,6 +169,6 @@ export default {
name: '[path][name].[ext]'
}
})
- },
+ }
}
}
diff --git a/frontend/package.json b/frontend/package.json
index 29de78e8cb..8d5e17f9c3 100644
--- a/frontend/package.json
+++ b/frontend/package.json
@@ -8,6 +8,8 @@
"lint": "eslint --ext .ts,.js,.vue --ignore-path .gitignore .",
"lintfix": "eslint --fix --ext .ts,.js,.vue --ignore-path .gitignore .",
"precommit": "yarn lint",
+ "lint:prettier": "prettier --check .",
+ "fix:prettier": "prettier --write .",
"test": "jest",
"dev": "nuxt",
"build": "nuxt build",
diff --git a/frontend/pages/auth.vue b/frontend/pages/auth.vue
index ed928f5a0a..a223e9d040 100644
--- a/frontend/pages/auth.vue
+++ b/frontend/pages/auth.vue
@@ -4,9 +4,7 @@
<v-container class="fill-height" fluid>
<v-row align="center" justify="center">
<v-col cols="12" sm="8" md="4">
- <form-login
- :login="authenticateUser"
- />
+ <form-login :login="authenticateUser" />
</v-col>
</v-row>
</v-container>
diff --git a/frontend/pages/demo/image-classification/index.vue b/frontend/pages/demo/image-classification/index.vue
index 9271a777d7..8bfc768b82 100644
--- a/frontend/pages/demo/image-classification/index.vue
+++ b/frontend/pages/demo/image-classification/index.vue
@@ -14,12 +14,7 @@
/>
</v-card-title>
<v-divider />
- <v-img
- contain
- :src="currentDoc.filename"
- max-height="300"
- class="grey lighten-2"
- />
+ <v-img contain :src="currentDoc.filename" max-height="300" class="grey lighten-2" />
</v-card>
</v-col>
<v-col cols="12" md="3">
@@ -35,7 +30,6 @@ import ListMetadata from '@/components/tasks/metadata/ListMetadata'
import LabelGroup from '@/components/tasks/textClassification/LabelGroup'
export default {
-
components: {
LabelGroup,
ListMetadata
@@ -78,14 +72,16 @@ export default {
meta: {
url: 'https://github.com/Hironsan'
},
- annotation_approver: null,
+ annotation_approver: null
}
}
},
methods: {
removeLabel(annotationId) {
- this.currentDoc.annotations = this.currentDoc.annotations.filter(item => item.id !== annotationId)
+ this.currentDoc.annotations = this.currentDoc.annotations.filter(
+ (item) => item.id !== annotationId
+ )
},
addLabel(labelId) {
const payload = {
diff --git a/frontend/pages/demo/intent-detection-and-slot-filling/index.vue b/frontend/pages/demo/intent-detection-and-slot-filling/index.vue
index e32906bb53..f5bbc02efb 100644
--- a/frontend/pages/demo/intent-detection-and-slot-filling/index.vue
+++ b/frontend/pages/demo/intent-detection-and-slot-filling/index.vue
@@ -1,4 +1,3 @@
-
<template>
<v-main>
<v-container fluid>
@@ -58,7 +57,7 @@ export default {
prefixKey: null,
suffixKey: 'l',
color: '#fbb028',
- textColor: '#ffffff',
+ textColor: '#ffffff'
},
{
id: 5,
@@ -66,7 +65,7 @@ export default {
prefixKey: null,
suffixKey: 'm',
color: '#7c20e0',
- textColor: '#000000',
+ textColor: '#000000'
},
{
id: 6,
@@ -74,7 +73,7 @@ export default {
prefixKey: null,
suffixKey: 'o',
color: '#e6d176',
- textColor: '#000000',
+ textColor: '#000000'
},
{
id: 7,
@@ -82,7 +81,7 @@ export default {
prefixKey: null,
suffixKey: 'p',
color: '#6a74b9',
- textColor: '#ffffff',
+ textColor: '#ffffff'
}
],
items: [
@@ -123,7 +122,7 @@ export default {
exclusive: true,
currentDoc: {
id: 8,
- text: "I want to fly from Boston at 8:38 am and arrive in Denver at 11:10 in the morning.",
+ text: 'I want to fly from Boston at 8:38 am and arrive in Denver at 11:10 in the morning.',
annotations: [
{
id: 17,
@@ -131,7 +130,7 @@ export default {
label: 4,
startOffset: 19,
endOffset: 25,
- user: 1,
+ user: 1
},
{
id: 19,
@@ -139,7 +138,7 @@ export default {
label: 7,
startOffset: 29,
endOffset: 36,
- user: 1,
+ user: 1
},
{
id: 16,
@@ -147,7 +146,7 @@ export default {
label: 4,
startOffset: 51,
endOffset: 57,
- user: 1,
+ user: 1
},
{
id: 18,
@@ -155,8 +154,8 @@ export default {
label: 7,
startOffset: 61,
endOffset: 66,
- user: 1,
- },
+ user: 1
+ }
],
meta: { wikiPageId: 2 },
annotation_approver: null
@@ -166,11 +165,13 @@ export default {
methods: {
deleteEntity(annotationId) {
- this.currentDoc.annotations = this.currentDoc.annotations.filter(item => item.id !== annotationId)
+ this.currentDoc.annotations = this.currentDoc.annotations.filter(
+ (item) => item.id !== annotationId
+ )
},
updateEntity(annotationId, labelId) {
- const index = this.currentDoc.annotations.findIndex(item => item.id === annotationId)
+ const index = this.currentDoc.annotations.findIndex((item) => item.id === annotationId)
this.currentDoc.annotations[index].label = labelId
},
@@ -185,7 +186,7 @@ export default {
},
removeLabel(annotationId) {
- this.categoryAnnotations = this.categoryAnnotations.filter(item => item.id !== annotationId)
+ this.categoryAnnotations = this.categoryAnnotations.filter((item) => item.id !== annotationId)
},
addLabel(labelId) {
@@ -203,7 +204,7 @@ export default {
font-size: 1.25rem !important;
font-weight: 500;
line-height: 2rem;
- font-family: "Roboto", sans-serif !important;
+ font-family: 'Roboto', sans-serif !important;
opacity: 0.6;
}
</style>
diff --git a/frontend/pages/demo/named-entity-recognition/index.vue b/frontend/pages/demo/named-entity-recognition/index.vue
index 60c0477a97..936f947c55 100644
--- a/frontend/pages/demo/named-entity-recognition/index.vue
+++ b/frontend/pages/demo/named-entity-recognition/index.vue
@@ -1,4 +1,3 @@
-
<template>
<v-main>
<v-container fluid>
@@ -7,12 +6,13 @@
text
outlined
class="text-capitalize mr-2"
- @click="allowOverlapping=!allowOverlapping"
+ @click="allowOverlapping = !allowOverlapping"
>
Overlapping({{ allowOverlapping }})
</v-btn>
- <v-btn text outlined @click="rtl=!rtl">
- RTL(<span class="text-capitalize">{{ rtl }}</span>)
+ <v-btn text outlined @click="rtl = !rtl">
+ RTL(<span class="text-capitalize">{{ rtl }}</span
+ >)
</v-btn>
</div>
<v-row justify="center">
@@ -49,7 +49,7 @@ import ListMetadata from '@/components/tasks/metadata/ListMetadata'
export default {
components: {
EntityEditor,
- ListMetadata,
+ ListMetadata
},
layout: 'demo',
data() {
@@ -63,7 +63,7 @@ export default {
prefixKey: null,
suffixKey: 'l',
color: '#7c20e0',
- textColor: '#ffffff',
+ textColor: '#ffffff'
},
{
id: 5,
@@ -71,7 +71,7 @@ export default {
prefixKey: null,
suffixKey: 'm',
color: '#fbb028',
- textColor: '#000000',
+ textColor: '#000000'
},
{
id: 6,
@@ -79,7 +79,7 @@ export default {
prefixKey: null,
suffixKey: 'o',
color: '#e6d176',
- textColor: '#000000',
+ textColor: '#000000'
},
{
id: 7,
@@ -87,7 +87,7 @@ export default {
prefixKey: null,
suffixKey: 'p',
color: '#6a74b9',
- textColor: '#ffffff',
+ textColor: '#ffffff'
}
],
relations: [
@@ -95,15 +95,15 @@ export default {
id: 0,
fromId: 16,
toId: 17,
- labelId: 0,
- },
+ labelId: 0
+ }
],
relationLabels: [
{
id: 0,
- text: "isLorem",
- color: "#ffffff",
- },
+ text: 'isLorem',
+ color: '#ffffff'
+ }
],
currentDoc: {
id: 8,
@@ -115,7 +115,7 @@ export default {
label: 4,
startOffset: 60,
endOffset: 70,
- user: 1,
+ user: 1
},
{
id: 19,
@@ -123,7 +123,7 @@ export default {
label: 4,
startOffset: 164,
endOffset: 171,
- user: 1,
+ user: 1
},
{
id: 16,
@@ -131,7 +131,7 @@ export default {
label: 6,
startOffset: 14,
endOffset: 22,
- user: 1,
+ user: 1
},
{
id: 18,
@@ -139,7 +139,7 @@ export default {
label: 6,
startOffset: 72,
endOffset: 86,
- user: 1,
+ user: 1
},
{
id: 20,
@@ -147,8 +147,8 @@ export default {
label: 7,
startOffset: 180,
endOffset: 192,
- user: 1,
- },
+ user: 1
+ }
],
meta: { wikiPageId: 2 },
annotation_approver: null
@@ -163,13 +163,16 @@ export default {
if (this.rtl) {
this.currentDoc.text = 'داستان SVG Tiny 1.2 طولا ني است.'
} else {
- this.currentDoc.text = 'After bowling Somerset out for 83 on the opening morning at Grace Road, Leicestershire extended their first innings by 94 runs before being bowled out for 296 with England discard Andy Caddick taking three for 83.'
+ this.currentDoc.text =
+ 'After bowling Somerset out for 83 on the opening morning at Grace Road, Leicestershire extended their first innings by 94 runs before being bowled out for 296 with England discard Andy Caddick taking three for 83.'
}
}
},
methods: {
deleteEntity(annotationId) {
- this.currentDoc.annotations = this.currentDoc.annotations.filter(item => item.id !== annotationId)
+ this.currentDoc.annotations = this.currentDoc.annotations.filter(
+ (item) => item.id !== annotationId
+ )
this.relations.forEach((r) => {
if (r.fromId === annotationId || r.toId === annotationId) {
this.deleteRelation(r.id)
@@ -177,7 +180,7 @@ export default {
})
},
updateEntity(annotationId, labelId) {
- const index = this.currentDoc.annotations.findIndex(item => item.id === annotationId)
+ const index = this.currentDoc.annotations.findIndex((item) => item.id === annotationId)
this.currentDoc.annotations[index].label = labelId
},
addEntity(startOffset, endOffset, labelId) {
@@ -190,7 +193,7 @@ export default {
this.currentDoc.annotations.push(payload)
},
deleteRelation(relationId) {
- this.relations = this.relations.filter(item => item.id !== relationId)
+ this.relations = this.relations.filter((item) => item.id !== relationId)
}
}
}
@@ -200,7 +203,7 @@ export default {
font-size: 1.25rem !important;
font-weight: 500;
line-height: 2rem;
- font-family: "Roboto", sans-serif !important;
+ font-family: 'Roboto', sans-serif !important;
opacity: 0.6;
}
</style>
diff --git a/frontend/pages/demo/sentiment-analysis/index.vue b/frontend/pages/demo/sentiment-analysis/index.vue
index 75b6ff5bd5..dbf5a6fda4 100644
--- a/frontend/pages/demo/sentiment-analysis/index.vue
+++ b/frontend/pages/demo/sentiment-analysis/index.vue
@@ -32,7 +32,6 @@ import ListMetadata from '@/components/tasks/metadata/ListMetadata'
import LabelGroup from '@/components/tasks/textClassification/LabelGroup'
export default {
-
components: {
LabelGroup,
ListMetadata
@@ -72,7 +71,7 @@ export default {
document: 8
}
],
- meta: { wikiPageId: 2},
+ meta: { wikiPageId: 2 },
annotation_approver: null
}
}
@@ -80,7 +79,9 @@ export default {
methods: {
removeLabel(annotationId) {
- this.currentDoc.annotations = this.currentDoc.annotations.filter(item => item.id !== annotationId)
+ this.currentDoc.annotations = this.currentDoc.annotations.filter(
+ (item) => item.id !== annotationId
+ )
},
addLabel(labelId) {
const payload = {
diff --git a/frontend/pages/demo/speech-to-text/index.vue b/frontend/pages/demo/speech-to-text/index.vue
index b278658dc3..8e68e64453 100644
--- a/frontend/pages/demo/speech-to-text/index.vue
+++ b/frontend/pages/demo/speech-to-text/index.vue
@@ -3,12 +3,7 @@
<v-container fluid>
<v-row justify="center">
<v-col cols="12" md="9">
- <audio
- controls
- :src="src"
- class="mt-2 mb-5"
- style="width:100%;"
- >
+ <audio controls :src="src" class="mt-2 mb-5" style="width: 100%">
Your browser does not support the
<code>audio</code> element.
</audio>
@@ -33,7 +28,6 @@ import ListMetadata from '@/components/tasks/metadata/ListMetadata'
import Seq2seqBox from '~/components/tasks/seq2seq/Seq2seqBox'
export default {
-
components: {
Seq2seqBox,
ListMetadata
@@ -48,7 +42,7 @@ export default {
annotations: [
{
id: 17,
- text: "Hi! Welcome to doccano!",
+ text: 'Hi! Welcome to doccano!',
user: 1,
document: 8
}
@@ -64,10 +58,12 @@ export default {
methods: {
_deleteAnnotation(annotationId) {
- this.currentDoc.annotations = this.currentDoc.annotations.filter(item => item.id !== annotationId)
+ this.currentDoc.annotations = this.currentDoc.annotations.filter(
+ (item) => item.id !== annotationId
+ )
},
_updateAnnotation(annotationId, text) {
- const index = this.currentDoc.annotations.findIndex(item => item.id === annotationId)
+ const index = this.currentDoc.annotations.findIndex((item) => item.id === annotationId)
this.currentDoc.annotations[index].text = text
},
_createAnnotation(text) {
diff --git a/frontend/pages/demo/text-to-sql/index.vue b/frontend/pages/demo/text-to-sql/index.vue
index a06441762e..0ee10ef7d2 100644
--- a/frontend/pages/demo/text-to-sql/index.vue
+++ b/frontend/pages/demo/text-to-sql/index.vue
@@ -3,9 +3,7 @@
<v-container fluid>
<v-row justify="center">
<v-col cols="12" md="9">
- <v-card
- class="title mb-5"
- >
+ <v-card class="title mb-5">
<v-card-text class="title">
{{ currentDoc.text }}
</v-card-text>
@@ -31,7 +29,6 @@ import ListMetadata from '@/components/tasks/metadata/ListMetadata'
import Seq2seqBox from '~/components/tasks/seq2seq/Seq2seqBox'
export default {
-
components: {
Seq2seqBox,
ListMetadata
@@ -52,15 +49,15 @@ export default {
}
],
meta: {
- "department.department_id": "INT",
- "department.name": "CHAR",
- "department.num_employee": "INT",
- "head.head_id": "INT",
- "head.name": "INT",
- "head.age": "INT",
- "management.department_id": "INT",
- "management.head_id": "INT",
- "management.temporary_acting": "VARCHAR"
+ 'department.department_id': 'INT',
+ 'department.name': 'CHAR',
+ 'department.num_employee': 'INT',
+ 'head.head_id': 'INT',
+ 'head.name': 'INT',
+ 'head.age': 'INT',
+ 'management.department_id': 'INT',
+ 'management.head_id': 'INT',
+ 'management.temporary_acting': 'VARCHAR'
},
annotation_approver: null
}
@@ -69,10 +66,12 @@ export default {
methods: {
_deleteAnnotation(annotationId) {
- this.currentDoc.annotations = this.currentDoc.annotations.filter(item => item.id !== annotationId)
+ this.currentDoc.annotations = this.currentDoc.annotations.filter(
+ (item) => item.id !== annotationId
+ )
},
_updateAnnotation(annotationId, text) {
- const index = this.currentDoc.annotations.findIndex(item => item.id === annotationId)
+ const index = this.currentDoc.annotations.findIndex((item) => item.id === annotationId)
this.currentDoc.annotations[index].text = text
},
_createAnnotation(text) {
diff --git a/frontend/pages/demo/translation/index.vue b/frontend/pages/demo/translation/index.vue
index cbc9e0e05d..e1645d3cd7 100644
--- a/frontend/pages/demo/translation/index.vue
+++ b/frontend/pages/demo/translation/index.vue
@@ -3,9 +3,7 @@
<v-container fluid>
<v-row justify="center">
<v-col cols="12" md="9">
- <v-card
- class="title mb-5"
- >
+ <v-card class="title mb-5">
<v-card-text class="title">
{{ currentDoc.text }}
</v-card-text>
@@ -31,7 +29,6 @@ import ListMetadata from '@/components/tasks/metadata/ListMetadata'
import Seq2seqBox from '~/components/tasks/seq2seq/Seq2seqBox'
export default {
-
components: {
Seq2seqBox,
ListMetadata
@@ -57,7 +54,7 @@ export default {
document: 8
}
],
- meta: { wikiPageId: 2},
+ meta: { wikiPageId: 2 },
annotation_approver: null
}
}
@@ -65,10 +62,12 @@ export default {
methods: {
_deleteAnnotation(annotationId) {
- this.currentDoc.annotations = this.currentDoc.annotations.filter(item => item.id !== annotationId)
+ this.currentDoc.annotations = this.currentDoc.annotations.filter(
+ (item) => item.id !== annotationId
+ )
},
_updateAnnotation(annotationId, text) {
- const index = this.currentDoc.annotations.findIndex(item => item.id === annotationId)
+ const index = this.currentDoc.annotations.findIndex((item) => item.id === annotationId)
this.currentDoc.annotations[index].text = text
},
_createAnnotation(text) {
diff --git a/frontend/pages/projects/_id/comments/index.vue b/frontend/pages/projects/_id/comments/index.vue
index 55fb9449e0..80214e3f3c 100644
--- a/frontend/pages/projects/_id/comments/index.vue
+++ b/frontend/pages/projects/_id/comments/index.vue
@@ -5,16 +5,12 @@
class="text-capitalize ms-2"
:disabled="!canDelete"
outlined
- @click.stop="dialogDelete=true"
+ @click.stop="dialogDelete = true"
>
{{ $t('generic.delete') }}
</v-btn>
<v-dialog v-model="dialogDelete">
- <form-delete
- :selected="selected"
- @cancel="dialogDelete=false"
- @remove="remove"
- />
+ <form-delete :selected="selected" @cancel="dialogDelete = false" @remove="remove" />
</v-dialog>
</v-card-title>
<comment-list
@@ -37,7 +33,6 @@ import { ProjectDTO } from '~/services/application/project/projectData'
import FormDelete from '~/components/comment/FormDelete.vue'
export default Vue.extend({
-
components: {
CommentList,
FormDelete
@@ -75,11 +70,10 @@ export default Vue.extend({
},
watch: {
- '$route.query': _.debounce(function() {
- // @ts-ignore
- this.$fetch()
- }, 1000
- ),
+ '$route.query': _.debounce(function () {
+ // @ts-ignore
+ this.$fetch()
+ }, 1000)
},
methods: {
diff --git a/frontend/pages/projects/_id/dataset/export.vue b/frontend/pages/projects/_id/dataset/export.vue
index 16785cfb1c..c9dffe5e2c 100644
--- a/frontend/pages/projects/_id/dataset/export.vue
+++ b/frontend/pages/projects/_id/dataset/export.vue
@@ -5,15 +5,9 @@
</v-card-title>
<v-card-text>
<v-overlay :value="isProcessing">
- <v-progress-circular
- indeterminate
- size="64"
- />
+ <v-progress-circular indeterminate size="64" />
</v-overlay>
- <v-form
- ref="form"
- v-model="valid"
- >
+ <v-form ref="form" v-model="valid">
<v-select
v-model="selectedFormat"
:items="formats"
@@ -31,19 +25,11 @@
>
<pre>{{ example }}</pre>
</v-sheet>
- <v-checkbox
- v-model="exportApproved"
- label="Export only approved documents"
- hide-details
- />
+ <v-checkbox v-model="exportApproved" label="Export only approved documents" hide-details />
</v-form>
</v-card-text>
<v-card-actions>
- <v-btn
- class='text-capitalize ms-2 primary'
- :disabled="!valid"
- @click="downloadRequest"
- >
+ <v-btn class="text-capitalize ms-2 primary" :disabled="!valid" @click="downloadRequest">
{{ $t('generic.export') }}
</v-btn>
</v-card-actions>
@@ -72,7 +58,7 @@ export default Vue.extend({
polling: null,
selectedFormat: null as any,
taskId: '',
- valid: false,
+ valid: false
}
},
@@ -93,12 +79,12 @@ export default Vue.extend({
beforeDestroy() {
// @ts-ignore
- clearInterval(this.polling)
+ clearInterval(this.polling)
},
methods: {
reset() {
- (this.$refs.form as HTMLFormElement).reset()
+ ;(this.$refs.form as HTMLFormElement).reset()
this.taskId = ''
this.exportApproved = false
this.selectedFormat = null
@@ -107,13 +93,17 @@ export default Vue.extend({
async downloadRequest() {
this.isProcessing = true
- this.taskId = await this.$services.download.request(this.projectId, this.selectedFormat, this.exportApproved)
+ this.taskId = await this.$services.download.request(
+ this.projectId,
+ this.selectedFormat,
+ this.exportApproved
+ )
this.pollData()
},
pollData() {
// @ts-ignore
- this.polling = setInterval(async() => {
+ this.polling = setInterval(async () => {
if (this.taskId) {
const res = await this.$services.taskStatus.get(this.taskId)
if (res.ready) {
@@ -121,8 +111,8 @@ export default Vue.extend({
this.reset()
}
}
- }, 1000)
- },
- }
+ }, 1000)
+ }
+ }
})
</script>
diff --git a/frontend/pages/projects/_id/dataset/import.vue b/frontend/pages/projects/_id/dataset/import.vue
index adf388c7cd..3aede52af6 100644
--- a/frontend/pages/projects/_id/dataset/import.vue
+++ b/frontend/pages/projects/_id/dataset/import.vue
@@ -5,10 +5,7 @@
</v-card-title>
<v-card-text>
<v-overlay :value="isImporting">
- <v-progress-circular
- indeterminate
- size="64"
- />
+ <v-progress-circular indeterminate size="64" />
</v-overlay>
<v-select
v-model="selected"
@@ -48,7 +45,7 @@
:light="$vuetify.theme.dark"
class="mb-5 pa-5"
>
- <pre>{{ example }}</pre>
+ <pre>{{ example }}</pre>
</v-sheet>
<file-pond
v-if="selected && acceptedFileTypes !== '*'"
@@ -81,11 +78,7 @@
></v-data-table>
</v-card-text>
<v-card-actions>
- <v-btn
- class='text-capitalize ms-2 primary'
- :disabled="isDisabled"
- @click="importDataset"
- >
+ <v-btn class="text-capitalize ms-2 primary" :disabled="isDisabled" @click="importDataset">
{{ $t('generic.import') }}
</v-btn>
</v-card-actions>
@@ -94,17 +87,14 @@
<script>
import Cookies from 'js-cookie'
-import vueFilePond from "vue-filepond"
-import "filepond/dist/filepond.min.css"
-import FilePondPluginFileValidateType from "filepond-plugin-file-validate-type"
-const FilePond = vueFilePond(
- FilePondPluginFileValidateType,
-)
+import vueFilePond from 'vue-filepond'
+import 'filepond/dist/filepond.min.css'
+import FilePondPluginFileValidateType from 'filepond-plugin-file-validate-type'
+const FilePond = vueFilePond(FilePondPluginFileValidateType)
export default {
-
components: {
- FilePond,
+ FilePond
},
layout: 'project',
@@ -112,13 +102,13 @@ export default {
validate({ params }) {
return /^\d+$/.test(params.id)
},
-
+
data() {
return {
catalog: [],
selected: null,
myFiles: [],
- option: {'column_data': '', 'column_label': '', 'delimiter': ''},
+ option: { column_data: '', column_label: '', delimiter: '' },
taskId: null,
polling: null,
errors: [],
@@ -127,17 +117,15 @@ export default {
{ text: 'Line', value: 'line' },
{ text: 'Message', value: 'message' }
],
- requiredRules: [
- v => !!v || 'Field value is required'
- ],
+ requiredRules: [(v) => !!v || 'Field value is required'],
server: {
url: '/v1/fp',
headers: {
- 'X-CSRFToken': Cookies.get('csrftoken'),
+ 'X-CSRFToken': Cookies.get('csrftoken')
},
process: {
url: '/process/',
- method: 'POST',
+ method: 'POST'
},
patch: '/patch/',
revert: '/revert/',
@@ -147,7 +135,7 @@ export default {
},
uploadedFiles: [],
valid: false,
- isImporting: false,
+ isImporting: false
}
},
@@ -156,7 +144,7 @@ export default {
return this.uploadedFiles.length === 0 || this.taskId !== null || !this.valid
},
properties() {
- const item = this.catalog.find(item => item.displayName === this.selected)
+ const item = this.catalog.find((item) => item.displayName === this.selected)
if (item) {
return item.properties
} else {
@@ -174,7 +162,7 @@ export default {
return Object.fromEntries(textFields)
},
acceptedFileTypes() {
- const item = this.catalog.find(item => item.displayName === this.selected)
+ const item = this.catalog.find((item) => item.displayName === this.selected)
if (item) {
return item.acceptTypes
} else {
@@ -182,14 +170,15 @@ export default {
}
},
example() {
- const item = this.catalog.find(item => item.displayName === this.selected)
+ const item = this.catalog.find((item) => item.displayName === this.selected)
if (item) {
const column_data = 'column_data'
const column_label = 'column_label'
if (column_data in this.option && column_label in this.option) {
- return item.example.replaceAll(column_data, this.option[column_data])
- .replaceAll(column_label, this.option[column_label])
- .trim()
+ return item.example
+ .replaceAll(column_data, this.option[column_data])
+ .replaceAll(column_label, this.option[column_label])
+ .trim()
} else {
return item.example.trim()
}
@@ -201,7 +190,7 @@ export default {
watch: {
selected() {
- const item = this.catalog.find(item => item.displayName === this.selected)
+ const item = this.catalog.find((item) => item.displayName === this.selected)
for (const [key, value] of Object.entries(item.properties)) {
this.option[key] = value.default
}
@@ -220,7 +209,7 @@ export default {
},
beforeDestroy() {
- clearInterval(this.polling)
+ clearInterval(this.polling)
},
methods: {
@@ -231,25 +220,25 @@ export default {
},
handleFilePondRemoveFile(error, file) {
console.log(error)
- const index = this.uploadedFiles.findIndex(item => item.id === file.id)
+ const index = this.uploadedFiles.findIndex((item) => item.id === file.id)
if (index > -1) {
- this.uploadedFiles.splice(index, 1)
- this.$nextTick()
+ this.uploadedFiles.splice(index, 1)
+ this.$nextTick()
}
},
async importDataset() {
this.isImporting = true
- const item = this.catalog.find(item => item.displayName === this.selected)
+ const item = this.catalog.find((item) => item.displayName === this.selected)
this.taskId = await this.$services.parse.analyze(
this.$route.params.id,
item.name,
item.taskId,
- this.uploadedFiles.map(item => item.serverId),
+ this.uploadedFiles.map((item) => item.serverId),
this.option
)
},
pollData() {
- this.polling = setInterval(async() => {
+ this.polling = setInterval(async () => {
if (this.taskId) {
const res = await this.$services.taskStatus.get(this.taskId)
if (res.ready) {
@@ -263,8 +252,8 @@ export default {
}
}
}
- }, 3000)
- },
+ }, 3000)
+ },
toVisualize(text) {
if (text === '\t') {
return 'Tab'
@@ -276,6 +265,6 @@ export default {
return text
}
}
- },
-};
+ }
+}
</script>
diff --git a/frontend/pages/projects/_id/dataset/index.vue b/frontend/pages/projects/_id/dataset/index.vue
index 6da3d34002..49157ed627 100644
--- a/frontend/pages/projects/_id/dataset/index.vue
+++ b/frontend/pages/projects/_id/dataset/index.vue
@@ -9,7 +9,7 @@
class="text-capitalize ms-2"
:disabled="!canDelete"
outlined
- @click.stop="dialogDelete=true"
+ @click.stop="dialogDelete = true"
>
{{ $t('generic.delete') }}
</v-btn>
@@ -18,7 +18,7 @@
:disabled="!item.count"
class="text-capitalize"
color="error"
- @click="dialogDeleteAll=true"
+ @click="dialogDeleteAll = true"
>
{{ $t('generic.deleteAll') }}
</v-btn>
@@ -26,15 +26,12 @@
<form-delete
:selected="selected"
:item-key="itemKey"
- @cancel="dialogDelete=false"
+ @cancel="dialogDelete = false"
@remove="remove"
/>
</v-dialog>
<v-dialog v-model="dialogDeleteAll">
- <form-delete-bulk
- @cancel="dialogDeleteAll=false"
- @remove="removeAll"
- />
+ <form-delete-bulk @cancel="dialogDeleteAll = false" @remove="removeAll" />
</v-dialog>
</v-card-title>
<image-list
@@ -80,14 +77,13 @@ import ActionMenu from '~/components/example/ActionMenu.vue'
import { ProjectDTO } from '~/services/application/project/projectData'
export default Vue.extend({
-
components: {
ActionMenu,
AudioList,
DocumentList,
ImageList,
FormDelete,
- FormDeleteBulk,
+ FormDeleteBulk
},
layout: 'project',
@@ -134,15 +130,14 @@ export default Vue.extend({
} else {
return 'text'
}
- },
+ }
},
watch: {
- '$route.query': _.debounce(function() {
- // @ts-ignore
- this.$fetch()
- }, 1000
- ),
+ '$route.query': _.debounce(function () {
+ // @ts-ignore
+ this.$fetch()
+ }, 1000)
},
async created() {
diff --git a/frontend/pages/projects/_id/guideline/index.vue b/frontend/pages/projects/_id/guideline/index.vue
index 80490df54d..c1044723fe 100644
--- a/frontend/pages/projects/_id/guideline/index.vue
+++ b/frontend/pages/projects/_id/guideline/index.vue
@@ -18,7 +18,6 @@ import { Editor } from '@toast-ui/vue-editor'
import '@/assets/style/editor.css'
export default {
-
components: {
Editor
},
@@ -35,7 +34,7 @@ export default {
language: this.$t('toastui.localeCode')
},
project: {},
- mounted: false,
+ mounted: false
}
},
@@ -47,7 +46,7 @@ export default {
},
methods: {
- updateProject: _.debounce(function() {
+ updateProject: _.debounce(function () {
if (this.mounted) {
this.project.guideline = this.$refs.toastuiEditor.invoke('getMarkdown')
this.$services.project.update(this.project)
@@ -58,7 +57,8 @@ export default {
</script>
<style>
-.te-md-container .CodeMirror, .tui-editor-contents {
+.te-md-container .CodeMirror,
+.tui-editor-contents {
font-size: 20px;
}
</style>
diff --git a/frontend/pages/projects/_id/image-classification/index.vue b/frontend/pages/projects/_id/image-classification/index.vue
index 0b21159e21..3bce0f3484 100644
--- a/frontend/pages/projects/_id/image-classification/index.vue
+++ b/frontend/pages/projects/_id/image-classification/index.vue
@@ -11,11 +11,7 @@
@click:clear-label="clear"
@click:review="confirm"
>
- <v-btn-toggle
- v-model="labelOption"
- mandatory
- class="ms-2"
- >
+ <v-btn-toggle v-model="labelOption" mandatory class="ms-2">
<v-btn icon>
<v-icon>{{ mdiFormatListBulleted }}</v-icon>
</v-btn>
@@ -24,16 +20,10 @@
</v-btn>
</v-btn-toggle>
</toolbar-laptop>
- <toolbar-mobile
- :total="images.count"
- class="d-flex d-sm-none"
- />
+ <toolbar-mobile :total="images.count" class="d-flex d-sm-none" />
</template>
<template #content>
- <v-card
- v-shortkey="shortKeys"
- @shortkey="addOrRemove"
- >
+ <v-card v-shortkey="shortKeys" @shortkey="addOrRemove">
<v-card-title>
<label-group
v-if="labelOption === 0"
@@ -53,12 +43,7 @@
/>
</v-card-title>
<v-divider />
- <v-img
- contain
- :src="image.fileUrl"
- :max-height="imageSize.height"
- class="grey lighten-2"
- />
+ <v-img contain :src="image.fileUrl" :max-height="imageSize.height" class="grey lighten-2" />
</v-card>
</template>
<template #sidebar>
@@ -82,7 +67,6 @@ import { useLabelList } from '@/composables/useLabelList'
import AnnotationProgress from '@/components/tasks/sidebar/AnnotationProgress.vue'
export default {
-
components: {
AnnotationProgress,
LabelGroup,
@@ -105,7 +89,7 @@ export default {
return {
...toRefs(state),
getLabelList,
- shortKeys,
+ shortKeys
}
},
@@ -186,7 +170,7 @@ export default {
async addOrRemove(event) {
const labelId = parseInt(event.srcKey, 10)
- const annotation = this.annotations.find(item => item.label === labelId)
+ const annotation = this.annotations.find((item) => item.label === labelId)
if (annotation) {
await this.remove(annotation.id)
} else {
@@ -220,7 +204,7 @@ export default {
setImageSize(val) {
const img = new Image()
const self = this
- img.onload = function() {
+ img.onload = function () {
self.imageSize.height = this.height
self.imageSize.width = this.width
}
diff --git a/frontend/pages/projects/_id/index.vue b/frontend/pages/projects/_id/index.vue
index a6572d1559..1049886957 100644
--- a/frontend/pages/projects/_id/index.vue
+++ b/frontend/pages/projects/_id/index.vue
@@ -3,45 +3,19 @@
<v-card-title>
{{ $t('projectHome.welcome') }}
</v-card-title>
- <v-stepper
- v-model="e6"
- vertical
- non-linear
- >
- <div
- v-for="(item, index) in items"
- :key="index"
- >
- <v-stepper-step
- :complete="e6 > index + 1"
- :step="index + 1"
- editable
- >
+ <v-stepper v-model="e6" vertical non-linear>
+ <div v-for="(item, index) in items" :key="index">
+ <v-stepper-step :complete="e6 > index + 1" :step="index + 1" editable>
{{ item.title }}
</v-stepper-step>
<v-stepper-content :step="index + 1">
- <v-card
- v-if="e6 === index + 1"
- class="mb-12"
- width="560"
- height="315"
- >
- <youtube
- ref="youtube"
- :video-id="item.videoId"
- />
+ <v-card v-if="e6 === index + 1" class="mb-12" width="560" height="315">
+ <youtube ref="youtube" :video-id="item.videoId" />
</v-card>
- <v-btn
- color="primary mt-5"
- @click="next"
- >
+ <v-btn color="primary mt-5" @click="next">
{{ $t('generic.continue') }}
</v-btn>
- <v-btn
- class="mt-5"
- text
- @click="prev"
- >
+ <v-btn class="mt-5" text @click="prev">
{{ $t('generic.cancel') }}
</v-btn>
</v-stepper-content>
@@ -65,9 +39,18 @@ export default {
{ title: this.$t('projectHome.importData'), videoId: 'dA4ID1DSxCE' },
{ title: this.$t('projectHome.createLabels'), videoId: '1bSML270quU' },
{ title: this.$t('projectHome.addMembers'), videoId: 'NI09dcBz-qA' },
- { title: this.$t('projectHome.defineGuideline'), videoId: 'AvvX3Xs32nA' },
- { title: this.$t('projectHome.annotateDataset'), videoId: 'F3XoSdyiMhA' },
- { title: this.$t('projectHome.viewStatistics'), videoId: 'kfRpa0mNQMY' },
+ {
+ title: this.$t('projectHome.defineGuideline'),
+ videoId: 'AvvX3Xs32nA'
+ },
+ {
+ title: this.$t('projectHome.annotateDataset'),
+ videoId: 'F3XoSdyiMhA'
+ },
+ {
+ title: this.$t('projectHome.viewStatistics'),
+ videoId: 'kfRpa0mNQMY'
+ },
{ title: this.$t('projectHome.exportDataset'), videoId: 'Pfy_QcHEeQ4' }
]
}
diff --git a/frontend/pages/projects/_id/intent-detection-and-slot-filling/index.vue b/frontend/pages/projects/_id/intent-detection-and-slot-filling/index.vue
index 25904e2044..ed3236aef5 100644
--- a/frontend/pages/projects/_id/intent-detection-and-slot-filling/index.vue
+++ b/frontend/pages/projects/_id/intent-detection-and-slot-filling/index.vue
@@ -1,4 +1,3 @@
-
<template>
<layout-text v-if="doc.id">
<template #header>
@@ -12,10 +11,7 @@
@click:clear-label="clear"
@click:review="confirm"
/>
- <toolbar-mobile
- :total="docs.count"
- class="d-flex d-sm-none"
- />
+ <toolbar-mobile :total="docs.count" class="d-flex d-sm-none" />
</template>
<template #content>
<v-card>
@@ -132,8 +128,8 @@ export default {
methods: {
async listSpan(docId) {
- const spans = await this.$services.sequenceLabeling.list(this.projectId, docId);
- this.spans = spans;
+ const spans = await this.$services.sequenceLabeling.list(this.projectId, docId)
+ this.spans = spans
},
async deleteSpan(id) {
@@ -142,12 +138,23 @@ export default {
},
async addSpan(startOffset, endOffset, labelId) {
- await this.$services.sequenceLabeling.create(this.projectId, this.doc.id, labelId, startOffset, endOffset)
+ await this.$services.sequenceLabeling.create(
+ this.projectId,
+ this.doc.id,
+ labelId,
+ startOffset,
+ endOffset
+ )
await this.listSpan(this.doc.id)
},
async updateSpan(annotationId, labelId) {
- await this.$services.sequenceLabeling.changeLabel(this.projectId, this.doc.id, annotationId, labelId)
+ await this.$services.sequenceLabeling.changeLabel(
+ this.projectId,
+ this.doc.id,
+ annotationId,
+ labelId
+ )
await this.listSpan(this.doc.id)
},
@@ -187,7 +194,7 @@ export default {
font-size: 1.25rem !important;
font-weight: 500;
line-height: 2rem;
- font-family: "Roboto", sans-serif !important;
+ font-family: 'Roboto', sans-serif !important;
opacity: 0.6;
}
</style>
diff --git a/frontend/pages/projects/_id/labels/_label_id/edit.vue b/frontend/pages/projects/_id/labels/_label_id/edit.vue
index fe1810e9f9..24506168d3 100644
--- a/frontend/pages/projects/_id/labels/_label_id/edit.vue
+++ b/frontend/pages/projects/_id/labels/_label_id/edit.vue
@@ -1,15 +1,6 @@
<template>
- <form-create
- v-slot="slotProps"
- v-bind.sync="editedItem"
- :items="items"
- >
- <v-btn
- :disabled="!slotProps.valid"
- color="primary"
- class="text-capitalize"
- @click="save"
- >
+ <form-create v-slot="slotProps" v-bind.sync="editedItem" :items="items">
+ <v-btn :disabled="!slotProps.valid" color="primary" class="text-capitalize" @click="save">
Save
</v-btn>
</form-create>
@@ -23,18 +14,17 @@ import FormCreate from '~/components/label/FormCreate.vue'
export default Vue.extend({
components: {
- FormCreate,
+ FormCreate
},
layout: 'project',
validate({ params, query, app }) {
- if (!['category', 'span', 'relation'].includes((query.type as string))) {
+ if (!['category', 'span', 'relation'].includes(query.type as string)) {
return false
}
if (/^\d+$/.test(params.id)) {
- return app.$services.project.findById(params.id)
- .then((res:ProjectDTO) => {
+ return app.$services.project.findById(params.id).then((res: ProjectDTO) => {
return res.canDefineLabel
})
}
@@ -67,12 +57,12 @@ export default Vue.extend({
const type = this.$route.query.type
if (type === 'category') {
return this.$services.categoryType
- } else if (type === 'span'){
+ } else if (type === 'span') {
return this.$services.spanType
} else {
return this.$services.relationType
}
- },
+ }
},
async created() {
diff --git a/frontend/pages/projects/_id/labels/add.vue b/frontend/pages/projects/_id/labels/add.vue
index 223f56b76b..e89eacced8 100644
--- a/frontend/pages/projects/_id/labels/add.vue
+++ b/frontend/pages/projects/_id/labels/add.vue
@@ -1,15 +1,6 @@
<template>
- <form-create
- v-slot="slotProps"
- v-bind.sync="editedItem"
- :items="items"
- >
- <v-btn
- :disabled="!slotProps.valid"
- color="primary"
- class="text-capitalize"
- @click="save"
- >
+ <form-create v-slot="slotProps" v-bind.sync="editedItem" :items="items">
+ <v-btn :disabled="!slotProps.valid" color="primary" class="text-capitalize" @click="save">
Save
</v-btn>
@@ -33,18 +24,17 @@ import FormCreate from '~/components/label/FormCreate.vue'
export default Vue.extend({
components: {
- FormCreate,
+ FormCreate
},
layout: 'project',
validate({ params, query, app }) {
- if (!['category', 'span', 'relation'].includes((query.type as string))) {
+ if (!['category', 'span', 'relation'].includes(query.type as string)) {
return false
}
if (/^\d+$/.test(params.id)) {
- return app.$services.project.findById(params.id)
- .then((res:ProjectDTO) => {
+ return app.$services.project.findById(params.id).then((res: ProjectDTO) => {
return res.canDefineLabel
})
}
@@ -85,7 +75,7 @@ export default Vue.extend({
} else {
return this.$services.relationType
}
- },
+ }
},
async created() {
diff --git a/frontend/pages/projects/_id/labels/import.vue b/frontend/pages/projects/_id/labels/import.vue
index 162f0c5d74..930f349d41 100644
--- a/frontend/pages/projects/_id/labels/import.vue
+++ b/frontend/pages/projects/_id/labels/import.vue
@@ -1,9 +1,5 @@
<template>
- <form-import
- :error-message="errorMessage"
- @clear="clearErrorMessage"
- @upload="upload"
- />
+ <form-import :error-message="errorMessage" @clear="clearErrorMessage" @upload="upload" />
</template>
<script lang="ts">
@@ -13,18 +9,17 @@ import FormImport from '~/components/label/FormImport.vue'
export default Vue.extend({
components: {
- FormImport,
+ FormImport
},
layout: 'project',
validate({ params, query, app }) {
- if (!['category', 'span', 'relation'].includes((query.type as string))) {
+ if (!['category', 'span', 'relation'].includes(query.type as string)) {
return false
}
if (/^\d+$/.test(params.id)) {
- return app.$services.project.findById(params.id)
- .then((res:ProjectDTO) => {
+ return app.$services.project.findById(params.id).then((res: ProjectDTO) => {
return res.canDefineLabel
})
}
@@ -33,7 +28,7 @@ export default Vue.extend({
data() {
return {
- errorMessage: '',
+ errorMessage: ''
}
},
@@ -51,7 +46,7 @@ export default Vue.extend({
} else {
return this.$services.relationType
}
- },
+ }
},
methods: {
@@ -59,7 +54,7 @@ export default Vue.extend({
try {
await this.service.upload(this.projectId, file)
this.$router.push(`/projects/${this.projectId}/labels`)
- } catch(e) {
+ } catch (e) {
this.errorMessage = e.message
}
},
@@ -67,6 +62,6 @@ export default Vue.extend({
clearErrorMessage() {
this.errorMessage = ''
}
- },
+ }
})
</script>
diff --git a/frontend/pages/projects/_id/labels/index.vue b/frontend/pages/projects/_id/labels/index.vue
index fc572677f1..238b378391 100644
--- a/frontend/pages/projects/_id/labels/index.vue
+++ b/frontend/pages/projects/_id/labels/index.vue
@@ -20,24 +20,15 @@
class="text-capitalize ms-2"
:disabled="!canDelete"
outlined
- @click.stop="dialogDelete=true"
+ @click.stop="dialogDelete = true"
>
{{ $t('generic.delete') }}
</v-btn>
<v-dialog v-model="dialogDelete">
- <form-delete
- :selected="selected"
- @cancel="dialogDelete=false"
- @remove="remove"
- />
+ <form-delete :selected="selected" @cancel="dialogDelete = false" @remove="remove" />
</v-dialog>
</v-card-title>
- <label-list
- v-model="selected"
- :items="items"
- :is-loading="isLoading"
- @edit="editItem"
- />
+ <label-list v-model="selected" :items="items" :is-loading="isLoading" @edit="editItem" />
</v-card>
</template>
@@ -50,7 +41,6 @@ import { LabelDTO } from '~/services/application/label/labelData'
import { ProjectDTO } from '~/services/application/project/projectData'
export default Vue.extend({
-
components: {
ActionMenu,
FormDelete,
@@ -60,8 +50,7 @@ export default Vue.extend({
validate({ params, app }) {
if (/^\d+$/.test(params.id)) {
- return app.$services.project.findById(params.id)
- .then((res:ProjectDTO) => {
+ return app.$services.project.findById(params.id).then((res: ProjectDTO) => {
return res.canDefineLabel
})
}
@@ -75,7 +64,7 @@ export default Vue.extend({
selected: [] as LabelDTO[],
isLoading: false,
tab: 0,
- project: {} as ProjectDTO,
+ project: {} as ProjectDTO
}
},
@@ -102,7 +91,7 @@ export default Vue.extend({
labelType(): string {
if (this.hasMultiType) {
- if (this.isIntentDetectionAndSlotFilling){
+ if (this.isIntentDetectionAndSlotFilling) {
return ['category', 'span'][this.tab!]
} else {
return ['span', 'relation'][this.tab!]
@@ -149,7 +138,7 @@ export default Vue.extend({
this.items = await this.service.list(this.projectId)
this.isLoading = false
},
-
+
async remove() {
await this.service.bulkDelete(this.projectId, this.selected)
this.list()
diff --git a/frontend/pages/projects/_id/members/index.vue b/frontend/pages/projects/_id/members/index.vue
index d3521ac506..62879f6e07 100644
--- a/frontend/pages/projects/_id/members/index.vue
+++ b/frontend/pages/projects/_id/members/index.vue
@@ -1,18 +1,14 @@
<template>
<v-card>
<v-card-title>
- <v-btn
- class="text-capitalize"
- color="primary"
- @click.stop="dialogCreate=true"
- >
+ <v-btn class="text-capitalize" color="primary" @click.stop="dialogCreate = true">
{{ $t('generic.add') }}
</v-btn>
<v-btn
class="text-capitalize ms-2"
:disabled="!canDelete"
outlined
- @click.stop="dialogDelete=true"
+ @click.stop="dialogDelete = true"
>
{{ $t('generic.delete') }}
</v-btn>
@@ -25,19 +21,10 @@
/>
</v-dialog>
<v-dialog v-model="dialogDelete">
- <form-delete
- :selected="selected"
- @cancel="dialogDelete=false"
- @remove="remove"
- />
+ <form-delete :selected="selected" @cancel="dialogDelete = false" @remove="remove" />
</v-dialog>
</v-card-title>
- <member-list
- v-model="selected"
- :items="items"
- :is-loading="isLoading"
- @edit="editItem"
- />
+ <member-list v-model="selected" :items="items" :is-loading="isLoading" @edit="editItem" />
</v-card>
</template>
@@ -49,7 +36,6 @@ import FormCreate from '~/components/member/FormCreate.vue'
import { MemberDTO } from '~/services/application/member/memberData'
export default Vue.extend({
-
components: {
MemberList,
FormCreate,
@@ -89,7 +75,7 @@ export default Vue.extend({
this.isLoading = true
try {
this.items = await this.$services.member.list(this.projectId)
- } catch(e) {
+ } catch (e) {
this.$router.push(`/projects/${this.projectId}`)
} finally {
this.isLoading = false
@@ -111,7 +97,7 @@ export default Vue.extend({
await this.$services.member.create(this.projectId, this.editedItem)
this.close()
this.$fetch()
- } catch(e) {
+ } catch (e) {
this.errorMessage = e.message
}
},
@@ -121,7 +107,7 @@ export default Vue.extend({
await this.$services.member.update(this.projectId, this.editedItem)
this.close()
this.$fetch()
- } catch(e) {
+ } catch (e) {
this.errorMessage = e.message
}
},
diff --git a/frontend/pages/projects/_id/metrics/index.vue b/frontend/pages/projects/_id/metrics/index.vue
index 3e5aaeff05..091b1e2221 100644
--- a/frontend/pages/projects/_id/metrics/index.vue
+++ b/frontend/pages/projects/_id/metrics/index.vue
@@ -34,9 +34,9 @@ import MemberProgress from '~/components/metrics/MemberProgress'
export default {
components: {
LabelDistribution,
- MemberProgress,
+ MemberProgress
},
-
+
layout: 'project',
validate({ params }) {
@@ -51,7 +51,7 @@ export default {
relationTypes: [],
relationDistribution: {},
spanTypes: [],
- spanDistribution: {},
+ spanDistribution: {}
}
},
@@ -65,7 +65,9 @@ export default {
this.project = await this.$services.project.findById(this.projectId)
if (this.project.hasCategory) {
this.categoryTypes = await this.$services.categoryType.list(this.projectId)
- this.categoryDistribution = await this.$services.metrics.fetchCategoryDistribution(this.projectId)
+ this.categoryDistribution = await this.$services.metrics.fetchCategoryDistribution(
+ this.projectId
+ )
}
if (this.project.hasSpan) {
this.spanTypes = await this.$services.spanType.list(this.projectId)
@@ -73,7 +75,9 @@ export default {
}
if (this.project.useRelation) {
this.relationTypes = await this.$services.relationType.list(this.projectId)
- this.relationDistribution = await this.$services.metrics.fetchRelationDistribution(this.projectId)
+ this.relationDistribution = await this.$services.metrics.fetchRelationDistribution(
+ this.projectId
+ )
}
}
}
diff --git a/frontend/pages/projects/_id/sequence-labeling/index.vue b/frontend/pages/projects/_id/sequence-labeling/index.vue
index b673b40262..37ecd1357f 100644
--- a/frontend/pages/projects/_id/sequence-labeling/index.vue
+++ b/frontend/pages/projects/_id/sequence-labeling/index.vue
@@ -11,10 +11,7 @@
@click:clear-label="clear"
@click:review="confirm"
/>
- <toolbar-mobile
- :total="docs.count"
- class="d-flex d-sm-none"
- />
+ <toolbar-mobile :total="docs.count" class="d-flex d-sm-none" />
</template>
<template #content>
<v-card>
@@ -46,19 +43,13 @@
<v-card class="mt-4">
<v-card-title>Label Types</v-card-title>
<v-card-text>
- <v-switch
- v-if="useRelationLabeling"
- v-model="relationMode"
- >
+ <v-switch v-if="useRelationLabeling" v-model="relationMode">
<template #label>
<span v-if="relationMode">Relation</span>
<span v-else>Span</span>
</template>
</v-switch>
- <v-chip-group
- v-model="selectedLabelIndex"
- column
- >
+ <v-chip-group v-model="selectedLabelIndex" column>
<v-chip
v-for="(item, index) in labelTypes"
:key="item.id"
@@ -97,7 +88,6 @@ import EntityEditor from '@/components/tasks/sequenceLabeling/EntityEditor.vue'
import AnnotationProgress from '@/components/tasks/sidebar/AnnotationProgress.vue'
export default {
-
components: {
AnnotationProgress,
EntityEditor,
@@ -125,7 +115,7 @@ export default {
rtl: false,
selectedLabelIndex: null,
progress: {},
- relationMode: false,
+ relationMode: false
}
},
@@ -148,7 +138,7 @@ export default {
...mapGetters('config', ['isRTL']),
shortKeys() {
- return Object.fromEntries(this.spanTypes.map(item => [item.id, [item.suffixKey]]))
+ return Object.fromEntries(this.spanTypes.map((item) => [item.id, [item.suffixKey]]))
},
projectId() {
@@ -206,17 +196,18 @@ export default {
methods: {
async maybeFetchSpanTypes(annotations) {
- const labelIds = new Set(this.spanTypes.map((label) => label.id));
+ const labelIds = new Set(this.spanTypes.map((label) => label.id))
if (annotations.some((item) => !labelIds.has(item.label))) {
- this.spanTypes = await this.$services.spanType.list(this.projectId);
+ this.spanTypes = await this.$services.spanType.list(this.projectId)
}
},
async list(docId) {
const annotations = await this.$services.sequenceLabeling.list(this.projectId, docId)
const relations = await this.$services.sequenceLabeling.listRelations(this.projectId, docId)
- // In colab mode, if someone add a new label and annotate data with the label during your work,
- // it occurs exception because there is no corresponding label.
+ // In colab mode, if someone add a new label and annotate data
+ // with the label during your work, it occurs exception
+ // because there is no corresponding label.
await this.maybeFetchSpanTypes(annotations)
this.annotations = annotations
this.relations = relations
@@ -228,22 +219,44 @@ export default {
},
async addSpan(startOffset, endOffset, labelId) {
- await this.$services.sequenceLabeling.create(this.projectId, this.doc.id, labelId, startOffset, endOffset)
+ await this.$services.sequenceLabeling.create(
+ this.projectId,
+ this.doc.id,
+ labelId,
+ startOffset,
+ endOffset
+ )
await this.list(this.doc.id)
},
async updateSpan(annotationId, labelId) {
- await this.$services.sequenceLabeling.changeLabel(this.projectId, this.doc.id, annotationId, labelId)
+ await this.$services.sequenceLabeling.changeLabel(
+ this.projectId,
+ this.doc.id,
+ annotationId,
+ labelId
+ )
await this.list(this.doc.id)
},
async addRelation(fromId, toId, typeId) {
- await this.$services.sequenceLabeling.createRelation(this.projectId, this.doc.id, fromId, toId, typeId)
+ await this.$services.sequenceLabeling.createRelation(
+ this.projectId,
+ this.doc.id,
+ fromId,
+ toId,
+ typeId
+ )
await this.list(this.doc.id)
},
async updateRelation(relationId, typeId) {
- await this.$services.sequenceLabeling.updateRelation(this.projectId, this.doc.id, relationId, typeId)
+ await this.$services.sequenceLabeling.updateRelation(
+ this.projectId,
+ this.doc.id,
+ relationId,
+ typeId
+ )
await this.list(this.doc.id)
},
@@ -287,7 +300,7 @@ export default {
font-size: 1.25rem !important;
font-weight: 500;
line-height: 2rem;
- font-family: "Roboto", sans-serif !important;
+ font-family: 'Roboto', sans-serif !important;
opacity: 0.6;
}
</style>
diff --git a/frontend/pages/projects/_id/sequence-to-sequence/index.vue b/frontend/pages/projects/_id/sequence-to-sequence/index.vue
index 957a606e13..a68e4ad3b0 100644
--- a/frontend/pages/projects/_id/sequence-to-sequence/index.vue
+++ b/frontend/pages/projects/_id/sequence-to-sequence/index.vue
@@ -11,10 +11,7 @@
@click:clear-label="clear"
@click:review="confirm"
/>
- <toolbar-mobile
- :total="docs.count"
- class="d-flex d-sm-none"
- />
+ <toolbar-mobile :total="docs.count" class="d-flex d-sm-none" />
</template>
<template #content>
<v-card class="mb-5">
@@ -45,7 +42,6 @@ import AnnotationProgress from '@/components/tasks/sidebar/AnnotationProgress.vu
import Seq2seqBox from '~/components/tasks/seq2seq/Seq2seqBox'
export default {
-
components: {
AnnotationProgress,
LayoutText,
diff --git a/frontend/pages/projects/_id/settings/index.vue b/frontend/pages/projects/_id/settings/index.vue
index 00c909b5d3..b0bc352780 100644
--- a/frontend/pages/projects/_id/settings/index.vue
+++ b/frontend/pages/projects/_id/settings/index.vue
@@ -1,15 +1,9 @@
<template>
<v-card>
- <v-tabs
- v-model="tab"
- >
+ <v-tabs v-model="tab">
<v-tabs-slider color="primary" />
- <v-tab href="#tab-project" class="text-capitalize">
- Project
- </v-tab>
- <v-tab href="#tab-auto-labeling" class="text-capitalize">
- Auto Labeling
- </v-tab>
+ <v-tab href="#tab-project" class="text-capitalize"> Project </v-tab>
+ <v-tab href="#tab-auto-labeling" class="text-capitalize"> Auto Labeling </v-tab>
</v-tabs>
<v-divider />
@@ -30,7 +24,6 @@ import FormUpdate from '@/components/project/FormUpdate.vue'
import ConfigList from '@/components/configAutoLabeling/ConfigList.vue'
export default Vue.extend({
-
components: {
ConfigList,
FormUpdate
diff --git a/frontend/pages/projects/_id/speech-to-text/index.vue b/frontend/pages/projects/_id/speech-to-text/index.vue
index 0d5c3a418c..937326c919 100644
--- a/frontend/pages/projects/_id/speech-to-text/index.vue
+++ b/frontend/pages/projects/_id/speech-to-text/index.vue
@@ -11,22 +11,13 @@
@click:clear-label="clear"
@click:review="confirm"
/>
- <toolbar-mobile
- :total="items.count"
- class="d-flex d-sm-none"
- />
+ <toolbar-mobile :total="items.count" class="d-flex d-sm-none" />
</template>
<template #content>
<v-overlay :value="isLoading">
- <v-progress-circular
- indeterminate
- size="64"
- />
+ <v-progress-circular indeterminate size="64" />
</v-overlay>
- <audio-viewer
- :source="item.fileUrl"
- class="mb-5"
- />
+ <audio-viewer :source="item.fileUrl" class="mb-5" />
<seq2seq-box
:text="item.text"
:annotations="annotations"
@@ -53,7 +44,6 @@ import Seq2seqBox from '~/components/tasks/seq2seq/Seq2seqBox'
import AudioViewer from '~/components/tasks/audio/AudioViewer'
export default {
-
components: {
AnnotationProgress,
AudioViewer,
diff --git a/frontend/pages/projects/_id/text-classification/index.vue b/frontend/pages/projects/_id/text-classification/index.vue
index 81636cb9a2..b8bd49ed23 100644
--- a/frontend/pages/projects/_id/text-classification/index.vue
+++ b/frontend/pages/projects/_id/text-classification/index.vue
@@ -11,15 +11,9 @@
@click:clear-label="clearTeacherList(project.id, example.id)"
@click:review="confirm(project.id)"
>
- <button-label-switch
- class="ms-2"
- @change="labelComponent=$event"
- />
+ <button-label-switch class="ms-2" @change="labelComponent = $event" />
</toolbar-laptop>
- <toolbar-mobile
- :total="totalExample"
- class="d-flex d-sm-none"
- />
+ <toolbar-mobile :total="totalExample" class="d-flex d-sm-none" />
</template>
<template #content>
<v-card
@@ -37,11 +31,7 @@
/>
</v-card-title>
<v-divider />
- <v-card-text
- class="title highlight"
- style="white-space: pre-wrap;"
- v-text="example.text"
- />
+ <v-card-text class="title highlight" style="white-space: pre-wrap" v-text="example.text" />
</v-card>
</template>
<template #sidebar>
@@ -67,7 +57,6 @@ import { useTeacherList } from '@/composables/useTeacherList'
import AnnotationProgress from '@/components/tasks/sidebar/AnnotationProgress.vue'
export default {
-
components: {
AnnotationProgress,
ButtonLabelSwitch,
@@ -106,15 +95,12 @@ export default {
getProjectById(projectId)
updateProgress(projectId)
- const { fetch } = useFetch(async() => {
- await getExample(
- projectId,
- query.value
- )
+ const { fetch } = useFetch(async () => {
+ await getExample(projectId, query.value)
if (enableAutoLabeling.value) {
try {
await autoLabel(projectId, exampleState.example.id)
- } catch(e) {
+ } catch (e) {
enableAutoLabeling.value = false
alert(e.response.data.detail)
}
@@ -136,7 +122,7 @@ export default {
enableAutoLabeling,
labelComponent,
removeTeacher,
- shortKeys,
+ shortKeys
}
}
}
diff --git a/frontend/pages/projects/create.vue b/frontend/pages/projects/create.vue
index e8c6a0ab55..0bda529cba 100644
--- a/frontend/pages/projects/create.vue
+++ b/frontend/pages/projects/create.vue
@@ -1,8 +1,5 @@
<template>
- <form-create
- v-bind.sync="editedItem"
- @save="create"
- />
+ <form-create v-bind.sync="editedItem" @save="create" />
</template>
<script lang="ts">
@@ -12,7 +9,7 @@ import { ProjectWriteDTO } from '~/services/application/project/projectData'
export default Vue.extend({
components: {
- FormCreate,
+ FormCreate
},
layout: 'projects',
@@ -31,7 +28,7 @@ export default Vue.extend({
allowOverlapping: false,
graphemeMode: false,
useRelation: false,
- tags: [] as string[],
+ tags: [] as string[]
} as ProjectWriteDTO,
defaultItem: {
name: '',
@@ -43,8 +40,8 @@ export default Vue.extend({
allowOverlapping: false,
graphemeMode: false,
useRelation: false,
- tags: [] as string[],
- } as ProjectWriteDTO,
+ tags: [] as string[]
+ } as ProjectWriteDTO
}
},
@@ -55,7 +52,7 @@ export default Vue.extend({
this.$nextTick(() => {
this.editedItem = Object.assign({}, this.defaultItem)
})
- },
+ }
}
})
</script>
diff --git a/frontend/pages/projects/index.vue b/frontend/pages/projects/index.vue
index 4c21ceb594..0bccdd321e 100644
--- a/frontend/pages/projects/index.vue
+++ b/frontend/pages/projects/index.vue
@@ -1,27 +1,19 @@
<template>
<v-card>
<v-card-title v-if="isStaff">
- <v-btn
- class="text-capitalize"
- color="primary"
- @click.stop="$router.push('projects/create')"
- >
+ <v-btn class="text-capitalize" color="primary" @click.stop="$router.push('projects/create')">
{{ $t('generic.create') }}
</v-btn>
<v-btn
class="text-capitalize ms-2"
:disabled="!canDelete"
outlined
- @click.stop="dialogDelete=true"
+ @click.stop="dialogDelete = true"
>
{{ $t('generic.delete') }}
</v-btn>
<v-dialog v-model="dialogDelete">
- <form-delete
- :selected="selected"
- @cancel="dialogDelete=false"
- @remove="remove"
- />
+ <form-delete :selected="selected" @cancel="dialogDelete = false" @remove="remove" />
</v-dialog>
</v-card-title>
<project-list
@@ -43,10 +35,9 @@ import { ProjectDTO, ProjectListDTO } from '~/services/application/project/proje
import FormDelete from '~/components/project/FormDelete.vue'
export default Vue.extend({
-
components: {
FormDelete,
- ProjectList,
+ ProjectList
},
layout: 'projects',
@@ -71,15 +62,14 @@ export default Vue.extend({
...mapGetters('auth', ['isStaff']),
canDelete(): boolean {
return this.selected.length > 0
- },
+ }
},
watch: {
- '$route.query': _.debounce(function() {
- // @ts-ignore
- this.$fetch()
- }, 1000
- ),
+ '$route.query': _.debounce(function () {
+ // @ts-ignore
+ this.$fetch()
+ }, 1000)
},
methods: {
diff --git a/frontend/plugins/color.ts b/frontend/plugins/color.ts
index 05b91a1f5f..50f96ac2e2 100644
--- a/frontend/plugins/color.ts
+++ b/frontend/plugins/color.ts
@@ -7,10 +7,10 @@ declare module 'vue/types/vue' {
}
Vue.prototype.$contrastColor = (hexString: string) => {
- // W3c offers a formula for calculating ideal color:
+ // W3c offers a formula for calculating ideal color:
// https://www.w3.org/TR/AERT/#color-contrast
const r = parseInt(hexString.substr(1, 2), 16)
const g = parseInt(hexString.substr(3, 2), 16)
const b = parseInt(hexString.substr(5, 2), 16)
- return ((((r * 299) + (g * 587) + (b * 114)) / 1000) < 128) ? '#ffffff' : '#000000'
+ return (r * 299 + g * 587 + b * 114) / 1000 < 128 ? '#ffffff' : '#000000'
}
diff --git a/frontend/plugins/filters.js b/frontend/plugins/filters.js
index 6a52a76895..68766e080b 100644
--- a/frontend/plugins/filters.js
+++ b/frontend/plugins/filters.js
@@ -1,6 +1,6 @@
import Vue from 'vue'
-export const truncate = function(text, length, clamp) {
+export const truncate = function (text, length, clamp) {
text = text || ''
clamp = clamp || '...'
length = length || 30
diff --git a/frontend/plugins/role.ts b/frontend/plugins/role.ts
index db2c910848..333e3a6db7 100644
--- a/frontend/plugins/role.ts
+++ b/frontend/plugins/role.ts
@@ -7,9 +7,9 @@ declare module 'vue/types/vue' {
}
type RoleMapping = {
- projectAdmin: string,
- annotator: string,
- annotationApprover: string,
+ projectAdmin: string
+ annotator: string
+ annotationApprover: string
undefined: string
}
diff --git a/frontend/plugins/services.ts b/frontend/plugins/services.ts
index 9ceb191f70..b320b20a4a 100644
--- a/frontend/plugins/services.ts
+++ b/frontend/plugins/services.ts
@@ -13,7 +13,7 @@ import { APIUserRepository } from '~/repositories/user/apiUserRepository'
import { APIMetricsRepository } from '~/repositories/metrics/apiMetricsRepository'
import { APIRoleRepository } from '~/repositories/role/apiRoleRepository'
import { APIProjectRepository } from '~/repositories/project/apiProjectRepository'
-import { LocalStorageOptionRepository} from '~/repositories/option/apiOptionRepository'
+import { LocalStorageOptionRepository } from '~/repositories/option/apiOptionRepository'
import { APIMemberRepository } from '~/repositories/member/apiMemberRepository'
import { APILabelRepository } from '~/repositories/label/apiLabelRepository'
import { APIExampleRepository } from '~/repositories/example/apiDocumentRepository'
@@ -33,7 +33,7 @@ import { Seq2seqApplicationService } from '~/services/application/tasks/seq2seq/
import { ConfigApplicationService } from '~/services/application/autoLabeling/configApplicationService'
import { TemplateApplicationService } from '~/services/application/autoLabeling/templateApplicationService'
import { APITextClassificationRepository } from '~/repositories/tasks/textClassification/apiTextClassification'
-import { TextClassificationApplicationService } from '~/services/application/tasks/textClassification/textClassificationApplicationService'
+import { TextClassificationService } from '~/services/application/tasks/textClassification/textClassificationApplicationService'
import { AuthApplicationService } from '~/services/application/auth/authApplicationService'
import { APIDownloadFormatRepository } from '~/repositories/download/apiDownloadFormatRepository'
import { APIDownloadRepository } from '~/repositories/download/apiDownloadRepository'
@@ -41,32 +41,32 @@ import { DownloadApplicationService } from '~/services/application/download/down
import { DownloadFormatApplicationService } from '~/services/application/download/downloadFormatApplicationService'
import { APITagRepository } from '~/repositories/tag/apiTagRepository'
import { TagApplicationService } from '~/services/application/tag/tagApplicationService'
-import { ApiRelationRepository } from "~/repositories/tasks/sequenceLabeling/apiRelationRepository"
+import { ApiRelationRepository } from '~/repositories/tasks/sequenceLabeling/apiRelationRepository'
export interface Services {
- categoryType: LabelApplicationService,
- spanType: LabelApplicationService,
- relationType: LabelApplicationService,
- member: MemberApplicationService,
- user: UserApplicationService,
- role: RoleApplicationService,
- project: ProjectApplicationService,
- comment: CommentApplicationService,
- metrics: MetricsApplicationService,
- example: ExampleApplicationService,
- textClassification: TextClassificationApplicationService,
- sequenceLabeling: SequenceLabelingApplicationService,
- seq2seq: Seq2seqApplicationService,
- option: OptionApplicationService,
- config: ConfigApplicationService,
- template: TemplateApplicationService,
- auth: AuthApplicationService,
- catalog: CatalogApplicationService,
- parse: ParseApplicationService,
- taskStatus: TaskStatusApplicationService,
- downloadFormat: DownloadFormatApplicationService,
- download: DownloadApplicationService,
- tag: TagApplicationService,
+ categoryType: LabelApplicationService
+ spanType: LabelApplicationService
+ relationType: LabelApplicationService
+ member: MemberApplicationService
+ user: UserApplicationService
+ role: RoleApplicationService
+ project: ProjectApplicationService
+ comment: CommentApplicationService
+ metrics: MetricsApplicationService
+ example: ExampleApplicationService
+ textClassification: TextClassificationService
+ sequenceLabeling: SequenceLabelingApplicationService
+ seq2seq: Seq2seqApplicationService
+ option: OptionApplicationService
+ config: ConfigApplicationService
+ template: TemplateApplicationService
+ auth: AuthApplicationService
+ catalog: CatalogApplicationService
+ parse: ParseApplicationService
+ taskStatus: TaskStatusApplicationService
+ downloadFormat: DownloadFormatApplicationService
+ download: DownloadApplicationService
+ tag: TagApplicationService
}
declare module 'vue/types/vue' {
@@ -76,21 +76,21 @@ declare module 'vue/types/vue' {
}
const plugin: Plugin = (_, inject) => {
- const memberRepository = new APIMemberRepository()
- const userRepository = new APIUserRepository()
- const roleRepository = new APIRoleRepository()
- const projectRepository = new APIProjectRepository()
- const commentRepository = new APICommentRepository()
+ const memberRepository = new APIMemberRepository()
+ const userRepository = new APIUserRepository()
+ const roleRepository = new APIRoleRepository()
+ const projectRepository = new APIProjectRepository()
+ const commentRepository = new APICommentRepository()
const metricsRepository = new APIMetricsRepository()
- const exampleRepository = new APIExampleRepository()
+ const exampleRepository = new APIExampleRepository()
const textClassificationRepository = new APITextClassificationRepository()
- const sequenceLabelingRepository = new APISequenceLabelingRepository()
+ const sequenceLabelingRepository = new APISequenceLabelingRepository()
const linkRepository = new ApiRelationRepository()
const seq2seqRepository = new APISeq2seqRepository()
- const optionRepository = new LocalStorageOptionRepository()
- const configRepository = new APIConfigRepository()
+ const optionRepository = new LocalStorageOptionRepository()
+ const configRepository = new APIConfigRepository()
const tagRepository = new APITagRepository()
- const templateRepository = new APITemplateRepository()
+ const templateRepository = new APITemplateRepository()
const authRepository = new APIAuthRepository()
const catalogRepository = new APICatalogRepository()
const parseRepository = new APIParseRepository()
@@ -98,18 +98,21 @@ const plugin: Plugin = (_, inject) => {
const downloadFormatRepository = new APIDownloadFormatRepository()
const downloadRepository = new APIDownloadRepository()
- const categoryType = new LabelApplicationService(new APILabelRepository('category-type'))
- const spanType = new LabelApplicationService(new APILabelRepository('span-type'))
- const relationType = new LabelApplicationService(new APILabelRepository('relation-type'))
- const member = new MemberApplicationService(memberRepository)
- const user = new UserApplicationService(userRepository)
- const role = new RoleApplicationService(roleRepository)
- const project = new ProjectApplicationService(projectRepository)
- const comment = new CommentApplicationService(commentRepository)
+ const categoryType = new LabelApplicationService(new APILabelRepository('category-type'))
+ const spanType = new LabelApplicationService(new APILabelRepository('span-type'))
+ const relationType = new LabelApplicationService(new APILabelRepository('relation-type'))
+ const member = new MemberApplicationService(memberRepository)
+ const user = new UserApplicationService(userRepository)
+ const role = new RoleApplicationService(roleRepository)
+ const project = new ProjectApplicationService(projectRepository)
+ const comment = new CommentApplicationService(commentRepository)
const metrics = new MetricsApplicationService(metricsRepository)
- const example = new ExampleApplicationService(exampleRepository)
- const textClassification = new TextClassificationApplicationService(textClassificationRepository)
- const sequenceLabeling = new SequenceLabelingApplicationService(sequenceLabelingRepository, linkRepository)
+ const example = new ExampleApplicationService(exampleRepository)
+ const textClassification = new TextClassificationService(textClassificationRepository)
+ const sequenceLabeling = new SequenceLabelingApplicationService(
+ sequenceLabelingRepository,
+ linkRepository
+ )
const seq2seq = new Seq2seqApplicationService(seq2seqRepository)
const option = new OptionApplicationService(optionRepository)
const config = new ConfigApplicationService(configRepository)
@@ -121,7 +124,7 @@ const plugin: Plugin = (_, inject) => {
const taskStatus = new TaskStatusApplicationService(taskStatusRepository)
const downloadFormat = new DownloadFormatApplicationService(downloadFormatRepository)
const download = new DownloadApplicationService(downloadRepository)
-
+
const services: Services = {
categoryType,
spanType,
@@ -145,7 +148,7 @@ const plugin: Plugin = (_, inject) => {
taskStatus,
downloadFormat,
download,
- tag,
+ tag
}
inject('services', services)
}
diff --git a/frontend/plugins/utils.js b/frontend/plugins/utils.js
index fced83963c..1b9f85cc06 100644
--- a/frontend/plugins/utils.js
+++ b/frontend/plugins/utils.js
@@ -1,20 +1,20 @@
-export const idealColor = function(hexString) {
+export const idealColor = function (hexString) {
// W3c offers a formula for calculating ideal color:
// https://www.w3.org/TR/AERT/#color-contrast
const r = parseInt(hexString.substr(1, 2), 16)
const g = parseInt(hexString.substr(3, 2), 16)
const b = parseInt(hexString.substr(5, 2), 16)
- return ((((r * 299) + (g * 587) + (b * 114)) / 1000) < 128) ? '#ffffff' : '#000000'
+ return (r * 299 + g * 587 + b * 114) / 1000 < 128 ? '#ffffff' : '#000000'
}
-export const translatedRoles = function(roles, mappings) {
+export const translatedRoles = function (roles, mappings) {
roles.forEach((role) => {
role.translatedName = translateRole(role.name, mappings)
})
return roles
}
-export const translateRole = function(role, mappings) {
+export const translateRole = function (role, mappings) {
if (role === 'project_admin') {
return mappings.projectAdmin
} else if (role === 'annotator') {
diff --git a/frontend/repositories/auth/apiAuthRepository.ts b/frontend/repositories/auth/apiAuthRepository.ts
index 440a57b08b..4a94ab95ef 100644
--- a/frontend/repositories/auth/apiAuthRepository.ts
+++ b/frontend/repositories/auth/apiAuthRepository.ts
@@ -2,9 +2,7 @@ import ApiService from '@/services/api.service'
import { AuthRepository } from '@/domain/models/auth/authRepository'
export class APIAuthRepository implements AuthRepository {
- constructor(
- private readonly request = ApiService
- ) {}
+ constructor(private readonly request = ApiService) {}
async login(username: string, password: string): Promise<void> {
const url = `/auth/login/`
diff --git a/frontend/repositories/autoLabeling/config/apiConfigRepository.ts b/frontend/repositories/autoLabeling/config/apiConfigRepository.ts
index 1342addc23..7fe347eca0 100644
--- a/frontend/repositories/autoLabeling/config/apiConfigRepository.ts
+++ b/frontend/repositories/autoLabeling/config/apiConfigRepository.ts
@@ -3,26 +3,22 @@ import { ConfigRepository, ConfigTestResponse } from '~/domain/models/autoLabeli
import { ConfigItemList, ConfigItem } from '~/domain/models/autoLabeling/config'
export interface ConfigItemResponse {
- id: number,
- model_name: string,
- model_attrs: object,
- template: string,
- label_mapping: object,
- task_type: string,
+ id: number
+ model_name: string
+ model_attrs: object
+ template: string
+ label_mapping: object
+ task_type: string
}
export class APIConfigRepository implements ConfigRepository {
- constructor(
- private readonly request = ApiService
- ) {}
+ constructor(private readonly request = ApiService) {}
async list(projectId: string): Promise<ConfigItemList> {
const url = `/projects/${projectId}/auto-labeling/configs`
const response = await this.request.get(url)
const responseItems: ConfigItemResponse[] = response.data
- return ConfigItemList.valueOf(
- responseItems.map(item => ConfigItem.valueOf(item))
- )
+ return ConfigItemList.valueOf(responseItems.map((item) => ConfigItem.valueOf(item)))
}
async create(projectId: string, item: ConfigItem): Promise<ConfigItem> {
@@ -52,22 +48,36 @@ export class APIConfigRepository implements ConfigRepository {
async testParameters(projectId: string, item: ConfigItem, text: string) {
const url = `/projects/${projectId}/auto-labeling/request-testing`
- const response = await this.request.post(url, {...item.toAPI(), text})
+ const response = await this.request.post(url, { ...item.toAPI(), text })
const responseItem: ConfigTestResponse = response.data
return responseItem
}
- async testTemplate(projectId: string, response: any, item: ConfigItem): Promise<ConfigTestResponse> {
+ async testTemplate(
+ projectId: string,
+ response: any,
+ item: ConfigItem
+ ): Promise<ConfigTestResponse> {
console.log(projectId)
const url = `/projects/${projectId}/auto-labeling/label-extractor-testing`
- const _response = await this.request.post(url, { response, ...item.toAPI() })
+ const _response = await this.request.post(url, {
+ response,
+ ...item.toAPI()
+ })
const responseItem: ConfigTestResponse = _response.data
return responseItem
}
- async testMapping(projectId: string, item: ConfigItem, response: any): Promise<ConfigTestResponse> {
+ async testMapping(
+ projectId: string,
+ item: ConfigItem,
+ response: any
+ ): Promise<ConfigTestResponse> {
const url = `/projects/${projectId}/auto-labeling/label-mapper-testing`
- const _response = await this.request.post(url, {...item.toAPI(), response})
+ const _response = await this.request.post(url, {
+ ...item.toAPI(),
+ response
+ })
const responseItem: ConfigTestResponse = _response.data
return responseItem
}
diff --git a/frontend/repositories/autoLabeling/template/apiTemplateRepository.ts b/frontend/repositories/autoLabeling/template/apiTemplateRepository.ts
index 15b3c84a23..1a53a35da6 100644
--- a/frontend/repositories/autoLabeling/template/apiTemplateRepository.ts
+++ b/frontend/repositories/autoLabeling/template/apiTemplateRepository.ts
@@ -3,9 +3,7 @@ import { TemplateRepository } from '~/domain/models/autoLabeling/templateReposit
import { ConfigTemplateItem, ConfigResponse } from '~/domain/models/autoLabeling/template'
export class APITemplateRepository implements TemplateRepository {
- constructor(
- private readonly request = ApiService
- ) {}
+ constructor(private readonly request = ApiService) {}
async list(projectId: string, taskName: string): Promise<string[]> {
const url = `/projects/${projectId}/auto-labeling/templates?task_name=${taskName}`
diff --git a/frontend/repositories/celery/apiTaskStatusRepository.ts b/frontend/repositories/celery/apiTaskStatusRepository.ts
index 9ded859264..378f86f35e 100644
--- a/frontend/repositories/celery/apiTaskStatusRepository.ts
+++ b/frontend/repositories/celery/apiTaskStatusRepository.ts
@@ -4,9 +4,7 @@ import { TaskStatusRepository } from '@/domain/models/celery/taskStatusRepositor
import { Status } from '@/domain/models/celery/status'
export class APITaskStatusRepository implements TaskStatusRepository {
- constructor(
- private readonly request = ApiService
- ) {}
+ constructor(private readonly request = ApiService) {}
async get(taskId: string): Promise<Status> {
const url = `/tasks/status/${taskId}`
diff --git a/frontend/repositories/comment/apiCommentRepository.ts b/frontend/repositories/comment/apiCommentRepository.ts
index 8b21b87178..db04ad4e31 100644
--- a/frontend/repositories/comment/apiCommentRepository.ts
+++ b/frontend/repositories/comment/apiCommentRepository.ts
@@ -3,13 +3,13 @@ import ApiService from '@/services/api.service'
import { CommentRepository, SearchOption } from '@/domain/models/comment/commentRepository'
import { CommentItem, CommentItemList } from '~/domain/models/comment/comment'
-
export class APICommentRepository implements CommentRepository {
- constructor(
- private readonly request = ApiService
- ) {}
+ constructor(private readonly request = ApiService) {}
- async listAll(projectId: string, { limit = '10', offset = '0', q = '' }: SearchOption): Promise<CommentItemList> {
+ async listAll(
+ projectId: string,
+ { limit = '10', offset = '0', q = '' }: SearchOption
+ ): Promise<CommentItemList> {
const url = `/projects/${projectId}/comments?q=${q}&limit=${limit}&offset=${offset}`
const response = await this.request.get(url)
return plainToInstance(CommentItemList, response.data)
@@ -23,7 +23,11 @@ export class APICommentRepository implements CommentRepository {
async create(projectId: string, exampleId: number, text: string): Promise<CommentItem> {
const url = `/projects/${projectId}/comments?example=${exampleId}`
- const response = await this.request.post(url, { projectId, exampleId, text })
+ const response = await this.request.post(url, {
+ projectId,
+ exampleId,
+ text
+ })
return plainToInstance(CommentItem, response.data)
}
diff --git a/frontend/repositories/download/apiDownloadFormatRepository.ts b/frontend/repositories/download/apiDownloadFormatRepository.ts
index 804330a0e1..13e5e1e163 100644
--- a/frontend/repositories/download/apiDownloadFormatRepository.ts
+++ b/frontend/repositories/download/apiDownloadFormatRepository.ts
@@ -4,9 +4,7 @@ import { DownloadFormatRepository } from '@/domain/models/download/downloadForma
import { Format } from '~/domain/models/download/format'
export class APIDownloadFormatRepository implements DownloadFormatRepository {
- constructor(
- private readonly request = ApiService
- ) {}
+ constructor(private readonly request = ApiService) {}
async list(projectId: string): Promise<Format[]> {
const url = `/projects/${projectId}/download-format`
diff --git a/frontend/repositories/download/apiDownloadRepository.ts b/frontend/repositories/download/apiDownloadRepository.ts
index 2938ce18e9..a18271103f 100644
--- a/frontend/repositories/download/apiDownloadRepository.ts
+++ b/frontend/repositories/download/apiDownloadRepository.ts
@@ -2,15 +2,13 @@ import ApiService from '@/services/api.service'
import { DownloadRepository } from '@/domain/models/download/downloadRepository'
export class APIDownloadRepository implements DownloadRepository {
- constructor(
- private readonly request = ApiService
- ) {}
+ constructor(private readonly request = ApiService) {}
async prepare(projectId: string, format: string, exportApproved: boolean): Promise<string> {
const url = `/projects/${projectId}/download`
const data = {
format,
- exportApproved,
+ exportApproved
}
const response = await this.request.post(url, data)
return response.data.task_id
@@ -19,7 +17,7 @@ export class APIDownloadRepository implements DownloadRepository {
async download(projectId: string, taskId: string): Promise<void> {
const url = `/projects/${projectId}/download?taskId=${taskId}`
const config = {
- responseType: 'blob',
+ responseType: 'blob'
}
const response = await this.request.get(url, config)
const downloadUrl = window.URL.createObjectURL(new Blob([response.data]))
diff --git a/frontend/repositories/example/apiDocumentRepository.ts b/frontend/repositories/example/apiDocumentRepository.ts
index df8a3f5805..08af0992e7 100644
--- a/frontend/repositories/example/apiDocumentRepository.ts
+++ b/frontend/repositories/example/apiDocumentRepository.ts
@@ -4,11 +4,12 @@ import { ExampleRepository, SearchOption } from '~/domain/models/example/example
import { ExampleItem, ExampleItemList } from '~/domain/models/example/example'
export class APIExampleRepository implements ExampleRepository {
- constructor(
- private readonly request = ApiService
- ) {}
+ constructor(private readonly request = ApiService) {}
- async list(projectId: string, { limit = '10', offset = '0', q = '', isChecked = '' }: SearchOption): Promise<ExampleItemList> {
+ async list(
+ projectId: string,
+ { limit = '10', offset = '0', q = '', isChecked = '' }: SearchOption
+ ): Promise<ExampleItemList> {
const url = `/projects/${projectId}/examples?limit=${limit}&offset=${offset}&q=${q}&confirmed=${isChecked}`
const response = await this.request.get(url)
return plainToInstance(ExampleItemList, response.data)
diff --git a/frontend/repositories/label/apiLabelRepository.ts b/frontend/repositories/label/apiLabelRepository.ts
index ff35e98242..858040ef8e 100644
--- a/frontend/repositories/label/apiLabelRepository.ts
+++ b/frontend/repositories/label/apiLabelRepository.ts
@@ -4,19 +4,16 @@ import { LabelRepository } from '~/domain/models/label/labelRepository'
import { LabelItem } from '~/domain/models/label/label'
export interface LabelItemResponse {
- id: number,
- text: string,
- prefix_key: string,
- suffix_key: string,
- background_color: string,
+ id: number
+ text: string
+ prefix_key: string
+ suffix_key: string
+ background_color: string
text_color: string
}
export class APILabelRepository implements LabelRepository {
- constructor(
- private readonly baseUrl = 'label',
- private readonly request = ApiService
- ) {}
+ constructor(private readonly baseUrl = 'label', private readonly request = ApiService) {}
async list(projectId: string): Promise<LabelItem[]> {
const url = `/projects/${projectId}/${this.baseUrl}s`
@@ -56,7 +53,7 @@ export class APILabelRepository implements LabelRepository {
}
try {
await this.request.post(url, payload, config)
- } catch(e: any) {
+ } catch (e: any) {
const data = e.response.data
if ('detail' in data) {
throw new Error(data.detail)
diff --git a/frontend/repositories/member/apiMemberRepository.ts b/frontend/repositories/member/apiMemberRepository.ts
index bb8da9e2cc..c64a66c12f 100644
--- a/frontend/repositories/member/apiMemberRepository.ts
+++ b/frontend/repositories/member/apiMemberRepository.ts
@@ -4,9 +4,7 @@ import { MemberRepository } from '@/domain/models/member/memberRepository'
import { MemberItem } from '~/domain/models/member/member'
export class APIMemberRepository implements MemberRepository {
- constructor(
- private readonly request = ApiService
- ) {}
+ constructor(private readonly request = ApiService) {}
async list(projectId: string): Promise<MemberItem[]> {
const url = `/projects/${projectId}/members`
diff --git a/frontend/repositories/metrics/apiMetricsRepository.ts b/frontend/repositories/metrics/apiMetricsRepository.ts
index 2d2545ab2e..71243fddc7 100644
--- a/frontend/repositories/metrics/apiMetricsRepository.ts
+++ b/frontend/repositories/metrics/apiMetricsRepository.ts
@@ -3,9 +3,7 @@ import { MetricsRepository } from '@/domain/models/metrics/metricsRepository'
import { Distribution, Progress, MyProgress } from '~/domain/models/metrics/metrics'
export class APIMetricsRepository implements MetricsRepository {
- constructor(
- private readonly request = ApiService
- ) {}
+ constructor(private readonly request = ApiService) {}
async fetchCategoryDistribution(projectId: string): Promise<Distribution> {
const url = `/projects/${projectId}/metrics/category-distribution`
diff --git a/frontend/repositories/option/apiOptionRepository.ts b/frontend/repositories/option/apiOptionRepository.ts
index 4eabac2dd3..c5eafb50dc 100644
--- a/frontend/repositories/option/apiOptionRepository.ts
+++ b/frontend/repositories/option/apiOptionRepository.ts
@@ -2,7 +2,6 @@ import { OptionRepository } from '../../domain/models/option/optionRepository'
import { OptionItem } from '~/domain/models/option/option'
export class LocalStorageOptionRepository implements OptionRepository {
-
findById(projectId: string): OptionItem {
const checkpoint = this.loadCheckpoint()
return OptionItem.valueOf(checkpoint[projectId] ? checkpoint[projectId] : { page: 1 })
diff --git a/frontend/repositories/project/apiProjectRepository.ts b/frontend/repositories/project/apiProjectRepository.ts
index 848c60c9d2..48134434b7 100644
--- a/frontend/repositories/project/apiProjectRepository.ts
+++ b/frontend/repositories/project/apiProjectRepository.ts
@@ -3,11 +3,8 @@ import ApiService from '@/services/api.service'
import { ProjectRepository, SearchOption } from '@/domain/models/project/projectRepository'
import { ProjectReadItem, ProjectWriteItem, ProjectItemList } from '~/domain/models/project/project'
-
export class APIProjectRepository implements ProjectRepository {
- constructor(
- private readonly request = ApiService
- ) {}
+ constructor(private readonly request = ApiService) {}
async list({ limit = '10', offset = '0', q = '' }: SearchOption): Promise<ProjectItemList> {
const url = `/projects?limit=${limit}&offset=${offset}&q=${q}`
diff --git a/frontend/repositories/role/apiRoleRepository.ts b/frontend/repositories/role/apiRoleRepository.ts
index e7ca6961c7..ee078ca897 100644
--- a/frontend/repositories/role/apiRoleRepository.ts
+++ b/frontend/repositories/role/apiRoleRepository.ts
@@ -4,9 +4,7 @@ import { RoleRepository } from '../../domain/models/role/roleRepository'
import { RoleItem } from '~/domain/models/role/role'
export class APIRoleRepository implements RoleRepository {
- constructor(
- private readonly request = ApiService
- ) {}
+ constructor(private readonly request = ApiService) {}
async list(): Promise<RoleItem[]> {
const url = `/roles`
diff --git a/frontend/repositories/tag/apiTagRepository.ts b/frontend/repositories/tag/apiTagRepository.ts
index e99b209c4e..58ac8e4ecc 100644
--- a/frontend/repositories/tag/apiTagRepository.ts
+++ b/frontend/repositories/tag/apiTagRepository.ts
@@ -4,9 +4,7 @@ import { TagRepository } from '~/domain/models/tag/tagRepository'
import { TagItem } from '~/domain/models/tag/tag'
export class APITagRepository implements TagRepository {
- constructor(
- private readonly request = ApiService
- ) {}
+ constructor(private readonly request = ApiService) {}
async list(projectId: string): Promise<TagItem[]> {
const url = `/projects/${projectId}/tags`
diff --git a/frontend/repositories/tasks/seq2seq/apiSeq2seq.ts b/frontend/repositories/tasks/seq2seq/apiSeq2seq.ts
index eea4e162c6..e337c41e87 100644
--- a/frontend/repositories/tasks/seq2seq/apiSeq2seq.ts
+++ b/frontend/repositories/tasks/seq2seq/apiSeq2seq.ts
@@ -1,7 +1,6 @@
import { AnnotationRepository } from '@/domain/models/tasks/annotationRepository'
import { Seq2seqLabel } from '~/domain/models/tasks/seq2seq'
-
export class APISeq2seqRepository extends AnnotationRepository<Seq2seqLabel> {
constructor() {
super(Seq2seqLabel)
diff --git a/frontend/repositories/tasks/sequenceLabeling/apiRelationRepository.ts b/frontend/repositories/tasks/sequenceLabeling/apiRelationRepository.ts
index bfa605498e..2be17e3a2c 100644
--- a/frontend/repositories/tasks/sequenceLabeling/apiRelationRepository.ts
+++ b/frontend/repositories/tasks/sequenceLabeling/apiRelationRepository.ts
@@ -1,38 +1,40 @@
import ApiService from '@/services/api.service'
-import { RelationRepository } from "~/domain/models/tasks/relationRepository"
-import { RelationItem } from "~/domain/models/tasks/relation"
+import { RelationRepository } from '~/domain/models/tasks/relationRepository'
+import { RelationItem } from '~/domain/models/tasks/relation'
export class ApiRelationRepository implements RelationRepository {
- constructor(
- private readonly request = ApiService
- ) {
- }
+ constructor(private readonly request = ApiService) {}
- async list(projectId: string, exampleId: number): Promise<RelationItem[]> {
- const url = `/projects/${projectId}/examples/${exampleId}/relations`
- const response = await this.request.get(url)
- return response.data.map((relation: any) => RelationItem.valueOf(relation))
- }
+ async list(projectId: string, exampleId: number): Promise<RelationItem[]> {
+ const url = `/projects/${projectId}/examples/${exampleId}/relations`
+ const response = await this.request.get(url)
+ return response.data.map((relation: any) => RelationItem.valueOf(relation))
+ }
- async create(projectId: string, exampleId: number, item: RelationItem): Promise<RelationItem> {
- const url = `/projects/${projectId}/examples/${exampleId}/relations`
- const response = await this.request.post(url, item.toObject())
- return RelationItem.valueOf(response.data)
- }
+ async create(projectId: string, exampleId: number, item: RelationItem): Promise<RelationItem> {
+ const url = `/projects/${projectId}/examples/${exampleId}/relations`
+ const response = await this.request.post(url, item.toObject())
+ return RelationItem.valueOf(response.data)
+ }
- async update(projectId: string, exampleId: number, relationId: number, relationType: number): Promise<RelationItem> {
- const url = `/projects/${projectId}/examples/${exampleId}/relations/${relationId}`
- const response = await this.request.patch(url, {type: relationType})
- return RelationItem.valueOf(response.data)
- }
+ async update(
+ projectId: string,
+ exampleId: number,
+ relationId: number,
+ relationType: number
+ ): Promise<RelationItem> {
+ const url = `/projects/${projectId}/examples/${exampleId}/relations/${relationId}`
+ const response = await this.request.patch(url, { type: relationType })
+ return RelationItem.valueOf(response.data)
+ }
- async delete(projectId: string, exampleId: number, relationId: number): Promise<void> {
- const url = `/projects/${projectId}/examples/${exampleId}/relations/${relationId}`
- await this.request.delete(url)
- }
+ async delete(projectId: string, exampleId: number, relationId: number): Promise<void> {
+ const url = `/projects/${projectId}/examples/${exampleId}/relations/${relationId}`
+ await this.request.delete(url)
+ }
- async bulkDelete(projectId: string, exampleId: number, relationIds: number[]): Promise<void> {
- const url = `/projects/${projectId}/examples/${exampleId}/relations`
- await this.request.delete(url, {ids: relationIds})
- }
+ async bulkDelete(projectId: string, exampleId: number, relationIds: number[]): Promise<void> {
+ const url = `/projects/${projectId}/examples/${exampleId}/relations`
+ await this.request.delete(url, { ids: relationIds })
+ }
}
diff --git a/frontend/repositories/tasks/sequenceLabeling/apiSequenceLabeling.ts b/frontend/repositories/tasks/sequenceLabeling/apiSequenceLabeling.ts
index 23ee3cac76..85436677db 100644
--- a/frontend/repositories/tasks/sequenceLabeling/apiSequenceLabeling.ts
+++ b/frontend/repositories/tasks/sequenceLabeling/apiSequenceLabeling.ts
@@ -1,7 +1,6 @@
import { AnnotationRepository } from '@/domain/models/tasks/annotationRepository'
import { Span } from '~/domain/models/tasks/sequenceLabeling'
-
export class APISequenceLabelingRepository extends AnnotationRepository<Span> {
constructor() {
super(Span)
diff --git a/frontend/repositories/tasks/textClassification/apiTextClassification.ts b/frontend/repositories/tasks/textClassification/apiTextClassification.ts
index b70ef886f6..a920be67f0 100644
--- a/frontend/repositories/tasks/textClassification/apiTextClassification.ts
+++ b/frontend/repositories/tasks/textClassification/apiTextClassification.ts
@@ -1,10 +1,9 @@
import { AnnotationRepository } from '@/domain/models/tasks/annotationRepository'
-import { TextClassificationItem } from '~/domain/models/tasks/textClassification'
+import { CategoryItem } from '~/domain/models/tasks/textClassification'
-
-export class APITextClassificationRepository extends AnnotationRepository<TextClassificationItem> {
+export class APITextClassificationRepository extends AnnotationRepository<CategoryItem> {
constructor() {
- super(TextClassificationItem)
+ super(CategoryItem)
}
protected baseUrl(projectId: string, docId: number): string {
diff --git a/frontend/repositories/upload/apiCatalogRepository.ts b/frontend/repositories/upload/apiCatalogRepository.ts
index bb9d3ad2de..8f3f42bb86 100644
--- a/frontend/repositories/upload/apiCatalogRepository.ts
+++ b/frontend/repositories/upload/apiCatalogRepository.ts
@@ -4,9 +4,7 @@ import { CatalogRepository } from '@/domain/models/upload/catalogRepository'
import { Catalog } from '~/domain/models/upload/catalog'
export class APICatalogRepository implements CatalogRepository {
- constructor(
- private readonly request = ApiService
- ) {}
+ constructor(private readonly request = ApiService) {}
async list(projectId: string): Promise<Catalog[]> {
const url = `/projects/${projectId}/catalog`
diff --git a/frontend/repositories/upload/apiParseRepository.ts b/frontend/repositories/upload/apiParseRepository.ts
index d60ddcad8e..b019e72084 100644
--- a/frontend/repositories/upload/apiParseRepository.ts
+++ b/frontend/repositories/upload/apiParseRepository.ts
@@ -2,11 +2,15 @@ import ApiService from '@/services/api.service'
import { ParseRepository } from '@/domain/models/upload/parseRepository'
export class APIParseRepository implements ParseRepository {
- constructor(
- private readonly request = ApiService
- ) {}
+ constructor(private readonly request = ApiService) {}
- async analyze(projectId: string, format: string, task: string, uploadIds: number[], option: object): Promise<string> {
+ async analyze(
+ projectId: string,
+ format: string,
+ task: string,
+ uploadIds: number[],
+ option: object
+ ): Promise<string> {
const url = `/projects/${projectId}/upload`
const data = {
format,
diff --git a/frontend/repositories/user/apiUserRepository.ts b/frontend/repositories/user/apiUserRepository.ts
index 1756ef89d5..3636b9177e 100644
--- a/frontend/repositories/user/apiUserRepository.ts
+++ b/frontend/repositories/user/apiUserRepository.ts
@@ -4,9 +4,7 @@ import { UserRepository } from '@/domain/models/user/userRepository'
import { UserItem } from '~/domain/models/user/user'
export class APIUserRepository implements UserRepository {
- constructor(
- private readonly request = ApiService
- ) {}
+ constructor(private readonly request = ApiService) {}
async getMe(): Promise<UserItem> {
const url = '/me'
diff --git a/frontend/rules/index.js b/frontend/rules/index.js
index 1469c58fe2..a9e5ba7242 100644
--- a/frontend/rules/index.js
+++ b/frontend/rules/index.js
@@ -1,83 +1,67 @@
// Rules for project label.
export const colorRules = (msg) => {
- return [
- v => !!v || msg.colorRequired
- ]
+ return [(v) => !!v || msg.colorRequired]
}
export const labelNameRules = (msg) => {
- return [
- v => !!v || msg.labelRequired,
- v => (v && v.length <= 30) || msg.labelLessThan30Chars
- ]
+ return [(v) => !!v || msg.labelRequired, (v) => (v && v.length <= 30) || msg.labelLessThan30Chars]
}
// Rules for project member.
export const userNameRules = (msg) => {
return [
- v => !!v || msg.userNameRequired,
- v => (v && v.length <= 30) || msg.userNameLessThan30Chars
+ (v) => !!v || msg.userNameRequired,
+ (v) => (v && v.length <= 30) || msg.userNameLessThan30Chars
]
}
export const roleRules = (msg) => {
- return [
- v => !!v || msg.roleRequired
- ]
+ return [(v) => !!v || msg.roleRequired]
}
// Rules for a project.
export const projectNameRules = (msg) => {
return [
- v => !!v || msg.projectNameRequired,
- v => (v && v.length <= 30) || msg.projectNameLessThan30Chars
+ (v) => !!v || msg.projectNameRequired,
+ (v) => (v && v.length <= 30) || msg.projectNameLessThan30Chars
]
}
export const descriptionRules = (msg) => {
return [
- v => !!v || msg.descriptionRequired,
- v => (v && v.length <= 100) || msg.descriptionLessThan30Chars
+ (v) => !!v || msg.descriptionRequired,
+ (v) => (v && v.length <= 100) || msg.descriptionLessThan30Chars
]
}
export const projectTypeRules = (msg) => {
- return [
- v => !!v || msg.projectTypeRequired
- ]
+ return [(v) => !!v || msg.projectTypeRequired]
}
// Rules for Document.
export const fileFormatRules = (msg) => {
- return [
- v => !!v || msg.fileFormatRequired
- ]
+ return [(v) => !!v || msg.fileFormatRequired]
}
export const uploadFileRules = (msg) => {
return [
- v => !!v || msg.fileRequired,
- v => !v || v.some(file => file.size < 100000000) || msg.fileLessThan1MB
+ (v) => !!v || msg.fileRequired,
+ (v) => !v || v.some((file) => file.size < 100000000) || msg.fileLessThan1MB
]
}
export const uploadSingleFileRules = (msg) => {
- return [
- v => !!v || msg.fileRequired,
- v => !v || v.size < 1000000 || msg.fileLessThan1MB
- ]
+ return [(v) => !!v || msg.fileRequired, (v) => !v || v.size < 1000000 || msg.fileLessThan1MB]
}
// Rules for user.
export const passwordRules = (msg) => {
return [
- v => !!v || msg.passwordRequired,
- v => (v && v.length <= 30) || msg.passwordLessThan30Chars
+ (v) => !!v || msg.passwordRequired,
+ (v) => (v && v.length <= 30) || msg.passwordLessThan30Chars
]
}
export const templateNameRules = () => {
- return [
- v => !!v || 'Name is required'
- ]
+ return [(v) => !!v || 'Name is required']
}
diff --git a/frontend/services/application/auth/authApplicationService.ts b/frontend/services/application/auth/authApplicationService.ts
index b3cbd64206..4010ad4fef 100644
--- a/frontend/services/application/auth/authApplicationService.ts
+++ b/frontend/services/application/auth/authApplicationService.ts
@@ -1,9 +1,7 @@
import { AuthRepository } from '~/domain/models/auth/authRepository'
export class AuthApplicationService {
- constructor(
- private readonly repository: AuthRepository
- ) {}
+ constructor(private readonly repository: AuthRepository) {}
public async login(username: string, password: string) {
await this.repository.login(username, password)
diff --git a/frontend/services/application/autoLabeling/configApplicationService.ts b/frontend/services/application/autoLabeling/configApplicationService.ts
index 9b74069658..5a4226570e 100644
--- a/frontend/services/application/autoLabeling/configApplicationService.ts
+++ b/frontend/services/application/autoLabeling/configApplicationService.ts
@@ -2,9 +2,7 @@ import { ConfigRepository } from '~/domain/models/autoLabeling/configRepository'
import { ConfigItemList, ConfigItem } from '~/domain/models/autoLabeling/config'
export class ConfigApplicationService {
- constructor(
- private readonly configRepository: ConfigRepository
- ) {}
+ constructor(private readonly configRepository: ConfigRepository) {}
public list(id: string): Promise<ConfigItemList> {
return this.configRepository.list(id)
@@ -19,36 +17,38 @@ export class ConfigApplicationService {
}
public testParameters(projectId: string, item: ConfigItem, text: string) {
- return this.configRepository.testParameters(projectId, item, text)
- .then((value) => {
- return value
- })
- .catch((error) => {
- const data = error.response.data
- throw new Error(data)
- })
+ return this.configRepository
+ .testParameters(projectId, item, text)
+ .then((value) => {
+ return value
+ })
+ .catch((error) => {
+ const data = error.response.data
+ throw new Error(data)
+ })
}
public testTemplate(projectId: string, response: any, item: ConfigItem) {
- return this.configRepository.testTemplate(projectId, response, item)
- .then((value) => {
- return value
- })
- .catch((error) => {
- const data = error.response.data
- throw new Error(data)
- })
+ return this.configRepository
+ .testTemplate(projectId, response, item)
+ .then((value) => {
+ return value
+ })
+ .catch((error) => {
+ const data = error.response.data
+ throw new Error(data)
+ })
}
public testMapping(projectId: string, item: ConfigItem, response: any) {
- return this.configRepository.testMapping(projectId, item, response)
- .then((value) => {
- return value
- })
- .catch((error) => {
- const data = error.response.data
- throw new Error(data)
- })
+ return this.configRepository
+ .testMapping(projectId, item, response)
+ .then((value) => {
+ return value
+ })
+ .catch((error) => {
+ const data = error.response.data
+ throw new Error(data)
+ })
}
-
}
diff --git a/frontend/services/application/autoLabeling/templateApplicationService.ts b/frontend/services/application/autoLabeling/templateApplicationService.ts
index 919f87f3d2..81abe537cc 100644
--- a/frontend/services/application/autoLabeling/templateApplicationService.ts
+++ b/frontend/services/application/autoLabeling/templateApplicationService.ts
@@ -2,9 +2,7 @@ import { TemplateRepository } from '~/domain/models/autoLabeling/templateReposit
import { ConfigTemplateItem } from '~/domain/models/autoLabeling/template'
export class TemplateApplicationService {
- constructor(
- private readonly repository: TemplateRepository
- ) {}
+ constructor(private readonly repository: TemplateRepository) {}
public list(id: string, taskName: string): Promise<string[]> {
return this.repository.list(id, taskName)
diff --git a/frontend/services/application/celery/taskStatusApplicationService.ts b/frontend/services/application/celery/taskStatusApplicationService.ts
index f7155904c4..df99e7addb 100644
--- a/frontend/services/application/celery/taskStatusApplicationService.ts
+++ b/frontend/services/application/celery/taskStatusApplicationService.ts
@@ -2,9 +2,7 @@ import { TaskStatusRepository } from '@/domain/models/celery/taskStatusRepositor
import { StatusDTO } from './statusData'
export class TaskStatusApplicationService {
- constructor(
- private readonly repository: TaskStatusRepository
- ) {}
+ constructor(private readonly repository: TaskStatusRepository) {}
public async get(taskId: string): Promise<StatusDTO> {
const item = await this.repository.get(taskId)
diff --git a/frontend/services/application/comment/commentApplicationService.ts b/frontend/services/application/comment/commentApplicationService.ts
index 985228bbc7..e88dc94b80 100644
--- a/frontend/services/application/comment/commentApplicationService.ts
+++ b/frontend/services/application/comment/commentApplicationService.ts
@@ -4,18 +4,19 @@ import { CommentRepository, SearchOption } from '~/domain/models/comment/comment
import { CommentItem } from '~/domain/models/comment/comment'
export class CommentApplicationService {
- constructor(
- private readonly repository: CommentRepository
- ) {}
+ constructor(private readonly repository: CommentRepository) {}
- public async listProjectComment(projectId: string, options: SearchOption): Promise<CommentListDTO> {
+ public async listProjectComment(
+ projectId: string,
+ options: SearchOption
+ ): Promise<CommentListDTO> {
const item = await this.repository.listAll(projectId, options)
return new CommentListDTO(item)
}
public async list(projectId: string, docId: number): Promise<CommentReadDTO[]> {
const items = await this.repository.list(projectId, docId)
- return items.map(item => new CommentReadDTO(item))
+ return items.map((item) => new CommentReadDTO(item))
}
public create(projectId: string, docId: number, text: string): Promise<CommentItem> {
@@ -32,7 +33,7 @@ export class CommentApplicationService {
}
public deleteBulk(projectId: string, items: CommentReadDTO[]): Promise<void> {
- const ids = items.map(item => item.id)
+ const ids = items.map((item) => item.id)
return this.repository.deleteBulk(projectId, ids)
}
}
diff --git a/frontend/services/application/comment/commentData.ts b/frontend/services/application/comment/commentData.ts
index af2ffe9fce..ed32102f79 100644
--- a/frontend/services/application/comment/commentData.ts
+++ b/frontend/services/application/comment/commentData.ts
@@ -1,34 +1,33 @@
import { CommentItem, CommentItemList } from '~/domain/models/comment/comment'
-
export class CommentReadDTO {
- id: number;
- user: number;
- username: string;
- example: number;
- text: string;
- createdAt: string;
+ id: number
+ user: number
+ username: string
+ example: number
+ text: string
+ createdAt: string
constructor(item: CommentItem) {
- this.id = item.id;
- this.user = item.user;
- this.username = item.username;
- this.example = item.example;
- this.text = item.text;
- this.createdAt = item.createdAt;
+ this.id = item.id
+ this.user = item.user
+ this.username = item.username
+ this.example = item.example
+ this.text = item.text
+ this.createdAt = item.createdAt
}
}
export class CommentListDTO {
count: number
- next : string | null
- prev : string | null
+ next: string | null
+ prev: string | null
items: CommentReadDTO[]
constructor(item: CommentItemList) {
this.count = item.count
this.next = item.next
this.prev = item.prev
- this.items = item.items.map(_ => new CommentReadDTO(_))
+ this.items = item.items.map((_) => new CommentReadDTO(_))
}
}
diff --git a/frontend/services/application/download/downloadApplicationService.ts b/frontend/services/application/download/downloadApplicationService.ts
index e7075499cb..d636e5a954 100644
--- a/frontend/services/application/download/downloadApplicationService.ts
+++ b/frontend/services/application/download/downloadApplicationService.ts
@@ -1,11 +1,13 @@
import { DownloadRepository } from '~/domain/models/download/downloadRepository'
export class DownloadApplicationService {
- constructor(
- private readonly repository: DownloadRepository
- ) {}
+ constructor(private readonly repository: DownloadRepository) {}
- public async request(projectId: string, format: string, exportApproved: boolean): Promise<string> {
+ public async request(
+ projectId: string,
+ format: string,
+ exportApproved: boolean
+ ): Promise<string> {
const item = await this.repository.prepare(projectId, format, exportApproved)
return item
}
diff --git a/frontend/services/application/download/downloadFormatApplicationService.ts b/frontend/services/application/download/downloadFormatApplicationService.ts
index ebbf4de671..dac649d924 100644
--- a/frontend/services/application/download/downloadFormatApplicationService.ts
+++ b/frontend/services/application/download/downloadFormatApplicationService.ts
@@ -2,12 +2,10 @@ import { FormatDTO } from './formatData'
import { DownloadFormatRepository } from '~/domain/models/download/downloadFormatRepository'
export class DownloadFormatApplicationService {
- constructor(
- private readonly repository: DownloadFormatRepository
- ) {}
+ constructor(private readonly repository: DownloadFormatRepository) {}
public async list(projectId: string): Promise<FormatDTO[]> {
const items = await this.repository.list(projectId)
- return items.map(item => new FormatDTO(item))
+ return items.map((item) => new FormatDTO(item))
}
}
diff --git a/frontend/services/application/download/formatData.ts b/frontend/services/application/download/formatData.ts
index 9294baafd8..f80e35da12 100644
--- a/frontend/services/application/download/formatData.ts
+++ b/frontend/services/application/download/formatData.ts
@@ -1,6 +1,5 @@
import { Format } from '~/domain/models/download/format'
-
export class FormatDTO {
name: string
example: string
diff --git a/frontend/services/application/example/exampleApplicationService.ts b/frontend/services/application/example/exampleApplicationService.ts
index 37cdcf162b..69e8f760fb 100644
--- a/frontend/services/application/example/exampleApplicationService.ts
+++ b/frontend/services/application/example/exampleApplicationService.ts
@@ -4,26 +4,29 @@ import { ExampleRepository, SearchOption } from '~/domain/models/example/example
import { ExampleItem } from '~/domain/models/example/example'
export class ExampleApplicationService {
- constructor(
- private readonly repository: ExampleRepository
- ) {}
+ constructor(private readonly repository: ExampleRepository) {}
public async list(projectId: string, options: SearchOption): Promise<ExampleListDTO> {
try {
const item = await this.repository.list(projectId, options)
return new ExampleListDTO(item)
- } catch(e: any) {
+ } catch (e: any) {
throw new Error(e.response.data.detail)
}
}
- public async fetchOne(projectId: string, page: string, q: string, isChecked: string): Promise<ExampleListDTO> {
+ public async fetchOne(
+ projectId: string,
+ page: string,
+ q: string,
+ isChecked: string
+ ): Promise<ExampleListDTO> {
const offset = (parseInt(page, 10) - 1).toString()
const options: SearchOption = {
limit: '1',
offset,
q,
- isChecked,
+ isChecked
}
return await this.list(projectId, options)
}
@@ -33,7 +36,7 @@ export class ExampleApplicationService {
const doc = this.toModel(item)
const response = await this.repository.create(projectId, doc)
return new ExampleDTO(response)
- } catch(e: any) {
+ } catch (e: any) {
throw new Error(e.response.data.detail)
}
}
@@ -42,13 +45,13 @@ export class ExampleApplicationService {
try {
const doc = this.toModel(item)
await this.repository.update(projectId, doc)
- } catch(e: any) {
+ } catch (e: any) {
throw new Error(e.response.data.detail)
}
}
public bulkDelete(projectId: string, items: ExampleDTO[]): Promise<void> {
- const ids = items.map(item => item.id)
+ const ids = items.map((item) => item.id)
return this.repository.bulkDelete(projectId, ids)
}
diff --git a/frontend/services/application/example/exampleData.ts b/frontend/services/application/example/exampleData.ts
index d368bd05a5..c6aac2e588 100644
--- a/frontend/services/application/example/exampleData.ts
+++ b/frontend/services/application/example/exampleData.ts
@@ -1,17 +1,16 @@
import { ExampleItem, ExampleItemList } from '~/domain/models/example/example'
-
export class ExampleDTO {
- id: number;
- text: string;
- meta: object;
- annotationApprover: boolean | null;
- commentCount: number;
- isApproved: boolean;
- fileUrl: string;
- filename: string;
- url: string;
- isConfirmed: boolean;
+ id: number
+ text: string
+ meta: object
+ annotationApprover: boolean | null
+ commentCount: number
+ isApproved: boolean
+ fileUrl: string
+ filename: string
+ url: string
+ isConfirmed: boolean
constructor(item: ExampleItem) {
this.id = item.id
@@ -29,14 +28,14 @@ export class ExampleDTO {
export class ExampleListDTO {
count: number
- next : string | null
- prev : string | null
+ next: string | null
+ prev: string | null
items: ExampleDTO[]
constructor(item: ExampleItemList) {
this.count = item.count
this.next = item.next
this.prev = item.prev
- this.items = item.items.map(_ => new ExampleDTO(_))
+ this.items = item.items.map((_) => new ExampleDTO(_))
}
}
diff --git a/frontend/services/application/label/labelApplicationService.ts b/frontend/services/application/label/labelApplicationService.ts
index f8a17f1aad..34dec0c99f 100644
--- a/frontend/services/application/label/labelApplicationService.ts
+++ b/frontend/services/application/label/labelApplicationService.ts
@@ -1,17 +1,14 @@
import { LabelDTO } from './labelData'
-import { CreateLabelCommand } from './labelCommand';
+import { CreateLabelCommand } from './labelCommand'
import { LabelRepository } from '~/domain/models/label/labelRepository'
import { LabelItem } from '~/domain/models/label/label'
-
export class LabelApplicationService {
- constructor(
- private readonly repository: LabelRepository
- ) {}
+ constructor(private readonly repository: LabelRepository) {}
public async list(id: string): Promise<LabelDTO[]> {
const items = await this.repository.list(id)
- return items.map(item => new LabelDTO(item))
+ return items.map((item) => new LabelDTO(item))
}
public async findById(projectId: string, labelId: number): Promise<LabelDTO> {
@@ -45,13 +42,13 @@ export class LabelApplicationService {
}
public bulkDelete(projectId: string, items: LabelDTO[]): Promise<void> {
- const ids = items.map(item => item.id)
+ const ids = items.map((item) => item.id)
return this.repository.bulkDelete(projectId, ids)
}
public async export(projectId: string) {
const items = await this.repository.list(projectId)
- const labels = items.map(item => new LabelDTO(item))
+ const labels = items.map((item) => new LabelDTO(item))
const url = window.URL.createObjectURL(new Blob([JSON.stringify(labels, null, 2)]))
const link = document.createElement('a')
link.href = url
diff --git a/frontend/services/application/member/memberApplicationService.ts b/frontend/services/application/member/memberApplicationService.ts
index 5740e96d1c..3ff225d915 100644
--- a/frontend/services/application/member/memberApplicationService.ts
+++ b/frontend/services/application/member/memberApplicationService.ts
@@ -4,15 +4,13 @@ import { MemberRepository } from '~/domain/models/member/memberRepository'
import { MemberItem } from '~/domain/models/member/member'
export class MemberApplicationService {
- constructor(
- private readonly repository: MemberRepository
- ) {}
+ constructor(private readonly repository: MemberRepository) {}
public async list(id: string): Promise<MemberDTO[]> {
try {
const items = await this.repository.list(id)
- return items.map(item => new MemberDTO(item))
- } catch(e: any) {
+ return items.map((item) => new MemberDTO(item))
+ } catch (e: any) {
throw new Error(e.response.data.detail)
}
}
@@ -21,7 +19,7 @@ export class MemberApplicationService {
try {
const member = plainToInstance(MemberItem, item)
await this.repository.create(projectId, member)
- } catch(e: any) {
+ } catch (e: any) {
throw new Error(e.response.data.detail)
}
}
@@ -30,13 +28,13 @@ export class MemberApplicationService {
try {
const member = plainToInstance(MemberItem, item)
await this.repository.update(projectId, member)
- } catch(e: any) {
+ } catch (e: any) {
throw new Error(e.response.data.detail)
}
}
public bulkDelete(projectId: string, items: MemberDTO[]): Promise<void> {
- const ids = items.map(item => item.id)
+ const ids = items.map((item) => item.id)
return this.repository.bulkDelete(projectId, ids)
}
diff --git a/frontend/services/application/member/memberData.ts b/frontend/services/application/member/memberData.ts
index 521085a408..74cf2c9410 100644
--- a/frontend/services/application/member/memberData.ts
+++ b/frontend/services/application/member/memberData.ts
@@ -1,18 +1,17 @@
import { MemberItem } from '~/domain/models/member/member'
-
export class MemberDTO {
- id: number;
- user: number;
- role: number;
- username: string;
- rolename: string;
+ id: number
+ user: number
+ role: number
+ username: string
+ rolename: string
constructor(item: MemberItem) {
- this.id = item.id;
- this.user = item.user;
- this.role = item.role;
- this.username = item.username;
- this.rolename = item.rolename;
+ this.id = item.id
+ this.user = item.user
+ this.role = item.role
+ this.username = item.username
+ this.rolename = item.rolename
}
}
diff --git a/frontend/services/application/metrics/metricsApplicationService.ts b/frontend/services/application/metrics/metricsApplicationService.ts
index fd53078a73..7429714d41 100644
--- a/frontend/services/application/metrics/metricsApplicationService.ts
+++ b/frontend/services/application/metrics/metricsApplicationService.ts
@@ -2,9 +2,7 @@ import { MetricsRepository } from '~/domain/models/metrics/metricsRepository'
import { Progress, Distribution, MyProgress } from '~/domain/models/metrics/metrics'
export class MetricsApplicationService {
- constructor(
- private readonly repository: MetricsRepository
- ) {}
+ constructor(private readonly repository: MetricsRepository) {}
public async fetchMemberProgress(projectId: string): Promise<Progress> {
return await this.repository.fetchMemberProgress(projectId)
diff --git a/frontend/services/application/option/optionApplicationService.ts b/frontend/services/application/option/optionApplicationService.ts
index c536ab4e95..4858f17f4b 100644
--- a/frontend/services/application/option/optionApplicationService.ts
+++ b/frontend/services/application/option/optionApplicationService.ts
@@ -3,9 +3,7 @@ import { OptionRepository } from '~/domain/models/option/optionRepository'
import { OptionItem } from '~/domain/models/option/option'
export class OptionApplicationService {
- constructor(
- private readonly repository: OptionRepository
- ) {}
+ constructor(private readonly repository: OptionRepository) {}
public findOption(projectId: string): OptionDTO {
const item = this.repository.findById(projectId)
diff --git a/frontend/services/application/option/optionData.ts b/frontend/services/application/option/optionData.ts
index f28aff20fe..e295eea91e 100644
--- a/frontend/services/application/option/optionData.ts
+++ b/frontend/services/application/option/optionData.ts
@@ -1,14 +1,13 @@
import { OptionItem } from '~/domain/models/option/option'
-
export class OptionDTO {
- page: number;
- q?: string;
- isChecked?: string;
+ page: number
+ q?: string
+ isChecked?: string
constructor(item: OptionItem) {
- this.page = item.page;
- this.q = item.q;
- this.isChecked = item.isChecked;
+ this.page = item.page
+ this.q = item.q
+ this.isChecked = item.isChecked
}
}
diff --git a/frontend/services/application/project/projectApplicationService.ts b/frontend/services/application/project/projectApplicationService.ts
index 132d51ca9c..51f9c2ef35 100644
--- a/frontend/services/application/project/projectApplicationService.ts
+++ b/frontend/services/application/project/projectApplicationService.ts
@@ -2,17 +2,14 @@ import { ProjectDTO, ProjectWriteDTO, ProjectListDTO } from './projectData'
import { ProjectRepository, SearchOption } from '~/domain/models/project/projectRepository'
import { ProjectWriteItem } from '~/domain/models/project/project'
-
export class ProjectApplicationService {
- constructor(
- private readonly repository: ProjectRepository
- ) {}
+ constructor(private readonly repository: ProjectRepository) {}
public async list(options: SearchOption): Promise<ProjectListDTO> {
try {
const items = await this.repository.list(options)
return new ProjectListDTO(items)
- } catch(e: any) {
+ } catch (e: any) {
throw new Error(e.response.data.detail)
}
}
@@ -27,7 +24,7 @@ export class ProjectApplicationService {
const project = this.toWriteModel(item)
const response = await this.repository.create(project)
return new ProjectDTO(response)
- } catch(e: any) {
+ } catch (e: any) {
throw new Error(e.response.data.detail)
}
}
@@ -37,13 +34,13 @@ export class ProjectApplicationService {
const project = this.toWriteModel(item)
project.tags = []
await this.repository.update(project)
- } catch(e: any) {
+ } catch (e: any) {
throw new Error(e.response.data.detail)
}
}
public bulkDelete(items: ProjectDTO[]): Promise<void> {
- const ids = items.map(item => item.id)
+ const ids = items.map((item) => item.id)
return this.repository.bulkDelete(ids)
}
diff --git a/frontend/services/application/project/projectData.ts b/frontend/services/application/project/projectData.ts
index 1294afea8b..908759b3a5 100644
--- a/frontend/services/application/project/projectData.ts
+++ b/frontend/services/application/project/projectData.ts
@@ -46,18 +46,31 @@ export class ProjectDTO {
}
}
-export type ProjectWriteDTO = Pick<ProjectDTO, 'id' | 'name' | 'description' | 'guideline' | 'projectType' | 'enableRandomOrder' | 'enableShareAnnotation' | 'singleClassClassification' | 'allowOverlapping' | 'graphemeMode' | 'useRelation'> & { tags: string[] }
+export type ProjectWriteDTO = Pick<
+ ProjectDTO,
+ | 'id'
+ | 'name'
+ | 'description'
+ | 'guideline'
+ | 'projectType'
+ | 'enableRandomOrder'
+ | 'enableShareAnnotation'
+ | 'singleClassClassification'
+ | 'allowOverlapping'
+ | 'graphemeMode'
+ | 'useRelation'
+> & { tags: string[] }
export class ProjectListDTO {
count: number
- next : string | null
- prev : string | null
+ next: string | null
+ prev: string | null
items: ProjectDTO[]
constructor(item: ProjectItemList) {
this.count = item.count
this.next = item.next
this.prev = item.prev
- this.items = item.items.map(_ => new ProjectDTO(_))
+ this.items = item.items.map((_) => new ProjectDTO(_))
}
}
diff --git a/frontend/services/application/role/roleApplicationService.ts b/frontend/services/application/role/roleApplicationService.ts
index 04c39e3c82..0f132bf61e 100644
--- a/frontend/services/application/role/roleApplicationService.ts
+++ b/frontend/services/application/role/roleApplicationService.ts
@@ -2,12 +2,10 @@ import { RoleDTO } from './roleData'
import { RoleRepository } from '~/domain/models/role/roleRepository'
export class RoleApplicationService {
- constructor(
- private readonly repository: RoleRepository
- ) {}
+ constructor(private readonly repository: RoleRepository) {}
public async list(): Promise<RoleDTO[]> {
const items = await this.repository.list()
- return items.map(item => new RoleDTO(item))
+ return items.map((item) => new RoleDTO(item))
}
}
diff --git a/frontend/services/application/role/roleData.ts b/frontend/services/application/role/roleData.ts
index a747548fe3..c9c33e09fd 100644
--- a/frontend/services/application/role/roleData.ts
+++ b/frontend/services/application/role/roleData.ts
@@ -1,12 +1,11 @@
import { RoleItem } from '~/domain/models/role/role'
-
export class RoleDTO {
- id: number;
- rolename: string;
+ id: number
+ rolename: string
constructor(item: RoleItem) {
- this.id = item.id;
- this.rolename = item.name;
+ this.id = item.id
+ this.rolename = item.name
}
}
diff --git a/frontend/services/application/tag/tagApplicationService.ts b/frontend/services/application/tag/tagApplicationService.ts
index 5b46b2d2cd..8bce95ce15 100644
--- a/frontend/services/application/tag/tagApplicationService.ts
+++ b/frontend/services/application/tag/tagApplicationService.ts
@@ -1,15 +1,12 @@
import { TagDTO } from './tagData'
import { TagRepository } from '~/domain/models/tag/tagRepository'
-
export class TagApplicationService {
- constructor(
- private readonly repository: TagRepository
- ) {}
+ constructor(private readonly repository: TagRepository) {}
public async list(id: string): Promise<TagDTO[]> {
const items = await this.repository.list(id)
- return items.map(item => new TagDTO(item))
+ return items.map((item) => new TagDTO(item))
}
public create(projectId: string, text: string): void {
diff --git a/frontend/services/application/tasks/annotationApplicationService.ts b/frontend/services/application/tasks/annotationApplicationService.ts
index 4796d69eb1..5cd2bad906 100644
--- a/frontend/services/application/tasks/annotationApplicationService.ts
+++ b/frontend/services/application/tasks/annotationApplicationService.ts
@@ -1,16 +1,13 @@
import { AnnotationRepository } from '~/domain/models/tasks/annotationRepository'
import { AnnotationModel } from '~/domain/models/tasks/interface'
-
export class AnnotationApplicationService<T extends AnnotationModel> {
- constructor(
- readonly repository: AnnotationRepository<T>
- ) {}
+ constructor(readonly repository: AnnotationRepository<T>) {}
public async delete(projectId: string, docId: number, annotationId: number): Promise<void> {
try {
await this.repository.delete(projectId, docId, annotationId)
- } catch(e) {
+ } catch (e) {
console.log(e.response.data.detail)
}
}
diff --git a/frontend/services/application/tasks/seq2seq/seq2seqApplicationService.ts b/frontend/services/application/tasks/seq2seq/seq2seqApplicationService.ts
index b8db47e670..3476eaa0f8 100644
--- a/frontend/services/application/tasks/seq2seq/seq2seqApplicationService.ts
+++ b/frontend/services/application/tasks/seq2seq/seq2seqApplicationService.ts
@@ -4,15 +4,13 @@ import { APISeq2seqRepository } from '~/repositories/tasks/seq2seq/apiSeq2seq'
import { Seq2seqLabel } from '~/domain/models/tasks/seq2seq'
export class Seq2seqApplicationService extends AnnotationApplicationService<Seq2seqLabel> {
- constructor(
- readonly repository: APISeq2seqRepository
- ) {
+ constructor(readonly repository: APISeq2seqRepository) {
super(new APISeq2seqRepository())
}
public async list(projectId: string, docId: number): Promise<Seq2seqDTO[]> {
const items = await this.repository.list(projectId, docId)
- return items.map(item => new Seq2seqDTO(item))
+ return items.map((item) => new Seq2seqDTO(item))
}
public async create(projectId: string, docId: number, text: string): Promise<void> {
@@ -20,7 +18,12 @@ export class Seq2seqApplicationService extends AnnotationApplicationService<Seq2
await this.repository.create(projectId, docId, item)
}
- public async changeText(projectId: string, docId: number, annotationId: number, text: string): Promise<void> {
+ public async changeText(
+ projectId: string,
+ docId: number,
+ annotationId: number,
+ text: string
+ ): Promise<void> {
await this.repository.update(projectId, docId, annotationId, text)
}
}
diff --git a/frontend/services/application/tasks/seq2seq/seq2seqData.ts b/frontend/services/application/tasks/seq2seq/seq2seqData.ts
index ce5c7a8898..4b081082fd 100644
--- a/frontend/services/application/tasks/seq2seq/seq2seqData.ts
+++ b/frontend/services/application/tasks/seq2seq/seq2seqData.ts
@@ -1,14 +1,13 @@
import { Seq2seqLabel } from '~/domain/models/tasks/seq2seq'
-
export class Seq2seqDTO {
- id: number;
- text: string;
- user: number;
+ id: number
+ text: string
+ user: number
constructor(item: Seq2seqLabel) {
- this.id = item.id;
- this.text = item.text;
- this.user = item.user;
+ this.id = item.id
+ this.text = item.text
+ this.user = item.user
}
}
diff --git a/frontend/services/application/tasks/sequenceLabeling/relationData.ts b/frontend/services/application/tasks/sequenceLabeling/relationData.ts
index 66cfab5b7e..c1afe08a82 100644
--- a/frontend/services/application/tasks/sequenceLabeling/relationData.ts
+++ b/frontend/services/application/tasks/sequenceLabeling/relationData.ts
@@ -12,4 +12,4 @@ export class RelationDTO {
this.toId = item.toId
this.labelId = item.type
}
-}
\ No newline at end of file
+}
diff --git a/frontend/services/application/tasks/sequenceLabeling/sequenceLabelingApplicationService.ts b/frontend/services/application/tasks/sequenceLabeling/sequenceLabelingApplicationService.ts
index e6abaf247a..086e00e904 100644
--- a/frontend/services/application/tasks/sequenceLabeling/sequenceLabelingApplicationService.ts
+++ b/frontend/services/application/tasks/sequenceLabeling/sequenceLabelingApplicationService.ts
@@ -3,54 +3,76 @@ import { RelationDTO } from './relationData'
import { SpanDTO } from './sequenceLabelingData'
import { APISequenceLabelingRepository } from '~/repositories/tasks/sequenceLabeling/apiSequenceLabeling'
import { Span } from '~/domain/models/tasks/sequenceLabeling'
-import { RelationRepository } from "~/domain/models/tasks/relationRepository"
-import { RelationItem } from "~/domain/models/tasks/relation"
+import { RelationRepository } from '~/domain/models/tasks/relationRepository'
+import { RelationItem } from '~/domain/models/tasks/relation'
export class SequenceLabelingApplicationService extends AnnotationApplicationService<Span> {
- constructor(
- readonly repository: APISequenceLabelingRepository,
- readonly relationRepository: RelationRepository
- ) {
- super(new APISequenceLabelingRepository())
- }
+ constructor(
+ readonly repository: APISequenceLabelingRepository,
+ readonly relationRepository: RelationRepository
+ ) {
+ super(new APISequenceLabelingRepository())
+ }
- public async list(projectId: string, docId: number): Promise<SpanDTO[]> {
- const items = await this.repository.list(projectId, docId)
- return items.map(item => new SpanDTO(item))
- }
+ public async list(projectId: string, docId: number): Promise<SpanDTO[]> {
+ const items = await this.repository.list(projectId, docId)
+ return items.map((item) => new SpanDTO(item))
+ }
- public async create(projectId: string, docId: number, labelId: number, startOffset: number, endOffset: number): Promise<void> {
- const item = new Span(0, labelId, 0, startOffset, endOffset)
- try {
- await this.repository.create(projectId, docId, item)
- } catch(e: any) {
- console.log(e.response.data.detail)
- }
+ public async create(
+ projectId: string,
+ docId: number,
+ labelId: number,
+ startOffset: number,
+ endOffset: number
+ ): Promise<void> {
+ const item = new Span(0, labelId, 0, startOffset, endOffset)
+ try {
+ await this.repository.create(projectId, docId, item)
+ } catch (e: any) {
+ console.log(e.response.data.detail)
}
+ }
- public async changeLabel(projectId: string, docId: number, annotationId: number, labelId: number): Promise<void> {
- try {
- await this.repository.update(projectId, docId, annotationId, labelId)
- } catch(e: any) {
- console.log(e.response.data.detail)
- }
+ public async changeLabel(
+ projectId: string,
+ docId: number,
+ annotationId: number,
+ labelId: number
+ ): Promise<void> {
+ try {
+ await this.repository.update(projectId, docId, annotationId, labelId)
+ } catch (e: any) {
+ console.log(e.response.data.detail)
}
+ }
- public async listRelations(projectId: string, docId: number): Promise<RelationDTO[]> {
- const items = await this.relationRepository.list(projectId, docId)
- return items.map(item => new RelationDTO(item))
- }
+ public async listRelations(projectId: string, docId: number): Promise<RelationDTO[]> {
+ const items = await this.relationRepository.list(projectId, docId)
+ return items.map((item) => new RelationDTO(item))
+ }
- public async createRelation(projectId: string, docId: number, fromId: number, toId: number, typeId: number): Promise<void> {
- const relation = new RelationItem(0, fromId, toId, typeId)
- await this.relationRepository.create(projectId, docId, relation)
- }
+ public async createRelation(
+ projectId: string,
+ docId: number,
+ fromId: number,
+ toId: number,
+ typeId: number
+ ): Promise<void> {
+ const relation = new RelationItem(0, fromId, toId, typeId)
+ await this.relationRepository.create(projectId, docId, relation)
+ }
- public async deleteRelation(projectId: string, docId: number, relationId: number): Promise<void> {
- await this.relationRepository.delete(projectId, docId, relationId)
- }
+ public async deleteRelation(projectId: string, docId: number, relationId: number): Promise<void> {
+ await this.relationRepository.delete(projectId, docId, relationId)
+ }
- public async updateRelation(projectId: string, docId: number, relationId: number, typeId: number): Promise<void> {
- await this.relationRepository.update(projectId, docId, relationId, typeId)
- }
+ public async updateRelation(
+ projectId: string,
+ docId: number,
+ relationId: number,
+ typeId: number
+ ): Promise<void> {
+ await this.relationRepository.update(projectId, docId, relationId, typeId)
+ }
}
diff --git a/frontend/services/application/tasks/sequenceLabeling/sequenceLabelingData.ts b/frontend/services/application/tasks/sequenceLabeling/sequenceLabelingData.ts
index 3266afc9a0..b5dcc2efdc 100644
--- a/frontend/services/application/tasks/sequenceLabeling/sequenceLabelingData.ts
+++ b/frontend/services/application/tasks/sequenceLabeling/sequenceLabelingData.ts
@@ -1,18 +1,17 @@
import { Span } from '~/domain/models/tasks/sequenceLabeling'
-
export class SpanDTO {
- id: number;
- label: number;
- user: number;
- startOffset: number;
- endOffset: number;
+ id: number
+ label: number
+ user: number
+ startOffset: number
+ endOffset: number
constructor(item: Span) {
- this.id = item.id;
- this.label = item.label;
- this.user = item.user;
- this.startOffset = item.startOffset;
- this.endOffset = item.endOffset;
+ this.id = item.id
+ this.label = item.label
+ this.user = item.user
+ this.startOffset = item.startOffset
+ this.endOffset = item.endOffset
}
}
diff --git a/frontend/services/application/tasks/textClassification/textClassificationApplicationService.ts b/frontend/services/application/tasks/textClassification/textClassificationApplicationService.ts
index 067f53c392..49fb818cc5 100644
--- a/frontend/services/application/tasks/textClassification/textClassificationApplicationService.ts
+++ b/frontend/services/application/tasks/textClassification/textClassificationApplicationService.ts
@@ -1,16 +1,15 @@
import { AnnotationApplicationService } from '../annotationApplicationService'
import { TextClassificationDTO } from './textClassificationData'
-import { TextClassificationItem } from '~/domain/models/tasks/textClassification'
-
-export class TextClassificationApplicationService extends AnnotationApplicationService<TextClassificationItem> {
+import { CategoryItem } from '~/domain/models/tasks/textClassification'
+export class TextClassificationService extends AnnotationApplicationService<CategoryItem> {
public async list(projectId: string, docId: number): Promise<TextClassificationDTO[]> {
const items = await this.repository.list(projectId, docId)
- return items.map(item => new TextClassificationDTO(item))
+ return items.map((item) => new TextClassificationDTO(item))
}
public async create(projectId: string, docId: number, labelId: number): Promise<void> {
- const item = new TextClassificationItem(0, labelId, 0)
+ const item = new CategoryItem(0, labelId, 0)
await this.repository.create(projectId, docId, item)
}
}
diff --git a/frontend/services/application/tasks/textClassification/textClassificationData.ts b/frontend/services/application/tasks/textClassification/textClassificationData.ts
index 6937aa3509..330eaf538a 100644
--- a/frontend/services/application/tasks/textClassification/textClassificationData.ts
+++ b/frontend/services/application/tasks/textClassification/textClassificationData.ts
@@ -1,14 +1,13 @@
-import { TextClassificationItem } from '~/domain/models/tasks/textClassification'
-
+import { CategoryItem } from '~/domain/models/tasks/textClassification'
export class TextClassificationDTO {
- id: number;
- label: number;
- user: number;
+ id: number
+ label: number
+ user: number
- constructor(item: TextClassificationItem) {
- this.id = item.id;
- this.label = item.label;
- this.user = item.user;
+ constructor(item: CategoryItem) {
+ this.id = item.id
+ this.label = item.label
+ this.user = item.user
}
}
diff --git a/frontend/services/application/upload/catalogApplicationService.ts b/frontend/services/application/upload/catalogApplicationService.ts
index 20216319cd..841358d5b8 100644
--- a/frontend/services/application/upload/catalogApplicationService.ts
+++ b/frontend/services/application/upload/catalogApplicationService.ts
@@ -2,12 +2,10 @@ import { CatalogDTO } from './catalogData'
import { CatalogRepository } from '~/domain/models/upload/catalogRepository'
export class CatalogApplicationService {
- constructor(
- private readonly repository: CatalogRepository
- ) {}
+ constructor(private readonly repository: CatalogRepository) {}
public async list(projectId: string): Promise<CatalogDTO[]> {
const items = await this.repository.list(projectId)
- return items.map(item => new CatalogDTO(item))
+ return items.map((item) => new CatalogDTO(item))
}
}
diff --git a/frontend/services/application/upload/catalogData.ts b/frontend/services/application/upload/catalogData.ts
index 781bbc9584..273988d5db 100644
--- a/frontend/services/application/upload/catalogData.ts
+++ b/frontend/services/application/upload/catalogData.ts
@@ -1,6 +1,5 @@
import { Catalog } from '~/domain/models/upload/catalog'
-
export class CatalogDTO {
name: string
example: string
diff --git a/frontend/services/application/upload/parseApplicationService.ts b/frontend/services/application/upload/parseApplicationService.ts
index 888f012da8..2e20c09a60 100644
--- a/frontend/services/application/upload/parseApplicationService.ts
+++ b/frontend/services/application/upload/parseApplicationService.ts
@@ -1,11 +1,15 @@
import { ParseRepository } from '~/domain/models/upload/parseRepository'
export class ParseApplicationService {
- constructor(
- private readonly repository: ParseRepository
- ) {}
+ constructor(private readonly repository: ParseRepository) {}
- public async analyze(projectId: string, format: string, task: string, uploadIds: number[], option: object): Promise<string> {
+ public async analyze(
+ projectId: string,
+ format: string,
+ task: string,
+ uploadIds: number[],
+ option: object
+ ): Promise<string> {
const item = await this.repository.analyze(projectId, format, task, uploadIds, option)
return item
}
diff --git a/frontend/services/application/user/userApplicationService.ts b/frontend/services/application/user/userApplicationService.ts
index c872a77d25..b6312db035 100644
--- a/frontend/services/application/user/userApplicationService.ts
+++ b/frontend/services/application/user/userApplicationService.ts
@@ -2,9 +2,7 @@ import { UserDTO } from './userData'
import { UserRepository } from '~/domain/models/user/userRepository'
export class UserApplicationService {
- constructor(
- private readonly repository: UserRepository
- ) {}
+ constructor(private readonly repository: UserRepository) {}
public async getMyProfile(): Promise<UserDTO> {
const item = await this.repository.getMe()
@@ -13,6 +11,6 @@ export class UserApplicationService {
public async list(query: string): Promise<UserDTO[]> {
const items = await this.repository.list(query)
- return items.map(item => new UserDTO(item))
+ return items.map((item) => new UserDTO(item))
}
}
diff --git a/frontend/services/application/user/userData.ts b/frontend/services/application/user/userData.ts
index 3d0bb5ddfb..36d3a6d639 100644
--- a/frontend/services/application/user/userData.ts
+++ b/frontend/services/application/user/userData.ts
@@ -1,14 +1,13 @@
import { UserItem } from '~/domain/models/user/user'
-
export class UserDTO {
- id: number;
- username: string;
- isStaff: boolean;
+ id: number
+ username: string
+ isStaff: boolean
constructor(item: UserItem) {
- this.id = item.id;
- this.username = item.username;
- this.isStaff = item.isStaff;
+ this.id = item.id
+ this.username = item.username
+ this.isStaff = item.isStaff
}
}
diff --git a/frontend/store/auth.js b/frontend/store/auth.js
index 7f3ab7fc77..a887588147 100644
--- a/frontend/store/auth.js
+++ b/frontend/store/auth.js
@@ -43,7 +43,7 @@ export const actions = {
try {
await this.$services.auth.login(authData.username, authData.password)
commit('setAuthenticated', true)
- } catch(error) {
+ } catch (error) {
throw new Error('The credential is invalid')
}
},
diff --git a/frontend/store/config.js b/frontend/store/config.js
index f38057b26f..04d48b9245 100644
--- a/frontend/store/config.js
+++ b/frontend/store/config.js
@@ -1,5 +1,5 @@
export const state = () => ({
- rtl: false,
+ rtl: false
})
export const mutations = {
@@ -11,11 +11,11 @@ export const mutations = {
export const getters = {
isRTL(state) {
return state.rtl
- },
+ }
}
export const actions = {
toggleRTL({ commit }) {
commit('changeRTLState')
- },
+ }
}
diff --git a/frontend/store/projects.js b/frontend/store/projects.js
index be94e44c60..00e44300a7 100644
--- a/frontend/store/projects.js
+++ b/frontend/store/projects.js
@@ -1,5 +1,5 @@
export const state = () => ({
- current: {},
+ current: {}
})
export const getters = {
@@ -15,7 +15,7 @@ export const getters = {
},
getLink(state) {
return state.current.pageLink
- },
+ }
}
export const mutations = {
@@ -29,7 +29,7 @@ export const actions = {
try {
const response = await this.$services.project.findById(projectId)
commit('setCurrent', response)
- } catch(error) {
+ } catch (error) {
throw new Error(error)
}
}
diff --git a/frontend/test/unit/components/tasks/toolbar/forms/formGuideline.spec.js b/frontend/test/unit/components/tasks/toolbar/forms/formGuideline.spec.js
index 5c6ed4d68f..d784daf299 100644
--- a/frontend/test/unit/components/tasks/toolbar/forms/formGuideline.spec.js
+++ b/frontend/test/unit/components/tasks/toolbar/forms/formGuideline.spec.js
@@ -11,7 +11,7 @@ const factory = () => {
propsData: {
guidelineText: 'Hello'
},
- mocks:{ $t }
+ mocks: { $t }
})
}
diff --git a/frontend/tsconfig.json b/frontend/tsconfig.json
index e48f495a07..c3c0f522f1 100644
--- a/frontend/tsconfig.json
+++ b/frontend/tsconfig.json
@@ -3,11 +3,7 @@
"target": "ES2018",
"module": "ESNext",
"moduleResolution": "Node",
- "lib": [
- "ESNext",
- "ESNext.AsyncIterable",
- "DOM"
- ],
+ "lib": ["ESNext", "ESNext.AsyncIterable", "DOM"],
"esModuleInterop": true,
"allowJs": true,
"sourceMap": true,
@@ -17,20 +13,10 @@
"strictPropertyInitialization": false,
"baseUrl": ".",
"paths": {
- "~/*": [
- "./*"
- ],
- "@/*": [
- "./*"
- ]
+ "~/*": ["./*"],
+ "@/*": ["./*"]
},
- "types": [
- "@types/node",
- "@nuxt/types",
- "nuxt-i18n"
- ]
+ "types": ["@types/node", "@nuxt/types", "nuxt-i18n"]
},
- "exclude": [
- "node_modules"
- ]
+ "exclude": ["node_modules"]
}
diff --git a/frontend/vue-shim.d.ts b/frontend/vue-shim.d.ts
index a456e142ad..6de952b69c 100644
--- a/frontend/vue-shim.d.ts
+++ b/frontend/vue-shim.d.ts
@@ -1,5 +1,5 @@
-declare module "*.vue" {
+declare module '*.vue' {
import Vue from 'vue'
export default Vue
}
-declare module "v-annotator"
\ No newline at end of file
+declare module 'v-annotator'
|
typeddjango__django-stubs-939 | Support mypy 0.950
mypy 0.950 was released a short while ago (https://github.com/python/mypy/releases/tag/v0.950).
`django-stubs` currently enforces a version less than 0.950 (https://github.com/typeddjango/django-stubs/blob/master/setup.py#L23); please enable support for 0.950.
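A minimal sketch of the change being asked for, assuming the pin lives in the `dependencies` list of `setup.py` (as on the linked line); widening the upper bound by one minor release admits 0.950 while still excluding future, possibly breaking, mypy releases:

    dependencies = [
        # accept the 0.95x series (including 0.950) but stay below the next
        # minor release, whose behaviour is not yet known
        "mypy>=0.930,<0.960",
        "django",
        "django-stubs-ext>=0.4.0",
        "tomli",
        # Types: typing-extensions, types-pytz, types-PyYAML (unchanged)
    ]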
| [
{
"content": "import os\nfrom typing import List\n\nfrom setuptools import find_packages, setup\n\n\ndef find_stub_files(name: str) -> List[str]:\n result = []\n for root, _dirs, files in os.walk(name):\n for file in files:\n if file.endswith(\".pyi\"):\n if os.path.sep in root:\n sub_root = root.split(os.path.sep, 1)[-1]\n file = os.path.join(sub_root, file)\n result.append(file)\n return result\n\n\nwith open(\"README.md\") as f:\n readme = f.read()\n\ndependencies = [\n \"mypy>=0.930,<0.950\",\n \"django\",\n \"django-stubs-ext>=0.4.0\",\n \"tomli\",\n # Types:\n \"typing-extensions\",\n \"types-pytz\",\n \"types-PyYAML\",\n]\n\nsetup(\n name=\"django-stubs\",\n version=\"1.10.1\",\n description=\"Mypy stubs for Django\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n license=\"MIT\",\n url=\"https://github.com/typeddjango/django-stubs\",\n author=\"Maksim Kurnikov\",\n author_email=\"[email protected]\",\n py_modules=[],\n python_requires=\">=3.7\",\n install_requires=dependencies,\n packages=[\"django-stubs\", *find_packages(exclude=[\"scripts\"])],\n package_data={\n \"django-stubs\": find_stub_files(\"django-stubs\"),\n \"mypy_django_plugin\": [\"py.typed\"],\n },\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Typing :: Typed\",\n \"Framework :: Django\",\n \"Framework :: Django :: 2.2\",\n \"Framework :: Django :: 3.0\",\n \"Framework :: Django :: 3.1\",\n \"Framework :: Django :: 3.2\",\n ],\n project_urls={\n \"Release notes\": \"https://github.com/typeddjango/django-stubs/releases\",\n },\n)\n",
"path": "setup.py"
}
] | [
{
"content": "import os\nfrom typing import List\n\nfrom setuptools import find_packages, setup\n\n\ndef find_stub_files(name: str) -> List[str]:\n result = []\n for root, _dirs, files in os.walk(name):\n for file in files:\n if file.endswith(\".pyi\"):\n if os.path.sep in root:\n sub_root = root.split(os.path.sep, 1)[-1]\n file = os.path.join(sub_root, file)\n result.append(file)\n return result\n\n\nwith open(\"README.md\") as f:\n readme = f.read()\n\ndependencies = [\n \"mypy>=0.930,<0.960\",\n \"django\",\n \"django-stubs-ext>=0.4.0\",\n \"tomli\",\n # Types:\n \"typing-extensions\",\n \"types-pytz\",\n \"types-PyYAML\",\n]\n\nsetup(\n name=\"django-stubs\",\n version=\"1.10.1\",\n description=\"Mypy stubs for Django\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n license=\"MIT\",\n url=\"https://github.com/typeddjango/django-stubs\",\n author=\"Maksim Kurnikov\",\n author_email=\"[email protected]\",\n py_modules=[],\n python_requires=\">=3.7\",\n install_requires=dependencies,\n packages=[\"django-stubs\", *find_packages(exclude=[\"scripts\"])],\n package_data={\n \"django-stubs\": find_stub_files(\"django-stubs\"),\n \"mypy_django_plugin\": [\"py.typed\"],\n },\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Typing :: Typed\",\n \"Framework :: Django\",\n \"Framework :: Django :: 2.2\",\n \"Framework :: Django :: 3.0\",\n \"Framework :: Django :: 3.1\",\n \"Framework :: Django :: 3.2\",\n ],\n project_urls={\n \"Release notes\": \"https://github.com/typeddjango/django-stubs/releases\",\n },\n)\n",
"path": "setup.py"
}
] | diff --git a/requirements.txt b/requirements.txt
index a96bbd456..ab2789184 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -10,4 +10,4 @@ psycopg2-binary
-e .
# Overrides:
-mypy==0.942
+mypy==0.950
diff --git a/setup.py b/setup.py
index c4987d135..18098e28c 100644
--- a/setup.py
+++ b/setup.py
@@ -20,7 +20,7 @@ def find_stub_files(name: str) -> List[str]:
readme = f.read()
dependencies = [
- "mypy>=0.930,<0.950",
+ "mypy>=0.930,<0.960",
"django",
"django-stubs-ext>=0.4.0",
"tomli",
diff --git a/tests/typecheck/db/test_connection.yml b/tests/typecheck/db/test_connection.yml
index 4f3134171..6b3f88bf1 100644
--- a/tests/typecheck/db/test_connection.yml
+++ b/tests/typecheck/db/test_connection.yml
@@ -7,7 +7,7 @@
- case: raw_connections
main: |
from django.db import connections
- reveal_type(connections["test"]) # N: Revealed type is "django.db.backends.base.base.BaseDatabaseWrapper*"
+ reveal_type(connections["test"]) # N: Revealed type is "django.db.backends.base.base.BaseDatabaseWrapper"
for connection in connections.all():
with connection.cursor() as cursor:
reveal_type(cursor) # N: Revealed type is "django.db.backends.utils.CursorWrapper"
diff --git a/tests/typecheck/fields/test_base.yml b/tests/typecheck/fields/test_base.yml
index f9c671fa7..714916213 100644
--- a/tests/typecheck/fields/test_base.yml
+++ b/tests/typecheck/fields/test_base.yml
@@ -2,11 +2,11 @@
main: |
from myapp.models import User
user = User(small_int=1, name='user', slug='user', text='user')
- reveal_type(user.id) # N: Revealed type is "builtins.int*"
- reveal_type(user.small_int) # N: Revealed type is "builtins.int*"
- reveal_type(user.name) # N: Revealed type is "builtins.str*"
- reveal_type(user.slug) # N: Revealed type is "builtins.str*"
- reveal_type(user.text) # N: Revealed type is "builtins.str*"
+ reveal_type(user.id) # N: Revealed type is "builtins.int"
+ reveal_type(user.small_int) # N: Revealed type is "builtins.int"
+ reveal_type(user.name) # N: Revealed type is "builtins.str"
+ reveal_type(user.slug) # N: Revealed type is "builtins.str"
+ reveal_type(user.text) # N: Revealed type is "builtins.str"
installed_apps:
- myapp
files:
@@ -25,9 +25,9 @@
main: |
from myapp.models import Booking
booking = Booking()
- reveal_type(booking.id) # N: Revealed type is "builtins.int*"
+ reveal_type(booking.id) # N: Revealed type is "builtins.int"
reveal_type(booking.time_range) # N: Revealed type is "Any"
- reveal_type(booking.some_decimal) # N: Revealed type is "decimal.Decimal*"
+ reveal_type(booking.some_decimal) # N: Revealed type is "decimal.Decimal"
installed_apps:
- myapp
files:
@@ -47,7 +47,7 @@
disable_cache: true
main: |
from myapp.models import User
- reveal_type(User().id) # N: Revealed type is "builtins.int*"
+ reveal_type(User().id) # N: Revealed type is "builtins.int"
installed_apps:
- myapp
files:
@@ -62,7 +62,7 @@
disable_cache: true
main: |
from myapp.models import User
- reveal_type(User().my_pk) # N: Revealed type is "builtins.int*"
+ reveal_type(User().my_pk) # N: Revealed type is "builtins.int"
User().id # E: "User" has no attribute "id"
installed_apps:
- myapp
@@ -97,7 +97,7 @@
MyModel(notnulltext=None) # E: Incompatible type for "notnulltext" of "MyModel" (got "None", expected "Union[str, int, Combinable]")
MyModel(notnulltext="")
MyModel().notnulltext = None # E: Incompatible types in assignment (expression has type "None", variable has type "Union[str, int, Combinable]")
- reveal_type(MyModel().notnulltext) # N: Revealed type is "builtins.str*"
+ reveal_type(MyModel().notnulltext) # N: Revealed type is "builtins.str"
installed_apps:
- myapp
files:
@@ -133,7 +133,7 @@
- case: fields_inside_mixins_used_in_model_subclasses_resolved_as_primitives
main: |
from myapp.models import MyModel, AuthMixin
- reveal_type(MyModel().username) # N: Revealed type is "builtins.str*"
+ reveal_type(MyModel().username) # N: Revealed type is "builtins.str"
installed_apps:
- myapp
files:
@@ -156,10 +156,10 @@
class Book(models.Model):
published = cast(models.Field[Year, Year], models.IntegerField())
book = Book()
- reveal_type(book.published) # N: Revealed type is "main.Year*"
+ reveal_type(book.published) # N: Revealed type is "main.Year"
book.published = 2006 # E: Incompatible types in assignment (expression has type "int", variable has type "Year")
book.published = Year(2006)
- reveal_type(book.published) # N: Revealed type is "main.Year*"
+ reveal_type(book.published) # N: Revealed type is "main.Year"
def accepts_int(arg: int) -> None: ...
accepts_int(book.published)
@@ -179,4 +179,4 @@
small = models.SmallAutoField(primary_key=True)
obj = MyModel()
- reveal_type(obj.small) # N: Revealed type is "builtins.int*"
+ reveal_type(obj.small) # N: Revealed type is "builtins.int"
diff --git a/tests/typecheck/fields/test_nullable.yml b/tests/typecheck/fields/test_nullable.yml
index f3ddaaa91..16ccc3381 100644
--- a/tests/typecheck/fields/test_nullable.yml
+++ b/tests/typecheck/fields/test_nullable.yml
@@ -34,7 +34,7 @@
- case: nullable_field_with_strict_optional_true
main: |
from myapp.models import MyModel
- reveal_type(MyModel().text) # N: Revealed type is "builtins.str*"
+ reveal_type(MyModel().text) # N: Revealed type is "builtins.str"
reveal_type(MyModel().text_nullable) # N: Revealed type is "Union[builtins.str, None]"
MyModel().text = None # E: Incompatible types in assignment (expression has type "None", variable has type "Union[str, int, Combinable]")
MyModel().text_nullable = None
diff --git a/tests/typecheck/fields/test_postgres_fields.yml b/tests/typecheck/fields/test_postgres_fields.yml
index 7d94fc04b..ebaf4e1a2 100644
--- a/tests/typecheck/fields/test_postgres_fields.yml
+++ b/tests/typecheck/fields/test_postgres_fields.yml
@@ -2,7 +2,7 @@
main: |
from myapp.models import User
user = User(array=[])
- reveal_type(user.array) # N: Revealed type is "builtins.list*[Any]"
+ reveal_type(user.array) # N: Revealed type is "builtins.list[Any]"
installed_apps:
- myapp
files:
@@ -19,8 +19,8 @@
main: |
from myapp.models import User
user = User()
- reveal_type(user.members) # N: Revealed type is "builtins.list*[builtins.int]"
- reveal_type(user.members_as_text) # N: Revealed type is "builtins.list*[builtins.str]"
+ reveal_type(user.members) # N: Revealed type is "builtins.list[builtins.int]"
+ reveal_type(user.members_as_text) # N: Revealed type is "builtins.list[builtins.str]"
installed_apps:
- myapp
files:
diff --git a/tests/typecheck/fields/test_related.yml b/tests/typecheck/fields/test_related.yml
index 439820c5c..5f5cac65f 100644
--- a/tests/typecheck/fields/test_related.yml
+++ b/tests/typecheck/fields/test_related.yml
@@ -2,7 +2,7 @@
main: |
from myapp.models import Book, Publisher
book = Book()
- reveal_type(book.publisher) # N: Revealed type is "myapp.models.Publisher*"
+ reveal_type(book.publisher) # N: Revealed type is "myapp.models.Publisher"
publisher = Publisher()
reveal_type(publisher.books) # N: Revealed type is "django.db.models.manager.RelatedManager[myapp.models.Book]"
installed_apps:
@@ -22,8 +22,8 @@
main: |
from myapp.models import Book
book = Book()
- reveal_type(book.publisher_id) # N: Revealed type is "builtins.int*"
- reveal_type(book.owner_id) # N: Revealed type is "builtins.int*"
+ reveal_type(book.publisher_id) # N: Revealed type is "builtins.int"
+ reveal_type(book.owner_id) # N: Revealed type is "builtins.int"
installed_apps:
- django.contrib.auth
- myapp
@@ -42,8 +42,8 @@
main: |
from myapp.models import Book, Publisher
book = Book()
- reveal_type(book.publisher) # N: Revealed type is "myapp.models.Publisher*"
- reveal_type(book.publisher2) # N: Revealed type is "myapp.models.Publisher*"
+ reveal_type(book.publisher) # N: Revealed type is "myapp.models.Publisher"
+ reveal_type(book.publisher2) # N: Revealed type is "myapp.models.Publisher"
publisher = Publisher()
reveal_type(publisher.books) # N: Revealed type is "django.db.models.manager.RelatedManager[myapp.models.Book]"
@@ -66,7 +66,7 @@
main: |
from myapp2.models import Book
book = Book()
- reveal_type(book.publisher) # N: Revealed type is "myapp.models.Publisher*"
+ reveal_type(book.publisher) # N: Revealed type is "myapp.models.Publisher"
installed_apps:
- myapp
- myapp2
@@ -88,7 +88,7 @@
main: |
from myapp.models import User, Profile
reveal_type(User().profile) # N: Revealed type is "myapp.models.Profile"
- reveal_type(Profile().user) # N: Revealed type is "myapp.models.User*"
+ reveal_type(Profile().user) # N: Revealed type is "myapp.models.User"
installed_apps:
- myapp
files:
@@ -197,7 +197,7 @@
- case: models_imported_inside_init_file_one_to_one_field
main: |
from myapp2.models import Profile
- reveal_type(Profile().user) # N: Revealed type is "myapp.models.user.User*"
+ reveal_type(Profile().user) # N: Revealed type is "myapp.models.user.User"
reveal_type(Profile().user.profile) # N: Revealed type is "myapp2.models.Profile"
installed_apps:
- myapp
@@ -223,7 +223,7 @@
- case: models_triple_circular_reference
main: |
from myapp.models import App
- reveal_type(App().owner) # N: Revealed type is "myapp.models.user.User*"
+ reveal_type(App().owner) # N: Revealed type is "myapp.models.user.User"
reveal_type(App().owner.profile) # N: Revealed type is "myapp.models.profile.Profile"
installed_apps:
- myapp
@@ -253,7 +253,7 @@
- case: many_to_many_field_converts_to_queryset_of_model_type
main: |
from myapp.models import App, Member
- reveal_type(Member().apps) # N: Revealed type is "django.db.models.manager.RelatedManager*[myapp.models.App]"
+ reveal_type(Member().apps) # N: Revealed type is "django.db.models.manager.RelatedManager[myapp.models.App]"
reveal_type(App().members) # N: Revealed type is "django.db.models.manager.RelatedManager[myapp.models.Member]"
installed_apps:
- myapp
@@ -270,7 +270,7 @@
- case: many_to_many_works_with_string_if_imported
main: |
from myapp.models import Member
- reveal_type(Member().apps) # N: Revealed type is "django.db.models.manager.RelatedManager*[myapp2.models.App]"
+ reveal_type(Member().apps) # N: Revealed type is "django.db.models.manager.RelatedManager[myapp2.models.App]"
installed_apps:
- myapp
- myapp2
@@ -291,7 +291,7 @@
- case: foreign_key_with_self
main: |
from myapp.models import User
- reveal_type(User().parent) # N: Revealed type is "myapp.models.User*"
+ reveal_type(User().parent) # N: Revealed type is "myapp.models.User"
installed_apps:
- myapp
files:
@@ -305,7 +305,7 @@
- case: many_to_many_with_self
main: |
from myapp.models import User
- reveal_type(User().friends) # N: Revealed type is "django.db.models.manager.RelatedManager*[myapp.models.User]"
+ reveal_type(User().friends) # N: Revealed type is "django.db.models.manager.RelatedManager[myapp.models.User]"
installed_apps:
- myapp
files:
@@ -354,14 +354,14 @@
import datetime
from myapp.models import Book, Book2
- reveal_type(Book().publisher_id) # N: Revealed type is "builtins.str*"
+ reveal_type(Book().publisher_id) # N: Revealed type is "builtins.str"
Book(publisher_id=1)
Book(publisher_id='hello')
Book(publisher_id=datetime.datetime.now()) # E: Incompatible type for "publisher_id" of "Book" (got "datetime", expected "Union[str, int, Combinable]")
Book.objects.create(publisher_id=1)
Book.objects.create(publisher_id='hello')
- reveal_type(Book2().publisher_id) # N: Revealed type is "builtins.int*"
+ reveal_type(Book2().publisher_id) # N: Revealed type is "builtins.int"
Book2(publisher_id=1)
Book2(publisher_id=[]) # E: Incompatible type for "publisher_id" of "Book2" (got "List[Any]", expected "Union[float, int, str, Combinable]")
Book2.objects.create(publisher_id=1)
@@ -387,7 +387,7 @@
- case: if_model_is_defined_as_name_of_the_class_look_for_it_in_the_same_app
main: |
from myapp.models import Book
- reveal_type(Book().publisher) # N: Revealed type is "myapp.models.publisher.Publisher*"
+ reveal_type(Book().publisher) # N: Revealed type is "myapp.models.publisher.Publisher"
installed_apps:
- myapp
files:
@@ -434,7 +434,7 @@
main: |
from myapp.models import Book, Publisher
book = Book()
- reveal_type(book.publisher) # N: Revealed type is "myapp.models.Publisher*"
+ reveal_type(book.publisher) # N: Revealed type is "myapp.models.Publisher"
publisher = Publisher()
reveal_type(publisher.books)
@@ -461,7 +461,7 @@
main: |
from myapp.models import Book
book = Book()
- reveal_type(book.publisher) # N: Revealed type is "myapp.models.Publisher*"
+ reveal_type(book.publisher) # N: Revealed type is "myapp.models.Publisher"
custom_settings: |
INSTALLED_APPS = ('django.contrib.contenttypes', 'myapp')
BOOK_RELATED_MODEL = 'myapp.Publisher'
@@ -481,7 +481,7 @@
- case: foreign_key_with_custom_app_name
main: |
from myapp.models import MyMain
- reveal_type(MyMain().user) # N: Revealed type is "myapp2.models.MyUser*"
+ reveal_type(MyMain().user) # N: Revealed type is "myapp2.models.MyUser"
installed_apps:
- myapp
- myapp2.apps.MyApp2Config
@@ -509,7 +509,7 @@
- case: related_field_to_extracted_from_function
main: |
from myapp.models import Profile
- reveal_type(Profile().user) # N: Revealed type is "myapp.models.User*"
+ reveal_type(Profile().user) # N: Revealed type is "myapp.models.User"
installed_apps:
- myapp
files:
@@ -576,8 +576,8 @@
- case: test_foreign_key_from_superclass_inherits_correctly
main: |
from myapp.models import MyUser, Book, Article, LibraryEntity
- reveal_type(Book().registered_by_user) # N: Revealed type is "myapp.models.MyUser*"
- reveal_type(Article().registered_by_user) # N: Revealed type is "myapp.models.MyUser*"
+ reveal_type(Book().registered_by_user) # N: Revealed type is "myapp.models.MyUser"
+ reveal_type(Article().registered_by_user) # N: Revealed type is "myapp.models.MyUser"
user = MyUser()
reveal_type(user.book_set) # N: Revealed type is "django.db.models.manager.RelatedManager[myapp.models.Book]"
@@ -604,16 +604,16 @@
- case: test_foreign_key_from_superclass_inherits_correctly_when_also_inheriting_manager
main: |
from myapp.models import MyUser, Book, Article, LibraryEntity
- reveal_type(Book().registered_by_user) # N: Revealed type is "myapp.models.MyUser*"
- reveal_type(Article().registered_by_user) # N: Revealed type is "myapp.models.MyUser*"
+ reveal_type(Book().registered_by_user) # N: Revealed type is "myapp.models.MyUser"
+ reveal_type(Article().registered_by_user) # N: Revealed type is "myapp.models.MyUser"
user = MyUser()
reveal_type(user.book_set) # N: Revealed type is "myapp.models.Book_RelatedManager"
reveal_type(user.article_set) # N: Revealed type is "myapp.models.Article_RelatedManager"
- reveal_type(user.book_set.add) # N: Revealed type is "def (*objs: Union[myapp.models.Book*, builtins.int], *, bulk: builtins.bool =)"
- reveal_type(user.article_set.add) # N: Revealed type is "def (*objs: Union[myapp.models.Article*, builtins.int], *, bulk: builtins.bool =)"
- reveal_type(user.book_set.filter) # N: Revealed type is "def (*args: Any, **kwargs: Any) -> myapp.models.LibraryEntityQuerySet[myapp.models.Book*]"
- reveal_type(user.article_set.filter) # N: Revealed type is "def (*args: Any, **kwargs: Any) -> myapp.models.LibraryEntityQuerySet[myapp.models.Article*]"
+ reveal_type(user.book_set.add) # N: Revealed type is "def (*objs: Union[myapp.models.Book, builtins.int], *, bulk: builtins.bool =)"
+ reveal_type(user.article_set.add) # N: Revealed type is "def (*objs: Union[myapp.models.Article, builtins.int], *, bulk: builtins.bool =)"
+ reveal_type(user.book_set.filter) # N: Revealed type is "def (*args: Any, **kwargs: Any) -> myapp.models.LibraryEntityQuerySet[myapp.models.Book]"
+ reveal_type(user.article_set.filter) # N: Revealed type is "def (*args: Any, **kwargs: Any) -> myapp.models.LibraryEntityQuerySet[myapp.models.Article]"
reveal_type(user.book_set.queryset_method()) # N: Revealed type is "builtins.int"
reveal_type(user.article_set.queryset_method()) # N: Revealed type is "builtins.int"
installed_apps:
@@ -665,8 +665,8 @@
- case: resolve_primary_keys_for_foreign_keys_with_abstract_self_model
main: |
from myapp.models import User
- reveal_type(User().parent) # N: Revealed type is "myapp.models.User*"
- reveal_type(User().parent_id) # N: Revealed type is "builtins.int*"
+ reveal_type(User().parent) # N: Revealed type is "myapp.models.User"
+ reveal_type(User().parent_id) # N: Revealed type is "builtins.int"
reveal_type(User().parent2) # N: Revealed type is "Union[myapp.models.User, None]"
reveal_type(User().parent2_id) # N: Revealed type is "Union[builtins.int, None]"
@@ -690,11 +690,11 @@
main: |
from myapp.models import User, Order, Product
reveal_type(User().orders) # N: Revealed type is "myapp.models.Order_RelatedManager"
- reveal_type(User().orders.get()) # N: Revealed type is "myapp.models.Order*"
+ reveal_type(User().orders.get()) # N: Revealed type is "myapp.models.Order"
reveal_type(User().orders.manager_method()) # N: Revealed type is "builtins.int"
reveal_type(Product.objects.queryset_method()) # N: Revealed type is "builtins.int"
reveal_type(Order().products) # N: Revealed type is "myapp.models.Product_RelatedManager"
- reveal_type(Order().products.get()) # N: Revealed type is "myapp.models.Product*"
+ reveal_type(Order().products.get()) # N: Revealed type is "myapp.models.Product"
reveal_type(Order().products.queryset_method()) # N: Revealed type is "builtins.int"
if 1 == 2:
manager = User().products
@@ -731,11 +731,11 @@
from myapp.models.user import User
reveal_type(Store().purchases) # N: Revealed type is "myapp.models.purchase.Purchase_RelatedManager"
reveal_type(Store().purchases.queryset_method()) # N: Revealed type is "myapp.models.querysets.PurchaseQuerySet"
- reveal_type(Store().purchases.filter()) # N: Revealed type is "myapp.models.querysets.PurchaseQuerySet[myapp.models.purchase.Purchase*]"
+ reveal_type(Store().purchases.filter()) # N: Revealed type is "myapp.models.querysets.PurchaseQuerySet[myapp.models.purchase.Purchase]"
reveal_type(Store().purchases.filter().queryset_method()) # N: Revealed type is "myapp.models.querysets.PurchaseQuerySet"
reveal_type(User().purchases) # N: Revealed type is "myapp.models.purchase.Purchase_RelatedManager"
reveal_type(User().purchases.queryset_method()) # N: Revealed type is "myapp.models.querysets.PurchaseQuerySet"
- reveal_type(User().purchases.filter()) # N: Revealed type is "myapp.models.querysets.PurchaseQuerySet[myapp.models.purchase.Purchase*]"
+ reveal_type(User().purchases.filter()) # N: Revealed type is "myapp.models.querysets.PurchaseQuerySet[myapp.models.purchase.Purchase]"
reveal_type(User().purchases.filter().queryset_method()) # N: Revealed type is "myapp.models.querysets.PurchaseQuerySet"
installed_apps:
- myapp
diff --git a/tests/typecheck/managers/querysets/test_annotate.yml b/tests/typecheck/managers/querysets/test_annotate.yml
index ccfecf63a..adaafd975 100644
--- a/tests/typecheck/managers/querysets/test_annotate.yml
+++ b/tests/typecheck/managers/querysets/test_annotate.yml
@@ -122,10 +122,10 @@
reveal_type(qs) # N: Revealed type is "django.db.models.query._QuerySet[django_stubs_ext.WithAnnotations[myapp__models__User, TypedDict({'foo': Any})], django_stubs_ext.WithAnnotations[myapp__models__User, TypedDict({'foo': Any})]]"
annotated = qs.get()
- reveal_type(annotated) # N: Revealed type is "django_stubs_ext.WithAnnotations[myapp__models__User, TypedDict({'foo': Any})]*"
+ reveal_type(annotated) # N: Revealed type is "django_stubs_ext.WithAnnotations[myapp__models__User, TypedDict({'foo': Any})]"
reveal_type(annotated.foo) # N: Revealed type is "Any"
print(annotated.bar) # E: "WithAnnotations[myapp__models__User, TypedDict({'foo': Any})]" has no attribute "bar"
- reveal_type(annotated.username) # N: Revealed type is "builtins.str*"
+ reveal_type(annotated.username) # N: Revealed type is "builtins.str"
installed_apps:
- myapp
@@ -196,10 +196,10 @@
qs = User.objects.annotate(foo=F('id'))
qs = qs.annotate(bar=F('id'))
annotated = qs.get()
- reveal_type(annotated) # N: Revealed type is "django_stubs_ext.WithAnnotations[myapp__models__User, TypedDict({'foo': Any, 'bar': Any})]*"
+ reveal_type(annotated) # N: Revealed type is "django_stubs_ext.WithAnnotations[myapp__models__User, TypedDict({'foo': Any, 'bar': Any})]"
reveal_type(annotated.foo) # N: Revealed type is "Any"
reveal_type(annotated.bar) # N: Revealed type is "Any"
- reveal_type(annotated.username) # N: Revealed type is "builtins.str*"
+ reveal_type(annotated.username) # N: Revealed type is "builtins.str"
installed_apps:
- myapp
files:
@@ -280,19 +280,19 @@
values_list_flat_known = Blog.objects.annotate(foo=F('id')).values_list('text', flat=True).get()
# Even though it's annotated, we still know the lookup's type.
- reveal_type(values_list_flat_known) # N: Revealed type is "builtins.str*"
+ reveal_type(values_list_flat_known) # N: Revealed type is "builtins.str"
values_list_flat_unknown = Blog.objects.annotate(foo=F('id')).values_list('foo', flat=True).get()
# We don't know the type of an unknown lookup
reveal_type(values_list_flat_unknown) # N: Revealed type is "Any"
values_no_params = Blog.objects.annotate(foo=F('id')).values().get()
- reveal_type(values_no_params) # N: Revealed type is "builtins.dict*[builtins.str, Any]"
+ reveal_type(values_no_params) # N: Revealed type is "builtins.dict[builtins.str, Any]"
values_list_no_params = Blog.objects.annotate(foo=F('id')).values_list().get()
- reveal_type(values_list_no_params) # N: Revealed type is "builtins.tuple*[Any, ...]"
+ reveal_type(values_list_no_params) # N: Revealed type is "builtins.tuple[Any, ...]"
values_list_flat_no_params = Blog.objects.annotate(foo=F('id')).values_list(flat=True).get()
- reveal_type(values_list_flat_no_params) # N: Revealed type is "builtins.int*"
+ reveal_type(values_list_flat_no_params) # N: Revealed type is "builtins.int"
values_list_named_no_params = Blog.objects.annotate(foo=F('id')).values_list(named=True).get()
reveal_type(values_list_named_no_params.foo) # N: Revealed type is "Any"
@@ -324,13 +324,13 @@
before_values_no_params = Blog.objects.values().annotate(foo=F('id')).get()
- reveal_type(before_values_no_params) # N: Revealed type is "builtins.dict*[builtins.str, Any]"
+ reveal_type(before_values_no_params) # N: Revealed type is "builtins.dict[builtins.str, Any]"
before_values_list_no_params = Blog.objects.values_list().annotate(foo=F('id')).get()
- reveal_type(before_values_list_no_params) # N: Revealed type is "builtins.tuple*[Any, ...]"
+ reveal_type(before_values_list_no_params) # N: Revealed type is "builtins.tuple[Any, ...]"
before_values_list_flat_no_params = Blog.objects.values_list(flat=True).annotate(foo=F('id')).get()
- reveal_type(before_values_list_flat_no_params) # N: Revealed type is "builtins.int*"
+ reveal_type(before_values_list_flat_no_params) # N: Revealed type is "builtins.int"
before_values_list_named_no_params = Blog.objects.values_list(named=True).annotate(foo=F('id')).get()
reveal_type(before_values_list_named_no_params.foo) # N: Revealed type is "Any"
diff --git a/tests/typecheck/managers/querysets/test_basic_methods.yml b/tests/typecheck/managers/querysets/test_basic_methods.yml
index 301a07a05..c008415e2 100644
--- a/tests/typecheck/managers/querysets/test_basic_methods.yml
+++ b/tests/typecheck/managers/querysets/test_basic_methods.yml
@@ -4,24 +4,24 @@
from myapp.models import Blog
qs = Blog.objects.all()
- reveal_type(qs) # N: Revealed type is "django.db.models.query._QuerySet[myapp.models.Blog*, myapp.models.Blog*]"
- reveal_type(qs.get(id=1)) # N: Revealed type is "myapp.models.Blog*"
- reveal_type(iter(qs)) # N: Revealed type is "typing.Iterator*[myapp.models.Blog*]"
- reveal_type(qs.iterator()) # N: Revealed type is "typing.Iterator[myapp.models.Blog*]"
- reveal_type(qs.first()) # N: Revealed type is "Union[myapp.models.Blog*, None]"
- reveal_type(qs.earliest()) # N: Revealed type is "myapp.models.Blog*"
- reveal_type(qs[0]) # N: Revealed type is "myapp.models.Blog*"
+ reveal_type(qs) # N: Revealed type is "django.db.models.query._QuerySet[myapp.models.Blog, myapp.models.Blog]"
+ reveal_type(qs.get(id=1)) # N: Revealed type is "myapp.models.Blog"
+ reveal_type(iter(qs)) # N: Revealed type is "typing.Iterator[myapp.models.Blog]"
+ reveal_type(qs.iterator()) # N: Revealed type is "typing.Iterator[myapp.models.Blog]"
+ reveal_type(qs.first()) # N: Revealed type is "Union[myapp.models.Blog, None]"
+ reveal_type(qs.earliest()) # N: Revealed type is "myapp.models.Blog"
+ reveal_type(qs[0]) # N: Revealed type is "myapp.models.Blog"
reveal_type(qs[:9]) # N: Revealed type is "django.db.models.query._QuerySet[myapp.models.Blog, myapp.models.Blog]"
- reveal_type(qs.create()) # N: Revealed type is "myapp.models.Blog*"
- reveal_type(qs.get_or_create()) # N: Revealed type is "Tuple[myapp.models.Blog*, builtins.bool]"
+ reveal_type(qs.create()) # N: Revealed type is "myapp.models.Blog"
+ reveal_type(qs.get_or_create()) # N: Revealed type is "Tuple[myapp.models.Blog, builtins.bool]"
reveal_type(qs.exists()) # N: Revealed type is "builtins.bool"
reveal_type(qs.none()) # N: Revealed type is "django.db.models.query._QuerySet[myapp.models.Blog, myapp.models.Blog]"
- reveal_type(qs.update_or_create()) # N: Revealed type is "Tuple[myapp.models.Blog*, builtins.bool]"
+ reveal_type(qs.update_or_create()) # N: Revealed type is "Tuple[myapp.models.Blog, builtins.bool]"
reveal_type(qs.explain()) # N: Revealed type is "builtins.str"
reveal_type(qs.raw(qs.explain())) # N: Revealed type is "django.db.models.query.RawQuerySet[Any]"
# .dates / .datetimes
- reveal_type(Blog.objects.dates("created_at", "day")) # N: Revealed type is "django.db.models.query._QuerySet[myapp.models.Blog*, datetime.date]"
- reveal_type(Blog.objects.datetimes("created_at", "day")) # N: Revealed type is "django.db.models.query._QuerySet[myapp.models.Blog*, datetime.datetime]"
+ reveal_type(Blog.objects.dates("created_at", "day")) # N: Revealed type is "django.db.models.query._QuerySet[myapp.models.Blog, datetime.date]"
+ reveal_type(Blog.objects.datetimes("created_at", "day")) # N: Revealed type is "django.db.models.query._QuerySet[myapp.models.Blog, datetime.datetime]"
# AND-ing QuerySets
reveal_type(Blog.objects.all() & Blog.objects.all()) # N: Revealed type is "django.db.models.query._QuerySet[myapp.models.Blog, myapp.models.Blog]"
@@ -29,9 +29,9 @@
# bulk methods
reveal_type(qs.count()) # N: Revealed type is "builtins.int"
reveal_type(qs.update(created_at=timezone.now())) # N: Revealed type is "builtins.int"
- reveal_type(qs.in_bulk()) # N: Revealed type is "builtins.dict[Any, myapp.models.Blog*]"
+ reveal_type(qs.in_bulk()) # N: Revealed type is "builtins.dict[Any, myapp.models.Blog]"
reveal_type(qs.bulk_update(list(qs), fields=["created_at"])) # N: Revealed type is "builtins.int"
- reveal_type(qs.bulk_create([])) # N: Revealed type is "builtins.list[myapp.models.Blog*]"
+ reveal_type(qs.bulk_create([])) # N: Revealed type is "builtins.list[myapp.models.Blog]"
reveal_type(qs.delete()) # N: Revealed type is "Tuple[builtins.int, builtins.dict[builtins.str, builtins.int]]"
installed_apps:
- myapp
diff --git a/tests/typecheck/managers/querysets/test_from_queryset.yml b/tests/typecheck/managers/querysets/test_from_queryset.yml
index 07f921569..caa8249d4 100644
--- a/tests/typecheck/managers/querysets/test_from_queryset.yml
+++ b/tests/typecheck/managers/querysets/test_from_queryset.yml
@@ -2,10 +2,10 @@
main: |
from myapp.models import MyModel
reveal_type(MyModel().objects) # N: Revealed type is "myapp.models.NewManager[myapp.models.MyModel]"
- reveal_type(MyModel().objects.get()) # N: Revealed type is "myapp.models.MyModel*"
+ reveal_type(MyModel().objects.get()) # N: Revealed type is "myapp.models.MyModel"
reveal_type(MyModel().objects.queryset_method()) # N: Revealed type is "builtins.str"
reveal_type(MyModel.objects.filter(id=1).queryset_method()) # N: Revealed type is "builtins.str"
- reveal_type(MyModel.objects.filter(id=1)) # N: Revealed type is "myapp.models.ModelQuerySet[myapp.models.MyModel*]"
+ reveal_type(MyModel.objects.filter(id=1)) # N: Revealed type is "myapp.models.ModelQuerySet[myapp.models.MyModel]"
installed_apps:
- myapp
files:
@@ -27,13 +27,13 @@
from myapp.models import MyModel
reveal_type(MyModel.objects) # N: Revealed type is "myapp.models.NewManager[myapp.models.MyModel]"
reveal_type(MyModel.objects) # N: Revealed type is "myapp.models.NewManager[myapp.models.MyModel]"
- reveal_type(MyModel.objects.get()) # N: Revealed type is "myapp.models.MyModel*"
+ reveal_type(MyModel.objects.get()) # N: Revealed type is "myapp.models.MyModel"
reveal_type(MyModel.objects.queryset_method()) # N: Revealed type is "myapp.querysets.ModelQuerySet"
reveal_type(MyModel.objects.queryset_method_2()) # N: Revealed type is "typing.Iterable[myapp.querysets.Custom]"
reveal_type(MyModel.objects.queryset_method_3()) # N: Revealed type is "builtins.str"
reveal_type(MyModel.objects.queryset_method_4([])) # N: Revealed type is "None"
reveal_type(MyModel.objects.filter(id=1).queryset_method()) # N: Revealed type is "myapp.querysets.ModelQuerySet"
- reveal_type(MyModel.objects.filter(id=1)) # N: Revealed type is "myapp.querysets.ModelQuerySet[myapp.models.MyModel*]"
+ reveal_type(MyModel.objects.filter(id=1)) # N: Revealed type is "myapp.querysets.ModelQuerySet[myapp.models.MyModel]"
installed_apps:
- myapp
files:
@@ -75,7 +75,7 @@
main: |
from myapp.models import MyModel
reveal_type(MyModel().objects) # N: Revealed type is "myapp.models.NewManager[myapp.models.MyModel]"
- reveal_type(MyModel().objects.get()) # N: Revealed type is "myapp.models.MyModel*"
+ reveal_type(MyModel().objects.get()) # N: Revealed type is "myapp.models.MyModel"
reveal_type(MyModel().objects.queryset_method()) # N: Revealed type is "builtins.str"
installed_apps:
- myapp
@@ -149,7 +149,7 @@
main: |
from myapp.models import MyModel
reveal_type(MyModel().objects) # N: Revealed type is "myapp.models.NewManager[myapp.models.MyModel]"
- reveal_type(MyModel().objects.get()) # N: Revealed type is "myapp.models.MyModel*"
+ reveal_type(MyModel().objects.get()) # N: Revealed type is "myapp.models.MyModel"
reveal_type(MyModel().objects.queryset_method()) # N: Revealed type is "builtins.str"
installed_apps:
- myapp
@@ -173,7 +173,7 @@
main: |
from myapp.models import MyModel
reveal_type(MyModel().objects) # N: Revealed type is "myapp.managers.NewManager[myapp.models.MyModel]"
- reveal_type(MyModel().objects.get()) # N: Revealed type is "myapp.models.MyModel*"
+ reveal_type(MyModel().objects.get()) # N: Revealed type is "myapp.models.MyModel"
reveal_type(MyModel().objects.queryset_method) # N: Revealed type is "def (param: Union[builtins.str, None] =) -> Union[builtins.str, None]"
reveal_type(MyModel().objects.queryset_method('str')) # N: Revealed type is "Union[builtins.str, None]"
installed_apps:
@@ -203,7 +203,7 @@
main: |
from myapp.models import MyModel
reveal_type(MyModel().objects) # N: Revealed type is "myapp.managers.NewManager[myapp.models.MyModel]"
- reveal_type(MyModel().objects.get()) # N: Revealed type is "myapp.models.MyModel*"
+ reveal_type(MyModel().objects.get()) # N: Revealed type is "myapp.models.MyModel"
reveal_type(MyModel().objects.base_queryset_method) # N: Revealed type is "def (param: Union[builtins.int, builtins.str]) -> <nothing>"
reveal_type(MyModel().objects.base_queryset_method(2)) # N: Revealed type is "<nothing>"
installed_apps:
@@ -350,25 +350,25 @@
- case: from_queryset_includes_methods_returning_queryset
main: |
from myapp.models import MyModel
- reveal_type(MyModel.objects.none) # N: Revealed type is "def () -> myapp.models.MyQuerySet[myapp.models.MyModel*]"
- reveal_type(MyModel.objects.all) # N: Revealed type is "def () -> myapp.models.MyQuerySet[myapp.models.MyModel*]"
- reveal_type(MyModel.objects.filter) # N: Revealed type is "def (*args: Any, **kwargs: Any) -> myapp.models.MyQuerySet[myapp.models.MyModel*]"
- reveal_type(MyModel.objects.exclude) # N: Revealed type is "def (*args: Any, **kwargs: Any) -> myapp.models.MyQuerySet[myapp.models.MyModel*]"
- reveal_type(MyModel.objects.complex_filter) # N: Revealed type is "def (filter_obj: Any) -> myapp.models.MyQuerySet[myapp.models.MyModel*]"
- reveal_type(MyModel.objects.union) # N: Revealed type is "def (*other_qs: Any, *, all: builtins.bool =) -> myapp.models.MyQuerySet[myapp.models.MyModel*]"
- reveal_type(MyModel.objects.intersection) # N: Revealed type is "def (*other_qs: Any) -> myapp.models.MyQuerySet[myapp.models.MyModel*]"
- reveal_type(MyModel.objects.difference) # N: Revealed type is "def (*other_qs: Any) -> myapp.models.MyQuerySet[myapp.models.MyModel*]"
- reveal_type(MyModel.objects.select_for_update) # N: Revealed type is "def (nowait: builtins.bool =, skip_locked: builtins.bool =, of: typing.Sequence[builtins.str] =, no_key: builtins.bool =) -> myapp.models.MyQuerySet[myapp.models.MyModel*]"
- reveal_type(MyModel.objects.select_related) # N: Revealed type is "def (*fields: Any) -> myapp.models.MyQuerySet[myapp.models.MyModel*]"
- reveal_type(MyModel.objects.prefetch_related) # N: Revealed type is "def (*lookups: Any) -> myapp.models.MyQuerySet[myapp.models.MyModel*]"
- reveal_type(MyModel.objects.annotate) # N: Revealed type is "def (*args: Any, **kwargs: Any) -> myapp.models.MyQuerySet[myapp.models.MyModel*]"
- reveal_type(MyModel.objects.alias) # N: Revealed type is "def (*args: Any, **kwargs: Any) -> myapp.models.MyQuerySet[myapp.models.MyModel*]"
- reveal_type(MyModel.objects.order_by) # N: Revealed type is "def (*field_names: Any) -> myapp.models.MyQuerySet[myapp.models.MyModel*]"
- reveal_type(MyModel.objects.distinct) # N: Revealed type is "def (*field_names: Any) -> myapp.models.MyQuerySet[myapp.models.MyModel*]"
- reveal_type(MyModel.objects.reverse) # N: Revealed type is "def () -> myapp.models.MyQuerySet[myapp.models.MyModel*]"
- reveal_type(MyModel.objects.defer) # N: Revealed type is "def (*fields: Any) -> myapp.models.MyQuerySet[myapp.models.MyModel*]"
- reveal_type(MyModel.objects.only) # N: Revealed type is "def (*fields: Any) -> myapp.models.MyQuerySet[myapp.models.MyModel*]"
- reveal_type(MyModel.objects.using) # N: Revealed type is "def (alias: Union[builtins.str, None]) -> myapp.models.MyQuerySet[myapp.models.MyModel*]"
+ reveal_type(MyModel.objects.none) # N: Revealed type is "def () -> myapp.models.MyQuerySet[myapp.models.MyModel]"
+ reveal_type(MyModel.objects.all) # N: Revealed type is "def () -> myapp.models.MyQuerySet[myapp.models.MyModel]"
+ reveal_type(MyModel.objects.filter) # N: Revealed type is "def (*args: Any, **kwargs: Any) -> myapp.models.MyQuerySet[myapp.models.MyModel]"
+ reveal_type(MyModel.objects.exclude) # N: Revealed type is "def (*args: Any, **kwargs: Any) -> myapp.models.MyQuerySet[myapp.models.MyModel]"
+ reveal_type(MyModel.objects.complex_filter) # N: Revealed type is "def (filter_obj: Any) -> myapp.models.MyQuerySet[myapp.models.MyModel]"
+ reveal_type(MyModel.objects.union) # N: Revealed type is "def (*other_qs: Any, *, all: builtins.bool =) -> myapp.models.MyQuerySet[myapp.models.MyModel]"
+ reveal_type(MyModel.objects.intersection) # N: Revealed type is "def (*other_qs: Any) -> myapp.models.MyQuerySet[myapp.models.MyModel]"
+ reveal_type(MyModel.objects.difference) # N: Revealed type is "def (*other_qs: Any) -> myapp.models.MyQuerySet[myapp.models.MyModel]"
+ reveal_type(MyModel.objects.select_for_update) # N: Revealed type is "def (nowait: builtins.bool =, skip_locked: builtins.bool =, of: typing.Sequence[builtins.str] =, no_key: builtins.bool =) -> myapp.models.MyQuerySet[myapp.models.MyModel]"
+ reveal_type(MyModel.objects.select_related) # N: Revealed type is "def (*fields: Any) -> myapp.models.MyQuerySet[myapp.models.MyModel]"
+ reveal_type(MyModel.objects.prefetch_related) # N: Revealed type is "def (*lookups: Any) -> myapp.models.MyQuerySet[myapp.models.MyModel]"
+ reveal_type(MyModel.objects.annotate) # N: Revealed type is "def (*args: Any, **kwargs: Any) -> myapp.models.MyQuerySet[myapp.models.MyModel]"
+ reveal_type(MyModel.objects.alias) # N: Revealed type is "def (*args: Any, **kwargs: Any) -> myapp.models.MyQuerySet[myapp.models.MyModel]"
+ reveal_type(MyModel.objects.order_by) # N: Revealed type is "def (*field_names: Any) -> myapp.models.MyQuerySet[myapp.models.MyModel]"
+ reveal_type(MyModel.objects.distinct) # N: Revealed type is "def (*field_names: Any) -> myapp.models.MyQuerySet[myapp.models.MyModel]"
+ reveal_type(MyModel.objects.reverse) # N: Revealed type is "def () -> myapp.models.MyQuerySet[myapp.models.MyModel]"
+ reveal_type(MyModel.objects.defer) # N: Revealed type is "def (*fields: Any) -> myapp.models.MyQuerySet[myapp.models.MyModel]"
+ reveal_type(MyModel.objects.only) # N: Revealed type is "def (*fields: Any) -> myapp.models.MyQuerySet[myapp.models.MyModel]"
+ reveal_type(MyModel.objects.using) # N: Revealed type is "def (alias: Union[builtins.str, None]) -> myapp.models.MyQuerySet[myapp.models.MyModel]"
installed_apps:
- myapp
files:
diff --git a/tests/typecheck/managers/querysets/test_values_list.yml b/tests/typecheck/managers/querysets/test_values_list.yml
index e954ab964..0ac67638d 100644
--- a/tests/typecheck/managers/querysets/test_values_list.yml
+++ b/tests/typecheck/managers/querysets/test_values_list.yml
@@ -37,7 +37,7 @@
reveal_type(query.all().get()) # N: Revealed type is "Tuple[builtins.str]"
reveal_type(query.filter(age__gt=16).get()) # N: Revealed type is "Tuple[builtins.str]"
reveal_type(query.exclude(age__lte=16).get()) # N: Revealed type is "Tuple[builtins.str]"
- reveal_type(query.annotate(name_length=Length("name")).get()) # N: Revealed type is "builtins.tuple*[Any, ...]"
+ reveal_type(query.annotate(name_length=Length("name")).get()) # N: Revealed type is "builtins.tuple[Any, ...]"
installed_apps:
- myapp
files:
@@ -79,12 +79,12 @@
- case: values_list_flat_true_methods
main: |
from myapp.models import MyUser, MyUser2
- reveal_type(MyUser.objects.values_list('name', flat=True).get()) # N: Revealed type is "builtins.str*"
+ reveal_type(MyUser.objects.values_list('name', flat=True).get()) # N: Revealed type is "builtins.str"
reveal_type(MyUser.objects.values_list('name', 'age', flat=True).get())
# flat=True without specified fields returns primary key values
- reveal_type(MyUser.objects.values_list(flat=True)[0]) # N: Revealed type is "builtins.int*"
- reveal_type(MyUser2.objects.values_list(flat=True)[0]) # N: Revealed type is "builtins.str*"
+ reveal_type(MyUser.objects.values_list(flat=True)[0]) # N: Revealed type is "builtins.int"
+ reveal_type(MyUser2.objects.values_list(flat=True)[0]) # N: Revealed type is "builtins.str"
out: |
main:3: error: 'flat' is not valid when 'values_list' is called with more than one field
main:3: note: Revealed type is "Any"
@@ -217,7 +217,7 @@
reveal_type(Blog.objects.values_list('id', flat=True)) # N: Revealed type is "django.db.models.query._QuerySet[myapp.models.Blog, builtins.int]"
reveal_type(Blog.objects.values_list('publisher_id', flat=True)) # N: Revealed type is "django.db.models.query._QuerySet[myapp.models.Blog, builtins.int]"
# is Iterable[int]
- reveal_type(list(Blog.objects.values_list('id', flat=True))) # N: Revealed type is "builtins.list[builtins.int*]"
+ reveal_type(list(Blog.objects.values_list('id', flat=True))) # N: Revealed type is "builtins.list[builtins.int]"
installed_apps:
- myapp
files:
diff --git a/tests/typecheck/managers/test_managers.yml b/tests/typecheck/managers/test_managers.yml
index 9ee3eb388..2323b28ec 100644
--- a/tests/typecheck/managers/test_managers.yml
+++ b/tests/typecheck/managers/test_managers.yml
@@ -2,7 +2,7 @@
main: |
from myapp.models import User
reveal_type(User.objects) # N: Revealed type is "django.db.models.manager.Manager[myapp.models.User]"
- reveal_type(User.objects.get()) # N: Revealed type is "myapp.models.User*"
+ reveal_type(User.objects.get()) # N: Revealed type is "myapp.models.User"
installed_apps:
- myapp
files:
@@ -59,7 +59,7 @@
main: |
from myapp.models import Base, MyModel
base_instance = Base(MyModel)
- reveal_type(base_instance.model_cls._base_manager) # N: Revealed type is "django.db.models.manager.BaseManager[myapp.models.MyModel*]"
+ reveal_type(base_instance.model_cls._base_manager) # N: Revealed type is "django.db.models.manager.BaseManager[myapp.models.MyModel]"
installed_apps:
- myapp
files:
@@ -78,7 +78,7 @@
pass
class Child(Base[MyModel]):
def method(self) -> None:
- reveal_type(self.model_cls._base_manager) # N: Revealed type is "django.db.models.manager.BaseManager[myapp.models.MyModel*]"
+ reveal_type(self.model_cls._base_manager) # N: Revealed type is "django.db.models.manager.BaseManager[myapp.models.MyModel]"
- case: if_custom_manager_defined_it_is_set_to_default_manager
main: |
@@ -126,7 +126,7 @@
main: |
from myapp.models import MyUser
reveal_type(MyUser.objects) # N: Revealed type is "myapp.models.UserManager[myapp.models.MyUser]"
- reveal_type(MyUser.objects.get()) # N: Revealed type is "myapp.models.MyUser*"
+ reveal_type(MyUser.objects.get()) # N: Revealed type is "myapp.models.MyUser"
reveal_type(MyUser.objects.get_or_404()) # N: Revealed type is "myapp.models.MyUser"
installed_apps:
- myapp
@@ -222,10 +222,10 @@
main: |
from myapp.models import UnrelatedModel, MyModel
reveal_type(UnrelatedModel.objects) # N: Revealed type is "django.db.models.manager.Manager[myapp.models.UnrelatedModel]"
- reveal_type(UnrelatedModel.objects.first()) # N: Revealed type is "Union[myapp.models.UnrelatedModel*, None]"
+ reveal_type(UnrelatedModel.objects.first()) # N: Revealed type is "Union[myapp.models.UnrelatedModel, None]"
reveal_type(MyModel.objects) # N: Revealed type is "django.db.models.manager.Manager[myapp.models.MyModel]"
- reveal_type(MyModel.objects.first()) # N: Revealed type is "Union[myapp.models.MyModel*, None]"
+ reveal_type(MyModel.objects.first()) # N: Revealed type is "Union[myapp.models.MyModel, None]"
installed_apps:
- myapp
files:
@@ -243,10 +243,10 @@
main: |
from myapp.models import UnrelatedModel2, MyModel2
reveal_type(UnrelatedModel2.objects) # N: Revealed type is "django.db.models.manager.Manager[myapp.models.UnrelatedModel2]"
- reveal_type(UnrelatedModel2.objects.first()) # N: Revealed type is "Union[myapp.models.UnrelatedModel2*, None]"
+ reveal_type(UnrelatedModel2.objects.first()) # N: Revealed type is "Union[myapp.models.UnrelatedModel2, None]"
reveal_type(MyModel2.objects) # N: Revealed type is "django.db.models.manager.Manager[myapp.models.MyModel2]"
- reveal_type(MyModel2.objects.first()) # N: Revealed type is "Union[myapp.models.MyModel2*, None]"
+ reveal_type(MyModel2.objects.first()) # N: Revealed type is "Union[myapp.models.MyModel2, None]"
installed_apps:
- myapp
files:
@@ -264,10 +264,10 @@
main: |
from myapp.models import ParentOfMyModel3, MyModel3
reveal_type(ParentOfMyModel3.objects) # N: Revealed type is "django.db.models.manager.Manager[myapp.models.ParentOfMyModel3]"
- reveal_type(ParentOfMyModel3.objects.first()) # N: Revealed type is "Union[myapp.models.ParentOfMyModel3*, None]"
+ reveal_type(ParentOfMyModel3.objects.first()) # N: Revealed type is "Union[myapp.models.ParentOfMyModel3, None]"
reveal_type(MyModel3.objects) # N: Revealed type is "django.db.models.manager.Manager[myapp.models.MyModel3]"
- reveal_type(MyModel3.objects.first()) # N: Revealed type is "Union[myapp.models.MyModel3*, None]"
+ reveal_type(MyModel3.objects.first()) # N: Revealed type is "Union[myapp.models.MyModel3, None]"
installed_apps:
- myapp
files:
@@ -285,10 +285,10 @@
main: |
from myapp.models import ParentOfMyModel4, MyModel4
reveal_type(ParentOfMyModel4.objects) # N: Revealed type is "django.db.models.manager.Manager[myapp.models.ParentOfMyModel4]"
- reveal_type(ParentOfMyModel4.objects.first()) # N: Revealed type is "Union[myapp.models.ParentOfMyModel4*, None]"
+ reveal_type(ParentOfMyModel4.objects.first()) # N: Revealed type is "Union[myapp.models.ParentOfMyModel4, None]"
reveal_type(MyModel4.objects) # N: Revealed type is "django.db.models.manager.Manager[myapp.models.MyModel4]"
- reveal_type(MyModel4.objects.first()) # N: Revealed type is "Union[myapp.models.MyModel4*, None]"
+ reveal_type(MyModel4.objects.first()) # N: Revealed type is "Union[myapp.models.MyModel4, None]"
installed_apps:
- myapp
files:
@@ -333,15 +333,15 @@
main: |
from myapp.models import User
reveal_type(User.objects) # N: Revealed type is "myapp.models.User_MyManager2[myapp.models.User]"
- reveal_type(User.objects.select_related()) # N: Revealed type is "django.db.models.query._QuerySet[myapp.models.User*, myapp.models.User*]"
- reveal_type(User.objects.get()) # N: Revealed type is "myapp.models.User*"
+ reveal_type(User.objects.select_related()) # N: Revealed type is "django.db.models.query._QuerySet[myapp.models.User, myapp.models.User]"
+ reveal_type(User.objects.get()) # N: Revealed type is "myapp.models.User"
reveal_type(User.objects.get_instance()) # N: Revealed type is "builtins.int"
reveal_type(User.objects.get_instance_untyped('hello')) # N: Revealed type is "Any"
from myapp.models import ChildUser
reveal_type(ChildUser.objects) # N: Revealed type is "myapp.models.ChildUser_MyManager2[myapp.models.ChildUser]"
- reveal_type(ChildUser.objects.select_related()) # N: Revealed type is "django.db.models.query._QuerySet[myapp.models.ChildUser*, myapp.models.ChildUser*]"
- reveal_type(ChildUser.objects.get()) # N: Revealed type is "myapp.models.ChildUser*"
+ reveal_type(ChildUser.objects.select_related()) # N: Revealed type is "django.db.models.query._QuerySet[myapp.models.ChildUser, myapp.models.ChildUser]"
+ reveal_type(ChildUser.objects.get()) # N: Revealed type is "myapp.models.ChildUser"
reveal_type(ChildUser.objects.get_instance()) # N: Revealed type is "builtins.int"
reveal_type(ChildUser.objects.get_instance_untyped('hello')) # N: Revealed type is "Any"
installed_apps:
diff --git a/tests/typecheck/models/test_contrib_models.yml b/tests/typecheck/models/test_contrib_models.yml
index 94409e463..6a2838d34 100644
--- a/tests/typecheck/models/test_contrib_models.yml
+++ b/tests/typecheck/models/test_contrib_models.yml
@@ -1,14 +1,14 @@
- case: contrib_auth_model_fields
main: |
from django.contrib.auth.models import User
- reveal_type(User().username) # N: Revealed type is "builtins.str*"
- reveal_type(User().password) # N: Revealed type is "builtins.str*"
- reveal_type(User().first_name) # N: Revealed type is "builtins.str*"
- reveal_type(User().last_name) # N: Revealed type is "builtins.str*"
- reveal_type(User().email) # N: Revealed type is "builtins.str*"
+ reveal_type(User().username) # N: Revealed type is "builtins.str"
+ reveal_type(User().password) # N: Revealed type is "builtins.str"
+ reveal_type(User().first_name) # N: Revealed type is "builtins.str"
+ reveal_type(User().last_name) # N: Revealed type is "builtins.str"
+ reveal_type(User().email) # N: Revealed type is "builtins.str"
reveal_type(User().is_staff) # N: Revealed type is "builtins.bool"
reveal_type(User().is_active) # N: Revealed type is "builtins.bool"
- reveal_type(User().date_joined) # N: Revealed type is "datetime.datetime*"
+ reveal_type(User().date_joined) # N: Revealed type is "datetime.datetime"
reveal_type(User().last_login) # N: Revealed type is "Union[datetime.datetime, None]"
reveal_type(User().is_authenticated) # N: Revealed type is "Literal[True]"
reveal_type(User().is_anonymous) # N: Revealed type is "Literal[False]"
@@ -18,11 +18,11 @@
reveal_type(AnonymousUser().is_anonymous) # N: Revealed type is "Literal[True]"
from django.contrib.auth.models import Permission
- reveal_type(Permission().name) # N: Revealed type is "builtins.str*"
- reveal_type(Permission().codename) # N: Revealed type is "builtins.str*"
+ reveal_type(Permission().name) # N: Revealed type is "builtins.str"
+ reveal_type(Permission().codename) # N: Revealed type is "builtins.str"
from django.contrib.auth.models import PermissionsMixin
reveal_type(PermissionsMixin().is_superuser) # N: Revealed type is "builtins.bool"
from django.contrib.auth.models import Group
- reveal_type(Group().name) # N: Revealed type is "builtins.str*"
+ reveal_type(Group().name) # N: Revealed type is "builtins.str"
diff --git a/tests/typecheck/models/test_create.yml b/tests/typecheck/models/test_create.yml
index 3376adfde..2d6c1a2cc 100644
--- a/tests/typecheck/models/test_create.yml
+++ b/tests/typecheck/models/test_create.yml
@@ -19,9 +19,9 @@
main: |
from myapp.models import Child
c = Child.objects.create(name='Maxim', lastname='Maxim2')
- reveal_type(c.id) # N: Revealed type is "builtins.int*"
- reveal_type(c.name) # N: Revealed type is "builtins.str*"
- reveal_type(c.lastname) # N: Revealed type is "builtins.str*"
+ reveal_type(c.id) # N: Revealed type is "builtins.int"
+ reveal_type(c.name) # N: Revealed type is "builtins.str"
+ reveal_type(c.lastname) # N: Revealed type is "builtins.str"
installed_apps:
- myapp
files:
@@ -96,19 +96,19 @@
main: |
from myapp.models import MyModel
first = MyModel(id=None)
- reveal_type(first.id) # N: Revealed type is "builtins.int*"
+ reveal_type(first.id) # N: Revealed type is "builtins.int"
first = MyModel.objects.create(id=None)
- reveal_type(first.id) # N: Revealed type is "builtins.int*"
+ reveal_type(first.id) # N: Revealed type is "builtins.int"
first = MyModel()
first.id = None
- reveal_type(first.id) # N: Revealed type is "builtins.int*"
+ reveal_type(first.id) # N: Revealed type is "builtins.int"
from myapp.models import MyModel2
MyModel2(id=None) # E: Incompatible type for "id" of "MyModel2" (got "None", expected "Union[float, int, str, Combinable]")
MyModel2.objects.create(id=None) # E: Incompatible type for "id" of "MyModel2" (got "None", expected "Union[float, int, str, Combinable]")
second = MyModel2()
second.id = None # E: Incompatible types in assignment (expression has type "None", variable has type "Union[float, int, str, Combinable]")
- reveal_type(second.id) # N: Revealed type is "builtins.int*"
+ reveal_type(second.id) # N: Revealed type is "builtins.int"
# default set but no primary key doesn't allow None
from myapp.models import MyModel3
@@ -116,7 +116,7 @@
MyModel3.objects.create(default=None) # E: Incompatible type for "default" of "MyModel3" (got "None", expected "Union[float, int, str, Combinable]")
third = MyModel3()
third.default = None # E: Incompatible types in assignment (expression has type "None", variable has type "Union[float, int, str, Combinable]")
- reveal_type(third.default) # N: Revealed type is "builtins.int*"
+ reveal_type(third.default) # N: Revealed type is "builtins.int"
installed_apps:
- myapp
files:
diff --git a/tests/typecheck/models/test_inheritance.yml b/tests/typecheck/models/test_inheritance.yml
index bc04f5d1f..55e7e338c 100644
--- a/tests/typecheck/models/test_inheritance.yml
+++ b/tests/typecheck/models/test_inheritance.yml
@@ -69,7 +69,7 @@
- case: fields_recognized_if_base_model_is_subclass_of_models_model
main: |
from myapp.models import User
- reveal_type(User().username) # N: Revealed type is "builtins.str*"
+ reveal_type(User().username) # N: Revealed type is "builtins.str"
installed_apps:
- myapp
files:
@@ -90,8 +90,8 @@
- case: django_contrib_gis_base_model_mixin_inheritance
main: |
from myapp.models import User
- reveal_type(User().name) # N: Revealed type is "builtins.str*"
- reveal_type(User().updated_at) # N: Revealed type is "datetime.datetime*"
+ reveal_type(User().name) # N: Revealed type is "builtins.str"
+ reveal_type(User().updated_at) # N: Revealed type is "datetime.datetime"
installed_apps:
- myapp
files:
diff --git a/tests/typecheck/models/test_init.yml b/tests/typecheck/models/test_init.yml
index 2fcf84ccc..ebc100671 100644
--- a/tests/typecheck/models/test_init.yml
+++ b/tests/typecheck/models/test_init.yml
@@ -283,10 +283,10 @@
unset_set_type=[],
)
out: |
- main:4: note: Revealed type is "builtins.int*"
- main:5: note: Revealed type is "builtins.int*"
- main:6: note: Revealed type is "builtins.list*[builtins.int]"
- main:7: note: Revealed type is "builtins.int*"
+ main:4: note: Revealed type is "builtins.int"
+ main:5: note: Revealed type is "builtins.int"
+ main:6: note: Revealed type is "builtins.list[builtins.int]"
+ main:7: note: Revealed type is "builtins.int"
main:8: note: Revealed type is "Any"
main:9: error: Incompatible types in assignment (expression has type "str", variable has type "int")
main:10: error: Incompatible types in assignment (expression has type "str", variable has type "Union[int, float]")
diff --git a/tests/typecheck/models/test_primary_key.yml b/tests/typecheck/models/test_primary_key.yml
index 1c989d6dd..a715b9476 100644
--- a/tests/typecheck/models/test_primary_key.yml
+++ b/tests/typecheck/models/test_primary_key.yml
@@ -2,8 +2,8 @@
main: |
from myapp.models import MyModel
x = MyModel.objects.get(id=1)
- reveal_type(x.id) # N: Revealed type is "builtins.int*"
- reveal_type(x.pk) # N: Revealed type is "builtins.int*"
+ reveal_type(x.id) # N: Revealed type is "builtins.int"
+ reveal_type(x.pk) # N: Revealed type is "builtins.int"
MyModel.objects.get(pk=1)
installed_apps:
@@ -15,16 +15,16 @@
from django.db import models
class MyModel(models.Model):
def __str__(self):
- reveal_type(self.id) # N: Revealed type is "builtins.int*"
- reveal_type(self.pk) # N: Revealed type is "builtins.int*"
+ reveal_type(self.id) # N: Revealed type is "builtins.int"
+ reveal_type(self.pk) # N: Revealed type is "builtins.int"
- case: test_access_to_id_field_through_self_if_primary_key_is_defined
main: |
from myapp.models import MyModel
x = MyModel.objects.get(id='a')
- reveal_type(x.id) # N: Revealed type is "builtins.str*"
- reveal_type(x.pk) # N: Revealed type is "builtins.str*"
+ reveal_type(x.id) # N: Revealed type is "builtins.str"
+ reveal_type(x.pk) # N: Revealed type is "builtins.str"
MyModel.objects.get(pk='a')
installed_apps:
@@ -37,16 +37,16 @@
class MyModel(models.Model):
id = models.CharField(max_length=10, primary_key=True)
def __str__(self):
- reveal_type(self.id) # N: Revealed type is "builtins.str*"
- reveal_type(self.pk) # N: Revealed type is "builtins.str*"
+ reveal_type(self.id) # N: Revealed type is "builtins.str"
+ reveal_type(self.pk) # N: Revealed type is "builtins.str"
- case: test_access_to_id_field_through_self_if_primary_key_has_different_name
main: |
from myapp.models import MyModel
x = MyModel.objects.get(primary='a')
- reveal_type(x.primary) # N: Revealed type is "builtins.str*"
- reveal_type(x.pk) # N: Revealed type is "builtins.str*"
+ reveal_type(x.primary) # N: Revealed type is "builtins.str"
+ reveal_type(x.pk) # N: Revealed type is "builtins.str"
x.id # E: "MyModel" has no attribute "id"
MyModel.objects.get(pk='a')
@@ -61,6 +61,6 @@
class MyModel(models.Model):
primary = models.CharField(max_length=10, primary_key=True)
def __str__(self):
- reveal_type(self.primary) # N: Revealed type is "builtins.str*"
- reveal_type(self.pk) # N: Revealed type is "builtins.str*"
+ reveal_type(self.primary) # N: Revealed type is "builtins.str"
+ reveal_type(self.pk) # N: Revealed type is "builtins.str"
self.id # E: "MyModel" has no attribute "id"
diff --git a/tests/typecheck/models/test_proxy_models.yml b/tests/typecheck/models/test_proxy_models.yml
index 7648756c6..5e2eaa4db 100644
--- a/tests/typecheck/models/test_proxy_models.yml
+++ b/tests/typecheck/models/test_proxy_models.yml
@@ -4,7 +4,7 @@
Blog(publisher=Publisher())
Blog.objects.create(publisher=Publisher())
Blog().publisher = Publisher()
- reveal_type(Blog().publisher) # N: Revealed type is "myapp.models.PublisherProxy*"
+ reveal_type(Blog().publisher) # N: Revealed type is "myapp.models.PublisherProxy"
installed_apps:
- myapp
files:
diff --git a/tests/typecheck/test/test_client.yml b/tests/typecheck/test/test_client.yml
index 212946531..e7ce034c7 100644
--- a/tests/typecheck/test/test_client.yml
+++ b/tests/typecheck/test/test_client.yml
@@ -26,8 +26,8 @@
from django.test.client import RequestFactory, AsyncRequestFactory
factory = RequestFactory()
request = factory.get('foo')
- reveal_type(request) # N: Revealed type is "django.core.handlers.wsgi.WSGIRequest*"
+ reveal_type(request) # N: Revealed type is "django.core.handlers.wsgi.WSGIRequest"
async_factory = AsyncRequestFactory()
async_request = async_factory.get('foo')
- reveal_type(async_request) # N: Revealed type is "django.core.handlers.asgi.ASGIRequest*"
+ reveal_type(async_request) # N: Revealed type is "django.core.handlers.asgi.ASGIRequest"
diff --git a/tests/typecheck/test_config.yml b/tests/typecheck/test_config.yml
index 295dc2069..91569061a 100644
--- a/tests/typecheck/test_config.yml
+++ b/tests/typecheck/test_config.yml
@@ -2,8 +2,8 @@
main: |
from myapp.models import MyModel
mymodel = MyModel(user_id=1)
- reveal_type(mymodel.id) # N: Revealed type is "builtins.int*"
- reveal_type(mymodel.user) # N: Revealed type is "django.contrib.auth.models.User*"
+ reveal_type(mymodel.id) # N: Revealed type is "builtins.int"
+ reveal_type(mymodel.user) # N: Revealed type is "django.contrib.auth.models.User"
reveal_type(mymodel.objects) # N: Revealed type is "django.db.models.manager.Manager[myapp.models.MyModel]"
mypy_config: |
[mypy.plugins.django-stubs]
@@ -20,13 +20,13 @@
class MyModel(models.Model):
user = models.ForeignKey('auth.User', on_delete=models.CASCADE)
if TYPE_CHECKING:
- reveal_type(MyModel().user) # N: Revealed type is "django.contrib.auth.models.User*"
+ reveal_type(MyModel().user) # N: Revealed type is "django.contrib.auth.models.User"
- case: generate_pyproject_toml_and_settings_file_from_installed_apps_key
main: |
from myapp.models import MyModel
mymodel = MyModel(user_id=1)
- reveal_type(mymodel.id) # N: Revealed type is "builtins.int*"
+ reveal_type(mymodel.id) # N: Revealed type is "builtins.int"
installed_apps:
- django.contrib.auth
- myapp
diff --git a/tests/typecheck/test_formsets.yml b/tests/typecheck/test_formsets.yml
index ed773142b..073798a04 100644
--- a/tests/typecheck/test_formsets.yml
+++ b/tests/typecheck/test_formsets.yml
@@ -6,7 +6,7 @@
ArticleFS: Type[forms.BaseInlineFormSet[Article, Category, Any]] = forms.inlineformset_factory(Category, Article)
ArticleFS(instance=Article()) # E: Argument "instance" to "BaseInlineFormSet" has incompatible type "Article"; expected "Optional[Category]"
fs = ArticleFS(instance=Category())
- reveal_type(fs.instance) # N: Revealed type is "myapp.models.Category*"
+ reveal_type(fs.instance) # N: Revealed type is "myapp.models.Category"
installed_apps:
- myapp
files:
diff --git a/tests/typecheck/test_settings.yml b/tests/typecheck/test_settings.yml
index 4cdb26325..70874fa11 100644
--- a/tests/typecheck/test_settings.yml
+++ b/tests/typecheck/test_settings.yml
@@ -5,8 +5,8 @@
# standard settings
reveal_type(settings.AUTH_USER_MODEL) # N: Revealed type is "builtins.str"
reveal_type(settings.ROOT_DIR) # N: Revealed type is "builtins.str"
- reveal_type(settings.APPS_DIR) # N: Revealed type is "pathlib.Path*"
- reveal_type(settings.NUMBERS) # N: Revealed type is "builtins.list[builtins.str*]"
+ reveal_type(settings.APPS_DIR) # N: Revealed type is "pathlib.Path"
+ reveal_type(settings.NUMBERS) # N: Revealed type is "builtins.list[builtins.str]"
reveal_type(settings.DICT) # N: Revealed type is "builtins.dict[Any, Any]"
custom_settings: |
from base import *
@@ -37,8 +37,8 @@
from settings.basic_settings import *
main: |
from django.conf import settings
- reveal_type(settings.MEDIA_ROOT) # N: Revealed type is "pathlib.Path*"
- reveal_type(settings.MEDIA_ROOT / 'part') # N: Revealed type is "pathlib.Path*"
+ reveal_type(settings.MEDIA_ROOT) # N: Revealed type is "pathlib.Path"
+ reveal_type(settings.MEDIA_ROOT / 'part') # N: Revealed type is "pathlib.Path"
files:
- path: settings/__init__.py
- path: settings/basic_settings.py
diff --git a/tests/typecheck/test_shortcuts.yml b/tests/typecheck/test_shortcuts.yml
index 6659878fd..976656b04 100644
--- a/tests/typecheck/test_shortcuts.yml
+++ b/tests/typecheck/test_shortcuts.yml
@@ -3,13 +3,13 @@
from django.shortcuts import get_object_or_404, get_list_or_404
from myapp.models import MyModel
- reveal_type(get_object_or_404(MyModel)) # N: Revealed type is "myapp.models.MyModel*"
- reveal_type(get_object_or_404(MyModel.objects)) # N: Revealed type is "myapp.models.MyModel*"
- reveal_type(get_object_or_404(MyModel.objects.get_queryset())) # N: Revealed type is "myapp.models.MyModel*"
+ reveal_type(get_object_or_404(MyModel)) # N: Revealed type is "myapp.models.MyModel"
+ reveal_type(get_object_or_404(MyModel.objects)) # N: Revealed type is "myapp.models.MyModel"
+ reveal_type(get_object_or_404(MyModel.objects.get_queryset())) # N: Revealed type is "myapp.models.MyModel"
- reveal_type(get_list_or_404(MyModel)) # N: Revealed type is "builtins.list[myapp.models.MyModel*]"
- reveal_type(get_list_or_404(MyModel.objects)) # N: Revealed type is "builtins.list[myapp.models.MyModel*]"
- reveal_type(get_list_or_404(MyModel.objects.get_queryset())) # N: Revealed type is "builtins.list[myapp.models.MyModel*]"
+ reveal_type(get_list_or_404(MyModel)) # N: Revealed type is "builtins.list[myapp.models.MyModel]"
+ reveal_type(get_list_or_404(MyModel.objects)) # N: Revealed type is "builtins.list[myapp.models.MyModel]"
+ reveal_type(get_list_or_404(MyModel.objects.get_queryset())) # N: Revealed type is "builtins.list[myapp.models.MyModel]"
installed_apps:
- myapp
files:
diff --git a/tests/typecheck/utils/test_datastructures.yml b/tests/typecheck/utils/test_datastructures.yml
index b152545a3..13a48d8d3 100644
--- a/tests/typecheck/utils/test_datastructures.yml
+++ b/tests/typecheck/utils/test_datastructures.yml
@@ -10,39 +10,39 @@
d3: Tuple[Tuple[str, List[Union[str, int]]], ...] = (('foo', ['Foo']), ('bar', [2, 3]))
var3 = MultiValueDict(d3)
reveal_type(var1) # N: Revealed type is "django.utils.datastructures.MultiValueDict[Any, Any]"
- reveal_type(var2) # N: Revealed type is "django.utils.datastructures.MultiValueDict[builtins.str*, Union[builtins.str, builtins.int]]"
- reveal_type(var3) # N: Revealed type is "django.utils.datastructures.MultiValueDict[builtins.str*, Union[builtins.str, builtins.int]]"
+ reveal_type(var2) # N: Revealed type is "django.utils.datastructures.MultiValueDict[builtins.str, Union[builtins.str, builtins.int]]"
+ reveal_type(var3) # N: Revealed type is "django.utils.datastructures.MultiValueDict[builtins.str, Union[builtins.str, builtins.int]]"
# __getitem__, get, getlist (with proofs)
d = MultiValueDict({'foo': ['Foo']})
d.setlist('bar', [])
# actually 'Foo'
- reveal_type(d['foo']) # N: Revealed type is "Union[builtins.str*, builtins.list[builtins.object]]"
+ reveal_type(d['foo']) # N: Revealed type is "Union[builtins.str, builtins.list[builtins.object]]"
# actually []
- reveal_type(d['bar']) # N: Revealed type is "Union[builtins.str*, builtins.list[builtins.object]]"
+ reveal_type(d['bar']) # N: Revealed type is "Union[builtins.str, builtins.list[builtins.object]]"
# actually None
- reveal_type(d.get('bar')) # N: Revealed type is "Union[builtins.str*, None]"
+ reveal_type(d.get('bar')) # N: Revealed type is "Union[builtins.str, None]"
# actually 1
- reveal_type(d.get('bar', 1)) # N: Revealed type is "Union[builtins.str, builtins.int*]"
+ reveal_type(d.get('bar', 1)) # N: Revealed type is "Union[builtins.str, builtins.int]"
# actually []
reveal_type(d.getlist('bar')) # N: Revealed type is "builtins.list[builtins.str]"
# actually []
- reveal_type(d.getlist('bar', [1])) # N: Revealed type is "Union[builtins.list[builtins.str], builtins.list*[builtins.int*]]"
+ reveal_type(d.getlist('bar', [1])) # N: Revealed type is "Union[builtins.list[builtins.str], builtins.list[builtins.int]]"
# actually True (note that default can be not a list)
- reveal_type(d.getlist('baz', True)) # N: Revealed type is "Union[builtins.list[builtins.str], builtins.bool*]"
+ reveal_type(d.getlist('baz', True)) # N: Revealed type is "Union[builtins.list[builtins.str], builtins.bool]"
# setters
- reveal_type(d.setlistdefault('baz')) # N: Revealed type is "builtins.list[builtins.str*]"
+ reveal_type(d.setlistdefault('baz')) # N: Revealed type is "builtins.list[builtins.str]"
d.setlistdefault('baz', [1]) # E: List item 0 has incompatible type "int"; expected "str"
- reveal_type(d.setlistdefault('baz', [])) # N: Revealed type is "builtins.list[builtins.str*]"
+ reveal_type(d.setlistdefault('baz', [])) # N: Revealed type is "builtins.list[builtins.str]"
d.appendlist('baz', 'Baz')
d.appendlist('baz', 1) # E: Argument 2 to "appendlist" of "MultiValueDict" has incompatible type "int"; expected "str"
# iterators
# actually [('foo', 'Foo'), ('bar', [])]
- reveal_type(list(d.items())) # N: Revealed type is "builtins.list[Tuple[builtins.str*, Union[builtins.str*, builtins.list[builtins.object]]]]"
- reveal_type(list(d.keys())) # N: Revealed type is "builtins.list[builtins.str*]"
+ reveal_type(list(d.items())) # N: Revealed type is "builtins.list[Tuple[builtins.str, Union[builtins.str, builtins.list[builtins.object]]]]"
+ reveal_type(list(d.keys())) # N: Revealed type is "builtins.list[builtins.str]"
# actually ['Foo', []]
- reveal_type(list(d.values())) # N: Revealed type is "builtins.list[Union[builtins.str*, builtins.list[builtins.object]]]"
+ reveal_type(list(d.values())) # N: Revealed type is "builtins.list[Union[builtins.str, builtins.list[builtins.object]]]"
# actually {'foo': 'Foo', 'bar': []}
- reveal_type(d.dict()) # N: Revealed type is "builtins.dict[builtins.str*, Union[builtins.str*, builtins.list[builtins.object]]]"
+ reveal_type(d.dict()) # N: Revealed type is "builtins.dict[builtins.str, Union[builtins.str, builtins.list[builtins.object]]]"
diff --git a/tests/typecheck/utils/test_encoding.yml b/tests/typecheck/utils/test_encoding.yml
index 102d319ae..3816895f9 100644
--- a/tests/typecheck/utils/test_encoding.yml
+++ b/tests/typecheck/utils/test_encoding.yml
@@ -5,10 +5,10 @@
pass
reveal_type(force_bytes(123)) # N: Revealed type is "builtins.bytes"
- reveal_type(force_bytes(123, strings_only=True)) # N: Revealed type is "builtins.int*"
+ reveal_type(force_bytes(123, strings_only=True)) # N: Revealed type is "builtins.int"
reveal_type(force_str(123)) # N: Revealed type is "builtins.str"
- reveal_type(force_str(123, strings_only=True)) # N: Revealed type is "builtins.int*"
- reveal_type(force_str('foo')) # N: Revealed type is "builtins.str*"
- reveal_type(force_str('foo', strings_only=True)) # N: Revealed type is "builtins.str*"
- reveal_type(force_str(S('foo'), strings_only=True)) # N: Revealed type is "main.S*"
+ reveal_type(force_str(123, strings_only=True)) # N: Revealed type is "builtins.int"
+ reveal_type(force_str('foo')) # N: Revealed type is "builtins.str"
+ reveal_type(force_str('foo', strings_only=True)) # N: Revealed type is "builtins.str"
+ reveal_type(force_str(S('foo'), strings_only=True)) # N: Revealed type is "main.S"
diff --git a/tests/typecheck/utils/test_functional.yml b/tests/typecheck/utils/test_functional.yml
index e0c18fff6..eb4417bfa 100644
--- a/tests/typecheck/utils/test_functional.yml
+++ b/tests/typecheck/utils/test_functional.yml
@@ -7,12 +7,12 @@
@cached_property
def attr(self) -> List[str]: ...
- reveal_type(attr) # N: Revealed type is "django.utils.functional.cached_property[builtins.list*[builtins.str]]"
+ reveal_type(attr) # N: Revealed type is "django.utils.functional.cached_property[builtins.list[builtins.str]]"
reveal_type(attr.name) # N: Revealed type is "builtins.str"
- reveal_type(Foo.attr) # N: Revealed type is "django.utils.functional.cached_property[builtins.list*[builtins.str]]"
- reveal_type(Foo.attr.func) # N: Revealed type is "def (*Any, **Any) -> builtins.list*[builtins.str]"
+ reveal_type(Foo.attr) # N: Revealed type is "django.utils.functional.cached_property[builtins.list[builtins.str]]"
+ reveal_type(Foo.attr.func) # N: Revealed type is "def (*Any, **Any) -> builtins.list[builtins.str]"
f = Foo()
- reveal_type(f.attr) # N: Revealed type is "builtins.list*[builtins.str]"
+ reveal_type(f.attr) # N: Revealed type is "builtins.list[builtins.str]"
f.attr.name # E: "List[str]" has no attribute "name"
diff --git a/tests/typecheck/views/generic/test_edit.yml b/tests/typecheck/views/generic/test_edit.yml
index 569a7e16d..895e5795b 100644
--- a/tests/typecheck/views/generic/test_edit.yml
+++ b/tests/typecheck/views/generic/test_edit.yml
@@ -46,11 +46,11 @@
class MyCreateView(CreateView[Article, ArticleModelForm]):
def some(self) -> None:
- reveal_type(self.get_form_class()) # N: Revealed type is "Type[main.ArticleModelForm*]"
+ reveal_type(self.get_form_class()) # N: Revealed type is "Type[main.ArticleModelForm]"
class MyUpdateView(UpdateView[Article, ArticleModelForm]):
def some(self) -> None:
- reveal_type(self.get_form_class()) # N: Revealed type is "Type[main.ArticleModelForm*]"
+ reveal_type(self.get_form_class()) # N: Revealed type is "Type[main.ArticleModelForm]"
installed_apps:
- myapp
files:
@@ -79,7 +79,7 @@
class MyCreateView(CreateView[Article, ArticleModelForm]):
def some(self) -> None:
- reveal_type(self.get_form()) # N: Revealed type is "main.ArticleModelForm*"
+ reveal_type(self.get_form()) # N: Revealed type is "main.ArticleModelForm"
reveal_type(self.get_form(SubArticleModelForm)) # N: Revealed type is "main.SubArticleModelForm"
reveal_type(self.get_form(AnotherArticleModelForm)) # N: Revealed type is "main.AnotherArticleModelForm" # E: Argument 1 to "get_form" of "FormMixin" has incompatible type "Type[AnotherArticleModelForm]"; expected "Optional[Type[ArticleModelForm]]"
installed_apps:
diff --git a/tests/typecheck/views/test_function_based_views.yml b/tests/typecheck/views/test_function_based_views.yml
index 21ec80018..97f99f203 100644
--- a/tests/typecheck/views/test_function_based_views.yml
+++ b/tests/typecheck/views/test_function_based_views.yml
@@ -24,25 +24,25 @@
def empty_response(request: HttpRequest) -> HttpResponse:
response = HttpResponse()
- reveal_type(response.content) # N: Revealed type is "builtins.bytes*"
+ reveal_type(response.content) # N: Revealed type is "builtins.bytes"
return response
def str_response(request: HttpRequest) -> HttpResponse:
response = HttpResponse()
response.content = 'It works!'
- reveal_type(response.content) # N: Revealed type is "builtins.bytes*"
+ reveal_type(response.content) # N: Revealed type is "builtins.bytes"
return response
def bytes_response(request: HttpRequest) -> HttpResponse:
response = HttpResponse()
response.content = b'It works!'
- reveal_type(response.content) # N: Revealed type is "builtins.bytes*"
+ reveal_type(response.content) # N: Revealed type is "builtins.bytes"
return response
def object_response(request: HttpRequest) -> HttpResponse:
response = HttpResponse()
response.content = _('It works!')
- reveal_type(response.content) # N: Revealed type is "builtins.bytes*"
+ reveal_type(response.content) # N: Revealed type is "builtins.bytes"
return response
- case: streaming_http_response
@@ -74,29 +74,29 @@
def empty_response(request: HttpRequest) -> StreamingHttpResponse:
response = StreamingHttpResponse()
- reveal_type(response.streaming_content) # N: Revealed type is "typing.Iterator*[builtins.bytes]"
+ reveal_type(response.streaming_content) # N: Revealed type is "typing.Iterator[builtins.bytes]"
return response
def str_response(request: HttpRequest) -> StreamingHttpResponse:
response = StreamingHttpResponse()
response.streaming_content = ['It works!']
- reveal_type(response.streaming_content) # N: Revealed type is "typing.Iterator*[builtins.bytes]"
+ reveal_type(response.streaming_content) # N: Revealed type is "typing.Iterator[builtins.bytes]"
return response
def bytes_response(request: HttpRequest) -> StreamingHttpResponse:
response = StreamingHttpResponse()
response.streaming_content = [b'It works!']
- reveal_type(response.streaming_content) # N: Revealed type is "typing.Iterator*[builtins.bytes]"
+ reveal_type(response.streaming_content) # N: Revealed type is "typing.Iterator[builtins.bytes]"
return response
def object_response(request: HttpRequest) -> StreamingHttpResponse:
response = StreamingHttpResponse()
response.streaming_content = [_('It works!')]
- reveal_type(response.streaming_content) # N: Revealed type is "typing.Iterator*[builtins.bytes]"
+ reveal_type(response.streaming_content) # N: Revealed type is "typing.Iterator[builtins.bytes]"
return response
def mixed_response(request: HttpRequest) -> StreamingHttpResponse:
response = StreamingHttpResponse()
response.streaming_content = [_('Yes'), '/', _('No')]
- reveal_type(response.streaming_content) # N: Revealed type is "typing.Iterator*[builtins.bytes]"
+ reveal_type(response.streaming_content) # N: Revealed type is "typing.Iterator[builtins.bytes]"
return response
|
scikit-hep__pyhf-1041 | Class returned by pyhf.Workspace.combine
# Question
Not a bug, so I'm opening this as a question: the `pyhf.Workspace.combine` classmethod returns a `Workspace` explicitly instead of `cls`.
https://github.com/scikit-hep/pyhf/blob/e260626689f46414be185d834499cc65dce5a4b0/src/pyhf/workspace.py#L678
To work better with classes that want to inherit from `pyhf.Workspace`, I think it would be better to return the calling class as
```python
return cls(newspec)
```
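For illustration, a minimal sketch of what the `cls` version would enable; `SpecialWorkspace`, `ws_a` and `ws_b` are hypothetical names used only for this example, not part of pyhf:
```python
import pyhf


class SpecialWorkspace(pyhf.Workspace):
    """Hypothetical subclass that adds behaviour on top of pyhf.Workspace."""

    def n_channels(self):
        # Workspace is a dict subclass, so the spec keys are available directly
        return len(self["channels"])


# With the current `return Workspace(newspec)`:
#     combined = SpecialWorkspace.combine(ws_a, ws_b)  # ws_a, ws_b: SpecialWorkspace instances
#     isinstance(combined, SpecialWorkspace)  -> False; combined.n_channels() raises AttributeError
#
# With `return cls(newspec)`, the classmethod constructs whatever class it was called on:
#     isinstance(combined, SpecialWorkspace)  -> True; combined.n_channels() works
```
Since `combine` is already a classmethod and so receives `cls`, this would be a one-line change with no effect on callers that use `pyhf.Workspace` directly (there `cls` *is* `Workspace`).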
# Relevant Issues and Pull Requests
None that I'm aware of.
| [
{
"content": "\"\"\"\npyhf workspaces hold the three data items:\n\n* the statistical model p(data|parameters)\n* the observed data (optional)\n* fit configurations (\"measurements\")\n\"\"\"\nimport logging\nimport jsonpatch\nimport copy\nimport collections\nfrom . import exceptions\nfrom . import utils\nfrom .pdf import Model\nfrom .mixins import _ChannelSummaryMixin\n\nlog = logging.getLogger(__name__)\n\n\ndef _join_items(join, left_items, right_items, key='name'):\n \"\"\"\n Join two lists of dictionaries along the given key.\n\n This is meant to be as generic as possible for any pairs of lists of dictionaries for many join operations.\n\n Args:\n join (`str`): The join operation to apply. See ~pyhf.workspace.Workspace for valid join operations.\n left_items (`list`): A list of dictionaries to join on the left\n right_items (`list`): A list of dictionaries to join on the right\n\n Returns:\n :obj:`list`: A joined list of dictionaries.\n\n \"\"\"\n if join == 'right outer':\n primary_items, secondary_items = right_items, left_items\n else:\n primary_items, secondary_items = left_items, right_items\n joined_items = copy.deepcopy(primary_items)\n for secondary_item in secondary_items:\n # outer join: merge primary and secondary, matching where possible\n if join == 'outer' and secondary_item in primary_items:\n continue\n # left/right outer join: only add secondary if existing item (by key value) is not in primary\n # NB: this will be slow for large numbers of items\n elif join in ['left outer', 'right outer'] and secondary_item[key] in [\n item[key] for item in joined_items\n ]:\n continue\n joined_items.append(copy.deepcopy(secondary_item))\n return joined_items\n\n\ndef _join_versions(join, left_version, right_version):\n \"\"\"\n Join two workspace versions.\n\n Raises:\n ~pyhf.exceptions.InvalidWorkspaceOperation: Versions are incompatible.\n\n Args:\n join (`str`): The join operation to apply. See ~pyhf.workspace.Workspace for valid join operations.\n left_version (`str`): The left workspace version.\n right_version (`str`): The right workspace version.\n\n Returns:\n :obj:`str`: The workspace version.\n\n \"\"\"\n if left_version != right_version:\n raise exceptions.InvalidWorkspaceOperation(\n f\"Workspaces of different versions cannot be combined: {left_version} != {right_version}\"\n )\n return left_version\n\n\ndef _join_channels(join, left_channels, right_channels):\n \"\"\"\n Join two workspace channel specifications.\n\n Raises:\n ~pyhf.exceptions.InvalidWorkspaceOperation: Channel specifications are incompatible.\n\n Args:\n join (`str`): The join operation to apply. See ~pyhf.workspace.Workspace for valid join operations.\n left_channels (`list`): The left channel specification.\n right_channels (`list`): The right channel specification.\n\n Returns:\n :obj:`list`: A joined list of channels. Each channel follows the :obj:`defs.json#/definitions/channel` `schema <https://scikit-hep.org/pyhf/likelihood.html#channel>`__\n\n \"\"\"\n\n joined_channels = _join_items(join, left_channels, right_channels)\n if join == 'none':\n common_channels = set(c['name'] for c in left_channels).intersection(\n c['name'] for c in right_channels\n )\n if common_channels:\n raise exceptions.InvalidWorkspaceOperation(\n f\"Workspaces cannot have any channels in common with the same name: {common_channels}. 
You can also try a different join operation: {Workspace.valid_joins}.\"\n )\n\n elif join == 'outer':\n counted_channels = collections.Counter(\n channel['name'] for channel in joined_channels\n )\n incompatible_channels = [\n channel for channel, count in counted_channels.items() if count > 1\n ]\n if incompatible_channels:\n raise exceptions.InvalidWorkspaceOperation(\n f\"Workspaces cannot have channels in common with incompatible structure: {incompatible_channels}. You can also try a different join operation: {Workspace.valid_joins}.\"\n )\n return joined_channels\n\n\ndef _join_observations(join, left_observations, right_observations):\n \"\"\"\n Join two workspace observation specifications.\n\n Raises:\n ~pyhf.exceptions.InvalidWorkspaceOperation: Observation specifications are incompatible.\n\n Args:\n join (`str`): The join operation to apply. See ~pyhf.workspace.Workspace for valid join operations.\n left_observations (`list`): The left observation specification.\n right_observations (`list`): The right observation specification.\n\n Returns:\n :obj:`list`: A joined list of observations. Each observation follows the :obj:`defs.json#/definitions/observation` `schema <https://scikit-hep.org/pyhf/likelihood.html#observations>`__\n\n \"\"\"\n joined_observations = _join_items(join, left_observations, right_observations)\n if join == 'none':\n common_observations = set(\n obs['name'] for obs in left_observations\n ).intersection(obs['name'] for obs in right_observations)\n if common_observations:\n raise exceptions.InvalidWorkspaceOperation(\n f\"Workspaces cannot have any observations in common with the same name: {common_observations}. You can also try a different join operation: {Workspace.valid_joins}.\"\n )\n\n elif join == 'outer':\n counted_observations = collections.Counter(\n observation['name'] for observation in joined_observations\n )\n incompatible_observations = [\n observation\n for observation, count in counted_observations.items()\n if count > 1\n ]\n if incompatible_observations:\n raise exceptions.InvalidWorkspaceOperation(\n f\"Workspaces cannot have observations in common with incompatible structure: {incompatible_observations}. You can also try a different join operation: {Workspace.valid_joins}.\"\n )\n return joined_observations\n\n\ndef _join_parameter_configs(measurement_name, left_parameters, right_parameters):\n \"\"\"\n Join two measurement parameter config specifications.\n\n Only uses by :method:`_join_measurements` when join='outer'.\n\n Raises:\n ~pyhf.exceptions.InvalidWorkspaceOperation: Parameter configuration specifications are incompatible.\n\n Args:\n measurement_name (`str`): The name of the measurement being joined (a detail for raising exceptions correctly)\n left_parameters (`list`): The left parameter configuration specification.\n right_parameters (`list`): The right parameter configuration specification.\n\n Returns:\n :obj:`list`: A joined list of parameter configurations. 
Each parameter configuration follows the :obj:`defs.json#/definitions/config` schema\n\n \"\"\"\n joined_parameter_configs = _join_items('outer', left_parameters, right_parameters)\n counted_parameter_configs = collections.Counter(\n parameter['name'] for parameter in joined_parameter_configs\n )\n incompatible_parameter_configs = [\n parameter for parameter, count in counted_parameter_configs.items() if count > 1\n ]\n if incompatible_parameter_configs:\n raise exceptions.InvalidWorkspaceOperation(\n f\"Workspaces cannot have a measurement ({measurement_name}) with incompatible parameter configs: {incompatible_parameter_configs}. You can also try a different join operation: {Workspace.valid_joins}.\"\n )\n return joined_parameter_configs\n\n\ndef _join_measurements(join, left_measurements, right_measurements):\n \"\"\"\n Join two workspace measurement specifications.\n\n Raises:\n ~pyhf.exceptions.InvalidWorkspaceOperation: Measurement specifications are incompatible.\n\n Args:\n join (`str`): The join operation to apply. See ~pyhf.workspace.Workspace for valid join operations.\n left_measurements (`list`): The left measurement specification.\n right_measurements (`list`): The right measurement specification.\n\n Returns:\n :obj:`list`: A joined list of measurements. Each measurement follows the :obj:`defs.json#/definitions/measurement` `schema <https://scikit-hep.org/pyhf/likelihood.html#measurements>`__\n\n \"\"\"\n joined_measurements = _join_items(join, left_measurements, right_measurements)\n if join == 'none':\n common_measurements = set(\n meas['name'] for meas in left_measurements\n ).intersection(meas['name'] for meas in right_measurements)\n if common_measurements:\n raise exceptions.InvalidWorkspaceOperation(\n f\"Workspaces cannot have any measurements in common with the same name: {common_measurements}. 
You can also try a different join operation: {Workspace.valid_joins}.\"\n )\n\n elif join == 'outer':\n # need to store a mapping of measurement name to all measurement objects with that name\n _measurement_mapping = {}\n for measurement in joined_measurements:\n _measurement_mapping.setdefault(measurement['name'], []).append(measurement)\n # first check for incompatible POI\n # then merge parameter configs\n incompatible_poi = [\n measurement_name\n for measurement_name, measurements in _measurement_mapping.items()\n if len(set(measurement['config']['poi'] for measurement in measurements))\n > 1\n ]\n if incompatible_poi:\n raise exceptions.InvalidWorkspaceOperation(\n f\"Workspaces cannot have the same measurements with incompatible POI: {incompatible_poi}.\"\n )\n\n joined_measurements = []\n for measurement_name, measurements in _measurement_mapping.items():\n if len(measurements) != 1:\n new_measurement = {\n 'name': measurement_name,\n 'config': {\n 'poi': measurements[0]['config']['poi'],\n 'parameters': _join_parameter_configs(\n measurement_name,\n *[\n measurement['config']['parameters']\n for measurement in measurements\n ],\n ),\n },\n }\n else:\n new_measurement = measurements[0]\n joined_measurements.append(new_measurement)\n return joined_measurements\n\n\nclass Workspace(_ChannelSummaryMixin, dict):\n \"\"\"\n A JSON-serializable object that is built from an object that follows the :obj:`workspace.json` `schema <https://scikit-hep.org/pyhf/likelihood.html#workspace>`__.\n \"\"\"\n\n valid_joins = ['none', 'outer', 'left outer', 'right outer']\n\n def __init__(self, spec, **config_kwargs):\n \"\"\"Workspaces hold the model, data and measurements.\"\"\"\n super(Workspace, self).__init__(spec, channels=spec['channels'])\n self.schema = config_kwargs.pop('schema', 'workspace.json')\n self.version = config_kwargs.pop('version', spec.get('version', None))\n\n # run jsonschema validation of input specification against the (provided) schema\n log.info(f\"Validating spec against schema: {self.schema}\")\n utils.validate(self, self.schema, version=self.version)\n\n self.measurement_names = []\n for measurement in self.get('measurements', []):\n self.measurement_names.append(measurement['name'])\n\n self.observations = {}\n for obs in self['observations']:\n self.observations[obs['name']] = obs['data']\n\n def __eq__(self, other):\n \"\"\"Equality is defined as equal dict representations.\"\"\"\n if not isinstance(other, Workspace):\n return False\n return dict(self) == dict(other)\n\n def __ne__(self, other):\n \"\"\"Negation of equality.\"\"\"\n return not self == other\n\n def __repr__(self):\n \"\"\"Representation of the Workspace.\"\"\"\n return object.__repr__(self)\n\n def get_measurement(\n self, poi_name=None, measurement_name=None, measurement_index=None\n ):\n \"\"\"\n Get (or create) a measurement object.\n\n The following logic is used:\n\n 1. if the poi name is given, create a measurement object for that poi\n 2. if the measurement name is given, find the measurement for the given name\n 3. if the measurement index is given, return the measurement at that index\n 4. 
if there are measurements but none of the above have been specified, return the 0th measurement\n\n Raises:\n ~pyhf.exceptions.InvalidMeasurement: If the measurement was not found\n\n Args:\n poi_name (`str`): The name of the parameter of interest to create a new measurement from\n measurement_name (`str`): The name of the measurement to use\n measurement_index (`int`): The index of the measurement to use\n\n Returns:\n :obj:`dict`: A measurement object adhering to the schema defs.json#/definitions/measurement\n\n \"\"\"\n measurement = None\n if poi_name is not None:\n measurement = {\n 'name': 'NormalMeasurement',\n 'config': {'poi': poi_name, 'parameters': []},\n }\n elif self.measurement_names:\n if measurement_name is not None:\n if measurement_name not in self.measurement_names:\n log.debug(\n 'measurements defined:\\n\\t{0:s}'.format(\n '\\n\\t'.join(self.measurement_names)\n )\n )\n raise exceptions.InvalidMeasurement(\n 'no measurement by name \\'{0:s}\\' was found in the workspace, pick from one of the valid ones above'.format(\n measurement_name\n )\n )\n measurement = self['measurements'][\n self.measurement_names.index(measurement_name)\n ]\n else:\n if measurement_index is None and len(self.measurement_names) > 1:\n log.warning(\n 'multiple measurements defined. Taking the first measurement.'\n )\n\n measurement_index = (\n measurement_index if measurement_index is not None else 0\n )\n try:\n measurement = self['measurements'][measurement_index]\n except IndexError:\n raise exceptions.InvalidMeasurement(\n f\"The measurement index {measurement_index} is out of bounds as only {len(self.measurement_names)} measurement(s) have been defined.\"\n )\n else:\n raise exceptions.InvalidMeasurement(\"No measurements have been defined.\")\n\n utils.validate(measurement, 'measurement.json', self.version)\n return measurement\n\n def model(self, **config_kwargs):\n \"\"\"\n Create a model object with/without patches applied.\n\n See :func:`pyhf.workspace.Workspace.get_measurement` and :class:`pyhf.pdf.Model` for possible keyword arguments.\n\n Args:\n patches: A list of JSON patches to apply to the model specification\n config_kwargs: Possible keyword arguments for the measurement and model configuration\n\n Returns:\n ~pyhf.pdf.Model: A model object adhering to the schema model.json\n\n \"\"\"\n\n poi_name = config_kwargs.pop('poi_name', None)\n measurement_name = config_kwargs.pop('measurement_name', None)\n measurement_index = config_kwargs.pop('measurement_index', None)\n measurement = self.get_measurement(\n poi_name=poi_name,\n measurement_name=measurement_name,\n measurement_index=measurement_index,\n )\n log.debug(\n 'model being created for measurement {0:s}'.format(measurement['name'])\n )\n\n patches = config_kwargs.pop('patches', [])\n\n modelspec = {\n 'channels': self['channels'],\n 'parameters': measurement['config']['parameters'],\n }\n for patch in patches:\n modelspec = jsonpatch.JsonPatch(patch).apply(modelspec)\n\n return Model(modelspec, poi_name=measurement['config']['poi'], **config_kwargs)\n\n def data(self, model, with_aux=True):\n \"\"\"\n Return the data for the supplied model with or without auxiliary data from the model.\n\n The model is needed as the order of the data depends on the order of the channels in the model.\n\n Raises:\n KeyError: Invalid or missing channel\n\n Args:\n model (~pyhf.pdf.Model): A model object adhering to the schema model.json\n with_aux (bool): Whether to include auxiliary data from the model or not\n\n Returns:\n :obj:`list`: 
data\n\n \"\"\"\n try:\n observed_data = sum(\n (self.observations[c] for c in model.config.channels), []\n )\n except KeyError:\n log.error(\n \"Invalid channel: the workspace does not have observation data for one of the channels in the model.\"\n )\n raise\n if with_aux:\n observed_data += model.config.auxdata\n return observed_data\n\n def _prune_and_rename(\n self,\n prune_modifiers=None,\n prune_modifier_types=None,\n prune_samples=None,\n prune_channels=None,\n prune_measurements=None,\n rename_modifiers=None,\n rename_samples=None,\n rename_channels=None,\n rename_measurements=None,\n ):\n \"\"\"\n Return a new, pruned, renamed workspace specification. This will not modify the original workspace.\n\n Pruning removes pieces of the workspace whose name or type matches the\n user-provided lists. The pruned, renamed workspace must also be a valid\n workspace.\n\n A workspace is composed of many named components, such as channels and\n samples, as well as types of systematics (e.g. `histosys`). Components\n can be removed (pruned away) filtering on name or be renamed according\n to the provided :obj:`dict` mapping. Additionally, modifiers of\n specific types can be removed (pruned away).\n\n This function also handles specific peculiarities, such as\n renaming/removing a channel which needs to rename/remove the\n corresponding `observation`.\n\n Args:\n prune_modifiers: A :obj:`str` or a :obj:`list` of modifiers to prune.\n prune_modifier_types: A :obj:`str` or a :obj:`list` of modifier types to prune.\n prune_samples: A :obj:`str` or a :obj:`list` of samples to prune.\n prune_channels: A :obj:`str` or a :obj:`list` of channels to prune.\n prune_measurements: A :obj:`str` or a :obj:`list` of measurements to prune.\n rename_modifiers: A :obj:`dict` mapping old modifier name to new modifier name.\n rename_samples: A :obj:`dict` mapping old sample name to new sample name.\n rename_channels: A :obj:`dict` mapping old channel name to new channel name.\n rename_measurements: A :obj:`dict` mapping old measurement name to new measurement name.\n\n Returns:\n ~pyhf.workspace.Workspace: A new workspace object with the specified components removed or renamed\n\n \"\"\"\n # avoid mutable defaults\n prune_modifiers = [] if prune_modifiers is None else prune_modifiers\n prune_modifier_types = (\n [] if prune_modifier_types is None else prune_modifier_types\n )\n prune_samples = [] if prune_samples is None else prune_samples\n prune_channels = [] if prune_channels is None else prune_channels\n prune_measurements = [] if prune_measurements is None else prune_measurements\n rename_modifiers = {} if rename_modifiers is None else rename_modifiers\n rename_samples = {} if rename_samples is None else rename_samples\n rename_channels = {} if rename_channels is None else rename_channels\n rename_measurements = {} if rename_measurements is None else rename_measurements\n\n newspec = {\n 'channels': [\n {\n 'name': rename_channels.get(channel['name'], channel['name']),\n 'samples': [\n {\n 'name': rename_samples.get(sample['name'], sample['name']),\n 'data': sample['data'],\n 'modifiers': [\n dict(\n modifier,\n name=rename_modifiers.get(\n modifier['name'], modifier['name']\n ),\n )\n for modifier in sample['modifiers']\n if modifier['name'] not in prune_modifiers\n and modifier['type'] not in prune_modifier_types\n ],\n }\n for sample in channel['samples']\n if sample['name'] not in prune_samples\n ],\n }\n for channel in self['channels']\n if channel['name'] not in prune_channels\n ],\n 'measurements': [\n 
{\n 'name': rename_measurements.get(\n measurement['name'], measurement['name']\n ),\n 'config': {\n 'parameters': [\n dict(\n parameter,\n name=rename_modifiers.get(\n parameter['name'], parameter['name']\n ),\n )\n for parameter in measurement['config']['parameters']\n if parameter['name'] not in prune_modifiers\n ],\n 'poi': rename_modifiers.get(\n measurement['config']['poi'], measurement['config']['poi']\n ),\n },\n }\n for measurement in self['measurements']\n if measurement['name'] not in prune_measurements\n ],\n 'observations': [\n dict(\n copy.deepcopy(observation),\n name=rename_channels.get(observation['name'], observation['name']),\n )\n for observation in self['observations']\n if observation['name'] not in prune_channels\n ],\n 'version': self['version'],\n }\n return Workspace(newspec)\n\n def prune(\n self,\n modifiers=None,\n modifier_types=None,\n samples=None,\n channels=None,\n measurements=None,\n ):\n \"\"\"\n Return a new, pruned workspace specification. This will not modify the original workspace.\n\n The pruned workspace must also be a valid workspace.\n\n Args:\n modifiers: A :obj:`str` or a :obj:`list` of modifiers to prune.\n modifier_types: A :obj:`str` or a :obj:`list` of modifier types to prune.\n samples: A :obj:`str` or a :obj:`list` of samples to prune.\n channels: A :obj:`str` or a :obj:`list` of channels to prune.\n measurements: A :obj:`str` or a :obj:`list` of measurements to prune.\n\n Returns:\n ~pyhf.workspace.Workspace: A new workspace object with the specified components removed\n\n \"\"\"\n # avoid mutable defaults\n modifiers = [] if modifiers is None else modifiers\n modifier_types = [] if modifier_types is None else modifier_types\n samples = [] if samples is None else samples\n channels = [] if channels is None else channels\n measurements = [] if measurements is None else measurements\n\n return self._prune_and_rename(\n prune_modifiers=modifiers,\n prune_modifier_types=modifier_types,\n prune_samples=samples,\n prune_channels=channels,\n prune_measurements=measurements,\n )\n\n def rename(self, modifiers=None, samples=None, channels=None, measurements=None):\n \"\"\"\n Return a new workspace specification with certain elements renamed.\n\n This will not modify the original workspace.\n The renamed workspace must also be a valid workspace.\n\n Args:\n modifiers: A :obj:`dict` mapping old modifier name to new modifier name.\n samples: A :obj:`dict` mapping old sample name to new sample name.\n channels: A :obj:`dict` mapping old channel name to new channel name.\n measurements: A :obj:`dict` mapping old measurement name to new measurement name.\n\n Returns:\n ~pyhf.workspace.Workspace: A new workspace object with the specified components renamed\n\n \"\"\"\n # avoid mutable defaults\n modifiers = {} if modifiers is None else modifiers\n samples = {} if samples is None else samples\n channels = {} if channels is None else channels\n measurements = {} if measurements is None else measurements\n\n return self._prune_and_rename(\n rename_modifiers=modifiers,\n rename_samples=samples,\n rename_channels=channels,\n rename_measurements=measurements,\n )\n\n @classmethod\n def combine(cls, left, right, join='none'):\n \"\"\"\n Return a new workspace specification that is the combination of the two workspaces.\n\n The new workspace must also be a valid workspace. A combination of\n workspaces is done by combining the set of:\n\n - channels,\n - observations, and\n - measurements\n\n between the two workspaces. 
If the two workspaces have modifiers that\n follow the same naming convention, then correlations across the two\n workspaces may be possible. In particular, the `lumi` modifier will be\n fully-correlated.\n\n If the two workspaces have the same measurement (with the same POI),\n those measurements will get merged.\n\n Raises:\n ~pyhf.exceptions.InvalidWorkspaceOperation: The workspaces have common channel names, incompatible measurements, or incompatible schema versions.\n\n Args:\n left (~pyhf.workspace.Workspace): A workspace\n right (~pyhf.workspace.Workspace): Another workspace\n join (:obj:`str`): How to join the two workspaces. Pick from \"none\", \"outer\", \"left outer\", or \"right outer\".\n\n Returns:\n ~pyhf.workspace.Workspace: A new combined workspace object\n\n \"\"\"\n if join not in Workspace.valid_joins:\n raise ValueError(\n f\"Workspaces must be joined using one of the valid join operations ({Workspace.valid_joins}); not {join}\"\n )\n if join in ['left outer', 'right outer']:\n log.warning(\n \"You are using an unsafe join operation. This will silence exceptions that might be raised during a normal 'outer' operation.\"\n )\n\n new_version = _join_versions(join, left['version'], right['version'])\n new_channels = _join_channels(join, left['channels'], right['channels'])\n new_observations = _join_observations(\n join, left['observations'], right['observations']\n )\n new_measurements = _join_measurements(\n join, left['measurements'], right['measurements']\n )\n\n newspec = {\n 'channels': new_channels,\n 'measurements': new_measurements,\n 'observations': new_observations,\n 'version': new_version,\n }\n return Workspace(newspec)\n",
"path": "src/pyhf/workspace.py"
}
] | [
{
"content": "\"\"\"\npyhf workspaces hold the three data items:\n\n* the statistical model p(data|parameters)\n* the observed data (optional)\n* fit configurations (\"measurements\")\n\"\"\"\nimport logging\nimport jsonpatch\nimport copy\nimport collections\nfrom . import exceptions\nfrom . import utils\nfrom .pdf import Model\nfrom .mixins import _ChannelSummaryMixin\n\nlog = logging.getLogger(__name__)\n\n\ndef _join_items(join, left_items, right_items, key='name'):\n \"\"\"\n Join two lists of dictionaries along the given key.\n\n This is meant to be as generic as possible for any pairs of lists of dictionaries for many join operations.\n\n Args:\n join (`str`): The join operation to apply. See ~pyhf.workspace.Workspace for valid join operations.\n left_items (`list`): A list of dictionaries to join on the left\n right_items (`list`): A list of dictionaries to join on the right\n\n Returns:\n :obj:`list`: A joined list of dictionaries.\n\n \"\"\"\n if join == 'right outer':\n primary_items, secondary_items = right_items, left_items\n else:\n primary_items, secondary_items = left_items, right_items\n joined_items = copy.deepcopy(primary_items)\n for secondary_item in secondary_items:\n # outer join: merge primary and secondary, matching where possible\n if join == 'outer' and secondary_item in primary_items:\n continue\n # left/right outer join: only add secondary if existing item (by key value) is not in primary\n # NB: this will be slow for large numbers of items\n elif join in ['left outer', 'right outer'] and secondary_item[key] in [\n item[key] for item in joined_items\n ]:\n continue\n joined_items.append(copy.deepcopy(secondary_item))\n return joined_items\n\n\ndef _join_versions(join, left_version, right_version):\n \"\"\"\n Join two workspace versions.\n\n Raises:\n ~pyhf.exceptions.InvalidWorkspaceOperation: Versions are incompatible.\n\n Args:\n join (`str`): The join operation to apply. See ~pyhf.workspace.Workspace for valid join operations.\n left_version (`str`): The left workspace version.\n right_version (`str`): The right workspace version.\n\n Returns:\n :obj:`str`: The workspace version.\n\n \"\"\"\n if left_version != right_version:\n raise exceptions.InvalidWorkspaceOperation(\n f\"Workspaces of different versions cannot be combined: {left_version} != {right_version}\"\n )\n return left_version\n\n\ndef _join_channels(join, left_channels, right_channels):\n \"\"\"\n Join two workspace channel specifications.\n\n Raises:\n ~pyhf.exceptions.InvalidWorkspaceOperation: Channel specifications are incompatible.\n\n Args:\n join (`str`): The join operation to apply. See ~pyhf.workspace.Workspace for valid join operations.\n left_channels (`list`): The left channel specification.\n right_channels (`list`): The right channel specification.\n\n Returns:\n :obj:`list`: A joined list of channels. Each channel follows the :obj:`defs.json#/definitions/channel` `schema <https://scikit-hep.org/pyhf/likelihood.html#channel>`__\n\n \"\"\"\n\n joined_channels = _join_items(join, left_channels, right_channels)\n if join == 'none':\n common_channels = set(c['name'] for c in left_channels).intersection(\n c['name'] for c in right_channels\n )\n if common_channels:\n raise exceptions.InvalidWorkspaceOperation(\n f\"Workspaces cannot have any channels in common with the same name: {common_channels}. 
You can also try a different join operation: {Workspace.valid_joins}.\"\n )\n\n elif join == 'outer':\n counted_channels = collections.Counter(\n channel['name'] for channel in joined_channels\n )\n incompatible_channels = [\n channel for channel, count in counted_channels.items() if count > 1\n ]\n if incompatible_channels:\n raise exceptions.InvalidWorkspaceOperation(\n f\"Workspaces cannot have channels in common with incompatible structure: {incompatible_channels}. You can also try a different join operation: {Workspace.valid_joins}.\"\n )\n return joined_channels\n\n\ndef _join_observations(join, left_observations, right_observations):\n \"\"\"\n Join two workspace observation specifications.\n\n Raises:\n ~pyhf.exceptions.InvalidWorkspaceOperation: Observation specifications are incompatible.\n\n Args:\n join (`str`): The join operation to apply. See ~pyhf.workspace.Workspace for valid join operations.\n left_observations (`list`): The left observation specification.\n right_observations (`list`): The right observation specification.\n\n Returns:\n :obj:`list`: A joined list of observations. Each observation follows the :obj:`defs.json#/definitions/observation` `schema <https://scikit-hep.org/pyhf/likelihood.html#observations>`__\n\n \"\"\"\n joined_observations = _join_items(join, left_observations, right_observations)\n if join == 'none':\n common_observations = set(\n obs['name'] for obs in left_observations\n ).intersection(obs['name'] for obs in right_observations)\n if common_observations:\n raise exceptions.InvalidWorkspaceOperation(\n f\"Workspaces cannot have any observations in common with the same name: {common_observations}. You can also try a different join operation: {Workspace.valid_joins}.\"\n )\n\n elif join == 'outer':\n counted_observations = collections.Counter(\n observation['name'] for observation in joined_observations\n )\n incompatible_observations = [\n observation\n for observation, count in counted_observations.items()\n if count > 1\n ]\n if incompatible_observations:\n raise exceptions.InvalidWorkspaceOperation(\n f\"Workspaces cannot have observations in common with incompatible structure: {incompatible_observations}. You can also try a different join operation: {Workspace.valid_joins}.\"\n )\n return joined_observations\n\n\ndef _join_parameter_configs(measurement_name, left_parameters, right_parameters):\n \"\"\"\n Join two measurement parameter config specifications.\n\n Only uses by :method:`_join_measurements` when join='outer'.\n\n Raises:\n ~pyhf.exceptions.InvalidWorkspaceOperation: Parameter configuration specifications are incompatible.\n\n Args:\n measurement_name (`str`): The name of the measurement being joined (a detail for raising exceptions correctly)\n left_parameters (`list`): The left parameter configuration specification.\n right_parameters (`list`): The right parameter configuration specification.\n\n Returns:\n :obj:`list`: A joined list of parameter configurations. 
Each parameter configuration follows the :obj:`defs.json#/definitions/config` schema\n\n \"\"\"\n joined_parameter_configs = _join_items('outer', left_parameters, right_parameters)\n counted_parameter_configs = collections.Counter(\n parameter['name'] for parameter in joined_parameter_configs\n )\n incompatible_parameter_configs = [\n parameter for parameter, count in counted_parameter_configs.items() if count > 1\n ]\n if incompatible_parameter_configs:\n raise exceptions.InvalidWorkspaceOperation(\n f\"Workspaces cannot have a measurement ({measurement_name}) with incompatible parameter configs: {incompatible_parameter_configs}. You can also try a different join operation: {Workspace.valid_joins}.\"\n )\n return joined_parameter_configs\n\n\ndef _join_measurements(join, left_measurements, right_measurements):\n \"\"\"\n Join two workspace measurement specifications.\n\n Raises:\n ~pyhf.exceptions.InvalidWorkspaceOperation: Measurement specifications are incompatible.\n\n Args:\n join (`str`): The join operation to apply. See ~pyhf.workspace.Workspace for valid join operations.\n left_measurements (`list`): The left measurement specification.\n right_measurements (`list`): The right measurement specification.\n\n Returns:\n :obj:`list`: A joined list of measurements. Each measurement follows the :obj:`defs.json#/definitions/measurement` `schema <https://scikit-hep.org/pyhf/likelihood.html#measurements>`__\n\n \"\"\"\n joined_measurements = _join_items(join, left_measurements, right_measurements)\n if join == 'none':\n common_measurements = set(\n meas['name'] for meas in left_measurements\n ).intersection(meas['name'] for meas in right_measurements)\n if common_measurements:\n raise exceptions.InvalidWorkspaceOperation(\n f\"Workspaces cannot have any measurements in common with the same name: {common_measurements}. 
You can also try a different join operation: {Workspace.valid_joins}.\"\n )\n\n elif join == 'outer':\n # need to store a mapping of measurement name to all measurement objects with that name\n _measurement_mapping = {}\n for measurement in joined_measurements:\n _measurement_mapping.setdefault(measurement['name'], []).append(measurement)\n # first check for incompatible POI\n # then merge parameter configs\n incompatible_poi = [\n measurement_name\n for measurement_name, measurements in _measurement_mapping.items()\n if len(set(measurement['config']['poi'] for measurement in measurements))\n > 1\n ]\n if incompatible_poi:\n raise exceptions.InvalidWorkspaceOperation(\n f\"Workspaces cannot have the same measurements with incompatible POI: {incompatible_poi}.\"\n )\n\n joined_measurements = []\n for measurement_name, measurements in _measurement_mapping.items():\n if len(measurements) != 1:\n new_measurement = {\n 'name': measurement_name,\n 'config': {\n 'poi': measurements[0]['config']['poi'],\n 'parameters': _join_parameter_configs(\n measurement_name,\n *[\n measurement['config']['parameters']\n for measurement in measurements\n ],\n ),\n },\n }\n else:\n new_measurement = measurements[0]\n joined_measurements.append(new_measurement)\n return joined_measurements\n\n\nclass Workspace(_ChannelSummaryMixin, dict):\n \"\"\"\n A JSON-serializable object that is built from an object that follows the :obj:`workspace.json` `schema <https://scikit-hep.org/pyhf/likelihood.html#workspace>`__.\n \"\"\"\n\n valid_joins = ['none', 'outer', 'left outer', 'right outer']\n\n def __init__(self, spec, **config_kwargs):\n \"\"\"Workspaces hold the model, data and measurements.\"\"\"\n super(Workspace, self).__init__(spec, channels=spec['channels'])\n self.schema = config_kwargs.pop('schema', 'workspace.json')\n self.version = config_kwargs.pop('version', spec.get('version', None))\n\n # run jsonschema validation of input specification against the (provided) schema\n log.info(f\"Validating spec against schema: {self.schema}\")\n utils.validate(self, self.schema, version=self.version)\n\n self.measurement_names = []\n for measurement in self.get('measurements', []):\n self.measurement_names.append(measurement['name'])\n\n self.observations = {}\n for obs in self['observations']:\n self.observations[obs['name']] = obs['data']\n\n def __eq__(self, other):\n \"\"\"Equality is defined as equal dict representations.\"\"\"\n if not isinstance(other, Workspace):\n return False\n return dict(self) == dict(other)\n\n def __ne__(self, other):\n \"\"\"Negation of equality.\"\"\"\n return not self == other\n\n def __repr__(self):\n \"\"\"Representation of the Workspace.\"\"\"\n return object.__repr__(self)\n\n def get_measurement(\n self, poi_name=None, measurement_name=None, measurement_index=None\n ):\n \"\"\"\n Get (or create) a measurement object.\n\n The following logic is used:\n\n 1. if the poi name is given, create a measurement object for that poi\n 2. if the measurement name is given, find the measurement for the given name\n 3. if the measurement index is given, return the measurement at that index\n 4. 
if there are measurements but none of the above have been specified, return the 0th measurement\n\n Raises:\n ~pyhf.exceptions.InvalidMeasurement: If the measurement was not found\n\n Args:\n poi_name (`str`): The name of the parameter of interest to create a new measurement from\n measurement_name (`str`): The name of the measurement to use\n measurement_index (`int`): The index of the measurement to use\n\n Returns:\n :obj:`dict`: A measurement object adhering to the schema defs.json#/definitions/measurement\n\n \"\"\"\n measurement = None\n if poi_name is not None:\n measurement = {\n 'name': 'NormalMeasurement',\n 'config': {'poi': poi_name, 'parameters': []},\n }\n elif self.measurement_names:\n if measurement_name is not None:\n if measurement_name not in self.measurement_names:\n log.debug(\n 'measurements defined:\\n\\t{0:s}'.format(\n '\\n\\t'.join(self.measurement_names)\n )\n )\n raise exceptions.InvalidMeasurement(\n 'no measurement by name \\'{0:s}\\' was found in the workspace, pick from one of the valid ones above'.format(\n measurement_name\n )\n )\n measurement = self['measurements'][\n self.measurement_names.index(measurement_name)\n ]\n else:\n if measurement_index is None and len(self.measurement_names) > 1:\n log.warning(\n 'multiple measurements defined. Taking the first measurement.'\n )\n\n measurement_index = (\n measurement_index if measurement_index is not None else 0\n )\n try:\n measurement = self['measurements'][measurement_index]\n except IndexError:\n raise exceptions.InvalidMeasurement(\n f\"The measurement index {measurement_index} is out of bounds as only {len(self.measurement_names)} measurement(s) have been defined.\"\n )\n else:\n raise exceptions.InvalidMeasurement(\"No measurements have been defined.\")\n\n utils.validate(measurement, 'measurement.json', self.version)\n return measurement\n\n def model(self, **config_kwargs):\n \"\"\"\n Create a model object with/without patches applied.\n\n See :func:`pyhf.workspace.Workspace.get_measurement` and :class:`pyhf.pdf.Model` for possible keyword arguments.\n\n Args:\n patches: A list of JSON patches to apply to the model specification\n config_kwargs: Possible keyword arguments for the measurement and model configuration\n\n Returns:\n ~pyhf.pdf.Model: A model object adhering to the schema model.json\n\n \"\"\"\n\n poi_name = config_kwargs.pop('poi_name', None)\n measurement_name = config_kwargs.pop('measurement_name', None)\n measurement_index = config_kwargs.pop('measurement_index', None)\n measurement = self.get_measurement(\n poi_name=poi_name,\n measurement_name=measurement_name,\n measurement_index=measurement_index,\n )\n log.debug(\n 'model being created for measurement {0:s}'.format(measurement['name'])\n )\n\n patches = config_kwargs.pop('patches', [])\n\n modelspec = {\n 'channels': self['channels'],\n 'parameters': measurement['config']['parameters'],\n }\n for patch in patches:\n modelspec = jsonpatch.JsonPatch(patch).apply(modelspec)\n\n return Model(modelspec, poi_name=measurement['config']['poi'], **config_kwargs)\n\n def data(self, model, with_aux=True):\n \"\"\"\n Return the data for the supplied model with or without auxiliary data from the model.\n\n The model is needed as the order of the data depends on the order of the channels in the model.\n\n Raises:\n KeyError: Invalid or missing channel\n\n Args:\n model (~pyhf.pdf.Model): A model object adhering to the schema model.json\n with_aux (bool): Whether to include auxiliary data from the model or not\n\n Returns:\n :obj:`list`: 
data\n\n \"\"\"\n try:\n observed_data = sum(\n (self.observations[c] for c in model.config.channels), []\n )\n except KeyError:\n log.error(\n \"Invalid channel: the workspace does not have observation data for one of the channels in the model.\"\n )\n raise\n if with_aux:\n observed_data += model.config.auxdata\n return observed_data\n\n def _prune_and_rename(\n self,\n prune_modifiers=None,\n prune_modifier_types=None,\n prune_samples=None,\n prune_channels=None,\n prune_measurements=None,\n rename_modifiers=None,\n rename_samples=None,\n rename_channels=None,\n rename_measurements=None,\n ):\n \"\"\"\n Return a new, pruned, renamed workspace specification. This will not modify the original workspace.\n\n Pruning removes pieces of the workspace whose name or type matches the\n user-provided lists. The pruned, renamed workspace must also be a valid\n workspace.\n\n A workspace is composed of many named components, such as channels and\n samples, as well as types of systematics (e.g. `histosys`). Components\n can be removed (pruned away) filtering on name or be renamed according\n to the provided :obj:`dict` mapping. Additionally, modifiers of\n specific types can be removed (pruned away).\n\n This function also handles specific peculiarities, such as\n renaming/removing a channel which needs to rename/remove the\n corresponding `observation`.\n\n Args:\n prune_modifiers: A :obj:`str` or a :obj:`list` of modifiers to prune.\n prune_modifier_types: A :obj:`str` or a :obj:`list` of modifier types to prune.\n prune_samples: A :obj:`str` or a :obj:`list` of samples to prune.\n prune_channels: A :obj:`str` or a :obj:`list` of channels to prune.\n prune_measurements: A :obj:`str` or a :obj:`list` of measurements to prune.\n rename_modifiers: A :obj:`dict` mapping old modifier name to new modifier name.\n rename_samples: A :obj:`dict` mapping old sample name to new sample name.\n rename_channels: A :obj:`dict` mapping old channel name to new channel name.\n rename_measurements: A :obj:`dict` mapping old measurement name to new measurement name.\n\n Returns:\n ~pyhf.workspace.Workspace: A new workspace object with the specified components removed or renamed\n\n \"\"\"\n # avoid mutable defaults\n prune_modifiers = [] if prune_modifiers is None else prune_modifiers\n prune_modifier_types = (\n [] if prune_modifier_types is None else prune_modifier_types\n )\n prune_samples = [] if prune_samples is None else prune_samples\n prune_channels = [] if prune_channels is None else prune_channels\n prune_measurements = [] if prune_measurements is None else prune_measurements\n rename_modifiers = {} if rename_modifiers is None else rename_modifiers\n rename_samples = {} if rename_samples is None else rename_samples\n rename_channels = {} if rename_channels is None else rename_channels\n rename_measurements = {} if rename_measurements is None else rename_measurements\n\n newspec = {\n 'channels': [\n {\n 'name': rename_channels.get(channel['name'], channel['name']),\n 'samples': [\n {\n 'name': rename_samples.get(sample['name'], sample['name']),\n 'data': sample['data'],\n 'modifiers': [\n dict(\n modifier,\n name=rename_modifiers.get(\n modifier['name'], modifier['name']\n ),\n )\n for modifier in sample['modifiers']\n if modifier['name'] not in prune_modifiers\n and modifier['type'] not in prune_modifier_types\n ],\n }\n for sample in channel['samples']\n if sample['name'] not in prune_samples\n ],\n }\n for channel in self['channels']\n if channel['name'] not in prune_channels\n ],\n 'measurements': [\n 
{\n 'name': rename_measurements.get(\n measurement['name'], measurement['name']\n ),\n 'config': {\n 'parameters': [\n dict(\n parameter,\n name=rename_modifiers.get(\n parameter['name'], parameter['name']\n ),\n )\n for parameter in measurement['config']['parameters']\n if parameter['name'] not in prune_modifiers\n ],\n 'poi': rename_modifiers.get(\n measurement['config']['poi'], measurement['config']['poi']\n ),\n },\n }\n for measurement in self['measurements']\n if measurement['name'] not in prune_measurements\n ],\n 'observations': [\n dict(\n copy.deepcopy(observation),\n name=rename_channels.get(observation['name'], observation['name']),\n )\n for observation in self['observations']\n if observation['name'] not in prune_channels\n ],\n 'version': self['version'],\n }\n return Workspace(newspec)\n\n def prune(\n self,\n modifiers=None,\n modifier_types=None,\n samples=None,\n channels=None,\n measurements=None,\n ):\n \"\"\"\n Return a new, pruned workspace specification. This will not modify the original workspace.\n\n The pruned workspace must also be a valid workspace.\n\n Args:\n modifiers: A :obj:`str` or a :obj:`list` of modifiers to prune.\n modifier_types: A :obj:`str` or a :obj:`list` of modifier types to prune.\n samples: A :obj:`str` or a :obj:`list` of samples to prune.\n channels: A :obj:`str` or a :obj:`list` of channels to prune.\n measurements: A :obj:`str` or a :obj:`list` of measurements to prune.\n\n Returns:\n ~pyhf.workspace.Workspace: A new workspace object with the specified components removed\n\n \"\"\"\n # avoid mutable defaults\n modifiers = [] if modifiers is None else modifiers\n modifier_types = [] if modifier_types is None else modifier_types\n samples = [] if samples is None else samples\n channels = [] if channels is None else channels\n measurements = [] if measurements is None else measurements\n\n return self._prune_and_rename(\n prune_modifiers=modifiers,\n prune_modifier_types=modifier_types,\n prune_samples=samples,\n prune_channels=channels,\n prune_measurements=measurements,\n )\n\n def rename(self, modifiers=None, samples=None, channels=None, measurements=None):\n \"\"\"\n Return a new workspace specification with certain elements renamed.\n\n This will not modify the original workspace.\n The renamed workspace must also be a valid workspace.\n\n Args:\n modifiers: A :obj:`dict` mapping old modifier name to new modifier name.\n samples: A :obj:`dict` mapping old sample name to new sample name.\n channels: A :obj:`dict` mapping old channel name to new channel name.\n measurements: A :obj:`dict` mapping old measurement name to new measurement name.\n\n Returns:\n ~pyhf.workspace.Workspace: A new workspace object with the specified components renamed\n\n \"\"\"\n # avoid mutable defaults\n modifiers = {} if modifiers is None else modifiers\n samples = {} if samples is None else samples\n channels = {} if channels is None else channels\n measurements = {} if measurements is None else measurements\n\n return self._prune_and_rename(\n rename_modifiers=modifiers,\n rename_samples=samples,\n rename_channels=channels,\n rename_measurements=measurements,\n )\n\n @classmethod\n def combine(cls, left, right, join='none'):\n \"\"\"\n Return a new workspace specification that is the combination of the two workspaces.\n\n The new workspace must also be a valid workspace. A combination of\n workspaces is done by combining the set of:\n\n - channels,\n - observations, and\n - measurements\n\n between the two workspaces. 
If the two workspaces have modifiers that\n follow the same naming convention, then correlations across the two\n workspaces may be possible. In particular, the `lumi` modifier will be\n fully-correlated.\n\n If the two workspaces have the same measurement (with the same POI),\n those measurements will get merged.\n\n Raises:\n ~pyhf.exceptions.InvalidWorkspaceOperation: The workspaces have common channel names, incompatible measurements, or incompatible schema versions.\n\n Args:\n left (~pyhf.workspace.Workspace): A workspace\n right (~pyhf.workspace.Workspace): Another workspace\n join (:obj:`str`): How to join the two workspaces. Pick from \"none\", \"outer\", \"left outer\", or \"right outer\".\n\n Returns:\n ~pyhf.workspace.Workspace: A new combined workspace object\n\n \"\"\"\n if join not in Workspace.valid_joins:\n raise ValueError(\n f\"Workspaces must be joined using one of the valid join operations ({Workspace.valid_joins}); not {join}\"\n )\n if join in ['left outer', 'right outer']:\n log.warning(\n \"You are using an unsafe join operation. This will silence exceptions that might be raised during a normal 'outer' operation.\"\n )\n\n new_version = _join_versions(join, left['version'], right['version'])\n new_channels = _join_channels(join, left['channels'], right['channels'])\n new_observations = _join_observations(\n join, left['observations'], right['observations']\n )\n new_measurements = _join_measurements(\n join, left['measurements'], right['measurements']\n )\n\n newspec = {\n 'channels': new_channels,\n 'measurements': new_measurements,\n 'observations': new_observations,\n 'version': new_version,\n }\n return cls(newspec)\n",
"path": "src/pyhf/workspace.py"
}
] | diff --git a/src/pyhf/workspace.py b/src/pyhf/workspace.py
index d2515bb3bb..6534bed5b0 100644
--- a/src/pyhf/workspace.py
+++ b/src/pyhf/workspace.py
@@ -675,4 +675,4 @@ def combine(cls, left, right, join='none'):
'observations': new_observations,
'version': new_version,
}
- return Workspace(newspec)
+ return cls(newspec)
diff --git a/tests/test_workspace.py b/tests/test_workspace.py
index b67c2306a7..91d908b21a 100644
--- a/tests/test_workspace.py
+++ b/tests/test_workspace.py
@@ -717,3 +717,32 @@ def test_workspace_equality(workspace_factory):
assert ws == ws
assert ws == ws_other
assert ws != 'not a workspace'
+
+
+def test_workspace_inheritance(workspace_factory):
+ ws = workspace_factory()
+ new_ws = ws.rename(
+ channels={'channel1': 'channel3', 'channel2': 'channel4'},
+ samples={
+ 'background1': 'background3',
+ 'background2': 'background4',
+ 'signal': 'signal2',
+ },
+ modifiers={
+ 'syst1': 'syst4',
+ 'bkg1Shape': 'bkg3Shape',
+ 'bkg2Shape': 'bkg4Shape',
+ },
+ measurements={
+ 'GaussExample': 'OtherGaussExample',
+ 'GammaExample': 'OtherGammaExample',
+ 'ConstExample': 'OtherConstExample',
+ 'LogNormExample': 'OtherLogNormExample',
+ },
+ )
+
+ class FooWorkspace(pyhf.Workspace):
+ pass
+
+ combined = FooWorkspace.combine(ws, new_ws)
+ assert isinstance(combined, FooWorkspace)
|
mkdocs__mkdocs-836 | Error when using a unicode filename on Windows
I am not totally sure this is the right place, but you will tell me :)
In an mkdocs-material project I wanted to use diacritics in filenames, like this:
```
/Kapitel
1. Einstieg
2. Übersicht
3. Etcetera
```
And such an **"Ü"** seems to break everything:
```
C:\Python27\lib\urllib.py:1303: UnicodeWarning: Unicode equal comparison failed to convert both arguments to Unicode - interpreting them as being unequal
return ''.join(map(quoter, s))
ERROR - Error building page Allgemeines\1. Richtlinien.md
Traceback (most recent call last):
File "C:\Python27\lib\runpy.py", line 162, in _run_module_as_main
"__main__", fname, loader, pkg_name)
File "C:\Python27\lib\runpy.py", line 72, in _run_code
exec code in run_globals
File "C:\Python27\Scripts\mkdocs.exe\__main__.py", line 9, in <module>
File "C:\Python27\lib\site-packages\click\core.py", line 716, in __call__
return self.main(*args, **kwargs)
File "C:\Python27\lib\site-packages\click\core.py", line 696, in main
rv = self.invoke(ctx)
File "C:\Python27\lib\site-packages\click\core.py", line 1060, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "C:\Python27\lib\site-packages\click\core.py", line 889, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "C:\Python27\lib\site-packages\click\core.py", line 534, in invoke
return callback(*args, **kwargs)
File "C:\Python27\lib\site-packages\mkdocs\__main__.py", line 115, in serve_command
livereload=livereload,
File "C:\Python27\lib\site-packages\mkdocs\commands\serve.py", line 78, in serve
config = builder()
File "C:\Python27\lib\site-packages\mkdocs\commands\serve.py", line 74, in builder
build(config, live_server=True, clean_site_dir=True)
File "C:\Python27\lib\site-packages\mkdocs\commands\build.py", line 289, in build
build_pages(config)
File "C:\Python27\lib\site-packages\mkdocs\commands\build.py", line 249, in build_pages
dump_json)
File "C:\Python27\lib\site-packages\mkdocs\commands\build.py", line 184, in _build_page
output_content = template.render(context)
File "C:\Python27\lib\site-packages\jinja2\environment.py", line 989, in render
return self.environment.handle_exception(exc_info, True)
File "C:\Python27\lib\site-packages\jinja2\environment.py", line 754, in handle_exception
reraise(exc_type, exc_value, tb)
File "C:\Python27\lib\site-packages\material\base.html", line 102, in top-level template code
{% include "drawer.html" %}
File "C:\Python27\lib\site-packages\material\drawer.html", line 41, in top-level template code
{% include "nav.html" %}
File "C:\Python27\lib\site-packages\material\nav.html", line 6, in top-level template code
{% include 'nav.html' %}
File "C:\Python27\lib\site-packages\material\nav.html", line 12, in top-level template code
<a class="{% if nav_item.active %}current{% endif %}" title="{{ nav_item.title }}" href="{{ nav_item.url }}">
File "C:\Python27\lib\site-packages\jinja2\environment.py", line 408, in getattr
return getattr(obj, attribute)
File "C:\Python27\lib\site-packages\mkdocs\nav.py", line 153, in url
return self.url_context.make_relative(self.abs_url)
File "C:\Python27\lib\site-packages\mkdocs\nav.py", line 105, in make_relative
return utils.path_to_url(relative_path)
File "C:\Python27\lib\site-packages\mkdocs\utils\__init__.py", line 324, in path_to_url
return pathname2url(path)
File "C:\Python27\lib\nturl2path.py", line 54, in pathname2url
return urllib.quote('/'.join(components))
File "C:\Python27\lib\urllib.py", line 1303, in quote
return ''.join(map(quoter, s))
KeyError: u'\xdc'
```
Is this already known?
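
For what it's worth, the failure boils down to Python 2's `urllib.quote` (reached via `nturl2path.pathname2url`, as in the traceback above) raising `KeyError` for unicode strings containing non-ASCII characters. The sketch below is a hypothetical minimal reproduction — the path is made up — showing that quoting UTF-8 bytes instead of a unicode string avoids the error:

```python
# Hypothetical reproduction (not mkdocs code): on Python 2, urllib.quote
# raises KeyError for non-ASCII code points such as u'\xdc' ('Ü').
import sys
from nturl2path import pathname2url  # the Windows pathname2url from the traceback

path = u'Allgemeines\\2. \xdcbersicht.md'  # made-up path containing an 'Ü'

if sys.version_info < (3, 0):
    # Encoding to UTF-8 bytes before quoting sidesteps the KeyError on Python 2.
    path = path.encode('utf8')

print(pathname2url(path))  # Allgemeines/2.%20%C3%9Cbersicht.md
```

This is essentially the `sys.version_info < (3, 0)` guard that the eventual fix adds to `path_to_url` in `mkdocs/utils/__init__.py`.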
| [
{
"content": "# coding: utf-8\n\n\"\"\"\nStandalone file utils.\n\nNothing in this module should have an knowledge of config or the layout\nand structure of the site and pages in the site.\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport logging\nimport markdown\nimport os\nimport pkg_resources\nimport shutil\nimport sys\nimport yaml\n\nfrom mkdocs import toc, exceptions\n\ntry: # pragma: no cover\n from urllib.parse import urlparse, urlunparse, urljoin # noqa\n from urllib.request import pathname2url # noqa\n from collections import UserDict # noqa\nexcept ImportError: # pragma: no cover\n from urlparse import urlparse, urlunparse, urljoin # noqa\n from urllib import pathname2url # noqa\n from UserDict import UserDict # noqa\n\n\nPY3 = sys.version_info[0] == 3\n\nif PY3: # pragma: no cover\n string_types = str, # noqa\n text_type = str # noqa\nelse: # pragma: no cover\n string_types = basestring, # noqa\n text_type = unicode # noqa\n\nlog = logging.getLogger(__name__)\n\n\ndef yaml_load(source, loader=yaml.Loader):\n \"\"\"\n Wrap PyYaml's loader so we can extend it to suit our needs.\n\n Load all strings as unicode.\n http://stackoverflow.com/a/2967461/3609487\n \"\"\"\n\n def construct_yaml_str(self, node):\n \"\"\"\n Override the default string handling function to always return\n unicode objects.\n \"\"\"\n return self.construct_scalar(node)\n\n class Loader(loader):\n \"\"\"\n Define a custom loader derived from the global loader to leave the\n global loader unaltered.\n \"\"\"\n\n # Attach our unicode constructor to our custom loader ensuring all strings\n # will be unicode on translation.\n Loader.add_constructor('tag:yaml.org,2002:str', construct_yaml_str)\n\n try:\n return yaml.load(source, Loader)\n finally:\n # TODO: Remove this when external calls are properly cleaning up file\n # objects. Some mkdocs internal calls, sometimes in test lib, will\n # load configs with a file object but never close it. On some\n # systems, if a delete action is performed on that file without Python\n # closing that object, there will be an access error. This will\n # process the file and close it as there should be no more use for the\n # file once we process the yaml content.\n if hasattr(source, 'close'):\n source.close()\n\n\ndef reduce_list(data_set):\n \"\"\" Reduce duplicate items in a list and preserve order \"\"\"\n seen = set()\n return [item for item in data_set if\n item not in seen and not seen.add(item)]\n\n\ndef copy_file(source_path, output_path):\n \"\"\"\n Copy source_path to output_path, making sure any parent directories exist.\n \"\"\"\n output_dir = os.path.dirname(output_path)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n shutil.copy(source_path, output_path)\n\n\ndef write_file(content, output_path):\n \"\"\"\n Write content to output_path, making sure any parent directories exist.\n \"\"\"\n output_dir = os.path.dirname(output_path)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n open(output_path, 'wb').write(content)\n\n\ndef clean_directory(directory):\n \"\"\"\n Remove the content of a directory recursively but not the directory itself.\n \"\"\"\n if not os.path.exists(directory):\n return\n\n for entry in os.listdir(directory):\n\n # Don't remove hidden files from the directory. 
We never copy files\n # that are hidden, so we shouldn't delete them either.\n if entry.startswith('.'):\n continue\n\n path = os.path.join(directory, entry)\n if os.path.isdir(path):\n shutil.rmtree(path, True)\n else:\n os.unlink(path)\n\n\ndef copy_media_files(from_dir, to_dir):\n \"\"\"\n Recursively copy all files except markdown and HTML into another directory.\n \"\"\"\n for (source_dir, dirnames, filenames) in os.walk(from_dir):\n relative_path = os.path.relpath(source_dir, from_dir)\n output_dir = os.path.normpath(os.path.join(to_dir, relative_path))\n\n # Filter filenames starting with a '.'\n filenames = [f for f in filenames if not f.startswith('.')]\n\n # Filter the dirnames that start with a '.' and update the list in\n # place to prevent us walking these.\n dirnames[:] = [d for d in dirnames if not d.startswith('.')]\n\n for filename in filenames:\n if not is_markdown_file(filename):\n source_path = os.path.join(source_dir, filename)\n output_path = os.path.join(output_dir, filename)\n copy_file(source_path, output_path)\n\n\ndef get_html_path(path):\n \"\"\"\n Map a source file path to an output html path.\n\n Paths like 'index.md' will be converted to 'index.html'\n Paths like 'about.md' will be converted to 'about/index.html'\n Paths like 'api-guide/core.md' will be converted to 'api-guide/core/index.html'\n \"\"\"\n path = os.path.splitext(path)[0]\n if os.path.basename(path) == 'index':\n return path + '.html'\n return \"/\".join((path, 'index.html'))\n\n\ndef get_url_path(path, use_directory_urls=True):\n \"\"\"\n Map a source file path to an output html path.\n\n Paths like 'index.md' will be converted to '/'\n Paths like 'about.md' will be converted to '/about/'\n Paths like 'api-guide/core.md' will be converted to '/api-guide/core/'\n\n If `use_directory_urls` is `False`, returned URLs will include the a trailing\n `index.html` rather than just returning the directory path.\n \"\"\"\n path = get_html_path(path)\n url = '/' + path.replace(os.path.sep, '/')\n if use_directory_urls:\n return url[:-len('index.html')]\n return url\n\n\ndef is_homepage(path):\n return os.path.splitext(path)[0] == 'index'\n\n\ndef is_markdown_file(path):\n \"\"\"\n Return True if the given file path is a Markdown file.\n\n http://superuser.com/questions/249436/file-extension-for-markdown-files\n \"\"\"\n ext = os.path.splitext(path)[1].lower()\n return ext in [\n '.markdown',\n '.mdown',\n '.mkdn',\n '.mkd',\n '.md',\n ]\n\n\ndef is_css_file(path):\n \"\"\"\n Return True if the given file path is a CSS file.\n \"\"\"\n ext = os.path.splitext(path)[1].lower()\n return ext in [\n '.css',\n ]\n\n\ndef is_javascript_file(path):\n \"\"\"\n Return True if the given file path is a Javascript file.\n \"\"\"\n ext = os.path.splitext(path)[1].lower()\n return ext in [\n '.js',\n '.javascript'\n ]\n\n\ndef is_html_file(path):\n \"\"\"\n Return True if the given file path is an HTML file.\n \"\"\"\n ext = os.path.splitext(path)[1].lower()\n return ext in [\n '.html',\n '.htm',\n ]\n\n\ndef is_template_file(path):\n \"\"\"\n Return True if the given file path is an HTML file.\n \"\"\"\n ext = os.path.splitext(path)[1].lower()\n return ext in [\n '.html',\n '.htm',\n '.xml',\n ]\n\n\ndef create_media_urls(nav, path_list):\n \"\"\"\n Return a list of URLs that have been processed correctly for inclusion in\n a page.\n \"\"\"\n final_urls = []\n\n for path in path_list:\n # Allow links to fully qualified URL's\n parsed = urlparse(path)\n if parsed.netloc:\n final_urls.append(path)\n continue\n # We must 
be looking at a local path.\n url = path_to_url(path)\n relative_url = '%s/%s' % (nav.url_context.make_relative('/'), url)\n final_urls.append(relative_url)\n\n return final_urls\n\n\ndef create_relative_media_url(nav, url):\n \"\"\"\n For a current page, create a relative url based on the given URL.\n\n On index.md (which becomes /index.html):\n image.png -> ./image.png\n /image.png -> ./image.png\n\n On sub/page.md (which becomes /sub/page/index.html):\n image.png -> ../image.png\n /image.png -> ../../image.png\n\n On sub/index.md (which becomes /sub/index.html):\n image.png -> ./image.png\n /image.png -> ./image.png\n\n \"\"\"\n\n # Allow links to fully qualified URL's\n parsed = urlparse(url)\n if parsed.netloc:\n return url\n\n # If the URL we are looking at starts with a /, then it should be\n # considered as absolute and will be 'relative' to the root.\n if url.startswith('/'):\n base = '/'\n url = url[1:]\n else:\n base = nav.url_context.base_path\n\n relative_base = nav.url_context.make_relative(base)\n if relative_base == \".\" and url.startswith(\"./\"):\n relative_url = url\n else:\n relative_url = '%s/%s' % (relative_base, url)\n\n # TODO: Fix this, this is a hack. Relative urls are not being calculated\n # correctly for images in the same directory as the markdown. I think this\n # is due to us moving it into a directory with index.html, but I'm not sure\n if (nav.file_context.current_file.endswith(\"/index.md\") is False and\n nav.url_context.base_path != '/' and\n relative_url.startswith(\"./\")):\n relative_url = \".%s\" % relative_url\n\n return relative_url\n\n\ndef path_to_url(path):\n \"\"\"Convert a system path to a URL.\"\"\"\n\n if os.path.sep == '/':\n return path\n\n return pathname2url(path)\n\n\ndef convert_markdown(markdown_source, extensions=None, extension_configs=None):\n \"\"\"\n Convert the Markdown source file to HTML content, and additionally\n return the parsed table of contents, and a dictionary of any metadata\n that was specified in the Markdown file.\n `extensions` is an optional sequence of Python Markdown extensions to add\n to the default set.\n \"\"\"\n md = markdown.Markdown(\n extensions=extensions or [],\n extension_configs=extension_configs or {}\n )\n html_content = md.convert(markdown_source)\n\n # On completely blank markdown files, no Meta or tox properties are added\n # to the generated document.\n meta = getattr(md, 'Meta', {})\n toc_html = getattr(md, 'toc', '')\n\n # Post process the generated table of contents into a data structure\n table_of_contents = toc.TableOfContents(toc_html)\n\n return (html_content, table_of_contents, meta)\n\n\ndef get_themes():\n \"\"\"Return a dict of theme names and their locations\"\"\"\n\n themes = {}\n builtins = pkg_resources.get_entry_map(dist='mkdocs', group='mkdocs.themes')\n\n for theme in pkg_resources.iter_entry_points(group='mkdocs.themes'):\n\n if theme.name in builtins and theme.dist.key != 'mkdocs':\n raise exceptions.ConfigurationError(\n \"The theme {0} is a builtin theme but {1} provides a theme \"\n \"with the same name\".format(theme.name, theme.dist.key))\n\n elif theme.name in themes:\n multiple_packages = [themes[theme.name].dist.key, theme.dist.key]\n log.warning(\"The theme %s is provided by the Python packages \"\n \"'%s'. 
The one in %s will be used.\",\n theme.name, ','.join(multiple_packages), theme.dist.key)\n\n themes[theme.name] = theme\n\n themes = dict((name, os.path.dirname(os.path.abspath(theme.load().__file__)))\n for name, theme in themes.items())\n\n return themes\n\n\ndef get_theme_names():\n \"\"\"Return a list containing all the names of all the builtin themes.\"\"\"\n\n return get_themes().keys()\n\n\ndef filename_to_title(filename):\n\n title = os.path.splitext(filename)[0]\n title = title.replace('-', ' ').replace('_', ' ')\n # Capitalize if the filename was all lowercase, otherwise leave it as-is.\n if title.lower() == title:\n title = title.capitalize()\n\n return title\n\n\ndef find_or_create_node(branch, key):\n \"\"\"\n Given a list, look for dictionary with a key matching key and return it's\n value. If it doesn't exist, create it with the value of an empty list and\n return that.\n \"\"\"\n\n for node in branch:\n if not isinstance(node, dict):\n continue\n\n if key in node:\n return node[key]\n\n new_branch = []\n node = {key: new_branch}\n branch.append(node)\n return new_branch\n\n\ndef nest_paths(paths):\n \"\"\"\n Given a list of paths, convert them into a nested structure that will match\n the pages config.\n \"\"\"\n nested = []\n\n for path in paths:\n\n if os.path.sep not in path:\n nested.append(path)\n continue\n\n directory, _ = os.path.split(path)\n parts = directory.split(os.path.sep)\n\n branch = nested\n for part in parts:\n part = filename_to_title(part)\n branch = find_or_create_node(branch, part)\n\n branch.append(path)\n\n return nested\n",
"path": "mkdocs/utils/__init__.py"
}
] | [
{
"content": "# coding: utf-8\n\n\"\"\"\nStandalone file utils.\n\nNothing in this module should have an knowledge of config or the layout\nand structure of the site and pages in the site.\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport logging\nimport markdown\nimport os\nimport pkg_resources\nimport shutil\nimport sys\nimport yaml\n\nfrom mkdocs import toc, exceptions\n\ntry: # pragma: no cover\n from urllib.parse import urlparse, urlunparse, urljoin # noqa\n from urllib.request import pathname2url # noqa\n from collections import UserDict # noqa\nexcept ImportError: # pragma: no cover\n from urlparse import urlparse, urlunparse, urljoin # noqa\n from urllib import pathname2url # noqa\n from UserDict import UserDict # noqa\n\n\nPY3 = sys.version_info[0] == 3\n\nif PY3: # pragma: no cover\n string_types = str, # noqa\n text_type = str # noqa\nelse: # pragma: no cover\n string_types = basestring, # noqa\n text_type = unicode # noqa\n\nlog = logging.getLogger(__name__)\n\n\ndef yaml_load(source, loader=yaml.Loader):\n \"\"\"\n Wrap PyYaml's loader so we can extend it to suit our needs.\n\n Load all strings as unicode.\n http://stackoverflow.com/a/2967461/3609487\n \"\"\"\n\n def construct_yaml_str(self, node):\n \"\"\"\n Override the default string handling function to always return\n unicode objects.\n \"\"\"\n return self.construct_scalar(node)\n\n class Loader(loader):\n \"\"\"\n Define a custom loader derived from the global loader to leave the\n global loader unaltered.\n \"\"\"\n\n # Attach our unicode constructor to our custom loader ensuring all strings\n # will be unicode on translation.\n Loader.add_constructor('tag:yaml.org,2002:str', construct_yaml_str)\n\n try:\n return yaml.load(source, Loader)\n finally:\n # TODO: Remove this when external calls are properly cleaning up file\n # objects. Some mkdocs internal calls, sometimes in test lib, will\n # load configs with a file object but never close it. On some\n # systems, if a delete action is performed on that file without Python\n # closing that object, there will be an access error. This will\n # process the file and close it as there should be no more use for the\n # file once we process the yaml content.\n if hasattr(source, 'close'):\n source.close()\n\n\ndef reduce_list(data_set):\n \"\"\" Reduce duplicate items in a list and preserve order \"\"\"\n seen = set()\n return [item for item in data_set if\n item not in seen and not seen.add(item)]\n\n\ndef copy_file(source_path, output_path):\n \"\"\"\n Copy source_path to output_path, making sure any parent directories exist.\n \"\"\"\n output_dir = os.path.dirname(output_path)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n shutil.copy(source_path, output_path)\n\n\ndef write_file(content, output_path):\n \"\"\"\n Write content to output_path, making sure any parent directories exist.\n \"\"\"\n output_dir = os.path.dirname(output_path)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n open(output_path, 'wb').write(content)\n\n\ndef clean_directory(directory):\n \"\"\"\n Remove the content of a directory recursively but not the directory itself.\n \"\"\"\n if not os.path.exists(directory):\n return\n\n for entry in os.listdir(directory):\n\n # Don't remove hidden files from the directory. 
We never copy files\n # that are hidden, so we shouldn't delete them either.\n if entry.startswith('.'):\n continue\n\n path = os.path.join(directory, entry)\n if os.path.isdir(path):\n shutil.rmtree(path, True)\n else:\n os.unlink(path)\n\n\ndef copy_media_files(from_dir, to_dir):\n \"\"\"\n Recursively copy all files except markdown and HTML into another directory.\n \"\"\"\n for (source_dir, dirnames, filenames) in os.walk(from_dir):\n relative_path = os.path.relpath(source_dir, from_dir)\n output_dir = os.path.normpath(os.path.join(to_dir, relative_path))\n\n # Filter filenames starting with a '.'\n filenames = [f for f in filenames if not f.startswith('.')]\n\n # Filter the dirnames that start with a '.' and update the list in\n # place to prevent us walking these.\n dirnames[:] = [d for d in dirnames if not d.startswith('.')]\n\n for filename in filenames:\n if not is_markdown_file(filename):\n source_path = os.path.join(source_dir, filename)\n output_path = os.path.join(output_dir, filename)\n copy_file(source_path, output_path)\n\n\ndef get_html_path(path):\n \"\"\"\n Map a source file path to an output html path.\n\n Paths like 'index.md' will be converted to 'index.html'\n Paths like 'about.md' will be converted to 'about/index.html'\n Paths like 'api-guide/core.md' will be converted to 'api-guide/core/index.html'\n \"\"\"\n path = os.path.splitext(path)[0]\n if os.path.basename(path) == 'index':\n return path + '.html'\n return \"/\".join((path, 'index.html'))\n\n\ndef get_url_path(path, use_directory_urls=True):\n \"\"\"\n Map a source file path to an output html path.\n\n Paths like 'index.md' will be converted to '/'\n Paths like 'about.md' will be converted to '/about/'\n Paths like 'api-guide/core.md' will be converted to '/api-guide/core/'\n\n If `use_directory_urls` is `False`, returned URLs will include the a trailing\n `index.html` rather than just returning the directory path.\n \"\"\"\n path = get_html_path(path)\n url = '/' + path.replace(os.path.sep, '/')\n if use_directory_urls:\n return url[:-len('index.html')]\n return url\n\n\ndef is_homepage(path):\n return os.path.splitext(path)[0] == 'index'\n\n\ndef is_markdown_file(path):\n \"\"\"\n Return True if the given file path is a Markdown file.\n\n http://superuser.com/questions/249436/file-extension-for-markdown-files\n \"\"\"\n ext = os.path.splitext(path)[1].lower()\n return ext in [\n '.markdown',\n '.mdown',\n '.mkdn',\n '.mkd',\n '.md',\n ]\n\n\ndef is_css_file(path):\n \"\"\"\n Return True if the given file path is a CSS file.\n \"\"\"\n ext = os.path.splitext(path)[1].lower()\n return ext in [\n '.css',\n ]\n\n\ndef is_javascript_file(path):\n \"\"\"\n Return True if the given file path is a Javascript file.\n \"\"\"\n ext = os.path.splitext(path)[1].lower()\n return ext in [\n '.js',\n '.javascript'\n ]\n\n\ndef is_html_file(path):\n \"\"\"\n Return True if the given file path is an HTML file.\n \"\"\"\n ext = os.path.splitext(path)[1].lower()\n return ext in [\n '.html',\n '.htm',\n ]\n\n\ndef is_template_file(path):\n \"\"\"\n Return True if the given file path is an HTML file.\n \"\"\"\n ext = os.path.splitext(path)[1].lower()\n return ext in [\n '.html',\n '.htm',\n '.xml',\n ]\n\n\ndef create_media_urls(nav, path_list):\n \"\"\"\n Return a list of URLs that have been processed correctly for inclusion in\n a page.\n \"\"\"\n final_urls = []\n\n for path in path_list:\n # Allow links to fully qualified URL's\n parsed = urlparse(path)\n if parsed.netloc:\n final_urls.append(path)\n continue\n # We must 
be looking at a local path.\n url = path_to_url(path)\n relative_url = '%s/%s' % (nav.url_context.make_relative('/'), url)\n final_urls.append(relative_url)\n\n return final_urls\n\n\ndef create_relative_media_url(nav, url):\n \"\"\"\n For a current page, create a relative url based on the given URL.\n\n On index.md (which becomes /index.html):\n image.png -> ./image.png\n /image.png -> ./image.png\n\n On sub/page.md (which becomes /sub/page/index.html):\n image.png -> ../image.png\n /image.png -> ../../image.png\n\n On sub/index.md (which becomes /sub/index.html):\n image.png -> ./image.png\n /image.png -> ./image.png\n\n \"\"\"\n\n # Allow links to fully qualified URL's\n parsed = urlparse(url)\n if parsed.netloc:\n return url\n\n # If the URL we are looking at starts with a /, then it should be\n # considered as absolute and will be 'relative' to the root.\n if url.startswith('/'):\n base = '/'\n url = url[1:]\n else:\n base = nav.url_context.base_path\n\n relative_base = nav.url_context.make_relative(base)\n if relative_base == \".\" and url.startswith(\"./\"):\n relative_url = url\n else:\n relative_url = '%s/%s' % (relative_base, url)\n\n # TODO: Fix this, this is a hack. Relative urls are not being calculated\n # correctly for images in the same directory as the markdown. I think this\n # is due to us moving it into a directory with index.html, but I'm not sure\n if (nav.file_context.current_file.endswith(\"/index.md\") is False and\n nav.url_context.base_path != '/' and\n relative_url.startswith(\"./\")):\n relative_url = \".%s\" % relative_url\n\n return relative_url\n\n\ndef path_to_url(path):\n \"\"\"Convert a system path to a URL.\"\"\"\n\n if os.path.sep == '/':\n return path\n\n if sys.version_info < (3, 0):\n path = path.encode('utf8')\n return pathname2url(path)\n\n\ndef convert_markdown(markdown_source, extensions=None, extension_configs=None):\n \"\"\"\n Convert the Markdown source file to HTML content, and additionally\n return the parsed table of contents, and a dictionary of any metadata\n that was specified in the Markdown file.\n `extensions` is an optional sequence of Python Markdown extensions to add\n to the default set.\n \"\"\"\n md = markdown.Markdown(\n extensions=extensions or [],\n extension_configs=extension_configs or {}\n )\n html_content = md.convert(markdown_source)\n\n # On completely blank markdown files, no Meta or tox properties are added\n # to the generated document.\n meta = getattr(md, 'Meta', {})\n toc_html = getattr(md, 'toc', '')\n\n # Post process the generated table of contents into a data structure\n table_of_contents = toc.TableOfContents(toc_html)\n\n return (html_content, table_of_contents, meta)\n\n\ndef get_themes():\n \"\"\"Return a dict of theme names and their locations\"\"\"\n\n themes = {}\n builtins = pkg_resources.get_entry_map(dist='mkdocs', group='mkdocs.themes')\n\n for theme in pkg_resources.iter_entry_points(group='mkdocs.themes'):\n\n if theme.name in builtins and theme.dist.key != 'mkdocs':\n raise exceptions.ConfigurationError(\n \"The theme {0} is a builtin theme but {1} provides a theme \"\n \"with the same name\".format(theme.name, theme.dist.key))\n\n elif theme.name in themes:\n multiple_packages = [themes[theme.name].dist.key, theme.dist.key]\n log.warning(\"The theme %s is provided by the Python packages \"\n \"'%s'. 
The one in %s will be used.\",\n theme.name, ','.join(multiple_packages), theme.dist.key)\n\n themes[theme.name] = theme\n\n themes = dict((name, os.path.dirname(os.path.abspath(theme.load().__file__)))\n for name, theme in themes.items())\n\n return themes\n\n\ndef get_theme_names():\n \"\"\"Return a list containing all the names of all the builtin themes.\"\"\"\n\n return get_themes().keys()\n\n\ndef filename_to_title(filename):\n\n title = os.path.splitext(filename)[0]\n title = title.replace('-', ' ').replace('_', ' ')\n # Capitalize if the filename was all lowercase, otherwise leave it as-is.\n if title.lower() == title:\n title = title.capitalize()\n\n return title\n\n\ndef find_or_create_node(branch, key):\n \"\"\"\n Given a list, look for dictionary with a key matching key and return it's\n value. If it doesn't exist, create it with the value of an empty list and\n return that.\n \"\"\"\n\n for node in branch:\n if not isinstance(node, dict):\n continue\n\n if key in node:\n return node[key]\n\n new_branch = []\n node = {key: new_branch}\n branch.append(node)\n return new_branch\n\n\ndef nest_paths(paths):\n \"\"\"\n Given a list of paths, convert them into a nested structure that will match\n the pages config.\n \"\"\"\n nested = []\n\n for path in paths:\n\n if os.path.sep not in path:\n nested.append(path)\n continue\n\n directory, _ = os.path.split(path)\n parts = directory.split(os.path.sep)\n\n branch = nested\n for part in parts:\n part = filename_to_title(part)\n branch = find_or_create_node(branch, part)\n\n branch.append(path)\n\n return nested\n",
"path": "mkdocs/utils/__init__.py"
}
] | diff --git a/docs/about/release-notes.md b/docs/about/release-notes.md
index fae106a539..28681bfc87 100644
--- a/docs/about/release-notes.md
+++ b/docs/about/release-notes.md
@@ -15,8 +15,9 @@ You can determine your currently installed version using `mkdocs --version`:
## Version 0.16 (2016-02-??)
-* Add a flag (-e/--theme-dir) to specifiy theme directory with the commands
+* Add a flag (-e/--theme-dir) to specifiy theme directory with the commands
`mkdocs build` and `mkdocs serve` (#832)
+* Fixed issues with Unicode filenames under Windows and Python 2. (#833)
## Version 0.15.3 (2016-02-18)
diff --git a/mkdocs/tests/integration.py b/mkdocs/tests/integration.py
index 5c0b10eb00..dcd8633bbb 100644
--- a/mkdocs/tests/integration.py
+++ b/mkdocs/tests/integration.py
@@ -35,17 +35,20 @@
required=True)
def main(output=None):
+ print("Building themes.")
for theme in sorted(MKDOCS_THEMES):
+ print("Building theme: {0}".format(theme))
project_dir = os.path.dirname(MKDOCS_CONFIG)
out = os.path.join(output, theme)
command = ['mkdocs', 'build', '-v', '--site-dir', out, '--theme', theme]
subprocess.check_call(command, cwd=project_dir)
+ print("Building test projects.")
for project in os.listdir(TEST_PROJECTS):
-
+ print("Building test project: {0}".format(project))
project_dir = os.path.join(TEST_PROJECTS, project)
out = os.path.join(output, project)
- command = ['mkdocs', 'build', '--site-dir', out]
+ command = ['mkdocs', 'build', '-v', '--site-dir', out]
subprocess.check_call(command, cwd=project_dir)
print("Theme and integration builds are available in {0}".format(output))
diff --git a/mkdocs/tests/integration/unicode/docs/index.md b/mkdocs/tests/integration/unicode/docs/index.md
new file mode 100644
index 0000000000..15bfba3adf
--- /dev/null
+++ b/mkdocs/tests/integration/unicode/docs/index.md
@@ -0,0 +1,2 @@
+# Unicode Test Documentation 📖
+
diff --git "a/mkdocs/tests/integration/unicode/docs/\303\234bersicht.md" "b/mkdocs/tests/integration/unicode/docs/\303\234bersicht.md"
new file mode 100644
index 0000000000..da37213adb
--- /dev/null
+++ "b/mkdocs/tests/integration/unicode/docs/\303\234bersicht.md"
@@ -0,0 +1,17 @@
+# Welcome to MkDocs
+
+For full documentation visit [mkdocs.org](http://mkdocs.org).
+
+## Commands
+
+* `mkdocs new [dir-name]` - Create a new project.
+* `mkdocs serve` - Start the live-reloading docs server.
+* `mkdocs build` - Build the documentation site.
+* `mkdocs help` - Print this help message.
+
+## Project layout
+
+ mkdocs.yml # The configuration file.
+ docs/
+ index.md # The documentation homepage.
+ ... # Other markdown pages, images and other files.
diff --git "a/mkdocs/tests/integration/unicode/docs/\342\231\252.md" "b/mkdocs/tests/integration/unicode/docs/\342\231\252.md"
new file mode 100644
index 0000000000..da37213adb
--- /dev/null
+++ "b/mkdocs/tests/integration/unicode/docs/\342\231\252.md"
@@ -0,0 +1,17 @@
+# Welcome to MkDocs
+
+For full documentation visit [mkdocs.org](http://mkdocs.org).
+
+## Commands
+
+* `mkdocs new [dir-name]` - Create a new project.
+* `mkdocs serve` - Start the live-reloading docs server.
+* `mkdocs build` - Build the documentation site.
+* `mkdocs help` - Print this help message.
+
+## Project layout
+
+ mkdocs.yml # The configuration file.
+ docs/
+ index.md # The documentation homepage.
+ ... # Other markdown pages, images and other files.
diff --git a/mkdocs/tests/integration/unicode/mkdocs.yml b/mkdocs/tests/integration/unicode/mkdocs.yml
new file mode 100644
index 0000000000..c97182f51a
--- /dev/null
+++ b/mkdocs/tests/integration/unicode/mkdocs.yml
@@ -0,0 +1 @@
+site_name: My Docs
diff --git a/mkdocs/utils/__init__.py b/mkdocs/utils/__init__.py
index 9b85fc0de7..be021277d1 100644
--- a/mkdocs/utils/__init__.py
+++ b/mkdocs/utils/__init__.py
@@ -321,6 +321,8 @@ def path_to_url(path):
if os.path.sep == '/':
return path
+ if sys.version_info < (3, 0):
+ path = path.encode('utf8')
return pathname2url(path)
|
bokeh__bokeh-4805 | Update add_glyph docstring
`add_glyph` in `plot.py` returns a `GlyphRenderer`, not a `Glyph`, even though its docstring says it returns a `Glyph`.
This tripped me up for a minute.
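
A minimal sketch of the behaviour (my own illustration — the plot, ranges and data are made up; only `add_glyph` and the model classes come from Bokeh):

```python
# add_glyph wraps the given Glyph in a GlyphRenderer, appends that renderer
# to plot.renderers, and returns the renderer -- not the Glyph itself.
from bokeh.models import Circle, ColumnDataSource, GlyphRenderer, Plot, Range1d

plot = Plot(x_range=Range1d(0, 10), y_range=Range1d(0, 10))
source = ColumnDataSource(data=dict(x=[1, 2, 3], y=[4, 5, 6]))
renderer = plot.add_glyph(source, Circle(x="x", y="y"))

assert isinstance(renderer, GlyphRenderer)  # what the docstring should say
assert isinstance(renderer.glyph, Circle)   # the Glyph lives on renderer.glyph
```

So the `Returns:` section of the `add_glyph` docstring should read `GlyphRenderer` rather than `Glyph`.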
| [
{
"content": "\"\"\" Models for representing top-level plot objects.\n\n\"\"\"\nfrom __future__ import absolute_import\n\nfrom six import string_types\nimport warnings\n\nfrom ..core.query import find\nfrom ..core import validation\nfrom ..core.validation.errors import REQUIRED_RANGE\nfrom ..core.validation.warnings import (\n MISSING_RENDERERS, NO_DATA_RENDERERS, MALFORMED_CATEGORY_LABEL,\n SNAPPED_TOOLBAR_ANNOTATIONS)\nfrom ..core.enums import Location\nfrom ..core.property_mixins import LineProps, FillProps\nfrom ..core.properties import (\n Bool, Int, String, Enum, Auto, Instance, Either,\n List, Dict, Include, Override, TitleProp)\nfrom ..util.string import nice_join\n\nfrom .annotations import Legend, Title\nfrom .axes import Axis\nfrom .glyphs import Glyph\nfrom .grids import Grid\nfrom .ranges import Range, FactorRange\nfrom .renderers import Renderer, GlyphRenderer, DataRenderer, TileRenderer, DynamicImageRenderer\nfrom .sources import DataSource, ColumnDataSource\nfrom .tools import Tool, ToolEvents, Toolbar\nfrom .layouts import LayoutDOM\n\nfrom ..util.plot_utils import _list_attr_splat, _select_helper\n\n# See all the way at the bottom of Plot for where this is used.\nDEP_MSG_0_12_0 = \"\"\"\n Plot property '%s' was deprecated in 0.12.0 and will be removed. Use '%s' instead.\n \"\"\"\n\n# We create an empty title by default\nDEFAULT_TITLE = lambda: Title(text=\"\")\n\n\nclass Plot(LayoutDOM):\n \"\"\" Model representing a plot, containing glyphs, guides, annotations.\n\n \"\"\"\n\n def __init__(self, **kwargs):\n if \"tool_events\" not in kwargs:\n kwargs[\"tool_events\"] = ToolEvents()\n\n if \"toolbar\" in kwargs and \"logo\" in kwargs:\n raise ValueError(\"Conflicing properties set on plot: toolbar, logo.\")\n\n if \"toolbar\" in kwargs and \"tools\" in kwargs:\n raise ValueError(\"Conflicing properties set on plot: toolbar, tools.\")\n\n if \"toolbar\" not in kwargs:\n tools = kwargs.pop('tools', [])\n logo = kwargs.pop('logo', 'normal')\n\n kwargs[\"toolbar\"] = Toolbar(tools=tools, logo=logo)\n\n if \"border_fill\" in kwargs and \"border_fill_color\" in kwargs:\n raise ValueError(\"Conflicting properties set on plot: border_fill, border_fill_color.\")\n\n if \"background_fill\" in kwargs and \"background_fill_color\" in kwargs:\n raise ValueError(\"Conflicting properties set on plot: background_fill, background_fill_color.\")\n\n super(LayoutDOM, self).__init__(**kwargs)\n\n def select(self, *args, **kwargs):\n ''' Query this object and all of its references for objects that\n match the given selector.\n\n There are a few different ways to call the ``select`` method.\n The most general is to supply a JSON-like query dictionary as the\n single argument or as keyword arguments:\n\n Args:\n selector (JSON-like) : some sample text\n\n Keyword Arguments:\n kwargs : query dict key/values as keyword arguments\n\n For convenience, queries on just names can be made by supplying\n the ``name`` string as the single parameter:\n\n Args:\n name (str) : the name to query on\n\n Also queries on just type can be made simply by supplying the\n ``Model`` subclass as the single parameter:\n\n Args:\n type (Model) : the type to query on\n\n Returns:\n seq[Model]\n\n Examples:\n\n .. 
code-block:: python\n\n # These two are equivalent\n p.select({\"type\": HoverTool})\n p.select(HoverTool)\n\n # These two are also equivalent\n p.select({\"name\": \"mycircle\"})\n p.select(\"mycircle\")\n\n # Keyword arguments can be supplied in place of selector dict\n p.select({\"name\": \"foo\", \"type\": HoverTool})\n p.select(name=\"foo\", type=HoverTool)\n\n '''\n\n selector = _select_helper(args, kwargs)\n\n # Want to pass selector that is a dictionary\n return _list_attr_splat(find(self.references(), selector, {'plot': self}))\n\n def row(self, row, gridplot):\n ''' Return whether this plot is in a given row of a GridPlot.\n\n Args:\n row (int) : index of the row to test\n gridplot (GridPlot) : the GridPlot to check\n\n Returns:\n bool\n\n '''\n return self in gridplot.row(row)\n\n def column(self, col, gridplot):\n ''' Return whether this plot is in a given column of a GridPlot.\n\n Args:\n col (int) : index of the column to test\n gridplot (GridPlot) : the GridPlot to check\n\n Returns:\n bool\n\n '''\n return self in gridplot.column(col)\n\n def _axis(self, *sides):\n objs = []\n for s in sides:\n objs.extend(getattr(self, s, []))\n axis = [obj for obj in objs if isinstance(obj, Axis)]\n return _list_attr_splat(axis)\n\n @property\n def xaxis(self):\n \"\"\" Splattable list of :class:`~bokeh.models.axes.Axis` objects for the x dimension.\n\n \"\"\"\n return self._axis(\"above\", \"below\")\n\n @property\n def yaxis(self):\n \"\"\" Splattable list of :class:`~bokeh.models.axes.Axis` objects for the y dimension.\n\n \"\"\"\n return self._axis(\"left\", \"right\")\n\n @property\n def axis(self):\n \"\"\" Splattable list of :class:`~bokeh.models.axes.Axis` objects.\n\n \"\"\"\n return _list_attr_splat(self.xaxis + self.yaxis)\n\n @property\n def legend(self):\n \"\"\"Splattable list of :class:`~bokeh.models.annotations.Legend` objects.\n\n \"\"\"\n legends = [obj for obj in self.renderers if isinstance(obj, Legend)]\n return _list_attr_splat(legends)\n\n def _grid(self, dimension):\n grid = [obj for obj in self.renderers if isinstance(obj, Grid) and obj.dimension==dimension]\n return _list_attr_splat(grid)\n\n @property\n def xgrid(self):\n \"\"\" Splattable list of :class:`~bokeh.models.grids.Grid` objects for the x dimension.\n\n \"\"\"\n return self._grid(0)\n\n @property\n def ygrid(self):\n \"\"\" Splattable list of :class:`~bokeh.models.grids.Grid` objects for the y dimension.\n\n \"\"\"\n return self._grid(1)\n\n @property\n def grid(self):\n \"\"\" Splattable list of :class:`~bokeh.models.grids.Grid` objects.\n\n \"\"\"\n return _list_attr_splat(self.xgrid + self.ygrid)\n\n @property\n def tools(self):\n return self.toolbar.tools\n\n @tools.setter\n def tools(self, tools):\n self.toolbar.tools = tools\n\n\n def add_layout(self, obj, place='center'):\n ''' Adds an object to the plot in a specified place.\n\n Args:\n obj (Renderer) : the object to add to the Plot\n place (str, optional) : where to add the object (default: 'center')\n Valid places are: 'left', 'right', 'above', 'below', 'center'.\n\n Returns:\n None\n\n '''\n valid_places = ['left', 'right', 'above', 'below', 'center']\n if place not in valid_places:\n raise ValueError(\n \"Invalid place '%s' specified. 
Valid place values are: %s\" % (place, nice_join(valid_places))\n )\n\n if hasattr(obj, 'plot'):\n if obj.plot is not None:\n raise ValueError(\"object to be added already has 'plot' attribute set\")\n obj.plot = self\n\n self.renderers.append(obj)\n\n if place is not 'center':\n getattr(self, place).append(obj)\n\n def add_tools(self, *tools):\n ''' Adds tools to the plot.\n\n Args:\n *tools (Tool) : the tools to add to the Plot\n\n Returns:\n None\n\n '''\n if not all(isinstance(tool, Tool) for tool in tools):\n raise ValueError(\"All arguments to add_tool must be Tool subclasses.\")\n\n for tool in tools:\n if tool.plot is not None:\n raise ValueError(\"tool %s to be added already has 'plot' attribute set\" % tool)\n tool.plot = self\n if hasattr(tool, 'overlay'):\n self.renderers.append(tool.overlay)\n self.toolbar.tools.append(tool)\n\n def add_glyph(self, source_or_glyph, glyph=None, **kw):\n ''' Adds a glyph to the plot with associated data sources and ranges.\n\n This function will take care of creating and configuring a Glyph object,\n and then add it to the plot's list of renderers.\n\n Args:\n source (DataSource) : a data source for the glyphs to all use\n glyph (Glyph) : the glyph to add to the Plot\n\n\n Keyword Arguments:\n Any additional keyword arguments are passed on as-is to the\n Glyph initializer.\n\n Returns:\n Glyph\n\n '''\n if glyph is not None:\n source = source_or_glyph\n else:\n source, glyph = ColumnDataSource(), source_or_glyph\n\n if not isinstance(source, DataSource):\n raise ValueError(\"'source' argument to add_glyph() must be DataSource subclass\")\n\n if not isinstance(glyph, Glyph):\n raise ValueError(\"'glyph' argument to add_glyph() must be Glyph subclass\")\n\n g = GlyphRenderer(data_source=source, glyph=glyph, **kw)\n self.renderers.append(g)\n return g\n\n def add_tile(self, tile_source, **kw):\n '''Adds new TileRenderer into the Plot.renderers\n\n Args:\n tile_source (TileSource) : a tile source instance which contain tileset configuration\n\n Keyword Arguments:\n Additional keyword arguments are passed on as-is to the tile renderer\n\n Returns:\n TileRenderer : TileRenderer\n\n '''\n tile_renderer = TileRenderer(tile_source=tile_source, **kw)\n self.renderers.append(tile_renderer)\n return tile_renderer\n\n def add_dynamic_image(self, image_source, **kw):\n '''Adds new DynamicImageRenderer into the Plot.renderers\n\n Args:\n image_source (ImageSource) : a image source instance which contain image configuration\n\n Keyword Arguments:\n Additional keyword arguments are passed on as-is to the dynamic image renderer\n\n Returns:\n DynamicImageRenderer : DynamicImageRenderer\n\n '''\n image_renderer = DynamicImageRenderer(image_source=image_source, **kw)\n self.renderers.append(image_renderer)\n return image_renderer\n\n @validation.error(REQUIRED_RANGE)\n def _check_required_range(self):\n missing = []\n if not self.x_range: missing.append('x_range')\n if not self.y_range: missing.append('y_range')\n if missing:\n return \", \".join(missing) + \" [%s]\" % self\n\n @validation.warning(MISSING_RENDERERS)\n def _check_missing_renderers(self):\n if len(self.renderers) == 0:\n return str(self)\n\n @validation.warning(NO_DATA_RENDERERS)\n def _check_no_data_renderers(self):\n if len(self.select(DataRenderer)) == 0:\n return str(self)\n\n @validation.warning(MALFORMED_CATEGORY_LABEL)\n def _check_colon_in_category_label(self):\n if not self.x_range: return\n if not self.y_range: return\n\n broken = []\n\n for range_name in ['x_range', 'y_range']:\n 
category_range = getattr(self, range_name)\n if not isinstance(category_range, FactorRange): continue\n\n for value in category_range.factors:\n if not isinstance(value, string_types): break\n if ':' in value:\n broken.append((range_name, value))\n break\n\n if broken:\n field_msg = ' '.join('[range:%s] [first_value: %s]' % (field, value)\n for field, value in broken)\n return '%s [renderer: %s]' % (field_msg, self)\n\n @validation.warning(SNAPPED_TOOLBAR_ANNOTATIONS)\n def _check_snapped_toolbar_and_axis(self):\n if not self.toolbar_sticky: return\n if self.toolbar_location is None: return\n\n objs = getattr(self, self.toolbar_location)\n if len(objs) > 0:\n return str(self)\n\n __deprecated_attributes__ = (\n 'background_fill', 'border_fill', 'logo', 'tools', 'responsive',\n 'title_text_baseline', 'title_text_align', 'title_text_alpha', 'title_text_color',\n 'title_text_font_style', 'title_text_font_size', 'title_text_font', 'title_standoff'\n )\n\n x_range = Instance(Range, help=\"\"\"\n The (default) data range of the horizontal dimension of the plot.\n \"\"\")\n\n y_range = Instance(Range, help=\"\"\"\n The (default) data range of the vertical dimension of the plot.\n \"\"\")\n\n x_mapper_type = Either(Auto, String, help=\"\"\"\n What kind of mapper to use to convert x-coordinates in data space\n into x-coordinates in screen space.\n\n Typically this can be determined automatically, but this property\n can be useful to, e.g., show datetime values as floating point\n \"seconds since epoch\" instead of formatted dates.\n \"\"\")\n\n y_mapper_type = Either(Auto, String, help=\"\"\"\n What kind of mapper to use to convert y-coordinates in data space\n into y-coordinates in screen space.\n\n Typically this can be determined automatically, but this property\n can be useful to, e.g., show datetime values as floating point\n \"seconds since epoch\" instead of formatted dates\n \"\"\")\n\n extra_x_ranges = Dict(String, Instance(Range), help=\"\"\"\n Additional named ranges to make available for mapping x-coordinates.\n\n This is useful for adding additional axes.\n \"\"\")\n\n extra_y_ranges = Dict(String, Instance(Range), help=\"\"\"\n Additional named ranges to make available for mapping y-coordinates.\n\n This is useful for adding additional axes.\n \"\"\")\n\n hidpi = Bool(default=True, help=\"\"\"\n Whether to use HiDPI mode when available.\n \"\"\")\n\n title = TitleProp(default=DEFAULT_TITLE, help=\"\"\"\n A title for the plot. Can be a text string or a Title annotation. Default is Title(text=\"\").\n \"\"\")\n\n title_location = Enum(Location, default=\"above\", help=\"\"\"\n Where the title will be located. Titles on the left or right side\n will be rotated.\n \"\"\")\n\n outline_props = Include(LineProps, help=\"\"\"\n The %s for the plot border outline.\n \"\"\")\n\n outline_line_color = Override(default=\"#e5e5e5\")\n\n renderers = List(Instance(Renderer), help=\"\"\"\n A list of all renderers for this plot, including guides and annotations\n in addition to glyphs and markers.\n\n This property can be manipulated by hand, but the ``add_glyph`` and\n ``add_layout`` methods are recommended to help make sure all necessary\n setup is performed.\n \"\"\")\n\n toolbar = Instance(Toolbar, help=\"\"\"\n The toolbar associated with this plot which holds all the tools.\n\n The toolbar is automatically created with the plot.\n \"\"\")\n\n toolbar_location = Enum(Location, default=\"right\", help=\"\"\"\n Where the toolbar will be located. 
If set to None, no toolbar\n will be attached to the plot.\n \"\"\")\n\n toolbar_sticky = Bool(default=True, help=\"\"\"\n Stick the toolbar to the edge of the plot. Default: True. If False,\n the toolbar will be outside of the axes, titles etc.\n \"\"\")\n\n tool_events = Instance(ToolEvents, help=\"\"\"\n A ToolEvents object to share and report tool events.\n \"\"\")\n\n left = List(Instance(Renderer), help=\"\"\"\n A list of renderers to occupy the area to the left of the plot.\n \"\"\")\n\n right = List(Instance(Renderer), help=\"\"\"\n A list of renderers to occupy the area to the right of the plot.\n \"\"\")\n\n above = List(Instance(Renderer), help=\"\"\"\n A list of renderers to occupy the area above of the plot.\n \"\"\")\n\n below = List(Instance(Renderer), help=\"\"\"\n A list of renderers to occupy the area below of the plot.\n \"\"\")\n\n plot_height = Int(600, help=\"\"\"\n Total height of the entire plot (including any axes, titles,\n border padding, etc.)\n\n .. note::\n This corresponds directly to the height of the HTML\n canvas that will be used.\n\n \"\"\")\n\n plot_width = Int(600, help=\"\"\"\n Total width of the entire plot (including any axes, titles,\n border padding, etc.)\n\n .. note::\n This corresponds directly to the width of the HTML\n canvas that will be used.\n\n \"\"\")\n\n background_props = Include(FillProps, help=\"\"\"\n The %s for the plot background style.\n \"\"\")\n\n background_fill_color = Override(default='#ffffff')\n\n border_props = Include(FillProps, help=\"\"\"\n The %s for the plot border style.\n \"\"\")\n\n border_fill_color = Override(default='#ffffff')\n\n min_border_top = Int(help=\"\"\"\n Minimum size in pixels of the padding region above the top of the\n central plot region.\n\n .. note::\n This is a *minimum*. The padding region may expand as needed to\n accommodate titles or axes, etc.\n\n \"\"\")\n\n min_border_bottom = Int(help=\"\"\"\n Minimum size in pixels of the padding region below the bottom of\n the central plot region.\n\n .. note::\n This is a *minimum*. The padding region may expand as needed to\n accommodate titles or axes, etc.\n\n \"\"\")\n\n min_border_left = Int(help=\"\"\"\n Minimum size in pixels of the padding region to the left of\n the central plot region.\n\n .. note::\n This is a *minimum*. The padding region may expand as needed to\n accommodate titles or axes, etc.\n\n \"\"\")\n\n min_border_right = Int(help=\"\"\"\n Minimum size in pixels of the padding region to the right of\n the central plot region.\n\n .. note::\n This is a *minimum*. The padding region may expand as needed to\n accommodate titles or axes, etc.\n\n \"\"\")\n\n min_border = Int(5, help=\"\"\"\n A convenience property to set all all the ``min_border_X`` properties\n to the same value. If an individual border property is explicitly set,\n it will override ``min_border``.\n \"\"\")\n\n h_symmetry = Bool(True, help=\"\"\"\n Whether the total horizontal padding on both sides of the plot will\n be made equal (the left or right padding amount, whichever is larger).\n \"\"\")\n\n v_symmetry = Bool(False, help=\"\"\"\n Whether the total vertical padding on both sides of the plot will\n be made equal (the top or bottom padding amount, whichever is larger).\n \"\"\")\n\n lod_factor = Int(10, help=\"\"\"\n Decimation factor to use when applying level-of-detail decimation.\n \"\"\")\n\n lod_threshold = Int(2000, help=\"\"\"\n A number of data points, above which level-of-detail downsampling may\n be performed by glyph renderers. 
Set to ``None`` to disable any\n level-of-detail downsampling.\n \"\"\")\n\n lod_interval = Int(300, help=\"\"\"\n Interval (in ms) during which an interactive tool event will enable\n level-of-detail downsampling.\n \"\"\")\n\n lod_timeout = Int(500, help=\"\"\"\n Timeout (in ms) for checking whether interactive tool events are still\n occurring. Once level-of-detail mode is enabled, a check is made every\n ``lod_timeout`` ms. If no interactive tool events have happened,\n level-of-detail mode is disabled.\n \"\"\")\n\n webgl = Bool(False, help=\"\"\"\n Whether WebGL is enabled for this plot. If True, the glyphs that\n support this will render via WebGL instead of the 2D canvas.\n \"\"\")\n\n #\n # DEPRECATED PROPERTIES\n #\n\n @property\n def responsive(self):\n warnings.warn(DEP_MSG_0_12_0 % ('responsive', 'Plot.sizing_mode'))\n return self.sizing_mode != \"fixed\"\n\n @responsive.setter\n def responsive(self, value):\n warnings.warn(DEP_MSG_0_12_0 % ('responsive', 'Plot.sizing_mode'))\n warnings.warn(\"\"\"\n The 'responsive' property has been deprecated in 0.12.0. It has been\n replaced by 'sizing_mode' which accepts one of five modes:\n\n fixed, scale_width, scale_height, scale_both, stretch_both\n\n 'responsive = False' is the equivalent of 'sizing_mode = \"fixed\"'\n\n 'responsive = True' is the equivalent of 'sizing_mode = \"scale_width\"'\n \"\"\")\n if value is True:\n self.sizing_mode = \"scale_width\"\n elif value is False:\n self.sizing_mode = \"fixed\"\n else:\n raise ValueError(\"Plot.responsive only accepts True or False, got: %r\" % value)\n\n @property\n def background_fill(self):\n warnings.warn(\n \"\"\"\n Plot property 'background_fill' was deprecated in Bokeh\n 0.11.0 and will be removed. Use 'background_fill_color' instead.\n \"\"\")\n return self.background_fill_color\n\n @background_fill.setter\n def background_fill(self, color):\n warnings.warn(\n \"\"\"\n Plot property 'background_fill' was deprecated in Bokeh\n 0.11.0 and will be removed. Use 'background_fill_color' instead.\n \"\"\")\n self.background_fill_color = color\n\n @property\n def border_fill(self):\n warnings.warn(\n \"\"\"\n Plot property 'border_fill' was deprecated in Bokeh 0.11.0 and\n will be removed. Use 'border_fill_color' instead.\n \"\"\")\n return self.border_fill_color\n\n @border_fill.setter\n def border_fill(self, color):\n warnings.warn(\n \"\"\"\n Plot property 'border_fill' was deprecated in Bokeh 0.11.0 and\n will be removed. 
Use 'border_fill_color' instead.\n \"\"\")\n self.border_fill_color = color\n\n @property\n def logo(self):\n warnings.warn(DEP_MSG_0_12_0 % ('logo', 'Plot.toolbar.logo'))\n return self.toolbar.logo\n\n @logo.setter\n def logo(self, value):\n warnings.warn(DEP_MSG_0_12_0 % ('logo', 'Plot.toolbar.logo'))\n self.toolbar.logo = value\n\n @property\n def title_standoff(self):\n warnings.warn(DEP_MSG_0_12_0 % ('title_standoff', 'Plot.title.offset'))\n return self.title.offset\n\n @title_standoff.setter\n def title_standoff(self, value):\n warnings.warn(DEP_MSG_0_12_0 % ('title_standoff', 'Plot.title.offset'))\n self.title.offset = value\n\n @property\n def title_text_font(self):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_font', 'Plot.title.text_font'))\n return self.title.text_font\n\n @title_text_font.setter\n def title_text_font(self, value):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_font', 'Plot.title.text_font'))\n self.title.text_font = value\n\n @property\n def title_text_font_size(self):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_font_size', 'Plot.title.text_font_size'))\n return self.title.text_font_size\n\n @title_text_font_size.setter\n def title_text_font_size(self, value):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_font_size', 'Plot.title.text_font_size'))\n self.title.text_font_size = value\n\n @property\n def title_text_font_style(self):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_font_style', 'Plot.title.text_font_style'))\n return self.title.text_font_style\n\n @title_text_font_style.setter\n def title_text_font_style(self, value):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_font_style', 'Plot.title.text_font_style'))\n self.title.text_font_style = value\n\n @property\n def title_text_color(self):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_color', 'Plot.title.text_color'))\n return self.title.text_color\n\n @title_text_color.setter\n def title_text_color(self, value):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_color', 'Plot.title.text_color'))\n self.title.text_color = value\n\n @property\n def title_text_alpha(self):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_alpha', 'Plot.title.text_alpha'))\n return self.title.text_alpha\n\n @title_text_alpha.setter\n def title_text_alpha(self, value):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_alpha', 'Plot.title.text_alpha'))\n self.title.text_alpha = value\n\n @property\n def title_text_align(self):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_align', 'Plot.title.align'))\n warnings.warn(\"\"\"``title_text_align`` was deprecated in 0.12.0 and is no longer\n available on the new Title object. There is a new ``plot.title.title_align`` which is\n similar but not exactly the same. The new ``title_align`` both positions and aligns the title.\n If you need the exact ``title_text_align`` behavior, please add a title by creating a\n Label (``bokeh.models.annotations.Label``) and manually adding\n it to the plot by doing, for example ``plot.add_layout(Label(), 'above')``.\n \"\"\")\n return self.title.align\n\n @title_text_align.setter\n def title_text_align(self, value):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_align', 'Plot.title.align'))\n warnings.warn(\"\"\"``title_text_align`` was deprecated in 0.12.0 and is no longer\n available on the new Title object. There is a new ``plot.title.title_align`` which is\n similar but not exactly the same. 
The new ``title_align`` both positions and aligns the title.\n If you need the exact ``title_text_align`` behavior, please add a title by creating a\n Label (``bokeh.models.annotations.Label``) and manually adding\n it to the plot by doing, for example ``plot.add_layout(Label(), 'above')``.\n \"\"\")\n self.title.align = value\n\n @property\n def title_text_baseline(self):\n warnings.warn(\"\"\"title_text_baseline was deprecated in 0.12.0 and is no longer\n available on the new Title object. If you need to alter the text_baseline, please\n add a title by creating a Label (``bokeh.models.annotations.Label``) and manually adding\n it to the plot by doing, for example ``plot.add_layout(Label(), 'above')``.\n \"\"\")\n return None\n\n @title_text_baseline.setter\n def title_text_baseline(self, value):\n warnings.warn(\"\"\"title_text_baseline was deprecated in 0.12.0 and is no longer\n available on the new Title object. If you need to alter the text_baseline, please\n add a title by creating a Label (``bokeh.models.annotations.Label``) and manually adding\n it to the plot by doing, for example ``plot.add_layout(Label(), 'above')``.\n \"\"\")\n",
"path": "bokeh/models/plots.py"
}
] | [
{
"content": "\"\"\" Models for representing top-level plot objects.\n\n\"\"\"\nfrom __future__ import absolute_import\n\nfrom six import string_types\nimport warnings\n\nfrom ..core.query import find\nfrom ..core import validation\nfrom ..core.validation.errors import REQUIRED_RANGE\nfrom ..core.validation.warnings import (\n MISSING_RENDERERS, NO_DATA_RENDERERS, MALFORMED_CATEGORY_LABEL,\n SNAPPED_TOOLBAR_ANNOTATIONS)\nfrom ..core.enums import Location\nfrom ..core.property_mixins import LineProps, FillProps\nfrom ..core.properties import (\n Bool, Int, String, Enum, Auto, Instance, Either,\n List, Dict, Include, Override, TitleProp)\nfrom ..util.string import nice_join\n\nfrom .annotations import Legend, Title\nfrom .axes import Axis\nfrom .glyphs import Glyph\nfrom .grids import Grid\nfrom .ranges import Range, FactorRange\nfrom .renderers import Renderer, GlyphRenderer, DataRenderer, TileRenderer, DynamicImageRenderer\nfrom .sources import DataSource, ColumnDataSource\nfrom .tools import Tool, ToolEvents, Toolbar\nfrom .layouts import LayoutDOM\n\nfrom ..util.plot_utils import _list_attr_splat, _select_helper\n\n# See all the way at the bottom of Plot for where this is used.\nDEP_MSG_0_12_0 = \"\"\"\n Plot property '%s' was deprecated in 0.12.0 and will be removed. Use '%s' instead.\n \"\"\"\n\n# We create an empty title by default\nDEFAULT_TITLE = lambda: Title(text=\"\")\n\n\nclass Plot(LayoutDOM):\n \"\"\" Model representing a plot, containing glyphs, guides, annotations.\n\n \"\"\"\n\n def __init__(self, **kwargs):\n if \"tool_events\" not in kwargs:\n kwargs[\"tool_events\"] = ToolEvents()\n\n if \"toolbar\" in kwargs and \"logo\" in kwargs:\n raise ValueError(\"Conflicing properties set on plot: toolbar, logo.\")\n\n if \"toolbar\" in kwargs and \"tools\" in kwargs:\n raise ValueError(\"Conflicing properties set on plot: toolbar, tools.\")\n\n if \"toolbar\" not in kwargs:\n tools = kwargs.pop('tools', [])\n logo = kwargs.pop('logo', 'normal')\n\n kwargs[\"toolbar\"] = Toolbar(tools=tools, logo=logo)\n\n if \"border_fill\" in kwargs and \"border_fill_color\" in kwargs:\n raise ValueError(\"Conflicting properties set on plot: border_fill, border_fill_color.\")\n\n if \"background_fill\" in kwargs and \"background_fill_color\" in kwargs:\n raise ValueError(\"Conflicting properties set on plot: background_fill, background_fill_color.\")\n\n super(LayoutDOM, self).__init__(**kwargs)\n\n def select(self, *args, **kwargs):\n ''' Query this object and all of its references for objects that\n match the given selector.\n\n There are a few different ways to call the ``select`` method.\n The most general is to supply a JSON-like query dictionary as the\n single argument or as keyword arguments:\n\n Args:\n selector (JSON-like) : some sample text\n\n Keyword Arguments:\n kwargs : query dict key/values as keyword arguments\n\n For convenience, queries on just names can be made by supplying\n the ``name`` string as the single parameter:\n\n Args:\n name (str) : the name to query on\n\n Also queries on just type can be made simply by supplying the\n ``Model`` subclass as the single parameter:\n\n Args:\n type (Model) : the type to query on\n\n Returns:\n seq[Model]\n\n Examples:\n\n .. 
code-block:: python\n\n # These two are equivalent\n p.select({\"type\": HoverTool})\n p.select(HoverTool)\n\n # These two are also equivalent\n p.select({\"name\": \"mycircle\"})\n p.select(\"mycircle\")\n\n # Keyword arguments can be supplied in place of selector dict\n p.select({\"name\": \"foo\", \"type\": HoverTool})\n p.select(name=\"foo\", type=HoverTool)\n\n '''\n\n selector = _select_helper(args, kwargs)\n\n # Want to pass selector that is a dictionary\n return _list_attr_splat(find(self.references(), selector, {'plot': self}))\n\n def row(self, row, gridplot):\n ''' Return whether this plot is in a given row of a GridPlot.\n\n Args:\n row (int) : index of the row to test\n gridplot (GridPlot) : the GridPlot to check\n\n Returns:\n bool\n\n '''\n return self in gridplot.row(row)\n\n def column(self, col, gridplot):\n ''' Return whether this plot is in a given column of a GridPlot.\n\n Args:\n col (int) : index of the column to test\n gridplot (GridPlot) : the GridPlot to check\n\n Returns:\n bool\n\n '''\n return self in gridplot.column(col)\n\n def _axis(self, *sides):\n objs = []\n for s in sides:\n objs.extend(getattr(self, s, []))\n axis = [obj for obj in objs if isinstance(obj, Axis)]\n return _list_attr_splat(axis)\n\n @property\n def xaxis(self):\n \"\"\" Splattable list of :class:`~bokeh.models.axes.Axis` objects for the x dimension.\n\n \"\"\"\n return self._axis(\"above\", \"below\")\n\n @property\n def yaxis(self):\n \"\"\" Splattable list of :class:`~bokeh.models.axes.Axis` objects for the y dimension.\n\n \"\"\"\n return self._axis(\"left\", \"right\")\n\n @property\n def axis(self):\n \"\"\" Splattable list of :class:`~bokeh.models.axes.Axis` objects.\n\n \"\"\"\n return _list_attr_splat(self.xaxis + self.yaxis)\n\n @property\n def legend(self):\n \"\"\"Splattable list of :class:`~bokeh.models.annotations.Legend` objects.\n\n \"\"\"\n legends = [obj for obj in self.renderers if isinstance(obj, Legend)]\n return _list_attr_splat(legends)\n\n def _grid(self, dimension):\n grid = [obj for obj in self.renderers if isinstance(obj, Grid) and obj.dimension==dimension]\n return _list_attr_splat(grid)\n\n @property\n def xgrid(self):\n \"\"\" Splattable list of :class:`~bokeh.models.grids.Grid` objects for the x dimension.\n\n \"\"\"\n return self._grid(0)\n\n @property\n def ygrid(self):\n \"\"\" Splattable list of :class:`~bokeh.models.grids.Grid` objects for the y dimension.\n\n \"\"\"\n return self._grid(1)\n\n @property\n def grid(self):\n \"\"\" Splattable list of :class:`~bokeh.models.grids.Grid` objects.\n\n \"\"\"\n return _list_attr_splat(self.xgrid + self.ygrid)\n\n @property\n def tools(self):\n return self.toolbar.tools\n\n @tools.setter\n def tools(self, tools):\n self.toolbar.tools = tools\n\n\n def add_layout(self, obj, place='center'):\n ''' Adds an object to the plot in a specified place.\n\n Args:\n obj (Renderer) : the object to add to the Plot\n place (str, optional) : where to add the object (default: 'center')\n Valid places are: 'left', 'right', 'above', 'below', 'center'.\n\n Returns:\n None\n\n '''\n valid_places = ['left', 'right', 'above', 'below', 'center']\n if place not in valid_places:\n raise ValueError(\n \"Invalid place '%s' specified. 
Valid place values are: %s\" % (place, nice_join(valid_places))\n )\n\n if hasattr(obj, 'plot'):\n if obj.plot is not None:\n raise ValueError(\"object to be added already has 'plot' attribute set\")\n obj.plot = self\n\n self.renderers.append(obj)\n\n if place is not 'center':\n getattr(self, place).append(obj)\n\n def add_tools(self, *tools):\n ''' Adds tools to the plot.\n\n Args:\n *tools (Tool) : the tools to add to the Plot\n\n Returns:\n None\n\n '''\n if not all(isinstance(tool, Tool) for tool in tools):\n raise ValueError(\"All arguments to add_tool must be Tool subclasses.\")\n\n for tool in tools:\n if tool.plot is not None:\n raise ValueError(\"tool %s to be added already has 'plot' attribute set\" % tool)\n tool.plot = self\n if hasattr(tool, 'overlay'):\n self.renderers.append(tool.overlay)\n self.toolbar.tools.append(tool)\n\n def add_glyph(self, source_or_glyph, glyph=None, **kw):\n ''' Adds a glyph to the plot with associated data sources and ranges.\n\n This function will take care of creating and configuring a Glyph object,\n and then add it to the plot's list of renderers.\n\n Args:\n source (DataSource) : a data source for the glyphs to all use\n glyph (Glyph) : the glyph to add to the Plot\n\n\n Keyword Arguments:\n Any additional keyword arguments are passed on as-is to the\n Glyph initializer.\n\n Returns:\n GlyphRenderer\n\n '''\n if glyph is not None:\n source = source_or_glyph\n else:\n source, glyph = ColumnDataSource(), source_or_glyph\n\n if not isinstance(source, DataSource):\n raise ValueError(\"'source' argument to add_glyph() must be DataSource subclass\")\n\n if not isinstance(glyph, Glyph):\n raise ValueError(\"'glyph' argument to add_glyph() must be Glyph subclass\")\n\n g = GlyphRenderer(data_source=source, glyph=glyph, **kw)\n self.renderers.append(g)\n return g\n\n def add_tile(self, tile_source, **kw):\n '''Adds new TileRenderer into the Plot.renderers\n\n Args:\n tile_source (TileSource) : a tile source instance which contain tileset configuration\n\n Keyword Arguments:\n Additional keyword arguments are passed on as-is to the tile renderer\n\n Returns:\n TileRenderer : TileRenderer\n\n '''\n tile_renderer = TileRenderer(tile_source=tile_source, **kw)\n self.renderers.append(tile_renderer)\n return tile_renderer\n\n def add_dynamic_image(self, image_source, **kw):\n '''Adds new DynamicImageRenderer into the Plot.renderers\n\n Args:\n image_source (ImageSource) : a image source instance which contain image configuration\n\n Keyword Arguments:\n Additional keyword arguments are passed on as-is to the dynamic image renderer\n\n Returns:\n DynamicImageRenderer : DynamicImageRenderer\n\n '''\n image_renderer = DynamicImageRenderer(image_source=image_source, **kw)\n self.renderers.append(image_renderer)\n return image_renderer\n\n @validation.error(REQUIRED_RANGE)\n def _check_required_range(self):\n missing = []\n if not self.x_range: missing.append('x_range')\n if not self.y_range: missing.append('y_range')\n if missing:\n return \", \".join(missing) + \" [%s]\" % self\n\n @validation.warning(MISSING_RENDERERS)\n def _check_missing_renderers(self):\n if len(self.renderers) == 0:\n return str(self)\n\n @validation.warning(NO_DATA_RENDERERS)\n def _check_no_data_renderers(self):\n if len(self.select(DataRenderer)) == 0:\n return str(self)\n\n @validation.warning(MALFORMED_CATEGORY_LABEL)\n def _check_colon_in_category_label(self):\n if not self.x_range: return\n if not self.y_range: return\n\n broken = []\n\n for range_name in ['x_range', 'y_range']:\n 
category_range = getattr(self, range_name)\n if not isinstance(category_range, FactorRange): continue\n\n for value in category_range.factors:\n if not isinstance(value, string_types): break\n if ':' in value:\n broken.append((range_name, value))\n break\n\n if broken:\n field_msg = ' '.join('[range:%s] [first_value: %s]' % (field, value)\n for field, value in broken)\n return '%s [renderer: %s]' % (field_msg, self)\n\n @validation.warning(SNAPPED_TOOLBAR_ANNOTATIONS)\n def _check_snapped_toolbar_and_axis(self):\n if not self.toolbar_sticky: return\n if self.toolbar_location is None: return\n\n objs = getattr(self, self.toolbar_location)\n if len(objs) > 0:\n return str(self)\n\n __deprecated_attributes__ = (\n 'background_fill', 'border_fill', 'logo', 'tools', 'responsive',\n 'title_text_baseline', 'title_text_align', 'title_text_alpha', 'title_text_color',\n 'title_text_font_style', 'title_text_font_size', 'title_text_font', 'title_standoff'\n )\n\n x_range = Instance(Range, help=\"\"\"\n The (default) data range of the horizontal dimension of the plot.\n \"\"\")\n\n y_range = Instance(Range, help=\"\"\"\n The (default) data range of the vertical dimension of the plot.\n \"\"\")\n\n x_mapper_type = Either(Auto, String, help=\"\"\"\n What kind of mapper to use to convert x-coordinates in data space\n into x-coordinates in screen space.\n\n Typically this can be determined automatically, but this property\n can be useful to, e.g., show datetime values as floating point\n \"seconds since epoch\" instead of formatted dates.\n \"\"\")\n\n y_mapper_type = Either(Auto, String, help=\"\"\"\n What kind of mapper to use to convert y-coordinates in data space\n into y-coordinates in screen space.\n\n Typically this can be determined automatically, but this property\n can be useful to, e.g., show datetime values as floating point\n \"seconds since epoch\" instead of formatted dates\n \"\"\")\n\n extra_x_ranges = Dict(String, Instance(Range), help=\"\"\"\n Additional named ranges to make available for mapping x-coordinates.\n\n This is useful for adding additional axes.\n \"\"\")\n\n extra_y_ranges = Dict(String, Instance(Range), help=\"\"\"\n Additional named ranges to make available for mapping y-coordinates.\n\n This is useful for adding additional axes.\n \"\"\")\n\n hidpi = Bool(default=True, help=\"\"\"\n Whether to use HiDPI mode when available.\n \"\"\")\n\n title = TitleProp(default=DEFAULT_TITLE, help=\"\"\"\n A title for the plot. Can be a text string or a Title annotation. Default is Title(text=\"\").\n \"\"\")\n\n title_location = Enum(Location, default=\"above\", help=\"\"\"\n Where the title will be located. Titles on the left or right side\n will be rotated.\n \"\"\")\n\n outline_props = Include(LineProps, help=\"\"\"\n The %s for the plot border outline.\n \"\"\")\n\n outline_line_color = Override(default=\"#e5e5e5\")\n\n renderers = List(Instance(Renderer), help=\"\"\"\n A list of all renderers for this plot, including guides and annotations\n in addition to glyphs and markers.\n\n This property can be manipulated by hand, but the ``add_glyph`` and\n ``add_layout`` methods are recommended to help make sure all necessary\n setup is performed.\n \"\"\")\n\n toolbar = Instance(Toolbar, help=\"\"\"\n The toolbar associated with this plot which holds all the tools.\n\n The toolbar is automatically created with the plot.\n \"\"\")\n\n toolbar_location = Enum(Location, default=\"right\", help=\"\"\"\n Where the toolbar will be located. 
If set to None, no toolbar\n will be attached to the plot.\n \"\"\")\n\n toolbar_sticky = Bool(default=True, help=\"\"\"\n Stick the toolbar to the edge of the plot. Default: True. If False,\n the toolbar will be outside of the axes, titles etc.\n \"\"\")\n\n tool_events = Instance(ToolEvents, help=\"\"\"\n A ToolEvents object to share and report tool events.\n \"\"\")\n\n left = List(Instance(Renderer), help=\"\"\"\n A list of renderers to occupy the area to the left of the plot.\n \"\"\")\n\n right = List(Instance(Renderer), help=\"\"\"\n A list of renderers to occupy the area to the right of the plot.\n \"\"\")\n\n above = List(Instance(Renderer), help=\"\"\"\n A list of renderers to occupy the area above of the plot.\n \"\"\")\n\n below = List(Instance(Renderer), help=\"\"\"\n A list of renderers to occupy the area below of the plot.\n \"\"\")\n\n plot_height = Int(600, help=\"\"\"\n Total height of the entire plot (including any axes, titles,\n border padding, etc.)\n\n .. note::\n This corresponds directly to the height of the HTML\n canvas that will be used.\n\n \"\"\")\n\n plot_width = Int(600, help=\"\"\"\n Total width of the entire plot (including any axes, titles,\n border padding, etc.)\n\n .. note::\n This corresponds directly to the width of the HTML\n canvas that will be used.\n\n \"\"\")\n\n background_props = Include(FillProps, help=\"\"\"\n The %s for the plot background style.\n \"\"\")\n\n background_fill_color = Override(default='#ffffff')\n\n border_props = Include(FillProps, help=\"\"\"\n The %s for the plot border style.\n \"\"\")\n\n border_fill_color = Override(default='#ffffff')\n\n min_border_top = Int(help=\"\"\"\n Minimum size in pixels of the padding region above the top of the\n central plot region.\n\n .. note::\n This is a *minimum*. The padding region may expand as needed to\n accommodate titles or axes, etc.\n\n \"\"\")\n\n min_border_bottom = Int(help=\"\"\"\n Minimum size in pixels of the padding region below the bottom of\n the central plot region.\n\n .. note::\n This is a *minimum*. The padding region may expand as needed to\n accommodate titles or axes, etc.\n\n \"\"\")\n\n min_border_left = Int(help=\"\"\"\n Minimum size in pixels of the padding region to the left of\n the central plot region.\n\n .. note::\n This is a *minimum*. The padding region may expand as needed to\n accommodate titles or axes, etc.\n\n \"\"\")\n\n min_border_right = Int(help=\"\"\"\n Minimum size in pixels of the padding region to the right of\n the central plot region.\n\n .. note::\n This is a *minimum*. The padding region may expand as needed to\n accommodate titles or axes, etc.\n\n \"\"\")\n\n min_border = Int(5, help=\"\"\"\n A convenience property to set all all the ``min_border_X`` properties\n to the same value. If an individual border property is explicitly set,\n it will override ``min_border``.\n \"\"\")\n\n h_symmetry = Bool(True, help=\"\"\"\n Whether the total horizontal padding on both sides of the plot will\n be made equal (the left or right padding amount, whichever is larger).\n \"\"\")\n\n v_symmetry = Bool(False, help=\"\"\"\n Whether the total vertical padding on both sides of the plot will\n be made equal (the top or bottom padding amount, whichever is larger).\n \"\"\")\n\n lod_factor = Int(10, help=\"\"\"\n Decimation factor to use when applying level-of-detail decimation.\n \"\"\")\n\n lod_threshold = Int(2000, help=\"\"\"\n A number of data points, above which level-of-detail downsampling may\n be performed by glyph renderers. 
Set to ``None`` to disable any\n level-of-detail downsampling.\n \"\"\")\n\n lod_interval = Int(300, help=\"\"\"\n Interval (in ms) during which an interactive tool event will enable\n level-of-detail downsampling.\n \"\"\")\n\n lod_timeout = Int(500, help=\"\"\"\n Timeout (in ms) for checking whether interactive tool events are still\n occurring. Once level-of-detail mode is enabled, a check is made every\n ``lod_timeout`` ms. If no interactive tool events have happened,\n level-of-detail mode is disabled.\n \"\"\")\n\n webgl = Bool(False, help=\"\"\"\n Whether WebGL is enabled for this plot. If True, the glyphs that\n support this will render via WebGL instead of the 2D canvas.\n \"\"\")\n\n #\n # DEPRECATED PROPERTIES\n #\n\n @property\n def responsive(self):\n warnings.warn(DEP_MSG_0_12_0 % ('responsive', 'Plot.sizing_mode'))\n return self.sizing_mode != \"fixed\"\n\n @responsive.setter\n def responsive(self, value):\n warnings.warn(DEP_MSG_0_12_0 % ('responsive', 'Plot.sizing_mode'))\n warnings.warn(\"\"\"\n The 'responsive' property has been deprecated in 0.12.0. It has been\n replaced by 'sizing_mode' which accepts one of five modes:\n\n fixed, scale_width, scale_height, scale_both, stretch_both\n\n 'responsive = False' is the equivalent of 'sizing_mode = \"fixed\"'\n\n 'responsive = True' is the equivalent of 'sizing_mode = \"scale_width\"'\n \"\"\")\n if value is True:\n self.sizing_mode = \"scale_width\"\n elif value is False:\n self.sizing_mode = \"fixed\"\n else:\n raise ValueError(\"Plot.responsive only accepts True or False, got: %r\" % value)\n\n @property\n def background_fill(self):\n warnings.warn(\n \"\"\"\n Plot property 'background_fill' was deprecated in Bokeh\n 0.11.0 and will be removed. Use 'background_fill_color' instead.\n \"\"\")\n return self.background_fill_color\n\n @background_fill.setter\n def background_fill(self, color):\n warnings.warn(\n \"\"\"\n Plot property 'background_fill' was deprecated in Bokeh\n 0.11.0 and will be removed. Use 'background_fill_color' instead.\n \"\"\")\n self.background_fill_color = color\n\n @property\n def border_fill(self):\n warnings.warn(\n \"\"\"\n Plot property 'border_fill' was deprecated in Bokeh 0.11.0 and\n will be removed. Use 'border_fill_color' instead.\n \"\"\")\n return self.border_fill_color\n\n @border_fill.setter\n def border_fill(self, color):\n warnings.warn(\n \"\"\"\n Plot property 'border_fill' was deprecated in Bokeh 0.11.0 and\n will be removed. 
Use 'border_fill_color' instead.\n \"\"\")\n self.border_fill_color = color\n\n @property\n def logo(self):\n warnings.warn(DEP_MSG_0_12_0 % ('logo', 'Plot.toolbar.logo'))\n return self.toolbar.logo\n\n @logo.setter\n def logo(self, value):\n warnings.warn(DEP_MSG_0_12_0 % ('logo', 'Plot.toolbar.logo'))\n self.toolbar.logo = value\n\n @property\n def title_standoff(self):\n warnings.warn(DEP_MSG_0_12_0 % ('title_standoff', 'Plot.title.offset'))\n return self.title.offset\n\n @title_standoff.setter\n def title_standoff(self, value):\n warnings.warn(DEP_MSG_0_12_0 % ('title_standoff', 'Plot.title.offset'))\n self.title.offset = value\n\n @property\n def title_text_font(self):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_font', 'Plot.title.text_font'))\n return self.title.text_font\n\n @title_text_font.setter\n def title_text_font(self, value):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_font', 'Plot.title.text_font'))\n self.title.text_font = value\n\n @property\n def title_text_font_size(self):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_font_size', 'Plot.title.text_font_size'))\n return self.title.text_font_size\n\n @title_text_font_size.setter\n def title_text_font_size(self, value):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_font_size', 'Plot.title.text_font_size'))\n self.title.text_font_size = value\n\n @property\n def title_text_font_style(self):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_font_style', 'Plot.title.text_font_style'))\n return self.title.text_font_style\n\n @title_text_font_style.setter\n def title_text_font_style(self, value):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_font_style', 'Plot.title.text_font_style'))\n self.title.text_font_style = value\n\n @property\n def title_text_color(self):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_color', 'Plot.title.text_color'))\n return self.title.text_color\n\n @title_text_color.setter\n def title_text_color(self, value):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_color', 'Plot.title.text_color'))\n self.title.text_color = value\n\n @property\n def title_text_alpha(self):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_alpha', 'Plot.title.text_alpha'))\n return self.title.text_alpha\n\n @title_text_alpha.setter\n def title_text_alpha(self, value):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_alpha', 'Plot.title.text_alpha'))\n self.title.text_alpha = value\n\n @property\n def title_text_align(self):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_align', 'Plot.title.align'))\n warnings.warn(\"\"\"``title_text_align`` was deprecated in 0.12.0 and is no longer\n available on the new Title object. There is a new ``plot.title.title_align`` which is\n similar but not exactly the same. The new ``title_align`` both positions and aligns the title.\n If you need the exact ``title_text_align`` behavior, please add a title by creating a\n Label (``bokeh.models.annotations.Label``) and manually adding\n it to the plot by doing, for example ``plot.add_layout(Label(), 'above')``.\n \"\"\")\n return self.title.align\n\n @title_text_align.setter\n def title_text_align(self, value):\n warnings.warn(DEP_MSG_0_12_0 % ('title_text_align', 'Plot.title.align'))\n warnings.warn(\"\"\"``title_text_align`` was deprecated in 0.12.0 and is no longer\n available on the new Title object. There is a new ``plot.title.title_align`` which is\n similar but not exactly the same. 
The new ``title_align`` both positions and aligns the title.\n If you need the exact ``title_text_align`` behavior, please add a title by creating a\n Label (``bokeh.models.annotations.Label``) and manually adding\n it to the plot by doing, for example ``plot.add_layout(Label(), 'above')``.\n \"\"\")\n self.title.align = value\n\n @property\n def title_text_baseline(self):\n warnings.warn(\"\"\"title_text_baseline was deprecated in 0.12.0 and is no longer\n available on the new Title object. If you need to alter the text_baseline, please\n add a title by creating a Label (``bokeh.models.annotations.Label``) and manually adding\n it to the plot by doing, for example ``plot.add_layout(Label(), 'above')``.\n \"\"\")\n return None\n\n @title_text_baseline.setter\n def title_text_baseline(self, value):\n warnings.warn(\"\"\"title_text_baseline was deprecated in 0.12.0 and is no longer\n available on the new Title object. If you need to alter the text_baseline, please\n add a title by creating a Label (``bokeh.models.annotations.Label``) and manually adding\n it to the plot by doing, for example ``plot.add_layout(Label(), 'above')``.\n \"\"\")\n",
"path": "bokeh/models/plots.py"
}
] | diff --git a/bokeh/models/plots.py b/bokeh/models/plots.py
index 86a04895ba0..314cf848c87 100644
--- a/bokeh/models/plots.py
+++ b/bokeh/models/plots.py
@@ -282,7 +282,7 @@ def add_glyph(self, source_or_glyph, glyph=None, **kw):
             Glyph initializer.
 
         Returns:
-            Glyph
+            GlyphRenderer
 
         '''
         if glyph is not None:
|
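The hunk above only corrects the documented return type; as the `add_glyph` body in this record shows, the method wraps the data source and glyph in a `GlyphRenderer`, appends it to `plot.renderers`, and returns that renderer. A short sketch of what calling code sees (a sketch only, assuming a bokeh installation exposing the 0.12-era API shown in this record):

```python
from bokeh.models import Circle, ColumnDataSource, GlyphRenderer, Plot

source = ColumnDataSource(data=dict(x=[1, 2, 3], y=[4, 5, 6]))
plot = Plot()

# add_glyph builds GlyphRenderer(data_source=source, glyph=...), appends it to
# plot.renderers, and returns it -- hence the docstring fix recorded above.
renderer = plot.add_glyph(source, Circle(x="x", y="y"))

assert isinstance(renderer, GlyphRenderer)
assert renderer.glyph.__class__.__name__ == "Circle"
assert renderer in plot.renderers
```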
coqui-ai__TTS-1532 | Missing `f` prefix on f-strings
Some strings look like they're meant to be f-strings but are missing the `f` prefix, meaning variable interpolation won't happen.
https://github.com/coqui-ai/TTS/blob/c410bc58ef3bd07b72ab05d29bbdc2a6df47afea/TTS/tts/layers/tacotron/attentions.py#L487
I found this issue automatically. I'm a bot. Beep Boop 🦊. See other issues I found in your repo [here](https://codereview.doctor/coqui-ai/TTS)
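The affected line appears at the very end of `attentions.py` in the record below: `raise RuntimeError(" [!] Given Attention Type '{attn_type}' is not exist.")`. A minimal, self-contained sketch of why the missing prefix matters (the variable name mirrors the file; the wording of the actual merged fix may differ):

```python
attn_type = "unknown_attention"  # hypothetical value, for illustration only

# Without the f prefix the braces are part of the literal string, so the
# message never reveals which attention type was actually requested.
plain = " [!] Given Attention Type '{attn_type}' is not exist."

# With the f prefix, attn_type is interpolated into the message.
fixed = f" [!] Given Attention Type '{attn_type}' is not exist."

print(plain)  # ->  [!] Given Attention Type '{attn_type}' is not exist.
print(fixed)  # ->  [!] Given Attention Type 'unknown_attention' is not exist.
```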
| [
{
"content": "import torch\nfrom scipy.stats import betabinom\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom TTS.tts.layers.tacotron.common_layers import Linear\n\n\nclass LocationLayer(nn.Module):\n \"\"\"Layers for Location Sensitive Attention\n\n Args:\n attention_dim (int): number of channels in the input tensor.\n attention_n_filters (int, optional): number of filters in convolution. Defaults to 32.\n attention_kernel_size (int, optional): kernel size of convolution filter. Defaults to 31.\n \"\"\"\n\n def __init__(self, attention_dim, attention_n_filters=32, attention_kernel_size=31):\n super().__init__()\n self.location_conv1d = nn.Conv1d(\n in_channels=2,\n out_channels=attention_n_filters,\n kernel_size=attention_kernel_size,\n stride=1,\n padding=(attention_kernel_size - 1) // 2,\n bias=False,\n )\n self.location_dense = Linear(attention_n_filters, attention_dim, bias=False, init_gain=\"tanh\")\n\n def forward(self, attention_cat):\n \"\"\"\n Shapes:\n attention_cat: [B, 2, C]\n \"\"\"\n processed_attention = self.location_conv1d(attention_cat)\n processed_attention = self.location_dense(processed_attention.transpose(1, 2))\n return processed_attention\n\n\nclass GravesAttention(nn.Module):\n \"\"\"Graves Attention as is ref1 with updates from ref2.\n ref1: https://arxiv.org/abs/1910.10288\n ref2: https://arxiv.org/pdf/1906.01083.pdf\n\n Args:\n query_dim (int): number of channels in query tensor.\n K (int): number of Gaussian heads to be used for computing attention.\n \"\"\"\n\n COEF = 0.3989422917366028 # numpy.sqrt(1/(2*numpy.pi))\n\n def __init__(self, query_dim, K):\n\n super().__init__()\n self._mask_value = 1e-8\n self.K = K\n # self.attention_alignment = 0.05\n self.eps = 1e-5\n self.J = None\n self.N_a = nn.Sequential(\n nn.Linear(query_dim, query_dim, bias=True), nn.ReLU(), nn.Linear(query_dim, 3 * K, bias=True)\n )\n self.attention_weights = None\n self.mu_prev = None\n self.init_layers()\n\n def init_layers(self):\n torch.nn.init.constant_(self.N_a[2].bias[(2 * self.K) : (3 * self.K)], 1.0) # bias mean\n torch.nn.init.constant_(self.N_a[2].bias[self.K : (2 * self.K)], 10) # bias std\n\n def init_states(self, inputs):\n if self.J is None or inputs.shape[1] + 1 > self.J.shape[-1]:\n self.J = torch.arange(0, inputs.shape[1] + 2.0).to(inputs.device) + 0.5\n self.attention_weights = torch.zeros(inputs.shape[0], inputs.shape[1]).to(inputs.device)\n self.mu_prev = torch.zeros(inputs.shape[0], self.K).to(inputs.device)\n\n # pylint: disable=R0201\n # pylint: disable=unused-argument\n def preprocess_inputs(self, inputs):\n return None\n\n def forward(self, query, inputs, processed_inputs, mask):\n \"\"\"\n Shapes:\n query: [B, C_attention_rnn]\n inputs: [B, T_in, C_encoder]\n processed_inputs: place_holder\n mask: [B, T_in]\n \"\"\"\n gbk_t = self.N_a(query)\n gbk_t = gbk_t.view(gbk_t.size(0), -1, self.K)\n\n # attention model parameters\n # each B x K\n g_t = gbk_t[:, 0, :]\n b_t = gbk_t[:, 1, :]\n k_t = gbk_t[:, 2, :]\n\n # dropout to decorrelate attention heads\n g_t = torch.nn.functional.dropout(g_t, p=0.5, training=self.training)\n\n # attention GMM parameters\n sig_t = torch.nn.functional.softplus(b_t) + self.eps\n\n mu_t = self.mu_prev + torch.nn.functional.softplus(k_t)\n g_t = torch.softmax(g_t, dim=-1) + self.eps\n\n j = self.J[: inputs.size(1) + 1]\n\n # attention weights\n phi_t = g_t.unsqueeze(-1) * (1 / (1 + torch.sigmoid((mu_t.unsqueeze(-1) - j) / sig_t.unsqueeze(-1))))\n\n # discritize attention weights\n alpha_t = torch.sum(phi_t, 1)\n 
alpha_t = alpha_t[:, 1:] - alpha_t[:, :-1]\n alpha_t[alpha_t == 0] = 1e-8\n\n # apply masking\n if mask is not None:\n alpha_t.data.masked_fill_(~mask, self._mask_value)\n\n context = torch.bmm(alpha_t.unsqueeze(1), inputs).squeeze(1)\n self.attention_weights = alpha_t\n self.mu_prev = mu_t\n return context\n\n\nclass OriginalAttention(nn.Module):\n \"\"\"Bahdanau Attention with various optional modifications.\n - Location sensitive attnetion: https://arxiv.org/abs/1712.05884\n - Forward Attention: https://arxiv.org/abs/1807.06736 + state masking at inference\n - Using sigmoid instead of softmax normalization\n - Attention windowing at inference time\n\n Note:\n Location Sensitive Attention extends the additive attention mechanism\n to use cumulative attention weights from previous decoder time steps with the current time step features.\n\n Forward attention computes most probable monotonic alignment. The modified attention probabilities at each\n timestep are computed recursively by the forward algorithm.\n\n Transition agent in the forward attention explicitly gates the attention mechanism whether to move forward or\n stay at each decoder timestep.\n\n Attention windowing is a inductive prior that prevents the model from attending to previous and future timesteps\n beyond a certain window.\n\n Args:\n query_dim (int): number of channels in the query tensor.\n embedding_dim (int): number of channels in the vakue tensor. In general, the value tensor is the output of the encoder layer.\n attention_dim (int): number of channels of the inner attention layers.\n location_attention (bool): enable/disable location sensitive attention.\n attention_location_n_filters (int): number of location attention filters.\n attention_location_kernel_size (int): filter size of location attention convolution layer.\n windowing (int): window size for attention windowing. if it is 5, for computing the attention, it only considers the time steps [(t-5), ..., (t+5)] of the input.\n norm (str): normalization method applied to the attention weights. 'softmax' or 'sigmoid'\n forward_attn (bool): enable/disable forward attention.\n trans_agent (bool): enable/disable transition agent in the forward attention.\n forward_attn_mask (int): enable/disable an explicit masking in forward attention. 
It is useful to set at especially inference time.\n \"\"\"\n\n # Pylint gets confused by PyTorch conventions here\n # pylint: disable=attribute-defined-outside-init\n def __init__(\n self,\n query_dim,\n embedding_dim,\n attention_dim,\n location_attention,\n attention_location_n_filters,\n attention_location_kernel_size,\n windowing,\n norm,\n forward_attn,\n trans_agent,\n forward_attn_mask,\n ):\n super().__init__()\n self.query_layer = Linear(query_dim, attention_dim, bias=False, init_gain=\"tanh\")\n self.inputs_layer = Linear(embedding_dim, attention_dim, bias=False, init_gain=\"tanh\")\n self.v = Linear(attention_dim, 1, bias=True)\n if trans_agent:\n self.ta = nn.Linear(query_dim + embedding_dim, 1, bias=True)\n if location_attention:\n self.location_layer = LocationLayer(\n attention_dim,\n attention_location_n_filters,\n attention_location_kernel_size,\n )\n self._mask_value = -float(\"inf\")\n self.windowing = windowing\n self.win_idx = None\n self.norm = norm\n self.forward_attn = forward_attn\n self.trans_agent = trans_agent\n self.forward_attn_mask = forward_attn_mask\n self.location_attention = location_attention\n\n def init_win_idx(self):\n self.win_idx = -1\n self.win_back = 2\n self.win_front = 6\n\n def init_forward_attn(self, inputs):\n B = inputs.shape[0]\n T = inputs.shape[1]\n self.alpha = torch.cat([torch.ones([B, 1]), torch.zeros([B, T])[:, :-1] + 1e-7], dim=1).to(inputs.device)\n self.u = (0.5 * torch.ones([B, 1])).to(inputs.device)\n\n def init_location_attention(self, inputs):\n B = inputs.size(0)\n T = inputs.size(1)\n self.attention_weights_cum = torch.zeros([B, T], device=inputs.device)\n\n def init_states(self, inputs):\n B = inputs.size(0)\n T = inputs.size(1)\n self.attention_weights = torch.zeros([B, T], device=inputs.device)\n if self.location_attention:\n self.init_location_attention(inputs)\n if self.forward_attn:\n self.init_forward_attn(inputs)\n if self.windowing:\n self.init_win_idx()\n\n def preprocess_inputs(self, inputs):\n return self.inputs_layer(inputs)\n\n def update_location_attention(self, alignments):\n self.attention_weights_cum += alignments\n\n def get_location_attention(self, query, processed_inputs):\n attention_cat = torch.cat((self.attention_weights.unsqueeze(1), self.attention_weights_cum.unsqueeze(1)), dim=1)\n processed_query = self.query_layer(query.unsqueeze(1))\n processed_attention_weights = self.location_layer(attention_cat)\n energies = self.v(torch.tanh(processed_query + processed_attention_weights + processed_inputs))\n energies = energies.squeeze(-1)\n return energies, processed_query\n\n def get_attention(self, query, processed_inputs):\n processed_query = self.query_layer(query.unsqueeze(1))\n energies = self.v(torch.tanh(processed_query + processed_inputs))\n energies = energies.squeeze(-1)\n return energies, processed_query\n\n def apply_windowing(self, attention, inputs):\n back_win = self.win_idx - self.win_back\n front_win = self.win_idx + self.win_front\n if back_win > 0:\n attention[:, :back_win] = -float(\"inf\")\n if front_win < inputs.shape[1]:\n attention[:, front_win:] = -float(\"inf\")\n # this is a trick to solve a special problem.\n # but it does not hurt.\n if self.win_idx == -1:\n attention[:, 0] = attention.max()\n # Update the window\n self.win_idx = torch.argmax(attention, 1).long()[0].item()\n return attention\n\n def apply_forward_attention(self, alignment):\n # forward attention\n fwd_shifted_alpha = F.pad(self.alpha[:, :-1].clone().to(alignment.device), (1, 0, 0, 0))\n # compute transition 
potentials\n alpha = ((1 - self.u) * self.alpha + self.u * fwd_shifted_alpha + 1e-8) * alignment\n # force incremental alignment\n if not self.training and self.forward_attn_mask:\n _, n = fwd_shifted_alpha.max(1)\n val, _ = alpha.max(1)\n for b in range(alignment.shape[0]):\n alpha[b, n[b] + 3 :] = 0\n alpha[b, : (n[b] - 1)] = 0 # ignore all previous states to prevent repetition.\n alpha[b, (n[b] - 2)] = 0.01 * val[b] # smoothing factor for the prev step\n # renormalize attention weights\n alpha = alpha / alpha.sum(dim=1, keepdim=True)\n return alpha\n\n def forward(self, query, inputs, processed_inputs, mask):\n \"\"\"\n shapes:\n query: [B, C_attn_rnn]\n inputs: [B, T_en, D_en]\n processed_inputs: [B, T_en, D_attn]\n mask: [B, T_en]\n \"\"\"\n if self.location_attention:\n attention, _ = self.get_location_attention(query, processed_inputs)\n else:\n attention, _ = self.get_attention(query, processed_inputs)\n # apply masking\n if mask is not None:\n attention.data.masked_fill_(~mask, self._mask_value)\n # apply windowing - only in eval mode\n if not self.training and self.windowing:\n attention = self.apply_windowing(attention, inputs)\n\n # normalize attention values\n if self.norm == \"softmax\":\n alignment = torch.softmax(attention, dim=-1)\n elif self.norm == \"sigmoid\":\n alignment = torch.sigmoid(attention) / torch.sigmoid(attention).sum(dim=1, keepdim=True)\n else:\n raise ValueError(\"Unknown value for attention norm type\")\n\n if self.location_attention:\n self.update_location_attention(alignment)\n\n # apply forward attention if enabled\n if self.forward_attn:\n alignment = self.apply_forward_attention(alignment)\n self.alpha = alignment\n\n context = torch.bmm(alignment.unsqueeze(1), inputs)\n context = context.squeeze(1)\n self.attention_weights = alignment\n\n # compute transition agent\n if self.forward_attn and self.trans_agent:\n ta_input = torch.cat([context, query.squeeze(1)], dim=-1)\n self.u = torch.sigmoid(self.ta(ta_input))\n return context\n\n\nclass MonotonicDynamicConvolutionAttention(nn.Module):\n \"\"\"Dynamic convolution attention from\n https://arxiv.org/pdf/1910.10288.pdf\n\n\n query -> linear -> tanh -> linear ->|\n | mask values\n v | |\n atten_w(t-1) -|-> conv1d_dynamic -> linear -|-> tanh -> + -> softmax -> * -> * -> context\n |-> conv1d_static -> linear -| |\n |-> conv1d_prior -> log ----------------|\n\n query: attention rnn output.\n\n Note:\n Dynamic convolution attention is an alternation of the location senstive attention with\n dynamically computed convolution filters from the previous attention scores and a set of\n constraints to keep the attention alignment diagonal.\n DCA is sensitive to mixed precision training and might cause instable training.\n\n Args:\n query_dim (int): number of channels in the query tensor.\n embedding_dim (int): number of channels in the value tensor.\n static_filter_dim (int): number of channels in the convolution layer computing the static filters.\n static_kernel_size (int): kernel size for the convolution layer computing the static filters.\n dynamic_filter_dim (int): number of channels in the convolution layer computing the dynamic filters.\n dynamic_kernel_size (int): kernel size for the convolution layer computing the dynamic filters.\n prior_filter_len (int, optional): [description]. Defaults to 11 from the paper.\n alpha (float, optional): [description]. Defaults to 0.1 from the paper.\n beta (float, optional): [description]. 
Defaults to 0.9 from the paper.\n \"\"\"\n\n def __init__(\n self,\n query_dim,\n embedding_dim, # pylint: disable=unused-argument\n attention_dim,\n static_filter_dim,\n static_kernel_size,\n dynamic_filter_dim,\n dynamic_kernel_size,\n prior_filter_len=11,\n alpha=0.1,\n beta=0.9,\n ):\n super().__init__()\n self._mask_value = 1e-8\n self.dynamic_filter_dim = dynamic_filter_dim\n self.dynamic_kernel_size = dynamic_kernel_size\n self.prior_filter_len = prior_filter_len\n self.attention_weights = None\n # setup key and query layers\n self.query_layer = nn.Linear(query_dim, attention_dim)\n self.key_layer = nn.Linear(attention_dim, dynamic_filter_dim * dynamic_kernel_size, bias=False)\n self.static_filter_conv = nn.Conv1d(\n 1,\n static_filter_dim,\n static_kernel_size,\n padding=(static_kernel_size - 1) // 2,\n bias=False,\n )\n self.static_filter_layer = nn.Linear(static_filter_dim, attention_dim, bias=False)\n self.dynamic_filter_layer = nn.Linear(dynamic_filter_dim, attention_dim)\n self.v = nn.Linear(attention_dim, 1, bias=False)\n\n prior = betabinom.pmf(range(prior_filter_len), prior_filter_len - 1, alpha, beta)\n self.register_buffer(\"prior\", torch.FloatTensor(prior).flip(0))\n\n # pylint: disable=unused-argument\n def forward(self, query, inputs, processed_inputs, mask):\n \"\"\"\n query: [B, C_attn_rnn]\n inputs: [B, T_en, D_en]\n processed_inputs: place holder.\n mask: [B, T_en]\n \"\"\"\n # compute prior filters\n prior_filter = F.conv1d(\n F.pad(self.attention_weights.unsqueeze(1), (self.prior_filter_len - 1, 0)), self.prior.view(1, 1, -1)\n )\n prior_filter = torch.log(prior_filter.clamp_min_(1e-6)).squeeze(1)\n G = self.key_layer(torch.tanh(self.query_layer(query)))\n # compute dynamic filters\n dynamic_filter = F.conv1d(\n self.attention_weights.unsqueeze(0),\n G.view(-1, 1, self.dynamic_kernel_size),\n padding=(self.dynamic_kernel_size - 1) // 2,\n groups=query.size(0),\n )\n dynamic_filter = dynamic_filter.view(query.size(0), self.dynamic_filter_dim, -1).transpose(1, 2)\n # compute static filters\n static_filter = self.static_filter_conv(self.attention_weights.unsqueeze(1)).transpose(1, 2)\n alignment = (\n self.v(\n torch.tanh(self.static_filter_layer(static_filter) + self.dynamic_filter_layer(dynamic_filter))\n ).squeeze(-1)\n + prior_filter\n )\n # compute attention weights\n attention_weights = F.softmax(alignment, dim=-1)\n # apply masking\n if mask is not None:\n attention_weights.data.masked_fill_(~mask, self._mask_value)\n self.attention_weights = attention_weights\n # compute context\n context = torch.bmm(attention_weights.unsqueeze(1), inputs).squeeze(1)\n return context\n\n def preprocess_inputs(self, inputs): # pylint: disable=no-self-use\n return None\n\n def init_states(self, inputs):\n B = inputs.size(0)\n T = inputs.size(1)\n self.attention_weights = torch.zeros([B, T], device=inputs.device)\n self.attention_weights[:, 0] = 1.0\n\n\ndef init_attn(\n attn_type,\n query_dim,\n embedding_dim,\n attention_dim,\n location_attention,\n attention_location_n_filters,\n attention_location_kernel_size,\n windowing,\n norm,\n forward_attn,\n trans_agent,\n forward_attn_mask,\n attn_K,\n):\n if attn_type == \"original\":\n return OriginalAttention(\n query_dim,\n embedding_dim,\n attention_dim,\n location_attention,\n attention_location_n_filters,\n attention_location_kernel_size,\n windowing,\n norm,\n forward_attn,\n trans_agent,\n forward_attn_mask,\n )\n if attn_type == \"graves\":\n return GravesAttention(query_dim, attn_K)\n if attn_type == 
\"dynamic_convolution\":\n return MonotonicDynamicConvolutionAttention(\n query_dim,\n embedding_dim,\n attention_dim,\n static_filter_dim=8,\n static_kernel_size=21,\n dynamic_filter_dim=8,\n dynamic_kernel_size=21,\n prior_filter_len=11,\n alpha=0.1,\n beta=0.9,\n )\n\n raise RuntimeError(\" [!] Given Attention Type '{attn_type}' is not exist.\")\n",
"path": "TTS/tts/layers/tacotron/attentions.py"
}
] | [
{
"content": "import torch\nfrom scipy.stats import betabinom\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom TTS.tts.layers.tacotron.common_layers import Linear\n\n\nclass LocationLayer(nn.Module):\n \"\"\"Layers for Location Sensitive Attention\n\n Args:\n attention_dim (int): number of channels in the input tensor.\n attention_n_filters (int, optional): number of filters in convolution. Defaults to 32.\n attention_kernel_size (int, optional): kernel size of convolution filter. Defaults to 31.\n \"\"\"\n\n def __init__(self, attention_dim, attention_n_filters=32, attention_kernel_size=31):\n super().__init__()\n self.location_conv1d = nn.Conv1d(\n in_channels=2,\n out_channels=attention_n_filters,\n kernel_size=attention_kernel_size,\n stride=1,\n padding=(attention_kernel_size - 1) // 2,\n bias=False,\n )\n self.location_dense = Linear(attention_n_filters, attention_dim, bias=False, init_gain=\"tanh\")\n\n def forward(self, attention_cat):\n \"\"\"\n Shapes:\n attention_cat: [B, 2, C]\n \"\"\"\n processed_attention = self.location_conv1d(attention_cat)\n processed_attention = self.location_dense(processed_attention.transpose(1, 2))\n return processed_attention\n\n\nclass GravesAttention(nn.Module):\n \"\"\"Graves Attention as is ref1 with updates from ref2.\n ref1: https://arxiv.org/abs/1910.10288\n ref2: https://arxiv.org/pdf/1906.01083.pdf\n\n Args:\n query_dim (int): number of channels in query tensor.\n K (int): number of Gaussian heads to be used for computing attention.\n \"\"\"\n\n COEF = 0.3989422917366028 # numpy.sqrt(1/(2*numpy.pi))\n\n def __init__(self, query_dim, K):\n\n super().__init__()\n self._mask_value = 1e-8\n self.K = K\n # self.attention_alignment = 0.05\n self.eps = 1e-5\n self.J = None\n self.N_a = nn.Sequential(\n nn.Linear(query_dim, query_dim, bias=True), nn.ReLU(), nn.Linear(query_dim, 3 * K, bias=True)\n )\n self.attention_weights = None\n self.mu_prev = None\n self.init_layers()\n\n def init_layers(self):\n torch.nn.init.constant_(self.N_a[2].bias[(2 * self.K) : (3 * self.K)], 1.0) # bias mean\n torch.nn.init.constant_(self.N_a[2].bias[self.K : (2 * self.K)], 10) # bias std\n\n def init_states(self, inputs):\n if self.J is None or inputs.shape[1] + 1 > self.J.shape[-1]:\n self.J = torch.arange(0, inputs.shape[1] + 2.0).to(inputs.device) + 0.5\n self.attention_weights = torch.zeros(inputs.shape[0], inputs.shape[1]).to(inputs.device)\n self.mu_prev = torch.zeros(inputs.shape[0], self.K).to(inputs.device)\n\n # pylint: disable=R0201\n # pylint: disable=unused-argument\n def preprocess_inputs(self, inputs):\n return None\n\n def forward(self, query, inputs, processed_inputs, mask):\n \"\"\"\n Shapes:\n query: [B, C_attention_rnn]\n inputs: [B, T_in, C_encoder]\n processed_inputs: place_holder\n mask: [B, T_in]\n \"\"\"\n gbk_t = self.N_a(query)\n gbk_t = gbk_t.view(gbk_t.size(0), -1, self.K)\n\n # attention model parameters\n # each B x K\n g_t = gbk_t[:, 0, :]\n b_t = gbk_t[:, 1, :]\n k_t = gbk_t[:, 2, :]\n\n # dropout to decorrelate attention heads\n g_t = torch.nn.functional.dropout(g_t, p=0.5, training=self.training)\n\n # attention GMM parameters\n sig_t = torch.nn.functional.softplus(b_t) + self.eps\n\n mu_t = self.mu_prev + torch.nn.functional.softplus(k_t)\n g_t = torch.softmax(g_t, dim=-1) + self.eps\n\n j = self.J[: inputs.size(1) + 1]\n\n # attention weights\n phi_t = g_t.unsqueeze(-1) * (1 / (1 + torch.sigmoid((mu_t.unsqueeze(-1) - j) / sig_t.unsqueeze(-1))))\n\n # discritize attention weights\n alpha_t = torch.sum(phi_t, 1)\n 
alpha_t = alpha_t[:, 1:] - alpha_t[:, :-1]\n alpha_t[alpha_t == 0] = 1e-8\n\n # apply masking\n if mask is not None:\n alpha_t.data.masked_fill_(~mask, self._mask_value)\n\n context = torch.bmm(alpha_t.unsqueeze(1), inputs).squeeze(1)\n self.attention_weights = alpha_t\n self.mu_prev = mu_t\n return context\n\n\nclass OriginalAttention(nn.Module):\n \"\"\"Bahdanau Attention with various optional modifications.\n - Location sensitive attnetion: https://arxiv.org/abs/1712.05884\n - Forward Attention: https://arxiv.org/abs/1807.06736 + state masking at inference\n - Using sigmoid instead of softmax normalization\n - Attention windowing at inference time\n\n Note:\n Location Sensitive Attention extends the additive attention mechanism\n to use cumulative attention weights from previous decoder time steps with the current time step features.\n\n Forward attention computes most probable monotonic alignment. The modified attention probabilities at each\n timestep are computed recursively by the forward algorithm.\n\n Transition agent in the forward attention explicitly gates the attention mechanism whether to move forward or\n stay at each decoder timestep.\n\n Attention windowing is a inductive prior that prevents the model from attending to previous and future timesteps\n beyond a certain window.\n\n Args:\n query_dim (int): number of channels in the query tensor.\n embedding_dim (int): number of channels in the vakue tensor. In general, the value tensor is the output of the encoder layer.\n attention_dim (int): number of channels of the inner attention layers.\n location_attention (bool): enable/disable location sensitive attention.\n attention_location_n_filters (int): number of location attention filters.\n attention_location_kernel_size (int): filter size of location attention convolution layer.\n windowing (int): window size for attention windowing. if it is 5, for computing the attention, it only considers the time steps [(t-5), ..., (t+5)] of the input.\n norm (str): normalization method applied to the attention weights. 'softmax' or 'sigmoid'\n forward_attn (bool): enable/disable forward attention.\n trans_agent (bool): enable/disable transition agent in the forward attention.\n forward_attn_mask (int): enable/disable an explicit masking in forward attention. 
It is useful to set at especially inference time.\n \"\"\"\n\n # Pylint gets confused by PyTorch conventions here\n # pylint: disable=attribute-defined-outside-init\n def __init__(\n self,\n query_dim,\n embedding_dim,\n attention_dim,\n location_attention,\n attention_location_n_filters,\n attention_location_kernel_size,\n windowing,\n norm,\n forward_attn,\n trans_agent,\n forward_attn_mask,\n ):\n super().__init__()\n self.query_layer = Linear(query_dim, attention_dim, bias=False, init_gain=\"tanh\")\n self.inputs_layer = Linear(embedding_dim, attention_dim, bias=False, init_gain=\"tanh\")\n self.v = Linear(attention_dim, 1, bias=True)\n if trans_agent:\n self.ta = nn.Linear(query_dim + embedding_dim, 1, bias=True)\n if location_attention:\n self.location_layer = LocationLayer(\n attention_dim,\n attention_location_n_filters,\n attention_location_kernel_size,\n )\n self._mask_value = -float(\"inf\")\n self.windowing = windowing\n self.win_idx = None\n self.norm = norm\n self.forward_attn = forward_attn\n self.trans_agent = trans_agent\n self.forward_attn_mask = forward_attn_mask\n self.location_attention = location_attention\n\n def init_win_idx(self):\n self.win_idx = -1\n self.win_back = 2\n self.win_front = 6\n\n def init_forward_attn(self, inputs):\n B = inputs.shape[0]\n T = inputs.shape[1]\n self.alpha = torch.cat([torch.ones([B, 1]), torch.zeros([B, T])[:, :-1] + 1e-7], dim=1).to(inputs.device)\n self.u = (0.5 * torch.ones([B, 1])).to(inputs.device)\n\n def init_location_attention(self, inputs):\n B = inputs.size(0)\n T = inputs.size(1)\n self.attention_weights_cum = torch.zeros([B, T], device=inputs.device)\n\n def init_states(self, inputs):\n B = inputs.size(0)\n T = inputs.size(1)\n self.attention_weights = torch.zeros([B, T], device=inputs.device)\n if self.location_attention:\n self.init_location_attention(inputs)\n if self.forward_attn:\n self.init_forward_attn(inputs)\n if self.windowing:\n self.init_win_idx()\n\n def preprocess_inputs(self, inputs):\n return self.inputs_layer(inputs)\n\n def update_location_attention(self, alignments):\n self.attention_weights_cum += alignments\n\n def get_location_attention(self, query, processed_inputs):\n attention_cat = torch.cat((self.attention_weights.unsqueeze(1), self.attention_weights_cum.unsqueeze(1)), dim=1)\n processed_query = self.query_layer(query.unsqueeze(1))\n processed_attention_weights = self.location_layer(attention_cat)\n energies = self.v(torch.tanh(processed_query + processed_attention_weights + processed_inputs))\n energies = energies.squeeze(-1)\n return energies, processed_query\n\n def get_attention(self, query, processed_inputs):\n processed_query = self.query_layer(query.unsqueeze(1))\n energies = self.v(torch.tanh(processed_query + processed_inputs))\n energies = energies.squeeze(-1)\n return energies, processed_query\n\n def apply_windowing(self, attention, inputs):\n back_win = self.win_idx - self.win_back\n front_win = self.win_idx + self.win_front\n if back_win > 0:\n attention[:, :back_win] = -float(\"inf\")\n if front_win < inputs.shape[1]:\n attention[:, front_win:] = -float(\"inf\")\n # this is a trick to solve a special problem.\n # but it does not hurt.\n if self.win_idx == -1:\n attention[:, 0] = attention.max()\n # Update the window\n self.win_idx = torch.argmax(attention, 1).long()[0].item()\n return attention\n\n def apply_forward_attention(self, alignment):\n # forward attention\n fwd_shifted_alpha = F.pad(self.alpha[:, :-1].clone().to(alignment.device), (1, 0, 0, 0))\n # compute transition 
potentials\n alpha = ((1 - self.u) * self.alpha + self.u * fwd_shifted_alpha + 1e-8) * alignment\n # force incremental alignment\n if not self.training and self.forward_attn_mask:\n _, n = fwd_shifted_alpha.max(1)\n val, _ = alpha.max(1)\n for b in range(alignment.shape[0]):\n alpha[b, n[b] + 3 :] = 0\n alpha[b, : (n[b] - 1)] = 0 # ignore all previous states to prevent repetition.\n alpha[b, (n[b] - 2)] = 0.01 * val[b] # smoothing factor for the prev step\n # renormalize attention weights\n alpha = alpha / alpha.sum(dim=1, keepdim=True)\n return alpha\n\n def forward(self, query, inputs, processed_inputs, mask):\n \"\"\"\n shapes:\n query: [B, C_attn_rnn]\n inputs: [B, T_en, D_en]\n processed_inputs: [B, T_en, D_attn]\n mask: [B, T_en]\n \"\"\"\n if self.location_attention:\n attention, _ = self.get_location_attention(query, processed_inputs)\n else:\n attention, _ = self.get_attention(query, processed_inputs)\n # apply masking\n if mask is not None:\n attention.data.masked_fill_(~mask, self._mask_value)\n # apply windowing - only in eval mode\n if not self.training and self.windowing:\n attention = self.apply_windowing(attention, inputs)\n\n # normalize attention values\n if self.norm == \"softmax\":\n alignment = torch.softmax(attention, dim=-1)\n elif self.norm == \"sigmoid\":\n alignment = torch.sigmoid(attention) / torch.sigmoid(attention).sum(dim=1, keepdim=True)\n else:\n raise ValueError(\"Unknown value for attention norm type\")\n\n if self.location_attention:\n self.update_location_attention(alignment)\n\n # apply forward attention if enabled\n if self.forward_attn:\n alignment = self.apply_forward_attention(alignment)\n self.alpha = alignment\n\n context = torch.bmm(alignment.unsqueeze(1), inputs)\n context = context.squeeze(1)\n self.attention_weights = alignment\n\n # compute transition agent\n if self.forward_attn and self.trans_agent:\n ta_input = torch.cat([context, query.squeeze(1)], dim=-1)\n self.u = torch.sigmoid(self.ta(ta_input))\n return context\n\n\nclass MonotonicDynamicConvolutionAttention(nn.Module):\n \"\"\"Dynamic convolution attention from\n https://arxiv.org/pdf/1910.10288.pdf\n\n\n query -> linear -> tanh -> linear ->|\n | mask values\n v | |\n atten_w(t-1) -|-> conv1d_dynamic -> linear -|-> tanh -> + -> softmax -> * -> * -> context\n |-> conv1d_static -> linear -| |\n |-> conv1d_prior -> log ----------------|\n\n query: attention rnn output.\n\n Note:\n Dynamic convolution attention is an alternation of the location senstive attention with\n dynamically computed convolution filters from the previous attention scores and a set of\n constraints to keep the attention alignment diagonal.\n DCA is sensitive to mixed precision training and might cause instable training.\n\n Args:\n query_dim (int): number of channels in the query tensor.\n embedding_dim (int): number of channels in the value tensor.\n static_filter_dim (int): number of channels in the convolution layer computing the static filters.\n static_kernel_size (int): kernel size for the convolution layer computing the static filters.\n dynamic_filter_dim (int): number of channels in the convolution layer computing the dynamic filters.\n dynamic_kernel_size (int): kernel size for the convolution layer computing the dynamic filters.\n prior_filter_len (int, optional): [description]. Defaults to 11 from the paper.\n alpha (float, optional): [description]. Defaults to 0.1 from the paper.\n beta (float, optional): [description]. 
Defaults to 0.9 from the paper.\n \"\"\"\n\n def __init__(\n self,\n query_dim,\n embedding_dim, # pylint: disable=unused-argument\n attention_dim,\n static_filter_dim,\n static_kernel_size,\n dynamic_filter_dim,\n dynamic_kernel_size,\n prior_filter_len=11,\n alpha=0.1,\n beta=0.9,\n ):\n super().__init__()\n self._mask_value = 1e-8\n self.dynamic_filter_dim = dynamic_filter_dim\n self.dynamic_kernel_size = dynamic_kernel_size\n self.prior_filter_len = prior_filter_len\n self.attention_weights = None\n # setup key and query layers\n self.query_layer = nn.Linear(query_dim, attention_dim)\n self.key_layer = nn.Linear(attention_dim, dynamic_filter_dim * dynamic_kernel_size, bias=False)\n self.static_filter_conv = nn.Conv1d(\n 1,\n static_filter_dim,\n static_kernel_size,\n padding=(static_kernel_size - 1) // 2,\n bias=False,\n )\n self.static_filter_layer = nn.Linear(static_filter_dim, attention_dim, bias=False)\n self.dynamic_filter_layer = nn.Linear(dynamic_filter_dim, attention_dim)\n self.v = nn.Linear(attention_dim, 1, bias=False)\n\n prior = betabinom.pmf(range(prior_filter_len), prior_filter_len - 1, alpha, beta)\n self.register_buffer(\"prior\", torch.FloatTensor(prior).flip(0))\n\n # pylint: disable=unused-argument\n def forward(self, query, inputs, processed_inputs, mask):\n \"\"\"\n query: [B, C_attn_rnn]\n inputs: [B, T_en, D_en]\n processed_inputs: place holder.\n mask: [B, T_en]\n \"\"\"\n # compute prior filters\n prior_filter = F.conv1d(\n F.pad(self.attention_weights.unsqueeze(1), (self.prior_filter_len - 1, 0)), self.prior.view(1, 1, -1)\n )\n prior_filter = torch.log(prior_filter.clamp_min_(1e-6)).squeeze(1)\n G = self.key_layer(torch.tanh(self.query_layer(query)))\n # compute dynamic filters\n dynamic_filter = F.conv1d(\n self.attention_weights.unsqueeze(0),\n G.view(-1, 1, self.dynamic_kernel_size),\n padding=(self.dynamic_kernel_size - 1) // 2,\n groups=query.size(0),\n )\n dynamic_filter = dynamic_filter.view(query.size(0), self.dynamic_filter_dim, -1).transpose(1, 2)\n # compute static filters\n static_filter = self.static_filter_conv(self.attention_weights.unsqueeze(1)).transpose(1, 2)\n alignment = (\n self.v(\n torch.tanh(self.static_filter_layer(static_filter) + self.dynamic_filter_layer(dynamic_filter))\n ).squeeze(-1)\n + prior_filter\n )\n # compute attention weights\n attention_weights = F.softmax(alignment, dim=-1)\n # apply masking\n if mask is not None:\n attention_weights.data.masked_fill_(~mask, self._mask_value)\n self.attention_weights = attention_weights\n # compute context\n context = torch.bmm(attention_weights.unsqueeze(1), inputs).squeeze(1)\n return context\n\n def preprocess_inputs(self, inputs): # pylint: disable=no-self-use\n return None\n\n def init_states(self, inputs):\n B = inputs.size(0)\n T = inputs.size(1)\n self.attention_weights = torch.zeros([B, T], device=inputs.device)\n self.attention_weights[:, 0] = 1.0\n\n\ndef init_attn(\n attn_type,\n query_dim,\n embedding_dim,\n attention_dim,\n location_attention,\n attention_location_n_filters,\n attention_location_kernel_size,\n windowing,\n norm,\n forward_attn,\n trans_agent,\n forward_attn_mask,\n attn_K,\n):\n if attn_type == \"original\":\n return OriginalAttention(\n query_dim,\n embedding_dim,\n attention_dim,\n location_attention,\n attention_location_n_filters,\n attention_location_kernel_size,\n windowing,\n norm,\n forward_attn,\n trans_agent,\n forward_attn_mask,\n )\n if attn_type == \"graves\":\n return GravesAttention(query_dim, attn_K)\n if attn_type == 
\"dynamic_convolution\":\n return MonotonicDynamicConvolutionAttention(\n query_dim,\n embedding_dim,\n attention_dim,\n static_filter_dim=8,\n static_kernel_size=21,\n dynamic_filter_dim=8,\n dynamic_kernel_size=21,\n prior_filter_len=11,\n alpha=0.1,\n beta=0.9,\n )\n\n raise RuntimeError(f\" [!] Given Attention Type '{attn_type}' is not exist.\")\n",
"path": "TTS/tts/layers/tacotron/attentions.py"
}
] | diff --git a/TTS/tts/layers/tacotron/attentions.py b/TTS/tts/layers/tacotron/attentions.py
index 8c30a00a4a..d8a90d7201 100644
--- a/TTS/tts/layers/tacotron/attentions.py
+++ b/TTS/tts/layers/tacotron/attentions.py
@@ -484,4 +484,4 @@ def init_attn(
beta=0.9,
)
- raise RuntimeError(" [!] Given Attention Type '{attn_type}' is not exist.")
+ raise RuntimeError(f" [!] Given Attention Type '{attn_type}' is not exist.")
|
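A side note on the one-line fix above: the original message was a plain string, so the `{attn_type}` placeholder was never interpolated; the patch only adds the `f` prefix. A minimal illustration (the value of `attn_type` here is hypothetical):

```
attn_type = "bogus"  # hypothetical value, purely for illustration

# Without the f prefix the braces are printed literally:
print(" [!] Given Attention Type '{attn_type}' is not exist.")

# With the f prefix the variable is interpolated, as in the patched line:
print(f" [!] Given Attention Type '{attn_type}' is not exist.")
```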
Lightning-AI__torchmetrics-959 | PSNR - Higher is better.
## 🐛 Bug
`PSNR.higher_is_better` should be `True`
### Additional context
This is a simple change; [PR#959](https://github.com/PyTorchLightning/metrics/pull/959) implements it.
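A minimal sketch of why the flag should be `True`, using only the public API already quoted in the class docstring in this record (numbers are approximate and assume the default `data_range` inferred from the target): a smaller reconstruction error yields a larger PSNR, so larger scores are better.

```
import torch
from torchmetrics import PeakSignalNoiseRatio

target = torch.tensor([[3.0, 2.0], [1.0, 0.0]])
good_preds = target + 0.1  # small error
bad_preds = target + 1.0   # large error

# Separate metric instances so accumulated state does not mix.
print(PeakSignalNoiseRatio()(good_preds, target))  # roughly 29.5 dB
print(PeakSignalNoiseRatio()(bad_preds, target))   # roughly 9.5 dB; the worse prediction scores lower
```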
| [
{
"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Dict, Optional, Sequence, Tuple, Union\n\nimport torch\nfrom torch import Tensor, tensor\nfrom typing_extensions import Literal\n\nfrom torchmetrics.functional.image.psnr import _psnr_compute, _psnr_update\nfrom torchmetrics.metric import Metric\nfrom torchmetrics.utilities import rank_zero_warn\n\n\nclass PeakSignalNoiseRatio(Metric):\n r\"\"\"\n Computes `Computes Peak Signal-to-Noise Ratio`_ (PSNR):\n\n .. math:: \\text{PSNR}(I, J) = 10 * \\log_{10} \\left(\\frac{\\max(I)^2}{\\text{MSE}(I, J)}\\right)\n\n Where :math:`\\text{MSE}` denotes the `mean-squared-error`_ function.\n\n Args:\n data_range:\n the range of the data. If None, it is determined from the data (max - min).\n The ``data_range`` must be given when ``dim`` is not None.\n base: a base of a logarithm to use.\n reduction: a method to reduce metric score over labels.\n\n - ``'elementwise_mean'``: takes the mean (default)\n - ``'sum'``: takes the sum\n - ``'none'`` or ``None``: no reduction will be applied\n\n dim:\n Dimensions to reduce PSNR scores over, provided as either an integer or a list of integers. Default is\n None meaning scores will be reduced across all dimensions and all batches.\n compute_on_step:\n Forward only calls ``update()`` and returns None if this is set to False.\n\n .. deprecated:: v0.8\n Argument has no use anymore and will be removed v0.9.\n\n kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.\n\n Raises:\n ValueError:\n If ``dim`` is not ``None`` and ``data_range`` is not given.\n\n Example:\n >>> from torchmetrics import PeakSignalNoiseRatio\n >>> psnr = PeakSignalNoiseRatio()\n >>> preds = torch.tensor([[0.0, 1.0], [2.0, 3.0]])\n >>> target = torch.tensor([[3.0, 2.0], [1.0, 0.0]])\n >>> psnr(preds, target)\n tensor(2.5527)\n\n .. 
note::\n Half precision is only support on GPU for this metric\n\n \"\"\"\n min_target: Tensor\n max_target: Tensor\n higher_is_better = False\n\n def __init__(\n self,\n data_range: Optional[float] = None,\n base: float = 10.0,\n reduction: Literal[\"elementwise_mean\", \"sum\", \"none\", None] = \"elementwise_mean\",\n dim: Optional[Union[int, Tuple[int, ...]]] = None,\n compute_on_step: Optional[bool] = None,\n **kwargs: Dict[str, Any],\n ) -> None:\n super().__init__(compute_on_step=compute_on_step, **kwargs)\n\n if dim is None and reduction != \"elementwise_mean\":\n rank_zero_warn(f\"The `reduction={reduction}` will not have any effect when `dim` is None.\")\n\n if dim is None:\n self.add_state(\"sum_squared_error\", default=tensor(0.0), dist_reduce_fx=\"sum\")\n self.add_state(\"total\", default=tensor(0), dist_reduce_fx=\"sum\")\n else:\n self.add_state(\"sum_squared_error\", default=[])\n self.add_state(\"total\", default=[])\n\n if data_range is None:\n if dim is not None:\n # Maybe we could use `torch.amax(target, dim=dim) - torch.amin(target, dim=dim)` in PyTorch 1.7 to\n # calculate `data_range` in the future.\n raise ValueError(\"The `data_range` must be given when `dim` is not None.\")\n\n self.data_range = None\n self.add_state(\"min_target\", default=tensor(0.0), dist_reduce_fx=torch.min)\n self.add_state(\"max_target\", default=tensor(0.0), dist_reduce_fx=torch.max)\n else:\n self.add_state(\"data_range\", default=tensor(float(data_range)), dist_reduce_fx=\"mean\")\n self.base = base\n self.reduction = reduction\n self.dim = tuple(dim) if isinstance(dim, Sequence) else dim\n\n def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore\n \"\"\"Update state with predictions and targets.\n\n Args:\n preds: Predictions from model\n target: Ground truth values\n \"\"\"\n sum_squared_error, n_obs = _psnr_update(preds, target, dim=self.dim)\n if self.dim is None:\n if self.data_range is None:\n # keep track of min and max target values\n self.min_target = min(target.min(), self.min_target)\n self.max_target = max(target.max(), self.max_target)\n\n self.sum_squared_error += sum_squared_error\n self.total += n_obs\n else:\n self.sum_squared_error.append(sum_squared_error)\n self.total.append(n_obs)\n\n def compute(self) -> Tensor:\n \"\"\"Compute peak signal-to-noise ratio over state.\"\"\"\n if self.data_range is not None:\n data_range = self.data_range\n else:\n data_range = self.max_target - self.min_target\n\n if self.dim is None:\n sum_squared_error = self.sum_squared_error\n total = self.total\n else:\n sum_squared_error = torch.cat([values.flatten() for values in self.sum_squared_error])\n total = torch.cat([values.flatten() for values in self.total])\n return _psnr_compute(sum_squared_error, total, data_range, base=self.base, reduction=self.reduction)\n",
"path": "torchmetrics/image/psnr.py"
}
] | [
{
"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Dict, Optional, Sequence, Tuple, Union\n\nimport torch\nfrom torch import Tensor, tensor\nfrom typing_extensions import Literal\n\nfrom torchmetrics.functional.image.psnr import _psnr_compute, _psnr_update\nfrom torchmetrics.metric import Metric\nfrom torchmetrics.utilities import rank_zero_warn\n\n\nclass PeakSignalNoiseRatio(Metric):\n r\"\"\"\n Computes `Computes Peak Signal-to-Noise Ratio`_ (PSNR):\n\n .. math:: \\text{PSNR}(I, J) = 10 * \\log_{10} \\left(\\frac{\\max(I)^2}{\\text{MSE}(I, J)}\\right)\n\n Where :math:`\\text{MSE}` denotes the `mean-squared-error`_ function.\n\n Args:\n data_range:\n the range of the data. If None, it is determined from the data (max - min).\n The ``data_range`` must be given when ``dim`` is not None.\n base: a base of a logarithm to use.\n reduction: a method to reduce metric score over labels.\n\n - ``'elementwise_mean'``: takes the mean (default)\n - ``'sum'``: takes the sum\n - ``'none'`` or ``None``: no reduction will be applied\n\n dim:\n Dimensions to reduce PSNR scores over, provided as either an integer or a list of integers. Default is\n None meaning scores will be reduced across all dimensions and all batches.\n compute_on_step:\n Forward only calls ``update()`` and returns None if this is set to False.\n\n .. deprecated:: v0.8\n Argument has no use anymore and will be removed v0.9.\n\n kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.\n\n Raises:\n ValueError:\n If ``dim`` is not ``None`` and ``data_range`` is not given.\n\n Example:\n >>> from torchmetrics import PeakSignalNoiseRatio\n >>> psnr = PeakSignalNoiseRatio()\n >>> preds = torch.tensor([[0.0, 1.0], [2.0, 3.0]])\n >>> target = torch.tensor([[3.0, 2.0], [1.0, 0.0]])\n >>> psnr(preds, target)\n tensor(2.5527)\n\n .. 
note::\n Half precision is only support on GPU for this metric\n\n \"\"\"\n min_target: Tensor\n max_target: Tensor\n higher_is_better = True\n\n def __init__(\n self,\n data_range: Optional[float] = None,\n base: float = 10.0,\n reduction: Literal[\"elementwise_mean\", \"sum\", \"none\", None] = \"elementwise_mean\",\n dim: Optional[Union[int, Tuple[int, ...]]] = None,\n compute_on_step: Optional[bool] = None,\n **kwargs: Dict[str, Any],\n ) -> None:\n super().__init__(compute_on_step=compute_on_step, **kwargs)\n\n if dim is None and reduction != \"elementwise_mean\":\n rank_zero_warn(f\"The `reduction={reduction}` will not have any effect when `dim` is None.\")\n\n if dim is None:\n self.add_state(\"sum_squared_error\", default=tensor(0.0), dist_reduce_fx=\"sum\")\n self.add_state(\"total\", default=tensor(0), dist_reduce_fx=\"sum\")\n else:\n self.add_state(\"sum_squared_error\", default=[])\n self.add_state(\"total\", default=[])\n\n if data_range is None:\n if dim is not None:\n # Maybe we could use `torch.amax(target, dim=dim) - torch.amin(target, dim=dim)` in PyTorch 1.7 to\n # calculate `data_range` in the future.\n raise ValueError(\"The `data_range` must be given when `dim` is not None.\")\n\n self.data_range = None\n self.add_state(\"min_target\", default=tensor(0.0), dist_reduce_fx=torch.min)\n self.add_state(\"max_target\", default=tensor(0.0), dist_reduce_fx=torch.max)\n else:\n self.add_state(\"data_range\", default=tensor(float(data_range)), dist_reduce_fx=\"mean\")\n self.base = base\n self.reduction = reduction\n self.dim = tuple(dim) if isinstance(dim, Sequence) else dim\n\n def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore\n \"\"\"Update state with predictions and targets.\n\n Args:\n preds: Predictions from model\n target: Ground truth values\n \"\"\"\n sum_squared_error, n_obs = _psnr_update(preds, target, dim=self.dim)\n if self.dim is None:\n if self.data_range is None:\n # keep track of min and max target values\n self.min_target = min(target.min(), self.min_target)\n self.max_target = max(target.max(), self.max_target)\n\n self.sum_squared_error += sum_squared_error\n self.total += n_obs\n else:\n self.sum_squared_error.append(sum_squared_error)\n self.total.append(n_obs)\n\n def compute(self) -> Tensor:\n \"\"\"Compute peak signal-to-noise ratio over state.\"\"\"\n if self.data_range is not None:\n data_range = self.data_range\n else:\n data_range = self.max_target - self.min_target\n\n if self.dim is None:\n sum_squared_error = self.sum_squared_error\n total = self.total\n else:\n sum_squared_error = torch.cat([values.flatten() for values in self.sum_squared_error])\n total = torch.cat([values.flatten() for values in self.total])\n return _psnr_compute(sum_squared_error, total, data_range, base=self.base, reduction=self.reduction)\n",
"path": "torchmetrics/image/psnr.py"
}
] | diff --git a/torchmetrics/image/psnr.py b/torchmetrics/image/psnr.py
index ba01b85b533..18b89452045 100644
--- a/torchmetrics/image/psnr.py
+++ b/torchmetrics/image/psnr.py
@@ -70,7 +70,7 @@ class PeakSignalNoiseRatio(Metric):
"""
min_target: Tensor
max_target: Tensor
- higher_is_better = False
+ higher_is_better = True
def __init__(
self,
|
beeware__toga-267 | GTK+: TypeError: on_close() takes 2 positional arguments but 3 were given
When creating a new Window and then closing it, a TypeError is raised on Linux. Once you have created an App, running the following will reproduce the error:
```
window = toga.Window()
window.app = app
window.show()
```
Then close the window and you get:
```
TypeError: on_close() takes 2 positional arguments but 3 were given
```
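The root cause (and the fix shown in the diff below) is the GTK callback signature: the window's close handler is presumably connected to a signal such as `delete-event`, which GTK invokes with both the widget and the event, so a bound method needs three parameters. A minimal PyGObject sketch, independent of Toga and assuming GTK 3 is available:

```
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk

def on_close(widget, event):
    # GTK calls 'delete-event' handlers as callback(widget, event),
    # which is why a bound method needs (self, widget, event).
    print("window closing:", widget)
    return False  # False lets GTK continue and destroy the window

win = Gtk.Window(title="demo")
win.connect("delete-event", on_close)
win.connect("destroy", Gtk.main_quit)
win.show_all()
Gtk.main()
```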
| [
{
"content": "import asyncio\nimport os\nimport signal\nimport sys\n\ntry:\n import gi\nexcept ImportError:\n # app.py is the first module that will be imported when you import toga_gtk.\n #\n # If Gtk can't be imported, it may be because we're in a virtualenv,\n # and the system python libraries aren't visible. This can be fixed by\n # creating a symlink into the site-packages\n # Try creating a symlink to the system library location.\n # base_packages_dir is where the packages installed by the package manager\n # can be found.\n # gi_system_install_path is where gi can be found in the packages dir.\n # installer_command is the command the user can run to install gi.\n py_version = \"%d.%d\" % (sys.version_info.major, sys.version_info.minor)\n\n if sys.version_info.major == 3:\n if os.path.isdir('/usr/lib64/python%s/site-packages/' % (py_version,)):\n # Fedora\n base_packages_dir = '/usr/lib64/python%s/site-packages/' % (py_version,)\n gi_system_install_path = '/usr/lib64/python%s/site-packages/gi' % (py_version,)\n installer_command = 'dnf install pygobject3 python3-gobject'\n elif os.path.isdir('/usr/lib/python3/dist-packages/'):\n # Ubuntu, Debian\n base_packages_dir = '/usr/lib/python3/dist-packages/'\n gi_system_install_path = '/usr/local/lib/python3/dist-packages/gi'\n installer_command = 'apt-get install python3-gi'\n elif os.path.isdir('/usr/lib/python%s/site-packages/' % (py_version,)):\n # Arch\n base_packages_dir = '/usr/lib/python%s/site-packages/' % (py_version,)\n gi_system_install_path = '/usr/lib/python%s/site-packages/gi' % (py_version,)\n installer_command = 'pacman -S python-gobject'\n else:\n raise RuntimeError(\"Unable to locate your Python packages dir.\")\n else:\n raise RuntimeError(\"Toga requires Python 3.\")\n\n # Use the location of this package to guide us to\n # the location of the virtualenv.\n gi_symlink_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'gi')\n pygtkcompat_symlink_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'pygtkcompat')\n\n if gi_symlink_path == gi_system_install_path:\n # If we're not in a virtualenv, just raise the original import error.\n raise\n else:\n gi_path = os.path.join(base_packages_dir, 'gi')\n pygtkcompat_path = os.path.join(base_packages_dir, 'pygtkcompat')\n if os.path.exists(gi_path) and os.path.isdir(gi_path):\n # If we can identify the gi library, create a symlink to it.\n try:\n print(\"Creating symlink (%s & %s) to system GTK+ libraries...\" % (gi_symlink_path, pygtkcompat_symlink_path))\n os.symlink(gi_path, gi_symlink_path)\n os.symlink(pygtkcompat_path, pygtkcompat_symlink_path)\n\n # The call to os.symlink will return almost immediately,\n # but for some reason, it may not be fully flushed to\n # the file system. One way to fix this is to start\n # the process again. This call to os.execl restarts the\n # program with the same arguments, replacing the original\n # operating system process.\n os.execl(sys.executable, sys.executable, *sys.argv)\n except OSError:\n raise RuntimeError(\"Unable to automatically create symlink to system Python GTK+ bindings.\")\n else:\n raise RuntimeError(\"Unable to locate the Python GTK+ bindings. 
Have you run '%s'?\" % installer_command)\n\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk, Gio, GLib\n\n\nfrom toga.command import GROUP_BREAK, SECTION_BREAK, Command, Group\n# from .command import Command, Group\nimport toga\nfrom .window import Window\nfrom toga import Icon\nfrom toga.utils import wrapped_handler\n\nimport gbulb\n\n\nclass MainWindow(Window):\n _IMPL_CLASS = Gtk.ApplicationWindow\n\n def on_close(self, widget):\n pass\n\n\nclass App:\n \"\"\"\n Todo:\n * Creation of Menus is not working.\n * Disabling of menu items is not working.\n * App Icon is not showing up\n \"\"\"\n def __init__(self, interface):\n self.interface = interface\n self.interface._impl = self\n\n gbulb.install(gtk=True)\n self.loop = asyncio.get_event_loop()\n\n self.create()\n\n def create(self):\n Icon.app_icon = Icon.load(self.interface.icon, default=Icon.TIBERIUS_ICON)\n # Stimulate the build of the app\n self.native = Gtk.Application(application_id=self.interface.app_id, flags=Gio.ApplicationFlags.FLAGS_NONE)\n\n # Connect the GTK signal that will cause app startup to occur\n self.native.connect('startup', self.startup)\n self.native.connect('activate', self.activate)\n # self.native.connect('shutdown', self.shutdown)\n\n self.actions = None\n\n def startup(self, data=None):\n self.interface.commands.add(\n Command(None, 'About ' + self.interface.name, group=toga.Group.APP),\n Command(None, 'Preferences', group=toga.Group.APP),\n # Quit should always be the last item, in a section on it's own\n Command(lambda s: self.exit(), 'Quit ' + self.interface.name, shortcut='q', group=toga.Group.APP, section=sys.maxsize),\n Command(None, 'Visit homepage', group=toga.Group.HELP)\n )\n\n self.interface.startup()\n\n # Create the lookup table of menu items,\n # then force the creation of the menus.\n self._actions = {}\n self.create_menus()\n # self.interface.main_window._impl.create_toolbar()\n\n def activate(self, data=None):\n pass\n\n def open_document(self, fileURL):\n '''Add a new document to this app.'''\n print(\"STUB: If you want to handle opening documents, implement App.open_document(fileURL)\")\n\n def create_menus(self):\n # Only create the menu if the menu item index has been created.\n if hasattr(self, '_actions'):\n self._actions = {}\n menubar = Gio.Menu()\n label = None\n submenu = None\n section = None\n for cmd in self.interface.commands:\n if cmd == GROUP_BREAK:\n if section:\n submenu.append_section(None, section)\n\n if label == '*':\n self.native.set_app_menu(submenu)\n else:\n menubar.append_submenu(label, submenu)\n\n label = None\n submenu = None\n section = None\n elif cmd == SECTION_BREAK:\n submenu.append_section(None, section)\n section = None\n\n else:\n if submenu is None:\n label = cmd.group.label\n submenu = Gio.Menu()\n\n if section is None:\n section = Gio.Menu()\n\n try:\n action = self._actions[cmd]\n except KeyError:\n cmd_id = \"command-%s\" % id(cmd)\n action = Gio.SimpleAction.new(cmd_id, None)\n if cmd.action:\n action.connect(\"activate\", wrapped_handler(cmd, cmd.action))\n cmd._widgets.append(action)\n self._actions[cmd] = action\n self.native.add_action(action)\n\n cmd._impl._set_enabled(cmd.enabled)\n\n item = Gio.MenuItem.new(cmd.label, 'app.' 
+ cmd_id)\n if cmd.shortcut:\n item.set_attribute_value('accel', GLib.Variant('s', '<Primary>%s' % cmd.shortcut.upper()))\n\n # item.set_attribute_value('accel', GLib.Variant(cmd.shortcut, '<Primary>%s' % cmd.shortcut.upper()))\n\n section.append_item(item)\n\n if section:\n submenu.append_section(None, section)\n\n if submenu:\n if label == '*':\n self.native.set_app_menu(submenu)\n else:\n menubar.append_submenu(label, submenu)\n\n # Set the menu for the app.\n self.native.set_menubar(menubar)\n\n def main_loop(self):\n # Modify signal handlers to make sure Ctrl-C is caught and handled.\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n\n self.loop.run_forever(application=self.native)\n\n def exit(self):\n self.native.quit()\n",
"path": "src/gtk/toga_gtk/app.py"
}
] | [
{
"content": "import asyncio\nimport os\nimport signal\nimport sys\n\ntry:\n import gi\nexcept ImportError:\n # app.py is the first module that will be imported when you import toga_gtk.\n #\n # If Gtk can't be imported, it may be because we're in a virtualenv,\n # and the system python libraries aren't visible. This can be fixed by\n # creating a symlink into the site-packages\n # Try creating a symlink to the system library location.\n # base_packages_dir is where the packages installed by the package manager\n # can be found.\n # gi_system_install_path is where gi can be found in the packages dir.\n # installer_command is the command the user can run to install gi.\n py_version = \"%d.%d\" % (sys.version_info.major, sys.version_info.minor)\n\n if sys.version_info.major == 3:\n if os.path.isdir('/usr/lib64/python%s/site-packages/' % (py_version,)):\n # Fedora\n base_packages_dir = '/usr/lib64/python%s/site-packages/' % (py_version,)\n gi_system_install_path = '/usr/lib64/python%s/site-packages/gi' % (py_version,)\n installer_command = 'dnf install pygobject3 python3-gobject'\n elif os.path.isdir('/usr/lib/python3/dist-packages/'):\n # Ubuntu, Debian\n base_packages_dir = '/usr/lib/python3/dist-packages/'\n gi_system_install_path = '/usr/local/lib/python3/dist-packages/gi'\n installer_command = 'apt-get install python3-gi'\n elif os.path.isdir('/usr/lib/python%s/site-packages/' % (py_version,)):\n # Arch\n base_packages_dir = '/usr/lib/python%s/site-packages/' % (py_version,)\n gi_system_install_path = '/usr/lib/python%s/site-packages/gi' % (py_version,)\n installer_command = 'pacman -S python-gobject'\n else:\n raise RuntimeError(\"Unable to locate your Python packages dir.\")\n else:\n raise RuntimeError(\"Toga requires Python 3.\")\n\n # Use the location of this package to guide us to\n # the location of the virtualenv.\n gi_symlink_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'gi')\n pygtkcompat_symlink_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'pygtkcompat')\n\n if gi_symlink_path == gi_system_install_path:\n # If we're not in a virtualenv, just raise the original import error.\n raise\n else:\n gi_path = os.path.join(base_packages_dir, 'gi')\n pygtkcompat_path = os.path.join(base_packages_dir, 'pygtkcompat')\n if os.path.exists(gi_path) and os.path.isdir(gi_path):\n # If we can identify the gi library, create a symlink to it.\n try:\n print(\"Creating symlink (%s & %s) to system GTK+ libraries...\" % (gi_symlink_path, pygtkcompat_symlink_path))\n os.symlink(gi_path, gi_symlink_path)\n os.symlink(pygtkcompat_path, pygtkcompat_symlink_path)\n\n # The call to os.symlink will return almost immediately,\n # but for some reason, it may not be fully flushed to\n # the file system. One way to fix this is to start\n # the process again. This call to os.execl restarts the\n # program with the same arguments, replacing the original\n # operating system process.\n os.execl(sys.executable, sys.executable, *sys.argv)\n except OSError:\n raise RuntimeError(\"Unable to automatically create symlink to system Python GTK+ bindings.\")\n else:\n raise RuntimeError(\"Unable to locate the Python GTK+ bindings. 
Have you run '%s'?\" % installer_command)\n\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk, Gio, GLib\n\n\nfrom toga.command import GROUP_BREAK, SECTION_BREAK, Command, Group\n# from .command import Command, Group\nimport toga\nfrom .window import Window\nfrom toga import Icon\nfrom toga.utils import wrapped_handler\n\nimport gbulb\n\n\nclass MainWindow(Window):\n _IMPL_CLASS = Gtk.ApplicationWindow\n\n def on_close(self, widget, data):\n pass\n\n\nclass App:\n \"\"\"\n Todo:\n * Creation of Menus is not working.\n * Disabling of menu items is not working.\n * App Icon is not showing up\n \"\"\"\n def __init__(self, interface):\n self.interface = interface\n self.interface._impl = self\n\n gbulb.install(gtk=True)\n self.loop = asyncio.get_event_loop()\n\n self.create()\n\n def create(self):\n Icon.app_icon = Icon.load(self.interface.icon, default=Icon.TIBERIUS_ICON)\n # Stimulate the build of the app\n self.native = Gtk.Application(application_id=self.interface.app_id, flags=Gio.ApplicationFlags.FLAGS_NONE)\n\n # Connect the GTK signal that will cause app startup to occur\n self.native.connect('startup', self.startup)\n self.native.connect('activate', self.activate)\n # self.native.connect('shutdown', self.shutdown)\n\n self.actions = None\n\n def startup(self, data=None):\n self.interface.commands.add(\n Command(None, 'About ' + self.interface.name, group=toga.Group.APP),\n Command(None, 'Preferences', group=toga.Group.APP),\n # Quit should always be the last item, in a section on it's own\n Command(lambda s: self.exit(), 'Quit ' + self.interface.name, shortcut='q', group=toga.Group.APP, section=sys.maxsize),\n Command(None, 'Visit homepage', group=toga.Group.HELP)\n )\n\n self.interface.startup()\n\n # Create the lookup table of menu items,\n # then force the creation of the menus.\n self._actions = {}\n self.create_menus()\n # self.interface.main_window._impl.create_toolbar()\n\n def activate(self, data=None):\n pass\n\n def open_document(self, fileURL):\n '''Add a new document to this app.'''\n print(\"STUB: If you want to handle opening documents, implement App.open_document(fileURL)\")\n\n def create_menus(self):\n # Only create the menu if the menu item index has been created.\n if hasattr(self, '_actions'):\n self._actions = {}\n menubar = Gio.Menu()\n label = None\n submenu = None\n section = None\n for cmd in self.interface.commands:\n if cmd == GROUP_BREAK:\n if section:\n submenu.append_section(None, section)\n\n if label == '*':\n self.native.set_app_menu(submenu)\n else:\n menubar.append_submenu(label, submenu)\n\n label = None\n submenu = None\n section = None\n elif cmd == SECTION_BREAK:\n submenu.append_section(None, section)\n section = None\n\n else:\n if submenu is None:\n label = cmd.group.label\n submenu = Gio.Menu()\n\n if section is None:\n section = Gio.Menu()\n\n try:\n action = self._actions[cmd]\n except KeyError:\n cmd_id = \"command-%s\" % id(cmd)\n action = Gio.SimpleAction.new(cmd_id, None)\n if cmd.action:\n action.connect(\"activate\", wrapped_handler(cmd, cmd.action))\n cmd._widgets.append(action)\n self._actions[cmd] = action\n self.native.add_action(action)\n\n cmd._impl._set_enabled(cmd.enabled)\n\n item = Gio.MenuItem.new(cmd.label, 'app.' 
+ cmd_id)\n if cmd.shortcut:\n item.set_attribute_value('accel', GLib.Variant('s', '<Primary>%s' % cmd.shortcut.upper()))\n\n # item.set_attribute_value('accel', GLib.Variant(cmd.shortcut, '<Primary>%s' % cmd.shortcut.upper()))\n\n section.append_item(item)\n\n if section:\n submenu.append_section(None, section)\n\n if submenu:\n if label == '*':\n self.native.set_app_menu(submenu)\n else:\n menubar.append_submenu(label, submenu)\n\n # Set the menu for the app.\n self.native.set_menubar(menubar)\n\n def main_loop(self):\n # Modify signal handlers to make sure Ctrl-C is caught and handled.\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n\n self.loop.run_forever(application=self.native)\n\n def exit(self):\n self.native.quit()\n",
"path": "src/gtk/toga_gtk/app.py"
}
] | diff --git a/src/gtk/toga_gtk/app.py b/src/gtk/toga_gtk/app.py
index 0119ea05fe..765fe10064 100644
--- a/src/gtk/toga_gtk/app.py
+++ b/src/gtk/toga_gtk/app.py
@@ -86,7 +86,7 @@
class MainWindow(Window):
_IMPL_CLASS = Gtk.ApplicationWindow
- def on_close(self, widget):
+ def on_close(self, widget, data):
pass
|
ipython__ipython-9645 | Readline-like display of options crashes the IPython terminal.
| [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Setup script for IPython.\n\nUnder Posix environments it works like a typical setup.py script.\nUnder Windows, the command sdist is not supported, since IPython\nrequires utilities which are not available under Windows.\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (c) 2008-2011, IPython Development Team.\n# Copyright (c) 2001-2007, Fernando Perez <[email protected]>\n# Copyright (c) 2001, Janko Hauser <[email protected]>\n# Copyright (c) 2001, Nathaniel Gray <[email protected]>\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.rst, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Minimal Python version sanity check\n#-----------------------------------------------------------------------------\nfrom __future__ import print_function\n\nimport sys\n\n# This check is also made in IPython/__init__, don't forget to update both when\n# changing Python version requirements.\nv = sys.version_info\nif v[:2] < (2,7) or (v[0] >= 3 and v[:2] < (3,3)):\n error = \"ERROR: IPython requires Python version 2.7 or 3.3 or above.\"\n print(error, file=sys.stderr)\n sys.exit(1)\n\nPY3 = (sys.version_info[0] >= 3)\n\n# At least we're on the python version we need, move on.\n\n#-------------------------------------------------------------------------------\n# Imports\n#-------------------------------------------------------------------------------\n\n# Stdlib imports\nimport os\n\nfrom glob import glob\n\n# BEFORE importing distutils, remove MANIFEST. distutils doesn't properly\n# update it when the contents of directories change.\nif os.path.exists('MANIFEST'): os.remove('MANIFEST')\n\nfrom distutils.core import setup\n\n# Our own imports\nfrom setupbase import target_update\n\nfrom setupbase import (\n setup_args,\n find_packages,\n find_package_data,\n check_package_data_first,\n find_entry_points,\n build_scripts_entrypt,\n find_data_files,\n git_prebuild,\n install_symlinked,\n install_lib_symlink,\n install_scripts_for_symlink,\n unsymlink,\n)\n\nisfile = os.path.isfile\npjoin = os.path.join\n\n#-------------------------------------------------------------------------------\n# Handle OS specific things\n#-------------------------------------------------------------------------------\n\nif os.name in ('nt','dos'):\n os_name = 'windows'\nelse:\n os_name = os.name\n\n# Under Windows, 'sdist' has not been supported. Now that the docs build with\n# Sphinx it might work, but let's not turn it on until someone confirms that it\n# actually works.\nif os_name == 'windows' and 'sdist' in sys.argv:\n print('The sdist command is not available under Windows. Exiting.')\n sys.exit(1)\n\n\n#-------------------------------------------------------------------------------\n# Things related to the IPython documentation\n#-------------------------------------------------------------------------------\n\n# update the manuals when building a source dist\nif len(sys.argv) >= 2 and sys.argv[1] in ('sdist','bdist_rpm'):\n\n # List of things to be updated. 
Each entry is a triplet of args for\n # target_update()\n to_update = [\n ('docs/man/ipython.1.gz',\n ['docs/man/ipython.1'],\n 'cd docs/man && gzip -9c ipython.1 > ipython.1.gz'),\n ]\n\n\n [ target_update(*t) for t in to_update ]\n\n#---------------------------------------------------------------------------\n# Find all the packages, package data, and data_files\n#---------------------------------------------------------------------------\n\npackages = find_packages()\npackage_data = find_package_data()\n\ndata_files = find_data_files()\n\nsetup_args['packages'] = packages\nsetup_args['package_data'] = package_data\nsetup_args['data_files'] = data_files\n\n#---------------------------------------------------------------------------\n# custom distutils commands\n#---------------------------------------------------------------------------\n# imports here, so they are after setuptools import if there was one\nfrom distutils.command.sdist import sdist\nfrom distutils.command.upload import upload\n\nclass UploadWindowsInstallers(upload):\n\n description = \"Upload Windows installers to PyPI (only used from tools/release_windows.py)\"\n user_options = upload.user_options + [\n ('files=', 'f', 'exe file (or glob) to upload')\n ]\n def initialize_options(self):\n upload.initialize_options(self)\n meta = self.distribution.metadata\n base = '{name}-{version}'.format(\n name=meta.get_name(),\n version=meta.get_version()\n )\n self.files = os.path.join('dist', '%s.*.exe' % base)\n\n def run(self):\n for dist_file in glob(self.files):\n self.upload_file('bdist_wininst', 'any', dist_file)\n\nsetup_args['cmdclass'] = {\n 'build_py': \\\n check_package_data_first(git_prebuild('IPython')),\n 'sdist' : git_prebuild('IPython', sdist),\n 'upload_wininst' : UploadWindowsInstallers,\n 'symlink': install_symlinked,\n 'install_lib_symlink': install_lib_symlink,\n 'install_scripts_sym': install_scripts_for_symlink,\n 'unsymlink': unsymlink,\n}\n\n\n#---------------------------------------------------------------------------\n# Handle scripts, dependencies, and setuptools specific things\n#---------------------------------------------------------------------------\n\n# For some commands, use setuptools. Note that we do NOT list install here!\n# If you want a setuptools-enhanced install, just run 'setupegg.py install'\nneeds_setuptools = set(('develop', 'release', 'bdist_egg', 'bdist_rpm',\n 'bdist', 'bdist_dumb', 'bdist_wininst', 'bdist_wheel',\n 'egg_info', 'easy_install', 'upload', 'install_egg_info',\n ))\n\nif len(needs_setuptools.intersection(sys.argv)) > 0:\n import setuptools\n\n# This dict is used for passing extra arguments that are setuptools\n# specific to setup\nsetuptools_extra_args = {}\n\n# setuptools requirements\n\nextras_require = dict(\n parallel = ['ipyparallel'],\n qtconsole = ['qtconsole'],\n doc = ['Sphinx>=1.3'],\n test = ['nose>=0.10.1', 'requests', 'testpath', 'pygments', 'nbformat', 'ipykernel', 'numpy'],\n terminal = [],\n kernel = ['ipykernel'],\n nbformat = ['nbformat'],\n notebook = ['notebook', 'ipywidgets'],\n nbconvert = ['nbconvert'],\n)\n\ninstall_requires = [\n 'setuptools>=18.5',\n 'decorator',\n 'pickleshare',\n 'simplegeneric>0.8',\n 'traitlets>=4.2',\n 'prompt_toolkit>=1.0.1,<2.0.0',\n 'pygments',\n]\n\n# Platform-specific dependencies:\n# This is the correct way to specify these,\n# but requires pip >= 6. 
pip < 6 ignores these.\n\nextras_require.update({\n ':python_version == \"2.7\"': ['backports.shutil_get_terminal_size'],\n ':python_version == \"2.7\" or python_version == \"3.3\"': ['pathlib2'],\n ':sys_platform != \"win32\"': ['pexpect'],\n ':sys_platform == \"darwin\"': ['appnope'],\n ':sys_platform == \"win32\"': ['colorama', 'win_unicode_console'],\n 'test:python_version == \"2.7\"': ['mock'],\n})\n# FIXME: re-specify above platform dependencies for pip < 6\n# These would result in non-portable bdists.\nif not any(arg.startswith('bdist') for arg in sys.argv):\n if sys.version_info < (3, 3):\n extras_require['test'].append('mock')\n\n if sys.platform == 'darwin':\n install_requires.extend(['appnope'])\n have_readline = False\n try:\n import readline\n except ImportError:\n pass\n else:\n if 'libedit' not in readline.__doc__:\n have_readline = True\n if not have_readline:\n install_requires.extend(['gnureadline'])\n\n if sys.platform.startswith('win'):\n extras_require['terminal'].append('pyreadline>=2.0')\n else:\n install_requires.append('pexpect')\n \n # workaround pypa/setuptools#147, where setuptools misspells\n # platform_python_implementation as python_implementation\n if 'setuptools' in sys.modules:\n for key in list(extras_require):\n if 'platform_python_implementation' in key:\n new_key = key.replace('platform_python_implementation', 'python_implementation')\n extras_require[new_key] = extras_require.pop(key)\n\neverything = set()\nfor key, deps in extras_require.items():\n if ':' not in key:\n everything.update(deps)\nextras_require['all'] = everything\n\nif 'setuptools' in sys.modules:\n setuptools_extra_args['zip_safe'] = False\n setuptools_extra_args['entry_points'] = {\n 'console_scripts': find_entry_points(),\n 'pygments.lexers': [\n 'ipythonconsole = IPython.lib.lexers:IPythonConsoleLexer',\n 'ipython = IPython.lib.lexers:IPythonLexer',\n 'ipython3 = IPython.lib.lexers:IPython3Lexer',\n ],\n }\n setup_args['extras_require'] = extras_require\n requires = setup_args['install_requires'] = install_requires\n\n # Script to be run by the windows binary installer after the default setup\n # routine, to add shortcuts and similar windows-only things. Windows\n # post-install scripts MUST reside in the scripts/ dir, otherwise distutils\n # doesn't find them.\n if 'bdist_wininst' in sys.argv:\n if len(sys.argv) > 2 and \\\n ('sdist' in sys.argv or 'bdist_rpm' in sys.argv):\n print(\"ERROR: bdist_wininst must be run alone. Exiting.\", file=sys.stderr)\n sys.exit(1)\n setup_args['data_files'].append(\n ['Scripts', ('scripts/ipython.ico', 'scripts/ipython_nb.ico')])\n setup_args['scripts'] = [pjoin('scripts','ipython_win_post_install.py')]\n setup_args['options'] = {\"bdist_wininst\":\n {\"install_script\":\n \"ipython_win_post_install.py\"}}\n\nelse:\n # scripts has to be a non-empty list, or install_scripts isn't called\n setup_args['scripts'] = [e.split('=')[0].strip() for e in find_entry_points()]\n\n setup_args['cmdclass']['build_scripts'] = build_scripts_entrypt\n\n#---------------------------------------------------------------------------\n# Do the actual setup now\n#---------------------------------------------------------------------------\n\nsetup_args.update(setuptools_extra_args)\n\n\n\ndef main():\n setup(**setup_args)\n\nif __name__ == '__main__':\n main()\n",
"path": "setup.py"
}
] | [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Setup script for IPython.\n\nUnder Posix environments it works like a typical setup.py script.\nUnder Windows, the command sdist is not supported, since IPython\nrequires utilities which are not available under Windows.\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (c) 2008-2011, IPython Development Team.\n# Copyright (c) 2001-2007, Fernando Perez <[email protected]>\n# Copyright (c) 2001, Janko Hauser <[email protected]>\n# Copyright (c) 2001, Nathaniel Gray <[email protected]>\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.rst, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Minimal Python version sanity check\n#-----------------------------------------------------------------------------\nfrom __future__ import print_function\n\nimport sys\n\n# This check is also made in IPython/__init__, don't forget to update both when\n# changing Python version requirements.\nv = sys.version_info\nif v[:2] < (2,7) or (v[0] >= 3 and v[:2] < (3,3)):\n error = \"ERROR: IPython requires Python version 2.7 or 3.3 or above.\"\n print(error, file=sys.stderr)\n sys.exit(1)\n\nPY3 = (sys.version_info[0] >= 3)\n\n# At least we're on the python version we need, move on.\n\n#-------------------------------------------------------------------------------\n# Imports\n#-------------------------------------------------------------------------------\n\n# Stdlib imports\nimport os\n\nfrom glob import glob\n\n# BEFORE importing distutils, remove MANIFEST. distutils doesn't properly\n# update it when the contents of directories change.\nif os.path.exists('MANIFEST'): os.remove('MANIFEST')\n\nfrom distutils.core import setup\n\n# Our own imports\nfrom setupbase import target_update\n\nfrom setupbase import (\n setup_args,\n find_packages,\n find_package_data,\n check_package_data_first,\n find_entry_points,\n build_scripts_entrypt,\n find_data_files,\n git_prebuild,\n install_symlinked,\n install_lib_symlink,\n install_scripts_for_symlink,\n unsymlink,\n)\n\nisfile = os.path.isfile\npjoin = os.path.join\n\n#-------------------------------------------------------------------------------\n# Handle OS specific things\n#-------------------------------------------------------------------------------\n\nif os.name in ('nt','dos'):\n os_name = 'windows'\nelse:\n os_name = os.name\n\n# Under Windows, 'sdist' has not been supported. Now that the docs build with\n# Sphinx it might work, but let's not turn it on until someone confirms that it\n# actually works.\nif os_name == 'windows' and 'sdist' in sys.argv:\n print('The sdist command is not available under Windows. Exiting.')\n sys.exit(1)\n\n\n#-------------------------------------------------------------------------------\n# Things related to the IPython documentation\n#-------------------------------------------------------------------------------\n\n# update the manuals when building a source dist\nif len(sys.argv) >= 2 and sys.argv[1] in ('sdist','bdist_rpm'):\n\n # List of things to be updated. 
Each entry is a triplet of args for\n # target_update()\n to_update = [\n ('docs/man/ipython.1.gz',\n ['docs/man/ipython.1'],\n 'cd docs/man && gzip -9c ipython.1 > ipython.1.gz'),\n ]\n\n\n [ target_update(*t) for t in to_update ]\n\n#---------------------------------------------------------------------------\n# Find all the packages, package data, and data_files\n#---------------------------------------------------------------------------\n\npackages = find_packages()\npackage_data = find_package_data()\n\ndata_files = find_data_files()\n\nsetup_args['packages'] = packages\nsetup_args['package_data'] = package_data\nsetup_args['data_files'] = data_files\n\n#---------------------------------------------------------------------------\n# custom distutils commands\n#---------------------------------------------------------------------------\n# imports here, so they are after setuptools import if there was one\nfrom distutils.command.sdist import sdist\nfrom distutils.command.upload import upload\n\nclass UploadWindowsInstallers(upload):\n\n description = \"Upload Windows installers to PyPI (only used from tools/release_windows.py)\"\n user_options = upload.user_options + [\n ('files=', 'f', 'exe file (or glob) to upload')\n ]\n def initialize_options(self):\n upload.initialize_options(self)\n meta = self.distribution.metadata\n base = '{name}-{version}'.format(\n name=meta.get_name(),\n version=meta.get_version()\n )\n self.files = os.path.join('dist', '%s.*.exe' % base)\n\n def run(self):\n for dist_file in glob(self.files):\n self.upload_file('bdist_wininst', 'any', dist_file)\n\nsetup_args['cmdclass'] = {\n 'build_py': \\\n check_package_data_first(git_prebuild('IPython')),\n 'sdist' : git_prebuild('IPython', sdist),\n 'upload_wininst' : UploadWindowsInstallers,\n 'symlink': install_symlinked,\n 'install_lib_symlink': install_lib_symlink,\n 'install_scripts_sym': install_scripts_for_symlink,\n 'unsymlink': unsymlink,\n}\n\n\n#---------------------------------------------------------------------------\n# Handle scripts, dependencies, and setuptools specific things\n#---------------------------------------------------------------------------\n\n# For some commands, use setuptools. Note that we do NOT list install here!\n# If you want a setuptools-enhanced install, just run 'setupegg.py install'\nneeds_setuptools = set(('develop', 'release', 'bdist_egg', 'bdist_rpm',\n 'bdist', 'bdist_dumb', 'bdist_wininst', 'bdist_wheel',\n 'egg_info', 'easy_install', 'upload', 'install_egg_info',\n ))\n\nif len(needs_setuptools.intersection(sys.argv)) > 0:\n import setuptools\n\n# This dict is used for passing extra arguments that are setuptools\n# specific to setup\nsetuptools_extra_args = {}\n\n# setuptools requirements\n\nextras_require = dict(\n parallel = ['ipyparallel'],\n qtconsole = ['qtconsole'],\n doc = ['Sphinx>=1.3'],\n test = ['nose>=0.10.1', 'requests', 'testpath', 'pygments', 'nbformat', 'ipykernel', 'numpy'],\n terminal = [],\n kernel = ['ipykernel'],\n nbformat = ['nbformat'],\n notebook = ['notebook', 'ipywidgets'],\n nbconvert = ['nbconvert'],\n)\n\ninstall_requires = [\n 'setuptools>=18.5',\n 'decorator',\n 'pickleshare',\n 'simplegeneric>0.8',\n 'traitlets>=4.2',\n 'prompt_toolkit>=1.0.3,<2.0.0',\n 'pygments',\n]\n\n# Platform-specific dependencies:\n# This is the correct way to specify these,\n# but requires pip >= 6. 
pip < 6 ignores these.\n\nextras_require.update({\n ':python_version == \"2.7\"': ['backports.shutil_get_terminal_size'],\n ':python_version == \"2.7\" or python_version == \"3.3\"': ['pathlib2'],\n ':sys_platform != \"win32\"': ['pexpect'],\n ':sys_platform == \"darwin\"': ['appnope'],\n ':sys_platform == \"win32\"': ['colorama', 'win_unicode_console'],\n 'test:python_version == \"2.7\"': ['mock'],\n})\n# FIXME: re-specify above platform dependencies for pip < 6\n# These would result in non-portable bdists.\nif not any(arg.startswith('bdist') for arg in sys.argv):\n if sys.version_info < (3, 3):\n extras_require['test'].append('mock')\n\n if sys.platform == 'darwin':\n install_requires.extend(['appnope'])\n have_readline = False\n try:\n import readline\n except ImportError:\n pass\n else:\n if 'libedit' not in readline.__doc__:\n have_readline = True\n if not have_readline:\n install_requires.extend(['gnureadline'])\n\n if sys.platform.startswith('win'):\n extras_require['terminal'].append('pyreadline>=2.0')\n else:\n install_requires.append('pexpect')\n \n # workaround pypa/setuptools#147, where setuptools misspells\n # platform_python_implementation as python_implementation\n if 'setuptools' in sys.modules:\n for key in list(extras_require):\n if 'platform_python_implementation' in key:\n new_key = key.replace('platform_python_implementation', 'python_implementation')\n extras_require[new_key] = extras_require.pop(key)\n\neverything = set()\nfor key, deps in extras_require.items():\n if ':' not in key:\n everything.update(deps)\nextras_require['all'] = everything\n\nif 'setuptools' in sys.modules:\n setuptools_extra_args['zip_safe'] = False\n setuptools_extra_args['entry_points'] = {\n 'console_scripts': find_entry_points(),\n 'pygments.lexers': [\n 'ipythonconsole = IPython.lib.lexers:IPythonConsoleLexer',\n 'ipython = IPython.lib.lexers:IPythonLexer',\n 'ipython3 = IPython.lib.lexers:IPython3Lexer',\n ],\n }\n setup_args['extras_require'] = extras_require\n requires = setup_args['install_requires'] = install_requires\n\n # Script to be run by the windows binary installer after the default setup\n # routine, to add shortcuts and similar windows-only things. Windows\n # post-install scripts MUST reside in the scripts/ dir, otherwise distutils\n # doesn't find them.\n if 'bdist_wininst' in sys.argv:\n if len(sys.argv) > 2 and \\\n ('sdist' in sys.argv or 'bdist_rpm' in sys.argv):\n print(\"ERROR: bdist_wininst must be run alone. Exiting.\", file=sys.stderr)\n sys.exit(1)\n setup_args['data_files'].append(\n ['Scripts', ('scripts/ipython.ico', 'scripts/ipython_nb.ico')])\n setup_args['scripts'] = [pjoin('scripts','ipython_win_post_install.py')]\n setup_args['options'] = {\"bdist_wininst\":\n {\"install_script\":\n \"ipython_win_post_install.py\"}}\n\nelse:\n # scripts has to be a non-empty list, or install_scripts isn't called\n setup_args['scripts'] = [e.split('=')[0].strip() for e in find_entry_points()]\n\n setup_args['cmdclass']['build_scripts'] = build_scripts_entrypt\n\n#---------------------------------------------------------------------------\n# Do the actual setup now\n#---------------------------------------------------------------------------\n\nsetup_args.update(setuptools_extra_args)\n\n\n\ndef main():\n setup(**setup_args)\n\nif __name__ == '__main__':\n main()\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index 44975df60fb..4a81a4f3b58 100755
--- a/setup.py
+++ b/setup.py
@@ -196,7 +196,7 @@ def run(self):
'pickleshare',
'simplegeneric>0.8',
'traitlets>=4.2',
- 'prompt_toolkit>=1.0.1,<2.0.0',
+ 'prompt_toolkit>=1.0.3,<2.0.0',
'pygments',
]
|
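Editor's note on the setup.py captured in the record above: its "Platform-specific dependencies" section relies on setuptools' conditional extras, where an `extras_require` key of the form `:marker` applies only when the environment marker evaluates true (and pip < 6 simply ignores it, hence the FIXME workaround). The fragment below is a minimal sketch of that pattern only; the package name `example-pkg` is illustrative and not taken from the record.

```python
# Minimal sketch of conditional dependencies via environment-marker keys in
# extras_require, the pattern used by the IPython setup.py above.
from setuptools import setup

setup(
    name="example-pkg",  # placeholder name for illustration
    version="0.1",
    install_requires=["decorator"],
    extras_require={
        # installed only on Windows
        ':sys_platform == "win32"': ["colorama"],
        # installed only under Python 2.7
        ':python_version == "2.7"': ["backports.shutil_get_terminal_size"],
    },
)
```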
PyGithub__PyGithub-557 | GitHub Integration raises "NotImplementedError Algorithm not supported"
We have working github integration code using PyGithub v1.32 that does essentially:
```python
integration = github.GithubIntegration(settings.GITHUB_INTEGRATION_ID, settings.GITHUB_INTEGRATION_PRIVATE_PEM)
inst_token = integration.get_access_token(installation_id).token
```
After upgrading to v1.34, this code raises "NotImplementedError Algorithm not supported".
I suspect it has to do with the [switch to pyjwt from python-jose](https://github.com/PyGithub/PyGithub/commit/d447eb13b9f4688a4c981ca03b1b3111fb299142); a minimal sketch of the eventual fix follows this record.
| [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# ########################## Copyrights and license ############################\n# #\n# Copyright 2012 Vincent Jacques <[email protected]> #\n# Copyright 2012 Zearin <[email protected]> #\n# Copyright 2013 Vincent Jacques <[email protected]> #\n# #\n# This file is part of PyGithub. #\n# http://pygithub.github.io/PyGithub/v1/index.html #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #\n# #\n# ##############################################################################\n\nimport setuptools\nimport textwrap\n\nversion = \"1.34\"\n\n\nif __name__ == \"__main__\":\n setuptools.setup(\n name=\"PyGithub\",\n version=version,\n description=\"Use the full Github API v3\",\n author=\"Vincent Jacques\",\n author_email=\"[email protected]\",\n url=\"http://pygithub.github.io/PyGithub/v1/index.html\",\n long_description=textwrap.dedent(\"\"\"\\\n (Very short) Tutorial\n =====================\n\n First create a Github instance::\n\n from github import Github\n\n g = Github(\"user\", \"password\")\n\n Then play with your Github objects::\n\n for repo in g.get_user().get_repos():\n print repo.name\n repo.edit(has_wiki=False)\n\n You can also create a Github instance with an OAuth token::\n\n g = Github(token)\n\n Or without authentication::\n\n g = Github()\n\n Reference documentation\n =======================\n\n See http://pygithub.github.io/PyGithub/v1/index.html\"\"\"),\n packages=[\n \"github\",\n \"github.tests\",\n ],\n package_data={\n \"github\": [\"tests/ReplayData/*.txt\"]\n },\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.5\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.2\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Topic :: Software Development\",\n ],\n test_suite=\"github.tests.AllTests\",\n use_2to3=True,\n install_requires=[\n \"pyjwt\"\n ]\n )\n",
"path": "setup.py"
}
] | [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# ########################## Copyrights and license ############################\n# #\n# Copyright 2012 Vincent Jacques <[email protected]> #\n# Copyright 2012 Zearin <[email protected]> #\n# Copyright 2013 Vincent Jacques <[email protected]> #\n# #\n# This file is part of PyGithub. #\n# http://pygithub.github.io/PyGithub/v1/index.html #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #\n# #\n# ##############################################################################\n\nimport setuptools\nimport textwrap\n\nversion = \"1.34\"\n\n\nif __name__ == \"__main__\":\n setuptools.setup(\n name=\"PyGithub\",\n version=version,\n description=\"Use the full Github API v3\",\n author=\"Vincent Jacques\",\n author_email=\"[email protected]\",\n url=\"http://pygithub.github.io/PyGithub/v1/index.html\",\n long_description=textwrap.dedent(\"\"\"\\\n (Very short) Tutorial\n =====================\n\n First create a Github instance::\n\n from github import Github\n\n g = Github(\"user\", \"password\")\n\n Then play with your Github objects::\n\n for repo in g.get_user().get_repos():\n print repo.name\n repo.edit(has_wiki=False)\n\n You can also create a Github instance with an OAuth token::\n\n g = Github(token)\n\n Or without authentication::\n\n g = Github()\n\n Reference documentation\n =======================\n\n See http://pygithub.github.io/PyGithub/v1/index.html\"\"\"),\n packages=[\n \"github\",\n \"github.tests\",\n ],\n package_data={\n \"github\": [\"tests/ReplayData/*.txt\"]\n },\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.5\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.2\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Topic :: Software Development\",\n ],\n test_suite=\"github.tests.AllTests\",\n use_2to3=True,\n install_requires=[\n \"pyjwt\"\n ],\n extras_require = {\n \"integrations\": [\"cryptography\"]\n }\n )\n",
"path": "setup.py"
}
] | diff --git a/doc/introduction.rst b/doc/introduction.rst
index 85ff523cf4..0bf2ea9f30 100644
--- a/doc/introduction.rst
+++ b/doc/introduction.rst
@@ -31,6 +31,9 @@ This package is in the `Python Package Index
be enough. You can also clone it on `Github
<http://github.com/PyGithub/PyGithub>`__.
+If you wish to use GitHub Integrations, you'll want to be sure to install the
+'integrations' option: ``pip install PyGithub['integrations']``
+
Licensing
---------
diff --git a/setup.py b/setup.py
index 7f221382c4..ee1cb00df7 100755
--- a/setup.py
+++ b/setup.py
@@ -96,5 +96,8 @@
use_2to3=True,
install_requires=[
"pyjwt"
- ]
+ ],
+ extras_require = {
+ "integrations": ["cryptography"]
+ }
)
|
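The issue above suspects the switch to pyjwt, and that matches the fix in the record's diff: pyjwt only supports RS256 signing when the optional `cryptography` package is installed, and without it raises "Algorithm not supported". The sketch below mirrors the `extras_require` change shown in the after-file; it is an illustrative fragment, not the full PyGithub setup.py.

```python
# Illustrative fragment mirroring the fix in the diff above: declare the RSA
# signing backend as an optional extra, so users can run
#   pip install PyGithub[integrations]
# and get the cryptography package that pyjwt needs for RS256.
from setuptools import setup

setup(
    name="PyGithub",
    version="1.34",
    install_requires=["pyjwt"],
    extras_require={"integrations": ["cryptography"]},
)
```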
mlcommons__GaNDLF-477 | Add histology exception tests
**Is your feature request related to a problem? Please describe.**
Currently, the histology inference pipeline contains a lot of exceptions, but they aren't being tested.
**Describe the solution you'd like**
See title.
**Describe alternatives you've considered**
N.A.
**Additional context**
N.A.
| [
{
"content": "#!/usr/bin/env python\n\n\"\"\"The setup script.\"\"\"\n\n\nimport os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.install import install\nfrom setuptools.command.develop import develop\nfrom setuptools.command.egg_info import egg_info\n\nwith open(\"README.md\") as readme_file:\n readme = readme_file.read()\n\n\ndef git_submodule_update():\n ## submodule update\n os.system(\"git submodule update --init --recursive\")\n\n\nclass CustomInstallCommand(install):\n def run(self):\n install.run(self)\n git_submodule_update()\n\n\nclass CustomDevelopCommand(develop):\n def run(self):\n develop.run(self)\n git_submodule_update()\n\n\nclass CustomEggInfoCommand(egg_info):\n def run(self):\n egg_info.run(self)\n git_submodule_update()\n\n\n# read version.py\nimport sys, re\n\ntry:\n filepath = \"GANDLF/version.py\"\n version_file = open(filepath)\n (__version__,) = re.findall('__version__ = \"(.*)\"', version_file.read())\n\nexcept Exception as error:\n __version__ = \"0.0.1\"\n sys.stderr.write(\"Warning: Could not open '%s' due %s\\n\" % (filepath, error))\n\nrequirements = [\n \"black\",\n \"numpy==1.22.0\",\n \"scipy\",\n \"SimpleITK!=2.0.*\",\n \"torchvision\",\n \"tqdm\",\n \"torchio==0.18.57\",\n \"pandas\",\n \"pylint\",\n \"scikit-learn>=0.23.2\",\n \"scikit-image>=0.19.1\",\n 'pickle5>=0.0.11; python_version < \"3.8.0\"',\n \"setuptools\",\n \"seaborn\",\n \"pyyaml\",\n \"tiffslide\",\n \"matplotlib\",\n \"requests>=2.25.0\",\n \"pyvips\",\n \"pytest\",\n \"coverage\",\n \"pytest-cov\",\n \"psutil\",\n \"medcam\",\n \"opencv-python\",\n \"torchmetrics==0.5.1\", # newer versions have changed api for f1 invocation\n \"OpenPatchMiner==0.1.8\",\n \"zarr==2.10.3\",\n \"pydicom\",\n \"onnx\",\n \"torchinfo==1.7.0\",\n]\n\n# pytorch doesn't have LTS support on OSX - https://github.com/CBICA/GaNDLF/issues/389\nif sys.platform == \"darwin\":\n requirements.append(\"torch==1.9.0\")\nelse:\n requirements.append(\"torch==1.8.2\")\n\nsetup(\n name=\"GANDLF\",\n version=__version__,\n author=\"Jose Agraz, Vinayak Ahluwalia, Bhakti Baheti, Spyridon Bakas, Ujjwal Baid, Megh Bhalerao, Brandon Edwards, Karol Gotkowski, Caleb Grenko, Orhun Güley, Ibrahim Ethem Hamamci, Sarthak Pati, Micah Sheller, Juliia Skobleva, Siddhesh Thakur, Spiros Thermos\", # alphabetical order\n author_email=\"[email protected]\",\n python_requires=\">=3.7\",\n packages=find_packages(),\n cmdclass={ # this ensures git_submodule_update is called during install\n \"install\": CustomInstallCommand,\n \"develop\": CustomDevelopCommand,\n \"egg_info\": CustomEggInfoCommand,\n },\n scripts=[\n \"gandlf_run\",\n \"gandlf_constructCSV\",\n \"gandlf_collectStats\",\n \"gandlf_patchMiner\",\n \"gandlf_preprocess\",\n \"gandlf_anonymizer\",\n \"gandlf_verifyInstall\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Scientific/Engineering :: Medical Science Apps\",\n ],\n description=(\n \"PyTorch-based framework that handles segmentation/regression/classification using various DL architectures for medical imaging.\"\n ),\n install_requires=requirements,\n license=\"BSD-3-Clause License\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n 
include_package_data=True,\n keywords=\"semantic, segmentation, regression, classification, data-augmentation, medical-imaging\",\n zip_safe=False,\n)\n\n## windows vips installation\nif os.name == \"nt\": # proceed for windows\n from pathlib import Path\n\n # download and extract if main dll is absent\n if not Path(\"./vips/vips-dev-8.10/bin/libvips-42.dll\").exists():\n print(\"Downloading and extracting VIPS for Windows\")\n url = \"https://github.com/libvips/libvips/releases/download/v8.10.2/vips-dev-w64-all-8.10.2.zip\"\n zip_to_extract = \"./vips.zip\"\n import urllib.request, zipfile\n\n urllib.request.urlretrieve(url, zip_to_extract)\n z = zipfile.ZipFile(zip_to_extract)\n z.extractall(\"./vips\")\n z.close()\n os.remove(zip_to_extract)\n",
"path": "setup.py"
}
] | [
{
"content": "#!/usr/bin/env python\n\n\"\"\"The setup script.\"\"\"\n\n\nimport os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.install import install\nfrom setuptools.command.develop import develop\nfrom setuptools.command.egg_info import egg_info\n\nwith open(\"README.md\") as readme_file:\n readme = readme_file.read()\n\n\ndef git_submodule_update():\n ## submodule update\n os.system(\"git submodule update --init --recursive\")\n\n\nclass CustomInstallCommand(install):\n def run(self):\n install.run(self)\n git_submodule_update()\n\n\nclass CustomDevelopCommand(develop):\n def run(self):\n develop.run(self)\n git_submodule_update()\n\n\nclass CustomEggInfoCommand(egg_info):\n def run(self):\n egg_info.run(self)\n git_submodule_update()\n\n\n# read version.py\nimport sys, re\n\ntry:\n filepath = \"GANDLF/version.py\"\n version_file = open(filepath)\n (__version__,) = re.findall('__version__ = \"(.*)\"', version_file.read())\n\nexcept Exception as error:\n __version__ = \"0.0.1\"\n sys.stderr.write(\"Warning: Could not open '%s' due %s\\n\" % (filepath, error))\n\nrequirements = [\n \"black\",\n \"numpy==1.22.0\",\n \"scipy\",\n \"SimpleITK!=2.0.*\",\n \"torchvision\",\n \"tqdm\",\n \"torchio==0.18.57\",\n \"pandas\",\n \"pylint\",\n \"scikit-learn>=0.23.2\",\n \"scikit-image>=0.19.1\",\n 'pickle5>=0.0.11; python_version < \"3.8.0\"',\n \"setuptools\",\n \"seaborn\",\n \"pyyaml\",\n \"tiffslide\",\n \"matplotlib\",\n \"requests>=2.25.0\",\n \"pyvips==2.2.1\",\n \"pytest\",\n \"coverage\",\n \"pytest-cov\",\n \"psutil\",\n \"medcam\",\n \"opencv-python\",\n \"torchmetrics==0.5.1\", # newer versions have changed api for f1 invocation\n \"OpenPatchMiner==0.1.8\",\n \"zarr==2.10.3\",\n \"pydicom\",\n \"onnx\",\n \"torchinfo==1.7.0\",\n]\n\n# pytorch doesn't have LTS support on OSX - https://github.com/CBICA/GaNDLF/issues/389\nif sys.platform == \"darwin\":\n requirements.append(\"torch==1.9.0\")\nelse:\n requirements.append(\"torch==1.8.2\")\n\nsetup(\n name=\"GANDLF\",\n version=__version__,\n author=\"Jose Agraz, Vinayak Ahluwalia, Bhakti Baheti, Spyridon Bakas, Ujjwal Baid, Megh Bhalerao, Brandon Edwards, Karol Gotkowski, Caleb Grenko, Orhun Güley, Ibrahim Ethem Hamamci, Sarthak Pati, Micah Sheller, Juliia Skobleva, Siddhesh Thakur, Spiros Thermos\", # alphabetical order\n author_email=\"[email protected]\",\n python_requires=\">=3.7\",\n packages=find_packages(),\n cmdclass={ # this ensures git_submodule_update is called during install\n \"install\": CustomInstallCommand,\n \"develop\": CustomDevelopCommand,\n \"egg_info\": CustomEggInfoCommand,\n },\n scripts=[\n \"gandlf_run\",\n \"gandlf_constructCSV\",\n \"gandlf_collectStats\",\n \"gandlf_patchMiner\",\n \"gandlf_preprocess\",\n \"gandlf_anonymizer\",\n \"gandlf_verifyInstall\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Scientific/Engineering :: Medical Science Apps\",\n ],\n description=(\n \"PyTorch-based framework that handles segmentation/regression/classification using various DL architectures for medical imaging.\"\n ),\n install_requires=requirements,\n license=\"BSD-3-Clause License\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n 
include_package_data=True,\n keywords=\"semantic, segmentation, regression, classification, data-augmentation, medical-imaging\",\n zip_safe=False,\n)\n\n## windows vips installation\nif os.name == \"nt\": # proceed for windows\n from pathlib import Path\n\n # download and extract if main dll is absent\n if not Path(\"./vips/vips-dev-8.10/bin/libvips-42.dll\").exists():\n print(\"Downloading and extracting VIPS for Windows\")\n url = \"https://github.com/libvips/libvips/releases/download/v8.10.2/vips-dev-w64-all-8.10.2.zip\"\n zip_to_extract = \"./vips.zip\"\n import urllib.request, zipfile\n\n urllib.request.urlretrieve(url, zip_to_extract)\n z = zipfile.ZipFile(zip_to_extract)\n z.extractall(\"./vips\")\n z.close()\n os.remove(zip_to_extract)\n",
"path": "setup.py"
}
] | diff --git a/.github/workflows/python-test.yml b/.github/workflows/python-test.yml
index de1ee4d91..2d555dfda 100644
--- a/.github/workflows/python-test.yml
+++ b/.github/workflows/python-test.yml
@@ -22,10 +22,11 @@ jobs:
python-version: 3.8
- name: Install dependencies and package
run: |
+ sudo apt-get install libvips -y
python -m pip install --upgrade pip
+ python -m pip install wheel
+ python -m pip install pyvips
python -m pip install openvino-dev==2022.1.0
- python -m pip install torch==1.8.2+cu102 torchvision==0.9.2+cu102 torchaudio===0.8.2 -f https://download.pytorch.org/whl/lts/1.8/torch_lts.html
- $CONDA/bin/conda install -c conda-forge libvips -y
pip3 install torch==1.8.2+cpu torchvision==0.9.2+cpu torchaudio==0.8.2 -f https://download.pytorch.org/whl/lts/1.8/torch_lts.html
pip install -e .
@@ -34,7 +35,10 @@ jobs:
pytest --cov=. --cov-report=xml -k "generic"
- name: Run classification unit tests
run: |
- pytest --cov=. --cov-report=xml --cov-append -k "classification"
+ pytest --cov=. --cov-report=xml --cov-append -k "classification and not histology"
+ - name: Run classification unit tests with histology
+ run: |
+ pytest --cov=. --cov-report=xml --cov-append -k "classification and histology"
- name: Run regression unit tests
run: |
pytest --cov=. --cov-report=xml --cov-append -k "regression"
@@ -44,8 +48,8 @@ jobs:
- name: Run transunet unit tests
run: |
pytest --cov=. --cov-report=xml --cov-append -k "transunet"
-
-
+
+
- name: Upload coverage
uses: codecov/codecov-action@v1
with:
diff --git a/setup.py b/setup.py
index 1db03b25c..f71a3f2fc 100644
--- a/setup.py
+++ b/setup.py
@@ -67,7 +67,7 @@ def run(self):
"tiffslide",
"matplotlib",
"requests>=2.25.0",
- "pyvips",
+ "pyvips==2.2.1",
"pytest",
"coverage",
"pytest-cov",
diff --git a/testing/test_full.py b/testing/test_full.py
index 33afc623f..cefb198bb 100644
--- a/testing/test_full.py
+++ b/testing/test_full.py
@@ -5,6 +5,7 @@
import pandas as pd
from pydicom.data import get_testdata_file
+import pyvips as pv
from GANDLF.data.ImagesFromDataFrame import ImagesFromDataFrame
from GANDLF.utils import *
@@ -1737,8 +1738,125 @@ def test_train_inference_segmentation_histology_2d(device):
print("passed")
+def test_train_inference_classification_histology_large_2d(device):
+ print(
+ "35: Starting histology train/inference classification tests for large images to check exception handling"
+ )
+ # overwrite previous results
+ sanitize_outputDir()
+ output_dir_patches = os.path.join(outputDir, "histo_patches")
+ if os.path.isdir(output_dir_patches):
+ shutil.rmtree(output_dir_patches)
+ Path(output_dir_patches).mkdir(parents=True, exist_ok=True)
+ output_dir_patches_output = os.path.join(output_dir_patches, "histo_patches_output")
+ Path(output_dir_patches_output).mkdir(parents=True, exist_ok=True)
+ file_config_temp = os.path.join(
+ output_dir_patches, "config_patch-extraction_temp.yaml"
+ )
+ # if found in previous run, discard.
+ if os.path.exists(file_config_temp):
+ os.remove(file_config_temp)
+
+ parameters_patch = {}
+ # extracting minimal number of patches to ensure that the test does not take too long
+ parameters_patch["num_patches"] = 3
+ parameters_patch["patch_size"] = [128, 128]
+
+ with open(file_config_temp, "w") as file:
+ yaml.dump(parameters_patch, file)
+
+ # resize the image
+ input_df = pd.read_csv(inputDir + "/train_2d_histo_classification.csv")
+ for _, row in input_df.iterrows():
+ img = pv.Image.new_from_file(row["Channel_0"])
+ img_resize = img.resize(10)
+ new_filename = row["Channel_0"].replace(".tiff", "_resize.tiff")
+ row["Channel_0"] = new_filename
+ img_resize.tiffsave(new_filename)
+
+ input_df.to_csv(inputDir + "/train_2d_histo_classification_resize.csv", index=False)
+
+ patch_extraction(
+ inputDir + "/train_2d_histo_classification_resize.csv",
+ output_dir_patches_output,
+ file_config_temp,
+ )
+
+ file_for_Training = os.path.join(output_dir_patches_output, "opm_train.csv")
+ temp_df = pd.read_csv(file_for_Training)
+ temp_df.drop("Label", axis=1, inplace=True)
+ temp_df["valuetopredict"] = np.random.randint(2, size=6)
+ temp_df.to_csv(file_for_Training, index=False)
+ # read and parse csv
+ parameters = parseConfig(
+ testingDir + "/config_classification.yaml", version_check_flag=False
+ )
+ parameters["modality"] = "histo"
+ parameters["patch_size"] = 128
+ file_config_temp = os.path.join(outputDir, "config_classification_temp.yaml")
+ with open(file_config_temp, "w") as file:
+ yaml.dump(parameters, file)
+ parameters = parseConfig(file_config_temp, version_check_flag=False)
+ parameters["model"]["dimension"] = 2
+ # read and parse csv
+ training_data, parameters["headers"] = parseTrainingCSV(file_for_Training)
+ parameters["model"]["num_channels"] = 3
+ parameters["model"]["architecture"] = "densenet121"
+ parameters["model"]["norm_type"] = "none"
+ parameters["data_preprocessing"]["rgba2rgb"] = ""
+ parameters = populate_header_in_parameters(parameters, parameters["headers"])
+ parameters["nested_training"]["testing"] = 1
+ parameters["nested_training"]["validation"] = -2
+ parameters["model"]["print_summary"] = False
+ modelDir = os.path.join(outputDir, "modelDir")
+ if os.path.isdir(modelDir):
+ shutil.rmtree(modelDir)
+ Path(modelDir).mkdir(parents=True, exist_ok=True)
+ TrainingManager(
+ dataframe=training_data,
+ outputDir=modelDir,
+ parameters=parameters,
+ device=device,
+ resume=False,
+ reset=True,
+ )
+ parameters["output_dir"] = modelDir # this is in inference mode
+ # drop last subject
+ input_df.drop(index=input_df.index[-1], axis=0, inplace=True)
+ input_df.to_csv(inputDir + "/train_2d_histo_classification_resize.csv", index=False)
+ inference_data, parameters["headers"] = parseTrainingCSV(
+ inputDir + "/train_2d_histo_classification_resize.csv", train=False
+ )
+ with pytest.raises(Exception) as exc_info:
+ for model_type in all_model_type:
+ parameters["nested_training"]["testing"] = 1
+ parameters["nested_training"]["validation"] = -2
+ parameters["output_dir"] = modelDir # this is in inference mode
+ inference_data, parameters["headers"] = parseTrainingCSV(
+ inputDir + "/train_2d_histo_segmentation.csv", train=False
+ )
+ parameters["model"]["type"] = model_type
+ InferenceManager(
+ dataframe=inference_data,
+ outputDir=modelDir,
+ parameters=parameters,
+ device=device,
+ )
+ assert (
+ os.path.exists(
+ os.path.join(modelDir, input_df["SubjectID"][0], "predictions.csv")
+ )
+ is True
+ )
+
+ exception_raised = exc_info.value
+ print("Exception raised: ", exception_raised)
+
+ print("passed")
+
+
def test_train_inference_classification_histology_2d(device):
- print("35: Starting histology train/inference classification tests")
+ print("36: Starting histology train/inference classification tests")
# overwrite previous results
sanitize_outputDir()
output_dir_patches = os.path.join(outputDir, "histo_patches")
@@ -1830,7 +1948,7 @@ def test_train_inference_classification_histology_2d(device):
def test_train_segmentation_unet_layerchange_rad_2d(device):
# test case to up code coverage --> test decreasing allowed layers for unet
- print("36: Starting 2D Rad segmentation tests for normtype")
+ print("37: Starting 2D Rad segmentation tests for normtype")
# read and parse csv
# read and initialize parameters for specific data dimension
parameters = parseConfig(
@@ -1877,7 +1995,7 @@ def test_train_segmentation_unet_layerchange_rad_2d(device):
def test_train_segmentation_unetr_rad_3d(device):
- print("37: Testing UNETR for 3D segmentation")
+ print("38: Testing UNETR for 3D segmentation")
parameters = parseConfig(
testingDir + "/config_segmentation.yaml", version_check_flag=False
)
@@ -1933,7 +2051,7 @@ def test_train_segmentation_unetr_rad_3d(device):
def test_train_segmentation_unetr_rad_2d(device):
- print("38: Testing UNETR for 2D segmentation")
+ print("39: Testing UNETR for 2D segmentation")
parameters = parseConfig(
testingDir + "/config_segmentation.yaml", version_check_flag=False
)
@@ -1971,7 +2089,7 @@ def test_train_segmentation_unetr_rad_2d(device):
def test_train_segmentation_transunet_rad_2d(device):
- print("39: Testing TransUNet for 2D segmentation")
+ print("40: Testing TransUNet for 2D segmentation")
parameters = parseConfig(
testingDir + "/config_segmentation.yaml", version_check_flag=False
)
@@ -2020,7 +2138,7 @@ def test_train_segmentation_transunet_rad_2d(device):
def test_train_segmentation_transunet_rad_3d(device):
- print("40: Testing TransUNet for 3D segmentation")
+ print("41: Testing TransUNet for 3D segmentation")
parameters = parseConfig(
testingDir + "/config_segmentation.yaml", version_check_flag=False
)
|
apache__tvm-5851 | [BACKPORT-0.6][BUGFIX] Fixed process termination routine in windows
#4844
| [
{
"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"RPC server implementation.\n\nNote\n----\nServer is TCP based with the following protocol:\n- Initial handshake to the peer\n - [RPC_MAGIC, keysize(int32), key-bytes]\n- The key is in format\n - {server|client}:device-type[:random-key] [-timeout=timeout]\n\"\"\"\n# pylint: disable=invalid-name\n\nfrom __future__ import absolute_import\n\nimport os\nimport ctypes\nimport socket\nimport select\nimport struct\nimport logging\nimport multiprocessing\nimport subprocess\nimport time\nimport sys\nimport signal\n\nfrom .._ffi.function import register_func\nfrom .._ffi.base import py_str\nfrom .._ffi.libinfo import find_lib_path\nfrom ..module import load as _load_module\nfrom ..contrib import util\nfrom . import base\nfrom . base import TrackerCode\n\nlogger = logging.getLogger('RPCServer')\n\ndef _server_env(load_library, work_path=None):\n \"\"\"Server environment function return temp dir\"\"\"\n if work_path:\n temp = work_path\n else:\n temp = util.tempdir()\n\n # pylint: disable=unused-variable\n @register_func(\"tvm.rpc.server.workpath\")\n def get_workpath(path):\n return temp.relpath(path)\n\n @register_func(\"tvm.rpc.server.load_module\", override=True)\n def load_module(file_name):\n \"\"\"Load module from remote side.\"\"\"\n path = temp.relpath(file_name)\n m = _load_module(path)\n logger.info(\"load_module %s\", path)\n return m\n\n libs = []\n load_library = load_library.split(\":\") if load_library else []\n for file_name in load_library:\n file_name = find_lib_path(file_name)[0]\n libs.append(ctypes.CDLL(file_name, ctypes.RTLD_GLOBAL))\n logger.info(\"Load additional library %s\", file_name)\n temp.libs = libs\n return temp\n\ndef _serve_loop(sock, addr, load_library, work_path=None):\n \"\"\"Server loop\"\"\"\n sockfd = sock.fileno()\n temp = _server_env(load_library, work_path)\n base._ServerLoop(sockfd)\n if not work_path:\n temp.remove()\n logger.info(\"Finish serving %s\", addr)\n\ndef _parse_server_opt(opts):\n # parse client options\n ret = {}\n for kv in opts:\n if kv.startswith(\"-timeout=\"):\n ret[\"timeout\"] = float(kv[9:])\n return ret\n\ndef _listen_loop(sock, port, rpc_key, tracker_addr, load_library, custom_addr):\n \"\"\"Listening loop of the server master.\"\"\"\n def _accept_conn(listen_sock, tracker_conn, ping_period=2):\n \"\"\"Accept connection from the other places.\n\n Parameters\n ----------\n listen_sock: Socket\n The socket used by listening process.\n\n tracker_conn : connnection to tracker\n Tracker connection\n\n ping_period : float, optional\n ping tracker every k seconds if no connection is accepted.\n \"\"\"\n old_keyset = set()\n # Report resource to tracker\n if tracker_conn:\n matchkey = base.random_key(rpc_key + \":\")\n 
base.sendjson(tracker_conn,\n [TrackerCode.PUT, rpc_key, (port, matchkey), custom_addr])\n assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS\n else:\n matchkey = rpc_key\n\n unmatch_period_count = 0\n unmatch_timeout = 4\n # Wait until we get a valid connection\n while True:\n if tracker_conn:\n trigger = select.select([listen_sock], [], [], ping_period)\n if not listen_sock in trigger[0]:\n base.sendjson(tracker_conn, [TrackerCode.GET_PENDING_MATCHKEYS])\n pending_keys = base.recvjson(tracker_conn)\n old_keyset.add(matchkey)\n # if match key not in pending key set\n # it means the key is acquired by a client but not used.\n if matchkey not in pending_keys:\n unmatch_period_count += 1\n else:\n unmatch_period_count = 0\n # regenerate match key if key is acquired but not used for a while\n if unmatch_period_count * ping_period > unmatch_timeout + ping_period:\n logger.info(\"no incoming connections, regenerate key ...\")\n matchkey = base.random_key(rpc_key + \":\", old_keyset)\n base.sendjson(tracker_conn,\n [TrackerCode.PUT, rpc_key, (port, matchkey),\n custom_addr])\n assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS\n unmatch_period_count = 0\n continue\n conn, addr = listen_sock.accept()\n magic = struct.unpack(\"<i\", base.recvall(conn, 4))[0]\n if magic != base.RPC_MAGIC:\n conn.close()\n continue\n keylen = struct.unpack(\"<i\", base.recvall(conn, 4))[0]\n key = py_str(base.recvall(conn, keylen))\n arr = key.split()\n expect_header = \"client:\" + matchkey\n server_key = \"server:\" + rpc_key\n if arr[0] != expect_header:\n conn.sendall(struct.pack(\"<i\", base.RPC_CODE_MISMATCH))\n conn.close()\n logger.warning(\"mismatch key from %s\", addr)\n continue\n else:\n conn.sendall(struct.pack(\"<i\", base.RPC_CODE_SUCCESS))\n conn.sendall(struct.pack(\"<i\", len(server_key)))\n conn.sendall(server_key.encode(\"utf-8\"))\n return conn, addr, _parse_server_opt(arr[1:])\n\n # Server logic\n tracker_conn = None\n while True:\n try:\n # step 1: setup tracker and report to tracker\n if tracker_addr and tracker_conn is None:\n tracker_conn = base.connect_with_retry(tracker_addr)\n tracker_conn.sendall(struct.pack(\"<i\", base.RPC_TRACKER_MAGIC))\n magic = struct.unpack(\"<i\", base.recvall(tracker_conn, 4))[0]\n if magic != base.RPC_TRACKER_MAGIC:\n raise RuntimeError(\"%s is not RPC Tracker\" % str(tracker_addr))\n # report status of current queue\n cinfo = {\"key\" : \"server:\" + rpc_key}\n base.sendjson(tracker_conn,\n [TrackerCode.UPDATE_INFO, cinfo])\n assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS\n\n # step 2: wait for in-coming connections\n conn, addr, opts = _accept_conn(sock, tracker_conn)\n except (socket.error, IOError):\n # retry when tracker is dropped\n if tracker_conn:\n tracker_conn.close()\n tracker_conn = None\n continue\n except RuntimeError as exc:\n raise exc\n\n # step 3: serving\n work_path = util.tempdir()\n logger.info(\"connection from %s\", addr)\n server_proc = multiprocessing.Process(target=_serve_loop,\n args=(conn, addr, load_library, work_path))\n server_proc.deamon = True\n server_proc.start()\n # close from our side.\n conn.close()\n # wait until server process finish or timeout\n server_proc.join(opts.get(\"timeout\", None))\n if server_proc.is_alive():\n logger.info(\"Timeout in RPC session, kill..\")\n import psutil\n parent = psutil.Process(server_proc.pid)\n # terminate worker childs\n for child in parent.children(recursive=True):\n child.terminate()\n # terminate the worker\n server_proc.terminate()\n 
work_path.remove()\n\n\ndef _connect_proxy_loop(addr, key, load_library):\n key = \"server:\" + key\n retry_count = 0\n max_retry = 5\n retry_period = 5\n while True:\n try:\n sock = socket.socket(base.get_addr_family(addr), socket.SOCK_STREAM)\n sock.connect(addr)\n sock.sendall(struct.pack(\"<i\", base.RPC_MAGIC))\n sock.sendall(struct.pack(\"<i\", len(key)))\n sock.sendall(key.encode(\"utf-8\"))\n magic = struct.unpack(\"<i\", base.recvall(sock, 4))[0]\n if magic == base.RPC_CODE_DUPLICATE:\n raise RuntimeError(\"key: %s has already been used in proxy\" % key)\n elif magic == base.RPC_CODE_MISMATCH:\n logger.warning(\"RPCProxy do not have matching client key %s\", key)\n elif magic != base.RPC_CODE_SUCCESS:\n raise RuntimeError(\"%s is not RPC Proxy\" % str(addr))\n keylen = struct.unpack(\"<i\", base.recvall(sock, 4))[0]\n remote_key = py_str(base.recvall(sock, keylen))\n opts = _parse_server_opt(remote_key.split()[1:])\n logger.info(\"connected to %s\", str(addr))\n process = multiprocessing.Process(\n target=_serve_loop, args=(sock, addr, load_library))\n process.deamon = True\n process.start()\n sock.close()\n process.join(opts.get(\"timeout\", None))\n if process.is_alive():\n logger.info(\"Timeout in RPC session, kill..\")\n process.terminate()\n retry_count = 0\n except (socket.error, IOError) as err:\n retry_count += 1\n logger.warning(\"Error encountered %s, retry in %g sec\", str(err), retry_period)\n if retry_count > max_retry:\n raise RuntimeError(\"Maximum retry error: last error: %s\" % str(err))\n time.sleep(retry_period)\n\ndef _popen(cmd):\n proc = subprocess.Popen(cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n env=os.environ)\n (out, _) = proc.communicate()\n if proc.returncode != 0:\n msg = \"Server invoke error:\\n\"\n msg += out\n raise RuntimeError(msg)\n\n\nclass Server(object):\n \"\"\"Start RPC server on a separate process.\n\n This is a simple python implementation based on multi-processing.\n It is also possible to implement a similar C based server with\n TVM runtime which does not depend on the python.\n\n Parameters\n ----------\n host : str\n The host url of the server.\n\n port : int\n The port to be bind to\n\n port_end : int, optional\n The end port to search\n\n is_proxy : bool, optional\n Whether the address specified is a proxy.\n If this is true, the host and port actually corresponds to the\n address of the proxy server.\n\n use_popen : bool, optional\n Whether to use Popen to start a fresh new process instead of fork.\n This is recommended to switch on if we want to do local RPC demonstration\n for GPU devices to avoid fork safety issues.\n\n tracker_addr: Tuple (str, int) , optional\n The address of RPC Tracker in tuple(host, ip) format.\n If is not None, the server will register itself to the tracker.\n\n key : str, optional\n The key used to identify the device type in tracker.\n\n load_library : str, optional\n List of additional libraries to be loaded during execution.\n\n custom_addr: str, optional\n Custom IP Address to Report to RPC Tracker\n\n silent: bool, optional\n Whether run this server in silent mode.\n \"\"\"\n def __init__(self,\n host,\n port=9091,\n port_end=9199,\n is_proxy=False,\n use_popen=False,\n tracker_addr=None,\n key=\"\",\n load_library=None,\n custom_addr=None,\n silent=False):\n try:\n if base._ServerLoop is None:\n raise RuntimeError(\"Please compile with USE_RPC=1\")\n except NameError:\n raise RuntimeError(\"Please compile with USE_RPC=1\")\n self.host = host\n self.port = port\n self.libs = []\n 
self.custom_addr = custom_addr\n self.use_popen = use_popen\n\n if silent:\n logger.setLevel(logging.ERROR)\n\n if use_popen:\n cmd = [sys.executable,\n \"-m\", \"tvm.exec.rpc_server\",\n \"--host=%s\" % host,\n \"--port=%s\" % port]\n if tracker_addr:\n assert key\n cmd += [\"--tracker=%s:%d\" % tracker_addr,\n \"--key=%s\" % key]\n if load_library:\n cmd += [\"--load-library\", load_library]\n if custom_addr:\n cmd += [\"--custom-addr\", custom_addr]\n if silent:\n cmd += [\"--silent\"]\n\n # prexec_fn is not thread safe and may result in deadlock.\n # python 3.2 introduced the start_new_session parameter as\n # an alternative to the common use case of\n # prexec_fn=os.setsid. Once the minimum version of python\n # supported by TVM reaches python 3.2 this code can be\n # rewritten in favour of start_new_session. In the\n # interim, stop the pylint diagnostic.\n #\n # pylint: disable=subprocess-popen-preexec-fn\n self.proc = subprocess.Popen(cmd, preexec_fn=os.setsid)\n time.sleep(0.5)\n elif not is_proxy:\n sock = socket.socket(base.get_addr_family((host, port)), socket.SOCK_STREAM)\n self.port = None\n for my_port in range(port, port_end):\n try:\n sock.bind((host, my_port))\n self.port = my_port\n break\n except socket.error as sock_err:\n if sock_err.errno in [98, 48]:\n continue\n else:\n raise sock_err\n if not self.port:\n raise ValueError(\"cannot bind to any port in [%d, %d)\" % (port, port_end))\n logger.info(\"bind to %s:%d\", host, self.port)\n sock.listen(1)\n self.sock = sock\n self.proc = multiprocessing.Process(\n target=_listen_loop, args=(\n self.sock, self.port, key, tracker_addr, load_library,\n self.custom_addr))\n self.proc.deamon = True\n self.proc.start()\n else:\n self.proc = multiprocessing.Process(\n target=_connect_proxy_loop, args=((host, port), key, load_library))\n self.proc.deamon = True\n self.proc.start()\n\n def terminate(self):\n \"\"\"Terminate the server process\"\"\"\n if self.use_popen:\n if self.proc:\n if platform.system() == \"Windows\":\n os.kill(self.proc.pid, signal.CTRL_C_EVENT)\n else:\n os.killpg(self.proc.pid, signal.SIGTERM)\n self.proc = None\n else:\n if self.proc:\n self.proc.terminate()\n self.proc = None\n\n def __del__(self):\n self.terminate()\n",
"path": "python/tvm/rpc/server.py"
}
] | [
{
"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"RPC server implementation.\n\nNote\n----\nServer is TCP based with the following protocol:\n- Initial handshake to the peer\n - [RPC_MAGIC, keysize(int32), key-bytes]\n- The key is in format\n - {server|client}:device-type[:random-key] [-timeout=timeout]\n\"\"\"\n# pylint: disable=invalid-name\n\nfrom __future__ import absolute_import\n\nimport os\nimport ctypes\nimport socket\nimport select\nimport struct\nimport logging\nimport multiprocessing\nimport subprocess\nimport time\nimport sys\nimport signal\nimport platform\n\nfrom .._ffi.function import register_func\nfrom .._ffi.base import py_str\nfrom .._ffi.libinfo import find_lib_path\nfrom ..module import load as _load_module\nfrom ..contrib import util\nfrom . import base\nfrom . base import TrackerCode\n\nlogger = logging.getLogger('RPCServer')\n\ndef _server_env(load_library, work_path=None):\n \"\"\"Server environment function return temp dir\"\"\"\n if work_path:\n temp = work_path\n else:\n temp = util.tempdir()\n\n # pylint: disable=unused-variable\n @register_func(\"tvm.rpc.server.workpath\")\n def get_workpath(path):\n return temp.relpath(path)\n\n @register_func(\"tvm.rpc.server.load_module\", override=True)\n def load_module(file_name):\n \"\"\"Load module from remote side.\"\"\"\n path = temp.relpath(file_name)\n m = _load_module(path)\n logger.info(\"load_module %s\", path)\n return m\n\n libs = []\n load_library = load_library.split(\":\") if load_library else []\n for file_name in load_library:\n file_name = find_lib_path(file_name)[0]\n libs.append(ctypes.CDLL(file_name, ctypes.RTLD_GLOBAL))\n logger.info(\"Load additional library %s\", file_name)\n temp.libs = libs\n return temp\n\ndef _serve_loop(sock, addr, load_library, work_path=None):\n \"\"\"Server loop\"\"\"\n sockfd = sock.fileno()\n temp = _server_env(load_library, work_path)\n base._ServerLoop(sockfd)\n if not work_path:\n temp.remove()\n logger.info(\"Finish serving %s\", addr)\n\ndef _parse_server_opt(opts):\n # parse client options\n ret = {}\n for kv in opts:\n if kv.startswith(\"-timeout=\"):\n ret[\"timeout\"] = float(kv[9:])\n return ret\n\ndef _listen_loop(sock, port, rpc_key, tracker_addr, load_library, custom_addr):\n \"\"\"Listening loop of the server master.\"\"\"\n def _accept_conn(listen_sock, tracker_conn, ping_period=2):\n \"\"\"Accept connection from the other places.\n\n Parameters\n ----------\n listen_sock: Socket\n The socket used by listening process.\n\n tracker_conn : connnection to tracker\n Tracker connection\n\n ping_period : float, optional\n ping tracker every k seconds if no connection is accepted.\n \"\"\"\n old_keyset = set()\n # Report resource to tracker\n if tracker_conn:\n matchkey = base.random_key(rpc_key + \":\")\n 
base.sendjson(tracker_conn,\n [TrackerCode.PUT, rpc_key, (port, matchkey), custom_addr])\n assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS\n else:\n matchkey = rpc_key\n\n unmatch_period_count = 0\n unmatch_timeout = 4\n # Wait until we get a valid connection\n while True:\n if tracker_conn:\n trigger = select.select([listen_sock], [], [], ping_period)\n if not listen_sock in trigger[0]:\n base.sendjson(tracker_conn, [TrackerCode.GET_PENDING_MATCHKEYS])\n pending_keys = base.recvjson(tracker_conn)\n old_keyset.add(matchkey)\n # if match key not in pending key set\n # it means the key is acquired by a client but not used.\n if matchkey not in pending_keys:\n unmatch_period_count += 1\n else:\n unmatch_period_count = 0\n # regenerate match key if key is acquired but not used for a while\n if unmatch_period_count * ping_period > unmatch_timeout + ping_period:\n logger.info(\"no incoming connections, regenerate key ...\")\n matchkey = base.random_key(rpc_key + \":\", old_keyset)\n base.sendjson(tracker_conn,\n [TrackerCode.PUT, rpc_key, (port, matchkey),\n custom_addr])\n assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS\n unmatch_period_count = 0\n continue\n conn, addr = listen_sock.accept()\n magic = struct.unpack(\"<i\", base.recvall(conn, 4))[0]\n if magic != base.RPC_MAGIC:\n conn.close()\n continue\n keylen = struct.unpack(\"<i\", base.recvall(conn, 4))[0]\n key = py_str(base.recvall(conn, keylen))\n arr = key.split()\n expect_header = \"client:\" + matchkey\n server_key = \"server:\" + rpc_key\n if arr[0] != expect_header:\n conn.sendall(struct.pack(\"<i\", base.RPC_CODE_MISMATCH))\n conn.close()\n logger.warning(\"mismatch key from %s\", addr)\n continue\n else:\n conn.sendall(struct.pack(\"<i\", base.RPC_CODE_SUCCESS))\n conn.sendall(struct.pack(\"<i\", len(server_key)))\n conn.sendall(server_key.encode(\"utf-8\"))\n return conn, addr, _parse_server_opt(arr[1:])\n\n # Server logic\n tracker_conn = None\n while True:\n try:\n # step 1: setup tracker and report to tracker\n if tracker_addr and tracker_conn is None:\n tracker_conn = base.connect_with_retry(tracker_addr)\n tracker_conn.sendall(struct.pack(\"<i\", base.RPC_TRACKER_MAGIC))\n magic = struct.unpack(\"<i\", base.recvall(tracker_conn, 4))[0]\n if magic != base.RPC_TRACKER_MAGIC:\n raise RuntimeError(\"%s is not RPC Tracker\" % str(tracker_addr))\n # report status of current queue\n cinfo = {\"key\" : \"server:\" + rpc_key}\n base.sendjson(tracker_conn,\n [TrackerCode.UPDATE_INFO, cinfo])\n assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS\n\n # step 2: wait for in-coming connections\n conn, addr, opts = _accept_conn(sock, tracker_conn)\n except (socket.error, IOError):\n # retry when tracker is dropped\n if tracker_conn:\n tracker_conn.close()\n tracker_conn = None\n continue\n except RuntimeError as exc:\n raise exc\n\n # step 3: serving\n work_path = util.tempdir()\n logger.info(\"connection from %s\", addr)\n server_proc = multiprocessing.Process(target=_serve_loop,\n args=(conn, addr, load_library, work_path))\n server_proc.deamon = True\n server_proc.start()\n # close from our side.\n conn.close()\n # wait until server process finish or timeout\n server_proc.join(opts.get(\"timeout\", None))\n if server_proc.is_alive():\n logger.info(\"Timeout in RPC session, kill..\")\n import psutil\n parent = psutil.Process(server_proc.pid)\n # terminate worker childs\n for child in parent.children(recursive=True):\n child.terminate()\n # terminate the worker\n server_proc.terminate()\n 
work_path.remove()\n\n\ndef _connect_proxy_loop(addr, key, load_library):\n key = \"server:\" + key\n retry_count = 0\n max_retry = 5\n retry_period = 5\n while True:\n try:\n sock = socket.socket(base.get_addr_family(addr), socket.SOCK_STREAM)\n sock.connect(addr)\n sock.sendall(struct.pack(\"<i\", base.RPC_MAGIC))\n sock.sendall(struct.pack(\"<i\", len(key)))\n sock.sendall(key.encode(\"utf-8\"))\n magic = struct.unpack(\"<i\", base.recvall(sock, 4))[0]\n if magic == base.RPC_CODE_DUPLICATE:\n raise RuntimeError(\"key: %s has already been used in proxy\" % key)\n elif magic == base.RPC_CODE_MISMATCH:\n logger.warning(\"RPCProxy do not have matching client key %s\", key)\n elif magic != base.RPC_CODE_SUCCESS:\n raise RuntimeError(\"%s is not RPC Proxy\" % str(addr))\n keylen = struct.unpack(\"<i\", base.recvall(sock, 4))[0]\n remote_key = py_str(base.recvall(sock, keylen))\n opts = _parse_server_opt(remote_key.split()[1:])\n logger.info(\"connected to %s\", str(addr))\n process = multiprocessing.Process(\n target=_serve_loop, args=(sock, addr, load_library))\n process.deamon = True\n process.start()\n sock.close()\n process.join(opts.get(\"timeout\", None))\n if process.is_alive():\n logger.info(\"Timeout in RPC session, kill..\")\n process.terminate()\n retry_count = 0\n except (socket.error, IOError) as err:\n retry_count += 1\n logger.warning(\"Error encountered %s, retry in %g sec\", str(err), retry_period)\n if retry_count > max_retry:\n raise RuntimeError(\"Maximum retry error: last error: %s\" % str(err))\n time.sleep(retry_period)\n\ndef _popen(cmd):\n proc = subprocess.Popen(cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n env=os.environ)\n (out, _) = proc.communicate()\n if proc.returncode != 0:\n msg = \"Server invoke error:\\n\"\n msg += out\n raise RuntimeError(msg)\n\n\nclass Server(object):\n \"\"\"Start RPC server on a separate process.\n\n This is a simple python implementation based on multi-processing.\n It is also possible to implement a similar C based server with\n TVM runtime which does not depend on the python.\n\n Parameters\n ----------\n host : str\n The host url of the server.\n\n port : int\n The port to be bind to\n\n port_end : int, optional\n The end port to search\n\n is_proxy : bool, optional\n Whether the address specified is a proxy.\n If this is true, the host and port actually corresponds to the\n address of the proxy server.\n\n use_popen : bool, optional\n Whether to use Popen to start a fresh new process instead of fork.\n This is recommended to switch on if we want to do local RPC demonstration\n for GPU devices to avoid fork safety issues.\n\n tracker_addr: Tuple (str, int) , optional\n The address of RPC Tracker in tuple(host, ip) format.\n If is not None, the server will register itself to the tracker.\n\n key : str, optional\n The key used to identify the device type in tracker.\n\n load_library : str, optional\n List of additional libraries to be loaded during execution.\n\n custom_addr: str, optional\n Custom IP Address to Report to RPC Tracker\n\n silent: bool, optional\n Whether run this server in silent mode.\n \"\"\"\n def __init__(self,\n host,\n port=9091,\n port_end=9199,\n is_proxy=False,\n use_popen=False,\n tracker_addr=None,\n key=\"\",\n load_library=None,\n custom_addr=None,\n silent=False):\n try:\n if base._ServerLoop is None:\n raise RuntimeError(\"Please compile with USE_RPC=1\")\n except NameError:\n raise RuntimeError(\"Please compile with USE_RPC=1\")\n self.host = host\n self.port = port\n self.libs = []\n 
self.custom_addr = custom_addr\n self.use_popen = use_popen\n\n if silent:\n logger.setLevel(logging.ERROR)\n\n if use_popen:\n cmd = [sys.executable,\n \"-m\", \"tvm.exec.rpc_server\",\n \"--host=%s\" % host,\n \"--port=%s\" % port]\n if tracker_addr:\n assert key\n cmd += [\"--tracker=%s:%d\" % tracker_addr,\n \"--key=%s\" % key]\n if load_library:\n cmd += [\"--load-library\", load_library]\n if custom_addr:\n cmd += [\"--custom-addr\", custom_addr]\n if silent:\n cmd += [\"--silent\"]\n\n # prexec_fn is not thread safe and may result in deadlock.\n # python 3.2 introduced the start_new_session parameter as\n # an alternative to the common use case of\n # prexec_fn=os.setsid. Once the minimum version of python\n # supported by TVM reaches python 3.2 this code can be\n # rewritten in favour of start_new_session. In the\n # interim, stop the pylint diagnostic.\n #\n # pylint: disable=subprocess-popen-preexec-fn\n self.proc = subprocess.Popen(cmd, preexec_fn=os.setsid)\n time.sleep(0.5)\n elif not is_proxy:\n sock = socket.socket(base.get_addr_family((host, port)), socket.SOCK_STREAM)\n self.port = None\n for my_port in range(port, port_end):\n try:\n sock.bind((host, my_port))\n self.port = my_port\n break\n except socket.error as sock_err:\n if sock_err.errno in [98, 48]:\n continue\n else:\n raise sock_err\n if not self.port:\n raise ValueError(\"cannot bind to any port in [%d, %d)\" % (port, port_end))\n logger.info(\"bind to %s:%d\", host, self.port)\n sock.listen(1)\n self.sock = sock\n self.proc = multiprocessing.Process(\n target=_listen_loop, args=(\n self.sock, self.port, key, tracker_addr, load_library,\n self.custom_addr))\n self.proc.deamon = True\n self.proc.start()\n else:\n self.proc = multiprocessing.Process(\n target=_connect_proxy_loop, args=((host, port), key, load_library))\n self.proc.deamon = True\n self.proc.start()\n\n def terminate(self):\n \"\"\"Terminate the server process\"\"\"\n if self.use_popen:\n if self.proc:\n if platform.system() == \"Windows\":\n os.kill(self.proc.pid, signal.CTRL_C_EVENT)\n else:\n os.killpg(self.proc.pid, signal.SIGTERM)\n self.proc = None\n else:\n if self.proc:\n self.proc.terminate()\n self.proc = None\n\n def __del__(self):\n self.terminate()\n",
"path": "python/tvm/rpc/server.py"
}
] | diff --git a/python/tvm/rpc/server.py b/python/tvm/rpc/server.py
index 3fff5309c45b..3700c824d235 100644
--- a/python/tvm/rpc/server.py
+++ b/python/tvm/rpc/server.py
@@ -39,6 +39,7 @@
import time
import sys
import signal
+import platform
from .._ffi.function import register_func
from .._ffi.base import py_str
|
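The one-line fix above adds the `import platform` that `Server.terminate()` already depends on: the Popen-based server is killed with `os.killpg` on POSIX, but on Windows, where `os.setsid` process groups are unavailable, the code sends a `CTRL_C_EVENT` instead. A minimal sketch of that branch, condensed from the code shown in the record:

```python
import os
import platform
import signal


def terminate_popen_server(proc):
    """Platform-dependent termination, as in rpc.Server.terminate() above.

    On POSIX the subprocess was started with preexec_fn=os.setsid, so the
    whole process group can be signalled; Windows has no killpg, so
    CTRL_C_EVENT is sent to the child instead.
    """
    if proc is None:
        return
    if platform.system() == "Windows":
        # signal.CTRL_C_EVENT exists only on Windows; this branch is not
        # evaluated elsewhere.
        os.kill(proc.pid, signal.CTRL_C_EVENT)
    else:
        os.killpg(proc.pid, signal.SIGTERM)
```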
ietf-tools__datatracker-5409 | AD dashboard should also display trend when current == 0
### Description
The AD dashboard at https://datatracker.ietf.org/doc/ad/, with its trending arrows, would benefit from displaying the trend *even* if the current value is 0.
### Code of Conduct
- [X] I agree to follow the [IETF's Code of Conduct](https://github.com/ietf-tools/.github/blob/main/CODE_OF_CONDUCT.md)
| [
{
"content": "# Copyright The IETF Trust 2009-2022, All Rights Reserved\n# -*- coding: utf-8 -*-\n#\n# Some parts Copyright (C) 2009-2010 Nokia Corporation and/or its subsidiary(-ies).\n# All rights reserved. Contact: Pasi Eronen <[email protected]>\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n#\n# * Neither the name of the Nokia Corporation and/or its\n# subsidiary(-ies) nor the names of its contributors may be used\n# to endorse or promote products derived from this software\n# without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nimport re\nimport datetime\n\nfrom collections import defaultdict\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.core.cache import cache, caches\nfrom django.urls import reverse as urlreverse\nfrom django.db.models import Q\nfrom django.http import Http404, HttpResponseBadRequest, HttpResponse, HttpResponseRedirect, QueryDict\nfrom django.shortcuts import render\nfrom django.utils import timezone\nfrom django.utils.cache import _generate_cache_key # type: ignore\n\n\n\nimport debug # pyflakes:ignore\n\nfrom ietf.doc.models import ( Document, DocHistory, DocAlias, State,\n LastCallDocEvent, NewRevisionDocEvent, IESG_SUBSTATE_TAGS,\n IESG_BALLOT_ACTIVE_STATES, IESG_STATCHG_CONFLREV_ACTIVE_STATES,\n IESG_CHARTER_ACTIVE_STATES )\nfrom ietf.doc.fields import select2_id_doc_name_json\nfrom ietf.doc.utils import get_search_cache_key, augment_events_with_revision\nfrom ietf.group.models import Group\nfrom ietf.idindex.index import active_drafts_index_by_group\nfrom ietf.name.models import DocTagName, DocTypeName, StreamName\nfrom ietf.person.models import Person\nfrom ietf.person.utils import get_active_ads\nfrom ietf.utils.draft_search import normalize_draftname\nfrom ietf.doc.utils_search import prepare_document_table\n\n\nclass SearchForm(forms.Form):\n name = forms.CharField(required=False)\n rfcs = forms.BooleanField(required=False, initial=True)\n activedrafts = forms.BooleanField(required=False, initial=True)\n olddrafts = forms.BooleanField(required=False, initial=False)\n\n by = forms.ChoiceField(choices=[(x,x) for x in ('author','group','area','ad','state','irtfstate','stream')], required=False, initial='group')\n author = forms.CharField(required=False)\n group = forms.CharField(required=False)\n stream 
= forms.ModelChoiceField(StreamName.objects.all().order_by('name'), empty_label=\"any stream\", required=False)\n area = forms.ModelChoiceField(Group.objects.filter(type=\"area\", state=\"active\").order_by('name'), empty_label=\"any area\", required=False)\n ad = forms.ChoiceField(choices=(), required=False)\n state = forms.ModelChoiceField(State.objects.filter(type=\"draft-iesg\"), empty_label=\"any state\", required=False)\n substate = forms.ChoiceField(choices=(), required=False)\n irtfstate = forms.ModelChoiceField(State.objects.filter(type=\"draft-stream-irtf\"), empty_label=\"any state\", required=False)\n\n sort = forms.ChoiceField(\n choices= (\n (\"document\", \"Document\"), (\"-document\", \"Document (desc.)\"),\n (\"title\", \"Title\"), (\"-title\", \"Title (desc.)\"),\n (\"date\", \"Date\"), (\"-date\", \"Date (desc.)\"),\n (\"status\", \"Status\"), (\"-status\", \"Status (desc.)\"),\n (\"ipr\", \"Ipr\"), (\"ipr\", \"Ipr (desc.)\"),\n (\"ad\", \"AD\"), (\"-ad\", \"AD (desc)\"), ),\n required=False, widget=forms.HiddenInput)\n\n doctypes = forms.ModelMultipleChoiceField(queryset=DocTypeName.objects.filter(used=True).exclude(slug__in=('draft','liai-att')).order_by('name'), required=False)\n\n def __init__(self, *args, **kwargs):\n super(SearchForm, self).__init__(*args, **kwargs)\n responsible = Document.objects.values_list('ad', flat=True).distinct()\n active_ads = get_active_ads()\n inactive_ads = list(((Person.objects.filter(pk__in=responsible) | Person.objects.filter(role__name=\"pre-ad\",\n role__group__type=\"area\",\n role__group__state=\"active\")).distinct())\n .exclude(pk__in=[x.pk for x in active_ads]))\n extract_last_name = lambda x: x.name_parts()[3]\n active_ads.sort(key=extract_last_name)\n inactive_ads.sort(key=extract_last_name)\n\n self.fields['ad'].choices = [('', 'any AD')] + [(ad.pk, ad.plain_name()) for ad in active_ads] + [('', '------------------')] + [(ad.pk, ad.name) for ad in inactive_ads]\n self.fields['substate'].choices = [('', 'any substate'), ('0', 'no substate')] + [(n.slug, n.name) for n in DocTagName.objects.filter(slug__in=IESG_SUBSTATE_TAGS)]\n\n def clean_name(self):\n value = self.cleaned_data.get('name','')\n return normalize_draftname(value)\n\n def clean(self):\n q = self.cleaned_data\n # Reset query['by'] if needed\n if 'by' in q:\n for k in ('author', 'group', 'area', 'ad'):\n if q['by'] == k and not q.get(k):\n q['by'] = None\n if q['by'] == 'state' and not (q.get('state') or q.get('substate')):\n q['by'] = None\n if q['by'] == 'irtfstate' and not (q.get('irtfstate')):\n q['by'] = None\n else:\n q['by'] = None\n # Reset other fields\n for k in ('author','group', 'area', 'ad'):\n if k != q['by']:\n q[k] = \"\"\n if q['by'] != 'state':\n q['state'] = q['substate'] = None\n if q['by'] != 'irtfstate':\n q['irtfstate'] = None\n return q\n\ndef retrieve_search_results(form, all_types=False):\n \"\"\"Takes a validated SearchForm and return the results.\"\"\"\n\n if not form.is_valid():\n raise ValueError(\"SearchForm doesn't validate: %s\" % form.errors)\n\n query = form.cleaned_data\n\n if all_types:\n # order by time here to retain the most recent documents in case we\n # find too many and have to chop the results list\n docs = Document.objects.all().order_by('-time')\n else:\n types = []\n\n if query['activedrafts'] or query['olddrafts'] or query['rfcs']:\n types.append('draft')\n\n types.extend(query[\"doctypes\"])\n\n if not types:\n return Document.objects.none()\n\n docs = Document.objects.filter(type__in=types)\n\n # name\n if 
query[\"name\"]:\n docs = docs.filter(Q(docalias__name__icontains=query[\"name\"]) |\n Q(title__icontains=query[\"name\"])).distinct()\n\n # rfc/active/old check buttons\n allowed_draft_states = []\n if query[\"rfcs\"]:\n allowed_draft_states.append(\"rfc\")\n if query[\"activedrafts\"]:\n allowed_draft_states.append(\"active\")\n if query[\"olddrafts\"]:\n allowed_draft_states.extend(['repl', 'expired', 'auth-rm', 'ietf-rm'])\n\n docs = docs.filter(Q(states__slug__in=allowed_draft_states) |\n ~Q(type__slug='draft')).distinct()\n\n # radio choices\n by = query[\"by\"]\n if by == \"author\":\n docs = docs.filter(\n Q(documentauthor__person__alias__name__icontains=query[\"author\"]) |\n Q(documentauthor__person__email__address__icontains=query[\"author\"])\n )\n elif by == \"group\":\n docs = docs.filter(group__acronym=query[\"group\"])\n elif by == \"area\":\n docs = docs.filter(Q(group__type=\"wg\", group__parent=query[\"area\"]) |\n Q(group=query[\"area\"])).distinct()\n elif by == \"ad\":\n docs = docs.filter(ad=query[\"ad\"])\n elif by == \"state\":\n if query[\"state\"]:\n docs = docs.filter(states=query[\"state\"])\n if query[\"substate\"]:\n docs = docs.filter(tags=query[\"substate\"])\n elif by == \"irtfstate\":\n docs = docs.filter(states=query[\"irtfstate\"])\n elif by == \"stream\":\n docs = docs.filter(stream=query[\"stream\"])\n\n return docs\n\ndef search(request):\n if request.GET:\n # backwards compatibility\n get_params = request.GET.copy()\n if 'activeDrafts' in request.GET:\n get_params['activedrafts'] = request.GET['activeDrafts']\n if 'oldDrafts' in request.GET:\n get_params['olddrafts'] = request.GET['oldDrafts']\n if 'subState' in request.GET:\n get_params['substate'] = request.GET['subState']\n\n form = SearchForm(get_params)\n if not form.is_valid():\n return HttpResponseBadRequest(\"form not valid: %s\" % form.errors)\n\n cache_key = get_search_cache_key(get_params)\n results = cache.get(cache_key)\n if not results:\n results = retrieve_search_results(form)\n cache.set(cache_key, results)\n\n results, meta = prepare_document_table(request, results, get_params)\n meta['searching'] = True\n else:\n form = SearchForm()\n results = []\n meta = { 'by': None, 'searching': False }\n get_params = QueryDict('')\n\n return render(request, 'doc/search/search.html', {\n 'form':form, 'docs':results, 'meta':meta, 'queryargs':get_params.urlencode() },\n )\n\ndef frontpage(request):\n form = SearchForm()\n return render(request, 'doc/frontpage.html', {'form':form})\n\ndef search_for_name(request, name):\n def find_unique(n):\n exact = DocAlias.objects.filter(name=n).first()\n if exact:\n return exact.name\n\n aliases = DocAlias.objects.filter(name__startswith=n)[:2]\n if len(aliases) == 1:\n return aliases[0].name\n\n aliases = DocAlias.objects.filter(name__contains=n)[:2]\n if len(aliases) == 1:\n return aliases[0].name\n\n return None\n\n def cached_redirect(cache_key, url):\n cache.set(cache_key, url, settings.CACHE_MIDDLEWARE_SECONDS)\n return HttpResponseRedirect(url)\n\n n = name\n\n cache_key = _generate_cache_key(request, 'GET', [], settings.CACHE_MIDDLEWARE_KEY_PREFIX)\n if cache_key:\n url = cache.get(cache_key, None)\n if url:\n return HttpResponseRedirect(url)\n\n # chop away extension\n extension_split = re.search(r\"^(.+)\\.(txt|ps|pdf)$\", n)\n if extension_split:\n n = extension_split.group(1)\n\n redirect_to = find_unique(name)\n if redirect_to:\n return cached_redirect(cache_key, urlreverse(\"ietf.doc.views_doc.document_main\", kwargs={ \"name\": redirect_to 
}))\n else:\n # check for embedded rev - this may be ambiguous, so don't\n # chop it off if we don't find a match\n rev_split = re.search(\"^(.+)-([0-9]{2})$\", n)\n if rev_split:\n redirect_to = find_unique(rev_split.group(1))\n if redirect_to:\n rev = rev_split.group(2)\n # check if we can redirect directly to the rev\n if DocHistory.objects.filter(doc__docalias__name=redirect_to, rev=rev).exists():\n return cached_redirect(cache_key, urlreverse(\"ietf.doc.views_doc.document_main\", kwargs={ \"name\": redirect_to, \"rev\": rev }))\n else:\n return cached_redirect(cache_key, urlreverse(\"ietf.doc.views_doc.document_main\", kwargs={ \"name\": redirect_to }))\n\n # build appropriate flags based on string prefix\n doctypenames = DocTypeName.objects.filter(used=True)\n # This would have been more straightforward if document prefixes couldn't\n # contain a dash. Probably, document prefixes shouldn't contain a dash ...\n search_args = \"?name=%s\" % n\n if n.startswith(\"draft\"):\n search_args += \"&rfcs=on&activedrafts=on&olddrafts=on\"\n else:\n for t in doctypenames:\n if t.prefix and n.startswith(t.prefix):\n search_args += \"&doctypes=%s\" % t.slug\n break\n else:\n search_args += \"&rfcs=on&activedrafts=on&olddrafts=on\"\n\n return cached_redirect(cache_key, urlreverse('ietf.doc.views_search.search') + search_args)\n\ndef ad_dashboard_group_type(doc):\n # Return group type for document for dashboard.\n # If doc is not defined return list of all possible\n # group types\n if not doc:\n return ('I-D', 'RFC', 'Conflict Review', 'Status Change', 'Charter')\n if doc.type.slug=='draft':\n if doc.get_state_slug('draft') == 'rfc':\n return 'RFC'\n elif doc.get_state_slug('draft') == 'active' and doc.get_state_slug('draft-iesg') and doc.get_state('draft-iesg').name =='RFC Ed Queue':\n return 'RFC'\n elif doc.get_state_slug('draft') == 'active' and doc.get_state_slug('draft-iesg') and doc.get_state('draft-iesg').name in ('Dead', 'I-D Exists', 'AD is watching'):\n return None\n elif doc.get_state('draft').name in ('Expired', 'Replaced'):\n return None\n else:\n return 'I-D'\n elif doc.type.slug=='conflrev':\n return 'Conflict Review'\n elif doc.type.slug=='statchg':\n return 'Status Change'\n elif doc.type.slug=='charter':\n return \"Charter\"\n else:\n return \"Document\"\n\ndef ad_dashboard_group(doc):\n\n if doc.type.slug=='draft':\n if doc.get_state_slug('draft') == 'rfc':\n return 'RFC'\n elif doc.get_state_slug('draft') == 'active' and doc.get_state_slug('draft-iesg'):\n return '%s Internet-Draft' % doc.get_state('draft-iesg').name\n else:\n return '%s Internet-Draft' % doc.get_state('draft').name\n elif doc.type.slug=='conflrev':\n if doc.get_state_slug('conflrev') in ('appr-reqnopub-sent','appr-noprob-sent'):\n return 'Approved Conflict Review'\n elif doc.get_state_slug('conflrev') in ('appr-reqnopub-pend','appr-noprob-pend','appr-reqnopub-pr','appr-noprob-pr'):\n return \"%s Conflict Review\" % State.objects.get(type__slug='draft-iesg',slug='approved')\n else:\n return '%s Conflict Review' % doc.get_state('conflrev')\n elif doc.type.slug=='statchg':\n if doc.get_state_slug('statchg') in ('appr-sent',):\n return 'Approved Status Change'\n if doc.get_state_slug('statchg') in ('appr-pend','appr-pr'):\n return '%s Status Change' % State.objects.get(type__slug='draft-iesg',slug='approved')\n else:\n return '%s Status Change' % doc.get_state('statchg')\n elif doc.type.slug=='charter':\n if doc.get_state_slug('charter') == 'approved':\n return \"Approved Charter\"\n else:\n return '%s Charter' % 
doc.get_state('charter')\n else:\n return \"Document\"\n\n\ndef shorten_group_name(name):\n for s in [\n \" Internet-Draft\",\n \" Conflict Review\",\n \" Status Change\",\n \" (Internal Steering Group/IAB Review) Charter\",\n \"Charter\",\n ]:\n if name.endswith(s):\n name = name[: -len(s)]\n\n for pat, sub in [\n (\"Writeup\", \"Write-up\"),\n (\"Requested\", \"Req\"),\n (\"Evaluation\", \"Eval\"),\n (\"Publication\", \"Pub\"),\n (\"Waiting\", \"Wait\"),\n (\"Go-Ahead\", \"OK\"),\n (\"Approved-\", \"App, \"),\n (\"announcement\", \"ann.\"),\n (\"IESG Eval - \", \"\"),\n (\"Not currently under review\", \"Not under review\"),\n (\"External Review\", \"Ext. Review\"),\n (r\"IESG Review \\(Charter for Approval, Selected by Secretariat\\)\", \"IESG Review\"),\n (\"Needs Shepherd\", \"Needs Shep.\"),\n (\"Approved\", \"App.\"),\n (\"Replaced\", \"Repl.\"),\n (\"Withdrawn\", \"Withd.\"),\n (\"Chartering/Rechartering\", \"Charter\"),\n (r\"\\(Message to Community, Selected by Secretariat\\)\", \"\")\n ]:\n name = re.sub(pat, sub, name)\n\n return name.strip()\n\n\ndef ad_dashboard_sort_key(doc):\n\n if doc.type.slug=='draft' and doc.get_state_slug('draft') == 'rfc':\n return \"21%04d\" % int(doc.rfc_number())\n if doc.type.slug=='statchg' and doc.get_state_slug('statchg') == 'appr-sent':\n return \"22%d\" % 0 # TODO - get the date of the transition into this state here\n if doc.type.slug=='conflrev' and doc.get_state_slug('conflrev') in ('appr-reqnopub-sent','appr-noprob-sent'):\n return \"23%d\" % 0 # TODO - get the date of the transition into this state here\n if doc.type.slug=='charter' and doc.get_state_slug('charter') == 'approved':\n return \"24%d\" % 0 # TODO - get the date of the transition into this state here\n\n seed = ad_dashboard_group(doc)\n\n if doc.type.slug=='conflrev' and doc.get_state_slug('conflrev') == 'adrev':\n state = State.objects.get(type__slug='draft-iesg',slug='ad-eval')\n return \"1%d%s\" % (state.order,seed)\n\n if doc.type.slug=='charter' and doc.get_state_slug('charter') != 'replaced':\n if doc.get_state_slug('charter') in ('notrev','infrev'):\n return \"100%s\" % seed\n elif doc.get_state_slug('charter') == 'intrev':\n state = State.objects.get(type__slug='draft-iesg',slug='ad-eval')\n return \"1%d%s\" % (state.order,seed)\n elif doc.get_state_slug('charter') == 'extrev':\n state = State.objects.get(type__slug='draft-iesg',slug='lc')\n return \"1%d%s\" % (state.order,seed)\n elif doc.get_state_slug('charter') == 'iesgrev':\n state = State.objects.get(type__slug='draft-iesg',slug='iesg-eva')\n return \"1%d%s\" % (state.order,seed)\n\n if doc.type.slug=='statchg' and doc.get_state_slug('statchg') == 'adrev':\n state = State.objects.get(type__slug='draft-iesg',slug='ad-eval')\n return \"1%d%s\" % (state.order,seed)\n\n if seed.startswith('Needs Shepherd'):\n return \"100%s\" % seed\n if seed.endswith(' Document'):\n seed = seed[:-9]\n elif seed.endswith(' Internet-Draft'):\n seed = seed[:-15]\n elif seed.endswith(' Conflict Review'):\n seed = seed[:-16]\n elif seed.endswith(' Status Change'):\n seed = seed[:-14]\n state = State.objects.filter(type__slug='draft-iesg',name=seed)\n if state:\n ageseconds = 0\n changetime= doc.latest_event(type='changed_document')\n if changetime:\n ad = (timezone.now()-doc.latest_event(type='changed_document').time)\n ageseconds = (ad.microseconds + (ad.seconds + ad.days * 24 * 3600) * 10**6) / 10**6\n return \"1%d%s%s%010d\" % (state[0].order,seed,doc.type.slug,ageseconds)\n\n return \"3%s\" % seed\n\n\ndef ad_workload(request):\n 
delta = datetime.timedelta(days=30)\n right_now = timezone.now()\n\n ads = []\n responsible = Document.objects.values_list(\"ad\", flat=True).distinct()\n for p in Person.objects.filter(\n Q(\n role__name__in=(\"pre-ad\", \"ad\"),\n role__group__type=\"area\",\n role__group__state=\"active\",\n )\n | Q(pk__in=responsible)\n ).distinct():\n if p in get_active_ads():\n ads.append(p)\n\n doctypes = list(\n DocTypeName.objects.filter(used=True)\n .exclude(slug__in=(\"draft\", \"liai-att\"))\n .values_list(\"pk\", flat=True)\n )\n\n up_is_good = {}\n group_types = ad_dashboard_group_type(None)\n groups = {g: {} for g in group_types}\n group_names = {g: [] for g in group_types}\n\n # Prefill groups in preferred sort order\n # FIXME: This should really use the database states instead of replicating the logic\n for id, (g, uig) in enumerate(\n [\n (\"Publication Requested Internet-Draft\", False),\n (\"AD Evaluation Internet-Draft\", False),\n (\"In Last Call Internet-Draft\", True),\n (\"Waiting for Writeup Internet-Draft\", False),\n (\"IESG Evaluation - Defer Internet-Draft\", False),\n (\"IESG Evaluation Internet-Draft\", True),\n (\"Waiting for AD Go-Ahead Internet-Draft\", False),\n (\"Approved-announcement to be sent Internet-Draft\", True),\n (\"Approved-announcement sent Internet-Draft\", True),\n ]\n ):\n groups[\"I-D\"][g] = id\n group_names[\"I-D\"].append(g)\n up_is_good[g] = uig\n\n for id, g in enumerate([\"RFC Ed Queue Internet-Draft\", \"RFC\"]):\n groups[\"RFC\"][g] = id\n group_names[\"RFC\"].append(g)\n up_is_good[g] = True\n\n for id, (g, uig) in enumerate(\n [\n (\"AD Review Conflict Review\", False),\n (\"Needs Shepherd Conflict Review\", False),\n (\"IESG Evaluation Conflict Review\", True),\n (\"Approved Conflict Review\", True),\n (\"Withdrawn Conflict Review\", None),\n ]\n ):\n groups[\"Conflict Review\"][g] = id\n group_names[\"Conflict Review\"].append(g)\n up_is_good[g] = uig\n\n for id, (g, uig) in enumerate(\n [\n (\"Publication Requested Status Change\", False),\n (\"AD Evaluation Status Change\", False),\n (\"In Last Call Status Change\", True),\n (\"Waiting for Writeup Status Change\", False),\n (\"IESG Evaluation Status Change\", True),\n (\"Waiting for AD Go-Ahead Status Change\", False),\n ]\n ):\n groups[\"Status Change\"][g] = id\n group_names[\"Status Change\"].append(g)\n up_is_good[g] = uig\n\n for id, (g, uig) in enumerate(\n [\n (\"Not currently under review Charter\", None),\n (\"Draft Charter Charter\", None),\n (\"Start Chartering/Rechartering (Internal Steering Group/IAB Review) Charter\", False),\n (\"External Review (Message to Community, Selected by Secretariat) Charter\", True),\n (\"IESG Review (Charter for Approval, Selected by Secretariat) Charter\", True),\n (\"Approved Charter\", True),\n (\"Replaced Charter\", None),\n ]\n ):\n groups[\"Charter\"][g] = id\n group_names[\"Charter\"].append(g)\n up_is_good[g] = uig\n\n for ad in ads:\n form = SearchForm(\n {\n \"by\": \"ad\",\n \"ad\": ad.id,\n \"rfcs\": \"on\",\n \"activedrafts\": \"on\",\n \"olddrafts\": \"on\",\n \"doctypes\": doctypes,\n }\n )\n\n ad.dashboard = urlreverse(\n \"ietf.doc.views_search.docs_for_ad\", kwargs=dict(name=ad.full_name_as_key())\n )\n ad.counts = defaultdict(list)\n ad.prev = defaultdict(list)\n ad.doc_now = defaultdict(list)\n ad.doc_prev = defaultdict(list)\n\n for doc in retrieve_search_results(form):\n group_type = ad_dashboard_group_type(doc)\n if group_type and group_type in groups:\n # Right now, anything with group_type \"Document\", such as a bofreq is 
not handled.\n group = ad_dashboard_group(doc)\n if group not in groups[group_type]:\n groups[group_type][group] = len(groups[group_type])\n group_names[group_type].append(group)\n\n inc = len(groups[group_type]) - len(ad.counts[group_type])\n if inc > 0:\n ad.counts[group_type].extend([0] * inc)\n ad.prev[group_type].extend([0] * inc)\n ad.doc_now[group_type].extend(set() for _ in range(inc))\n ad.doc_prev[group_type].extend(set() for _ in range(inc))\n\n ad.counts[group_type][groups[group_type][group]] += 1\n ad.doc_now[group_type][groups[group_type][group]].add(doc)\n\n last_state_event = (\n doc.docevent_set.filter(\n Q(type=\"started_iesg_process\") | Q(type=\"changed_state\")\n )\n .order_by(\"-time\")\n .first()\n )\n if (last_state_event is not None) and (right_now - last_state_event.time) > delta:\n ad.prev[group_type][groups[group_type][group]] += 1\n ad.doc_prev[group_type][groups[group_type][group]].add(doc)\n\n for ad in ads:\n ad.doc_diff = defaultdict(list)\n for gt in group_types:\n inc = len(groups[gt]) - len(ad.counts[gt])\n if inc > 0:\n ad.counts[gt].extend([0] * inc)\n ad.prev[gt].extend([0] * inc)\n ad.doc_now[gt].extend([set()] * inc)\n ad.doc_prev[gt].extend([set()] * inc)\n\n ad.doc_diff[gt].extend([set()] * len(groups[gt]))\n for idx, g in enumerate(group_names[gt]):\n ad.doc_diff[gt][idx] = ad.doc_prev[gt][idx] ^ ad.doc_now[gt][idx]\n\n # Shorten the names of groups\n for gt in group_types:\n for idx, g in enumerate(group_names[gt]):\n group_names[gt][idx] = (\n shorten_group_name(g),\n g,\n up_is_good[g] if g in up_is_good else None,\n )\n\n workload = [\n dict(\n group_type=gt,\n group_names=group_names[gt],\n counts=[\n (\n ad,\n [\n (\n group_names[gt][index],\n ad.counts[gt][index],\n ad.prev[gt][index],\n ad.doc_diff[gt][index],\n )\n for index in range(len(group_names[gt]))\n ],\n )\n for ad in ads\n ],\n sums=[\n (\n group_names[gt][index],\n sum([ad.counts[gt][index] for ad in ads]),\n sum([ad.prev[gt][index] for ad in ads]),\n )\n for index in range(len(group_names[gt]))\n ],\n )\n for gt in group_types\n ]\n\n return render(request, \"doc/ad_list.html\", {\"workload\": workload, \"delta\": delta})\n\ndef docs_for_ad(request, name):\n ad = None\n responsible = Document.objects.values_list('ad', flat=True).distinct()\n for p in Person.objects.filter(Q(role__name__in=(\"pre-ad\", \"ad\"),\n role__group__type=\"area\",\n role__group__state=\"active\")\n | Q(pk__in=responsible)).distinct():\n if name == p.full_name_as_key():\n ad = p\n break\n if not ad:\n raise Http404\n form = SearchForm({'by':'ad','ad': ad.id,\n 'rfcs':'on', 'activedrafts':'on', 'olddrafts':'on',\n 'sort': 'status',\n 'doctypes': list(DocTypeName.objects.filter(used=True).exclude(slug__in=('draft','liai-att')).values_list(\"pk\", flat=True))})\n results, meta = prepare_document_table(request, retrieve_search_results(form), form.data, max_results=500)\n results.sort(key=ad_dashboard_sort_key)\n del meta[\"headers\"][-1]\n\n # filter out some results\n results = [\n r\n for r in results\n if not (\n r.type_id == \"charter\"\n and (\n r.group.state_id == \"abandon\"\n or r.get_state_slug(\"charter\") == \"replaced\"\n )\n )\n and not (\n r.type_id == \"draft\"\n and (\n r.get_state_slug(\"draft-iesg\") == \"dead\"\n or r.get_state_slug(\"draft\") == \"repl\"\n )\n )\n ]\n\n for d in results:\n d.search_heading = ad_dashboard_group(d)\n #\n # Additional content showing docs with blocking positions by this ad\n blocked_docs = []\n if ad in get_active_ads():\n possible_docs = 
Document.objects.filter(Q(states__type=\"draft-iesg\",\n states__slug__in=IESG_BALLOT_ACTIVE_STATES) |\n Q(states__type=\"charter\",\n states__slug__in=IESG_CHARTER_ACTIVE_STATES) |\n Q(states__type__in=(\"statchg\", \"conflrev\"),\n states__slug__in=IESG_STATCHG_CONFLREV_ACTIVE_STATES),\n docevent__ballotpositiondocevent__pos__blocking=True,\n docevent__ballotpositiondocevent__balloter=ad).distinct()\n for doc in possible_docs:\n ballot = doc.active_ballot()\n if not ballot:\n continue\n\n blocking_positions = [p for p in ballot.all_positions() if p.pos.blocking]\n if not blocking_positions or not any( p.balloter==ad for p in blocking_positions ):\n continue\n\n augment_events_with_revision(doc, blocking_positions)\n\n doc.blocking_positions = blocking_positions\n doc.ballot = ballot\n\n blocked_docs.append(doc)\n\n # latest first\n if blocked_docs:\n blocked_docs.sort(key=lambda d: min(p.time for p in d.blocking_positions if p.balloter==ad), reverse=True)\n\n for d in blocked_docs:\n if d.get_base_name() == 'charter-ietf-shmoo-01-04.txt':\n print('Is in list')\n\n return render(request, 'doc/drafts_for_ad.html', {\n 'form':form, 'docs':results, 'meta':meta, 'ad_name': ad.plain_name(), 'blocked_docs': blocked_docs\n })\ndef drafts_in_last_call(request):\n lc_state = State.objects.get(type=\"draft-iesg\", slug=\"lc\").pk\n form = SearchForm({'by':'state','state': lc_state, 'rfcs':'on', 'activedrafts':'on'})\n results, meta = prepare_document_table(request, retrieve_search_results(form), form.data)\n pages = 0\n for doc in results:\n pages += doc.pages\n\n return render(request, 'doc/drafts_in_last_call.html', {\n 'form':form, 'docs':results, 'meta':meta, 'pages':pages\n })\n\ndef drafts_in_iesg_process(request):\n states = State.objects.filter(type=\"draft-iesg\").exclude(slug__in=('idexists', 'pub', 'dead', 'watching', 'rfcqueue'))\n title = \"Documents in IESG process\"\n\n grouped_docs = []\n\n for s in states.order_by(\"order\"):\n docs = Document.objects.filter(type=\"draft\", states=s).distinct().order_by(\"time\").select_related(\"ad\", \"group\", \"group__parent\")\n if docs:\n if s.slug == \"lc\":\n for d in docs:\n e = d.latest_event(LastCallDocEvent, type=\"sent_last_call\")\n d.lc_expires = e.expires if e else datetime.datetime.min\n docs = list(docs)\n docs.sort(key=lambda d: d.lc_expires)\n\n grouped_docs.append((s, docs))\n\n return render(request, 'doc/drafts_in_iesg_process.html', {\n \"grouped_docs\": grouped_docs,\n \"title\": title,\n })\n\ndef recent_drafts(request, days=7):\n slowcache = caches['slowpages']\n cache_key = f'recentdraftsview{days}' \n cached_val = slowcache.get(cache_key)\n if not cached_val:\n since = timezone.now()-datetime.timedelta(days=days)\n state = State.objects.get(type='draft', slug='active')\n events = NewRevisionDocEvent.objects.filter(time__gt=since)\n names = [ e.doc.name for e in events ]\n docs = Document.objects.filter(name__in=names, states=state)\n results, meta = prepare_document_table(request, docs, query={'sort':'-date', }, max_results=len(names))\n slowcache.set(cache_key, [docs, results, meta], 1800)\n else:\n [docs, results, meta] = cached_val\n\n pages = 0\n for doc in results:\n pages += doc.pages or 0\n\n return render(request, 'doc/recent_drafts.html', {\n 'docs':results, 'meta':meta, 'pages':pages, 'days': days,\n })\n\n\ndef index_all_drafts(request):\n # try to be efficient since this view returns a lot of data\n categories = []\n\n for s in (\"active\", \"rfc\", \"expired\", \"repl\", \"auth-rm\", \"ietf-rm\"):\n state = 
State.objects.get(type=\"draft\", slug=s)\n\n if state.slug == \"rfc\":\n heading = \"RFCs\"\n elif state.slug in (\"ietf-rm\", \"auth-rm\"):\n heading = \"Internet-Drafts %s\" % state.name\n else:\n heading = \"%s Internet-Drafts\" % state.name\n\n draft_names = DocAlias.objects.filter(docs__states=state).values_list(\"name\", \"docs__name\")\n\n names = []\n names_to_skip = set()\n for name, doc in draft_names:\n sort_key = name\n if name != doc:\n if not name.startswith(\"rfc\"):\n name, doc = doc, name\n names_to_skip.add(doc)\n\n if name.startswith(\"rfc\"):\n name = name.upper()\n sort_key = '%09d' % (100000000-int(name[3:]))\n\n names.append((name, sort_key))\n\n names.sort(key=lambda t: t[1])\n\n names = [f'<a href=\\\"{urlreverse(\"ietf.doc.views_doc.document_main\", kwargs=dict(name=n))}\\\">{n}</a>'\n for n, __ in names if n not in names_to_skip]\n\n categories.append((state,\n heading,\n len(names),\n \"<br>\".join(names)\n ))\n return render(request, 'doc/index_all_drafts.html', { \"categories\": categories })\n\ndef index_active_drafts(request):\n cache_key = 'doc:index_active_drafts'\n groups = cache.get(cache_key)\n if not groups:\n groups = active_drafts_index_by_group()\n cache.set(cache_key, groups, 15*60)\n return render(request, \"doc/index_active_drafts.html\", { 'groups': groups })\n\ndef ajax_select2_search_docs(request, model_name, doc_type):\n if model_name == \"docalias\":\n model = DocAlias\n else:\n model = Document\n\n q = [w.strip() for w in request.GET.get('q', '').split() if w.strip()]\n\n if not q:\n objs = model.objects.none()\n else:\n qs = model.objects.all()\n\n if model == Document:\n qs = qs.filter(type=doc_type)\n elif model == DocAlias:\n qs = qs.filter(docs__type=doc_type)\n\n for t in q:\n qs = qs.filter(name__icontains=t)\n\n objs = qs.distinct().order_by(\"name\")[:20]\n\n return HttpResponse(select2_id_doc_name_json(model, objs), content_type='application/json')\n",
"path": "ietf/doc/views_search.py"
}
] | [
{
"content": "# Copyright The IETF Trust 2009-2022, All Rights Reserved\n# -*- coding: utf-8 -*-\n#\n# Some parts Copyright (C) 2009-2010 Nokia Corporation and/or its subsidiary(-ies).\n# All rights reserved. Contact: Pasi Eronen <[email protected]>\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n#\n# * Neither the name of the Nokia Corporation and/or its\n# subsidiary(-ies) nor the names of its contributors may be used\n# to endorse or promote products derived from this software\n# without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nimport re\nimport datetime\n\nfrom collections import defaultdict\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.core.cache import cache, caches\nfrom django.urls import reverse as urlreverse\nfrom django.db.models import Q\nfrom django.http import Http404, HttpResponseBadRequest, HttpResponse, HttpResponseRedirect, QueryDict\nfrom django.shortcuts import render\nfrom django.utils import timezone\nfrom django.utils.cache import _generate_cache_key # type: ignore\n\n\n\nimport debug # pyflakes:ignore\n\nfrom ietf.doc.models import ( Document, DocHistory, DocAlias, State,\n LastCallDocEvent, NewRevisionDocEvent, IESG_SUBSTATE_TAGS,\n IESG_BALLOT_ACTIVE_STATES, IESG_STATCHG_CONFLREV_ACTIVE_STATES,\n IESG_CHARTER_ACTIVE_STATES )\nfrom ietf.doc.fields import select2_id_doc_name_json\nfrom ietf.doc.utils import get_search_cache_key, augment_events_with_revision\nfrom ietf.group.models import Group\nfrom ietf.idindex.index import active_drafts_index_by_group\nfrom ietf.name.models import DocTagName, DocTypeName, StreamName\nfrom ietf.person.models import Person\nfrom ietf.person.utils import get_active_ads\nfrom ietf.utils.draft_search import normalize_draftname\nfrom ietf.doc.utils_search import prepare_document_table\n\n\nclass SearchForm(forms.Form):\n name = forms.CharField(required=False)\n rfcs = forms.BooleanField(required=False, initial=True)\n activedrafts = forms.BooleanField(required=False, initial=True)\n olddrafts = forms.BooleanField(required=False, initial=False)\n\n by = forms.ChoiceField(choices=[(x,x) for x in ('author','group','area','ad','state','irtfstate','stream')], required=False, initial='group')\n author = forms.CharField(required=False)\n group = forms.CharField(required=False)\n stream 
= forms.ModelChoiceField(StreamName.objects.all().order_by('name'), empty_label=\"any stream\", required=False)\n area = forms.ModelChoiceField(Group.objects.filter(type=\"area\", state=\"active\").order_by('name'), empty_label=\"any area\", required=False)\n ad = forms.ChoiceField(choices=(), required=False)\n state = forms.ModelChoiceField(State.objects.filter(type=\"draft-iesg\"), empty_label=\"any state\", required=False)\n substate = forms.ChoiceField(choices=(), required=False)\n irtfstate = forms.ModelChoiceField(State.objects.filter(type=\"draft-stream-irtf\"), empty_label=\"any state\", required=False)\n\n sort = forms.ChoiceField(\n choices= (\n (\"document\", \"Document\"), (\"-document\", \"Document (desc.)\"),\n (\"title\", \"Title\"), (\"-title\", \"Title (desc.)\"),\n (\"date\", \"Date\"), (\"-date\", \"Date (desc.)\"),\n (\"status\", \"Status\"), (\"-status\", \"Status (desc.)\"),\n (\"ipr\", \"Ipr\"), (\"ipr\", \"Ipr (desc.)\"),\n (\"ad\", \"AD\"), (\"-ad\", \"AD (desc)\"), ),\n required=False, widget=forms.HiddenInput)\n\n doctypes = forms.ModelMultipleChoiceField(queryset=DocTypeName.objects.filter(used=True).exclude(slug__in=('draft','liai-att')).order_by('name'), required=False)\n\n def __init__(self, *args, **kwargs):\n super(SearchForm, self).__init__(*args, **kwargs)\n responsible = Document.objects.values_list('ad', flat=True).distinct()\n active_ads = get_active_ads()\n inactive_ads = list(((Person.objects.filter(pk__in=responsible) | Person.objects.filter(role__name=\"pre-ad\",\n role__group__type=\"area\",\n role__group__state=\"active\")).distinct())\n .exclude(pk__in=[x.pk for x in active_ads]))\n extract_last_name = lambda x: x.name_parts()[3]\n active_ads.sort(key=extract_last_name)\n inactive_ads.sort(key=extract_last_name)\n\n self.fields['ad'].choices = [('', 'any AD')] + [(ad.pk, ad.plain_name()) for ad in active_ads] + [('', '------------------')] + [(ad.pk, ad.name) for ad in inactive_ads]\n self.fields['substate'].choices = [('', 'any substate'), ('0', 'no substate')] + [(n.slug, n.name) for n in DocTagName.objects.filter(slug__in=IESG_SUBSTATE_TAGS)]\n\n def clean_name(self):\n value = self.cleaned_data.get('name','')\n return normalize_draftname(value)\n\n def clean(self):\n q = self.cleaned_data\n # Reset query['by'] if needed\n if 'by' in q:\n for k in ('author', 'group', 'area', 'ad'):\n if q['by'] == k and not q.get(k):\n q['by'] = None\n if q['by'] == 'state' and not (q.get('state') or q.get('substate')):\n q['by'] = None\n if q['by'] == 'irtfstate' and not (q.get('irtfstate')):\n q['by'] = None\n else:\n q['by'] = None\n # Reset other fields\n for k in ('author','group', 'area', 'ad'):\n if k != q['by']:\n q[k] = \"\"\n if q['by'] != 'state':\n q['state'] = q['substate'] = None\n if q['by'] != 'irtfstate':\n q['irtfstate'] = None\n return q\n\ndef retrieve_search_results(form, all_types=False):\n \"\"\"Takes a validated SearchForm and return the results.\"\"\"\n\n if not form.is_valid():\n raise ValueError(\"SearchForm doesn't validate: %s\" % form.errors)\n\n query = form.cleaned_data\n\n if all_types:\n # order by time here to retain the most recent documents in case we\n # find too many and have to chop the results list\n docs = Document.objects.all().order_by('-time')\n else:\n types = []\n\n if query['activedrafts'] or query['olddrafts'] or query['rfcs']:\n types.append('draft')\n\n types.extend(query[\"doctypes\"])\n\n if not types:\n return Document.objects.none()\n\n docs = Document.objects.filter(type__in=types)\n\n # name\n if 
query[\"name\"]:\n docs = docs.filter(Q(docalias__name__icontains=query[\"name\"]) |\n Q(title__icontains=query[\"name\"])).distinct()\n\n # rfc/active/old check buttons\n allowed_draft_states = []\n if query[\"rfcs\"]:\n allowed_draft_states.append(\"rfc\")\n if query[\"activedrafts\"]:\n allowed_draft_states.append(\"active\")\n if query[\"olddrafts\"]:\n allowed_draft_states.extend(['repl', 'expired', 'auth-rm', 'ietf-rm'])\n\n docs = docs.filter(Q(states__slug__in=allowed_draft_states) |\n ~Q(type__slug='draft')).distinct()\n\n # radio choices\n by = query[\"by\"]\n if by == \"author\":\n docs = docs.filter(\n Q(documentauthor__person__alias__name__icontains=query[\"author\"]) |\n Q(documentauthor__person__email__address__icontains=query[\"author\"])\n )\n elif by == \"group\":\n docs = docs.filter(group__acronym=query[\"group\"])\n elif by == \"area\":\n docs = docs.filter(Q(group__type=\"wg\", group__parent=query[\"area\"]) |\n Q(group=query[\"area\"])).distinct()\n elif by == \"ad\":\n docs = docs.filter(ad=query[\"ad\"])\n elif by == \"state\":\n if query[\"state\"]:\n docs = docs.filter(states=query[\"state\"])\n if query[\"substate\"]:\n docs = docs.filter(tags=query[\"substate\"])\n elif by == \"irtfstate\":\n docs = docs.filter(states=query[\"irtfstate\"])\n elif by == \"stream\":\n docs = docs.filter(stream=query[\"stream\"])\n\n return docs\n\ndef search(request):\n if request.GET:\n # backwards compatibility\n get_params = request.GET.copy()\n if 'activeDrafts' in request.GET:\n get_params['activedrafts'] = request.GET['activeDrafts']\n if 'oldDrafts' in request.GET:\n get_params['olddrafts'] = request.GET['oldDrafts']\n if 'subState' in request.GET:\n get_params['substate'] = request.GET['subState']\n\n form = SearchForm(get_params)\n if not form.is_valid():\n return HttpResponseBadRequest(\"form not valid: %s\" % form.errors)\n\n cache_key = get_search_cache_key(get_params)\n results = cache.get(cache_key)\n if not results:\n results = retrieve_search_results(form)\n cache.set(cache_key, results)\n\n results, meta = prepare_document_table(request, results, get_params)\n meta['searching'] = True\n else:\n form = SearchForm()\n results = []\n meta = { 'by': None, 'searching': False }\n get_params = QueryDict('')\n\n return render(request, 'doc/search/search.html', {\n 'form':form, 'docs':results, 'meta':meta, 'queryargs':get_params.urlencode() },\n )\n\ndef frontpage(request):\n form = SearchForm()\n return render(request, 'doc/frontpage.html', {'form':form})\n\ndef search_for_name(request, name):\n def find_unique(n):\n exact = DocAlias.objects.filter(name=n).first()\n if exact:\n return exact.name\n\n aliases = DocAlias.objects.filter(name__startswith=n)[:2]\n if len(aliases) == 1:\n return aliases[0].name\n\n aliases = DocAlias.objects.filter(name__contains=n)[:2]\n if len(aliases) == 1:\n return aliases[0].name\n\n return None\n\n def cached_redirect(cache_key, url):\n cache.set(cache_key, url, settings.CACHE_MIDDLEWARE_SECONDS)\n return HttpResponseRedirect(url)\n\n n = name\n\n cache_key = _generate_cache_key(request, 'GET', [], settings.CACHE_MIDDLEWARE_KEY_PREFIX)\n if cache_key:\n url = cache.get(cache_key, None)\n if url:\n return HttpResponseRedirect(url)\n\n # chop away extension\n extension_split = re.search(r\"^(.+)\\.(txt|ps|pdf)$\", n)\n if extension_split:\n n = extension_split.group(1)\n\n redirect_to = find_unique(name)\n if redirect_to:\n return cached_redirect(cache_key, urlreverse(\"ietf.doc.views_doc.document_main\", kwargs={ \"name\": redirect_to 
}))\n else:\n # check for embedded rev - this may be ambiguous, so don't\n # chop it off if we don't find a match\n rev_split = re.search(\"^(.+)-([0-9]{2})$\", n)\n if rev_split:\n redirect_to = find_unique(rev_split.group(1))\n if redirect_to:\n rev = rev_split.group(2)\n # check if we can redirect directly to the rev\n if DocHistory.objects.filter(doc__docalias__name=redirect_to, rev=rev).exists():\n return cached_redirect(cache_key, urlreverse(\"ietf.doc.views_doc.document_main\", kwargs={ \"name\": redirect_to, \"rev\": rev }))\n else:\n return cached_redirect(cache_key, urlreverse(\"ietf.doc.views_doc.document_main\", kwargs={ \"name\": redirect_to }))\n\n # build appropriate flags based on string prefix\n doctypenames = DocTypeName.objects.filter(used=True)\n # This would have been more straightforward if document prefixes couldn't\n # contain a dash. Probably, document prefixes shouldn't contain a dash ...\n search_args = \"?name=%s\" % n\n if n.startswith(\"draft\"):\n search_args += \"&rfcs=on&activedrafts=on&olddrafts=on\"\n else:\n for t in doctypenames:\n if t.prefix and n.startswith(t.prefix):\n search_args += \"&doctypes=%s\" % t.slug\n break\n else:\n search_args += \"&rfcs=on&activedrafts=on&olddrafts=on\"\n\n return cached_redirect(cache_key, urlreverse('ietf.doc.views_search.search') + search_args)\n\ndef ad_dashboard_group_type(doc):\n # Return group type for document for dashboard.\n # If doc is not defined return list of all possible\n # group types\n if not doc:\n return ('I-D', 'RFC', 'Conflict Review', 'Status Change', 'Charter')\n if doc.type.slug=='draft':\n if doc.get_state_slug('draft') == 'rfc':\n return 'RFC'\n elif doc.get_state_slug('draft') == 'active' and doc.get_state_slug('draft-iesg') and doc.get_state('draft-iesg').name =='RFC Ed Queue':\n return 'RFC'\n elif doc.get_state_slug('draft') == 'active' and doc.get_state_slug('draft-iesg') and doc.get_state('draft-iesg').name in ('Dead', 'I-D Exists', 'AD is watching'):\n return None\n elif doc.get_state('draft').name in ('Expired', 'Replaced'):\n return None\n else:\n return 'I-D'\n elif doc.type.slug=='conflrev':\n return 'Conflict Review'\n elif doc.type.slug=='statchg':\n return 'Status Change'\n elif doc.type.slug=='charter':\n return \"Charter\"\n else:\n return \"Document\"\n\ndef ad_dashboard_group(doc):\n\n if doc.type.slug=='draft':\n if doc.get_state_slug('draft') == 'rfc':\n return 'RFC'\n elif doc.get_state_slug('draft') == 'active' and doc.get_state_slug('draft-iesg'):\n return '%s Internet-Draft' % doc.get_state('draft-iesg').name\n else:\n return '%s Internet-Draft' % doc.get_state('draft').name\n elif doc.type.slug=='conflrev':\n if doc.get_state_slug('conflrev') in ('appr-reqnopub-sent','appr-noprob-sent'):\n return 'Approved Conflict Review'\n elif doc.get_state_slug('conflrev') in ('appr-reqnopub-pend','appr-noprob-pend','appr-reqnopub-pr','appr-noprob-pr'):\n return \"%s Conflict Review\" % State.objects.get(type__slug='draft-iesg',slug='approved')\n else:\n return '%s Conflict Review' % doc.get_state('conflrev')\n elif doc.type.slug=='statchg':\n if doc.get_state_slug('statchg') in ('appr-sent',):\n return 'Approved Status Change'\n if doc.get_state_slug('statchg') in ('appr-pend','appr-pr'):\n return '%s Status Change' % State.objects.get(type__slug='draft-iesg',slug='approved')\n else:\n return '%s Status Change' % doc.get_state('statchg')\n elif doc.type.slug=='charter':\n if doc.get_state_slug('charter') == 'approved':\n return \"Approved Charter\"\n else:\n return '%s Charter' % 
doc.get_state('charter')\n else:\n return \"Document\"\n\n\ndef shorten_group_name(name):\n for s in [\n \" Internet-Draft\",\n \" Conflict Review\",\n \" Status Change\",\n \" (Internal Steering Group/IAB Review) Charter\",\n \"Charter\",\n ]:\n if name.endswith(s):\n name = name[: -len(s)]\n\n for pat, sub in [\n (\"Writeup\", \"Write-up\"),\n (\"Requested\", \"Req\"),\n (\"Evaluation\", \"Eval\"),\n (\"Publication\", \"Pub\"),\n (\"Waiting\", \"Wait\"),\n (\"Go-Ahead\", \"OK\"),\n (\"Approved-\", \"App, \"),\n (\"announcement\", \"ann.\"),\n (\"IESG Eval - \", \"\"),\n (\"Not currently under review\", \"Not under review\"),\n (\"External Review\", \"Ext. Review\"),\n (r\"IESG Review \\(Charter for Approval, Selected by Secretariat\\)\", \"IESG Review\"),\n (\"Needs Shepherd\", \"Needs Shep.\"),\n (\"Approved\", \"App.\"),\n (\"Replaced\", \"Repl.\"),\n (\"Withdrawn\", \"Withd.\"),\n (\"Chartering/Rechartering\", \"Charter\"),\n (r\"\\(Message to Community, Selected by Secretariat\\)\", \"\")\n ]:\n name = re.sub(pat, sub, name)\n\n return name.strip()\n\n\ndef ad_dashboard_sort_key(doc):\n\n if doc.type.slug=='draft' and doc.get_state_slug('draft') == 'rfc':\n return \"21%04d\" % int(doc.rfc_number())\n if doc.type.slug=='statchg' and doc.get_state_slug('statchg') == 'appr-sent':\n return \"22%d\" % 0 # TODO - get the date of the transition into this state here\n if doc.type.slug=='conflrev' and doc.get_state_slug('conflrev') in ('appr-reqnopub-sent','appr-noprob-sent'):\n return \"23%d\" % 0 # TODO - get the date of the transition into this state here\n if doc.type.slug=='charter' and doc.get_state_slug('charter') == 'approved':\n return \"24%d\" % 0 # TODO - get the date of the transition into this state here\n\n seed = ad_dashboard_group(doc)\n\n if doc.type.slug=='conflrev' and doc.get_state_slug('conflrev') == 'adrev':\n state = State.objects.get(type__slug='draft-iesg',slug='ad-eval')\n return \"1%d%s\" % (state.order,seed)\n\n if doc.type.slug=='charter' and doc.get_state_slug('charter') != 'replaced':\n if doc.get_state_slug('charter') in ('notrev','infrev'):\n return \"100%s\" % seed\n elif doc.get_state_slug('charter') == 'intrev':\n state = State.objects.get(type__slug='draft-iesg',slug='ad-eval')\n return \"1%d%s\" % (state.order,seed)\n elif doc.get_state_slug('charter') == 'extrev':\n state = State.objects.get(type__slug='draft-iesg',slug='lc')\n return \"1%d%s\" % (state.order,seed)\n elif doc.get_state_slug('charter') == 'iesgrev':\n state = State.objects.get(type__slug='draft-iesg',slug='iesg-eva')\n return \"1%d%s\" % (state.order,seed)\n\n if doc.type.slug=='statchg' and doc.get_state_slug('statchg') == 'adrev':\n state = State.objects.get(type__slug='draft-iesg',slug='ad-eval')\n return \"1%d%s\" % (state.order,seed)\n\n if seed.startswith('Needs Shepherd'):\n return \"100%s\" % seed\n if seed.endswith(' Document'):\n seed = seed[:-9]\n elif seed.endswith(' Internet-Draft'):\n seed = seed[:-15]\n elif seed.endswith(' Conflict Review'):\n seed = seed[:-16]\n elif seed.endswith(' Status Change'):\n seed = seed[:-14]\n state = State.objects.filter(type__slug='draft-iesg',name=seed)\n if state:\n ageseconds = 0\n changetime= doc.latest_event(type='changed_document')\n if changetime:\n ad = (timezone.now()-doc.latest_event(type='changed_document').time)\n ageseconds = (ad.microseconds + (ad.seconds + ad.days * 24 * 3600) * 10**6) / 10**6\n return \"1%d%s%s%010d\" % (state[0].order,seed,doc.type.slug,ageseconds)\n\n return \"3%s\" % seed\n\n\ndef ad_workload(request):\n 
delta = datetime.timedelta(days=120)\n right_now = timezone.now()\n\n ads = []\n responsible = Document.objects.values_list(\"ad\", flat=True).distinct()\n for p in Person.objects.filter(\n Q(\n role__name__in=(\"pre-ad\", \"ad\"),\n role__group__type=\"area\",\n role__group__state=\"active\",\n )\n | Q(pk__in=responsible)\n ).distinct():\n if p in get_active_ads():\n ads.append(p)\n\n doctypes = list(\n DocTypeName.objects.filter(used=True)\n .exclude(slug__in=(\"draft\", \"liai-att\"))\n .values_list(\"pk\", flat=True)\n )\n\n up_is_good = {}\n group_types = ad_dashboard_group_type(None)\n groups = {g: {} for g in group_types}\n group_names = {g: [] for g in group_types}\n\n # Prefill groups in preferred sort order\n # FIXME: This should really use the database states instead of replicating the logic\n for id, (g, uig) in enumerate(\n [\n (\"Publication Requested Internet-Draft\", False),\n (\"AD Evaluation Internet-Draft\", False),\n (\"In Last Call Internet-Draft\", True),\n (\"Waiting for Writeup Internet-Draft\", False),\n (\"IESG Evaluation - Defer Internet-Draft\", False),\n (\"IESG Evaluation Internet-Draft\", True),\n (\"Waiting for AD Go-Ahead Internet-Draft\", False),\n (\"Approved-announcement to be sent Internet-Draft\", True),\n (\"Approved-announcement sent Internet-Draft\", True),\n ]\n ):\n groups[\"I-D\"][g] = id\n group_names[\"I-D\"].append(g)\n up_is_good[g] = uig\n\n for id, g in enumerate([\"RFC Ed Queue Internet-Draft\", \"RFC\"]):\n groups[\"RFC\"][g] = id\n group_names[\"RFC\"].append(g)\n up_is_good[g] = True\n\n for id, (g, uig) in enumerate(\n [\n (\"AD Review Conflict Review\", False),\n (\"Needs Shepherd Conflict Review\", False),\n (\"IESG Evaluation Conflict Review\", True),\n (\"Approved Conflict Review\", True),\n (\"Withdrawn Conflict Review\", None),\n ]\n ):\n groups[\"Conflict Review\"][g] = id\n group_names[\"Conflict Review\"].append(g)\n up_is_good[g] = uig\n\n for id, (g, uig) in enumerate(\n [\n (\"Publication Requested Status Change\", False),\n (\"AD Evaluation Status Change\", False),\n (\"In Last Call Status Change\", True),\n (\"Waiting for Writeup Status Change\", False),\n (\"IESG Evaluation Status Change\", True),\n (\"Waiting for AD Go-Ahead Status Change\", False),\n ]\n ):\n groups[\"Status Change\"][g] = id\n group_names[\"Status Change\"].append(g)\n up_is_good[g] = uig\n\n for id, (g, uig) in enumerate(\n [\n (\"Not currently under review Charter\", None),\n (\"Draft Charter Charter\", None),\n (\"Start Chartering/Rechartering (Internal Steering Group/IAB Review) Charter\", False),\n (\"External Review (Message to Community, Selected by Secretariat) Charter\", True),\n (\"IESG Review (Charter for Approval, Selected by Secretariat) Charter\", True),\n (\"Approved Charter\", True),\n (\"Replaced Charter\", None),\n ]\n ):\n groups[\"Charter\"][g] = id\n group_names[\"Charter\"].append(g)\n up_is_good[g] = uig\n\n for ad in ads:\n form = SearchForm(\n {\n \"by\": \"ad\",\n \"ad\": ad.id,\n \"rfcs\": \"on\",\n \"activedrafts\": \"on\",\n \"olddrafts\": \"on\",\n \"doctypes\": doctypes,\n }\n )\n\n ad.dashboard = urlreverse(\n \"ietf.doc.views_search.docs_for_ad\", kwargs=dict(name=ad.full_name_as_key())\n )\n ad.counts = defaultdict(list)\n ad.prev = defaultdict(list)\n ad.doc_now = defaultdict(list)\n ad.doc_prev = defaultdict(list)\n\n for doc in retrieve_search_results(form):\n group_type = ad_dashboard_group_type(doc)\n if group_type and group_type in groups:\n # Right now, anything with group_type \"Document\", such as a bofreq is 
not handled.\n group = ad_dashboard_group(doc)\n if group not in groups[group_type]:\n groups[group_type][group] = len(groups[group_type])\n group_names[group_type].append(group)\n\n inc = len(groups[group_type]) - len(ad.counts[group_type])\n if inc > 0:\n ad.counts[group_type].extend([0] * inc)\n ad.prev[group_type].extend([0] * inc)\n ad.doc_now[group_type].extend(set() for _ in range(inc))\n ad.doc_prev[group_type].extend(set() for _ in range(inc))\n\n ad.counts[group_type][groups[group_type][group]] += 1\n ad.doc_now[group_type][groups[group_type][group]].add(doc)\n\n last_state_event = (\n doc.docevent_set.filter(\n Q(type=\"started_iesg_process\") | Q(type=\"changed_state\")\n )\n .order_by(\"-time\")\n .first()\n )\n if (last_state_event is not None) and (right_now - last_state_event.time) > delta:\n ad.prev[group_type][groups[group_type][group]] += 1\n ad.doc_prev[group_type][groups[group_type][group]].add(doc)\n\n for ad in ads:\n ad.doc_diff = defaultdict(list)\n for gt in group_types:\n inc = len(groups[gt]) - len(ad.counts[gt])\n if inc > 0:\n ad.counts[gt].extend([0] * inc)\n ad.prev[gt].extend([0] * inc)\n ad.doc_now[gt].extend([set()] * inc)\n ad.doc_prev[gt].extend([set()] * inc)\n\n ad.doc_diff[gt].extend([set()] * len(groups[gt]))\n for idx, g in enumerate(group_names[gt]):\n ad.doc_diff[gt][idx] = ad.doc_prev[gt][idx] ^ ad.doc_now[gt][idx]\n\n # Shorten the names of groups\n for gt in group_types:\n for idx, g in enumerate(group_names[gt]):\n group_names[gt][idx] = (\n shorten_group_name(g),\n g,\n up_is_good[g] if g in up_is_good else None,\n )\n\n workload = [\n dict(\n group_type=gt,\n group_names=group_names[gt],\n counts=[\n (\n ad,\n [\n (\n group_names[gt][index],\n ad.counts[gt][index],\n ad.prev[gt][index],\n ad.doc_diff[gt][index],\n )\n for index in range(len(group_names[gt]))\n ],\n )\n for ad in ads\n ],\n sums=[\n (\n group_names[gt][index],\n sum([ad.counts[gt][index] for ad in ads]),\n sum([ad.prev[gt][index] for ad in ads]),\n )\n for index in range(len(group_names[gt]))\n ],\n )\n for gt in group_types\n ]\n\n return render(request, \"doc/ad_list.html\", {\"workload\": workload, \"delta\": delta})\n\ndef docs_for_ad(request, name):\n ad = None\n responsible = Document.objects.values_list('ad', flat=True).distinct()\n for p in Person.objects.filter(Q(role__name__in=(\"pre-ad\", \"ad\"),\n role__group__type=\"area\",\n role__group__state=\"active\")\n | Q(pk__in=responsible)).distinct():\n if name == p.full_name_as_key():\n ad = p\n break\n if not ad:\n raise Http404\n form = SearchForm({'by':'ad','ad': ad.id,\n 'rfcs':'on', 'activedrafts':'on', 'olddrafts':'on',\n 'sort': 'status',\n 'doctypes': list(DocTypeName.objects.filter(used=True).exclude(slug__in=('draft','liai-att')).values_list(\"pk\", flat=True))})\n results, meta = prepare_document_table(request, retrieve_search_results(form), form.data, max_results=500)\n results.sort(key=ad_dashboard_sort_key)\n del meta[\"headers\"][-1]\n\n # filter out some results\n results = [\n r\n for r in results\n if not (\n r.type_id == \"charter\"\n and (\n r.group.state_id == \"abandon\"\n or r.get_state_slug(\"charter\") == \"replaced\"\n )\n )\n and not (\n r.type_id == \"draft\"\n and (\n r.get_state_slug(\"draft-iesg\") == \"dead\"\n or r.get_state_slug(\"draft\") == \"repl\"\n )\n )\n ]\n\n for d in results:\n d.search_heading = ad_dashboard_group(d)\n #\n # Additional content showing docs with blocking positions by this ad\n blocked_docs = []\n if ad in get_active_ads():\n possible_docs = 
Document.objects.filter(Q(states__type=\"draft-iesg\",\n states__slug__in=IESG_BALLOT_ACTIVE_STATES) |\n Q(states__type=\"charter\",\n states__slug__in=IESG_CHARTER_ACTIVE_STATES) |\n Q(states__type__in=(\"statchg\", \"conflrev\"),\n states__slug__in=IESG_STATCHG_CONFLREV_ACTIVE_STATES),\n docevent__ballotpositiondocevent__pos__blocking=True,\n docevent__ballotpositiondocevent__balloter=ad).distinct()\n for doc in possible_docs:\n ballot = doc.active_ballot()\n if not ballot:\n continue\n\n blocking_positions = [p for p in ballot.all_positions() if p.pos.blocking]\n if not blocking_positions or not any( p.balloter==ad for p in blocking_positions ):\n continue\n\n augment_events_with_revision(doc, blocking_positions)\n\n doc.blocking_positions = blocking_positions\n doc.ballot = ballot\n\n blocked_docs.append(doc)\n\n # latest first\n if blocked_docs:\n blocked_docs.sort(key=lambda d: min(p.time for p in d.blocking_positions if p.balloter==ad), reverse=True)\n\n for d in blocked_docs:\n if d.get_base_name() == 'charter-ietf-shmoo-01-04.txt':\n print('Is in list')\n\n return render(request, 'doc/drafts_for_ad.html', {\n 'form':form, 'docs':results, 'meta':meta, 'ad_name': ad.plain_name(), 'blocked_docs': blocked_docs\n })\ndef drafts_in_last_call(request):\n lc_state = State.objects.get(type=\"draft-iesg\", slug=\"lc\").pk\n form = SearchForm({'by':'state','state': lc_state, 'rfcs':'on', 'activedrafts':'on'})\n results, meta = prepare_document_table(request, retrieve_search_results(form), form.data)\n pages = 0\n for doc in results:\n pages += doc.pages\n\n return render(request, 'doc/drafts_in_last_call.html', {\n 'form':form, 'docs':results, 'meta':meta, 'pages':pages\n })\n\ndef drafts_in_iesg_process(request):\n states = State.objects.filter(type=\"draft-iesg\").exclude(slug__in=('idexists', 'pub', 'dead', 'watching', 'rfcqueue'))\n title = \"Documents in IESG process\"\n\n grouped_docs = []\n\n for s in states.order_by(\"order\"):\n docs = Document.objects.filter(type=\"draft\", states=s).distinct().order_by(\"time\").select_related(\"ad\", \"group\", \"group__parent\")\n if docs:\n if s.slug == \"lc\":\n for d in docs:\n e = d.latest_event(LastCallDocEvent, type=\"sent_last_call\")\n d.lc_expires = e.expires if e else datetime.datetime.min\n docs = list(docs)\n docs.sort(key=lambda d: d.lc_expires)\n\n grouped_docs.append((s, docs))\n\n return render(request, 'doc/drafts_in_iesg_process.html', {\n \"grouped_docs\": grouped_docs,\n \"title\": title,\n })\n\ndef recent_drafts(request, days=7):\n slowcache = caches['slowpages']\n cache_key = f'recentdraftsview{days}' \n cached_val = slowcache.get(cache_key)\n if not cached_val:\n since = timezone.now()-datetime.timedelta(days=days)\n state = State.objects.get(type='draft', slug='active')\n events = NewRevisionDocEvent.objects.filter(time__gt=since)\n names = [ e.doc.name for e in events ]\n docs = Document.objects.filter(name__in=names, states=state)\n results, meta = prepare_document_table(request, docs, query={'sort':'-date', }, max_results=len(names))\n slowcache.set(cache_key, [docs, results, meta], 1800)\n else:\n [docs, results, meta] = cached_val\n\n pages = 0\n for doc in results:\n pages += doc.pages or 0\n\n return render(request, 'doc/recent_drafts.html', {\n 'docs':results, 'meta':meta, 'pages':pages, 'days': days,\n })\n\n\ndef index_all_drafts(request):\n # try to be efficient since this view returns a lot of data\n categories = []\n\n for s in (\"active\", \"rfc\", \"expired\", \"repl\", \"auth-rm\", \"ietf-rm\"):\n state = 
State.objects.get(type=\"draft\", slug=s)\n\n if state.slug == \"rfc\":\n heading = \"RFCs\"\n elif state.slug in (\"ietf-rm\", \"auth-rm\"):\n heading = \"Internet-Drafts %s\" % state.name\n else:\n heading = \"%s Internet-Drafts\" % state.name\n\n draft_names = DocAlias.objects.filter(docs__states=state).values_list(\"name\", \"docs__name\")\n\n names = []\n names_to_skip = set()\n for name, doc in draft_names:\n sort_key = name\n if name != doc:\n if not name.startswith(\"rfc\"):\n name, doc = doc, name\n names_to_skip.add(doc)\n\n if name.startswith(\"rfc\"):\n name = name.upper()\n sort_key = '%09d' % (100000000-int(name[3:]))\n\n names.append((name, sort_key))\n\n names.sort(key=lambda t: t[1])\n\n names = [f'<a href=\\\"{urlreverse(\"ietf.doc.views_doc.document_main\", kwargs=dict(name=n))}\\\">{n}</a>'\n for n, __ in names if n not in names_to_skip]\n\n categories.append((state,\n heading,\n len(names),\n \"<br>\".join(names)\n ))\n return render(request, 'doc/index_all_drafts.html', { \"categories\": categories })\n\ndef index_active_drafts(request):\n cache_key = 'doc:index_active_drafts'\n groups = cache.get(cache_key)\n if not groups:\n groups = active_drafts_index_by_group()\n cache.set(cache_key, groups, 15*60)\n return render(request, \"doc/index_active_drafts.html\", { 'groups': groups })\n\ndef ajax_select2_search_docs(request, model_name, doc_type):\n if model_name == \"docalias\":\n model = DocAlias\n else:\n model = Document\n\n q = [w.strip() for w in request.GET.get('q', '').split() if w.strip()]\n\n if not q:\n objs = model.objects.none()\n else:\n qs = model.objects.all()\n\n if model == Document:\n qs = qs.filter(type=doc_type)\n elif model == DocAlias:\n qs = qs.filter(docs__type=doc_type)\n\n for t in q:\n qs = qs.filter(name__icontains=t)\n\n objs = qs.distinct().order_by(\"name\")[:20]\n\n return HttpResponse(select2_id_doc_name_json(model, objs), content_type='application/json')\n",
"path": "ietf/doc/views_search.py"
}
] | diff --git a/ietf/doc/views_search.py b/ietf/doc/views_search.py
index ec540ba393..84baeda69a 100644
--- a/ietf/doc/views_search.py
+++ b/ietf/doc/views_search.py
@@ -461,7 +461,7 @@ def ad_dashboard_sort_key(doc):
def ad_workload(request):
- delta = datetime.timedelta(days=30)
+ delta = datetime.timedelta(days=120)
right_now = timezone.now()
ads = []
|
pyinstaller__pyinstaller-2347 | gi._gobject.option is not part of pygobject
The [GObject hook](https://github.com/pyinstaller/pyinstaller/blob/develop/PyInstaller/hooks/hook-gi.repository.GObject.py) adds a `hiddenimport` for `gi._gobject.option`; however, `gi/_gobject/option.py` is not part of pygobject.
This leads to the following warning when packaging a Gtk application:
```
4813 INFO: Loading module hook "hook-gi.py"...
4818 INFO: Loading module hook "hook-gi.repository.GObject.py"...
4926 INFO: Processing pre-safe import module hook gi.repository.GLib
4963 WARNING: Hidden import "gi._gobject.option" not found!
```
Browsing through the [pygobject git history](https://git.gnome.org/browse/pygobject/), I found commit [8afd7e8](https://git.gnome.org/browse/pygobject/commit/gi/_option.py?id=8afd7e880a72a44e6ea46c763bab82146fd75c96), which moved `gi/_glib/option.py` into `gi/_option.py`.
Replacing the `hiddenimport` with `hiddenimports += ['gi._option', 'gi._gobject']` silences the issue. However, I do not yet understand enough about pygobject and pyinstaller to know if this is the right thing to do.
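For concreteness, a minimal sketch of that workaround applied to the hook file — the two-module `hiddenimports` list is the reporter's suggestion, not a confirmed upstream fix, and the hook's existing `get_gi_typelibs` call is kept as-is:
```
# PyInstaller/hooks/hook-gi.repository.GObject.py (sketch of the workaround above)
from PyInstaller.utils.hooks import get_gi_typelibs

# Collect the GObject typelib plus its binaries/data, as the hook already does.
binaries, datas, hiddenimports = get_gi_typelibs('GObject', '2.0')

# gi/_gobject/option.py no longer exists; option handling moved to gi/_option.py.
hiddenimports += ['gi._option', 'gi._gobject']
```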
| [
{
"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2016, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\"\"\"\nImport hook for GObject https://developer.gnome.org/gobject/stable/ from the GLib\nlibrary https://wiki.gnome.org/Projects/GLib introspected through PyGobject https://wiki.gnome.org/PyGObject\nvia the GObject Introspection middleware layer https://wiki.gnome.org/Projects/GObjectIntrospection\n\nTested with GLib 2.44.1, PyGObject 3.16.2, and GObject Introspection 1.44.0 on Mac OS X 10.10 and\nGLib 2.42.2, PyGObject 3.14.0, and GObject Introspection 1.42 on Windows 7\n\"\"\"\n\nfrom PyInstaller.utils.hooks import get_gi_typelibs\n\nbinaries, datas, hiddenimports = get_gi_typelibs('GObject', '2.0')\n\nhiddenimports += ['gi._gobject.option', 'gi._gobject']\n",
"path": "PyInstaller/hooks/hook-gi.repository.GObject.py"
}
] | [
{
"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2016, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\"\"\"\nImport hook for GObject https://developer.gnome.org/gobject/stable/ from the GLib\nlibrary https://wiki.gnome.org/Projects/GLib introspected through PyGobject https://wiki.gnome.org/PyGObject\nvia the GObject Introspection middleware layer https://wiki.gnome.org/Projects/GObjectIntrospection\n\nTested with GLib 2.44.1, PyGObject 3.16.2, and GObject Introspection 1.44.0 on Mac OS X 10.10 and\nGLib 2.42.2, PyGObject 3.14.0, and GObject Introspection 1.42 on Windows 7\n\"\"\"\n\nfrom PyInstaller.utils.hooks import get_gi_typelibs\n\nbinaries, datas, hiddenimports = get_gi_typelibs('GObject', '2.0')\n\nhiddenimports += ['gi._gobject']\n",
"path": "PyInstaller/hooks/hook-gi.repository.GObject.py"
}
] | diff --git a/PyInstaller/hooks/hook-gi.repository.GObject.py b/PyInstaller/hooks/hook-gi.repository.GObject.py
index 724c108563..3c9ae7a74e 100644
--- a/PyInstaller/hooks/hook-gi.repository.GObject.py
+++ b/PyInstaller/hooks/hook-gi.repository.GObject.py
@@ -19,4 +19,4 @@
binaries, datas, hiddenimports = get_gi_typelibs('GObject', '2.0')
-hiddenimports += ['gi._gobject.option', 'gi._gobject']
+hiddenimports += ['gi._gobject']
|
statsmodels__statsmodels-507 | numdifftools dependency
Original Launchpad bug 653902: https://bugs.launchpad.net/statsmodels/+bug/653902
Reported by: vincent-vincentdavis (Vincent Davis).
statsmodels/__init__.py imports tsa, which then raises an exception from statsmodels/tsa/var.py: `raise Warning("You need to install numdifftools to try out the AR model")`.
Should numdifftools be a dependency for all of statsmodels?
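For reference, the usual way to avoid a hard dependency like this is to guard the import and only fail when the functionality is actually used — which is the direction the fix in this entry takes (it wraps the `numdifftools` import in a try/except). A minimal sketch, with a hypothetical helper name:
```
# Sketch: make numdifftools an optional dependency instead of a hard import.
try:
    import numdifftools as ndt
except ImportError:
    ndt = None  # numerical Jacobian/Hessian helpers become unavailable


def _require_numdifftools():
    # Call this at the top of score()/hessian() so importing statsmodels.tsa
    # never fails; only the methods that genuinely need numdifftools do.
    if ndt is None:
        raise ImportError("numdifftools is required for the numerical score/hessian; "
                          "install it to use the AR model")
```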
| [
{
"content": "\"\"\"Base Classes for Likelihood Models in time series analysis\n\nWarning: imports numdifftools\n\n\n\nCreated on Sun Oct 10 15:00:47 2010\n\nAuthor: josef-pktd\nLicense: BSD\n\n\"\"\"\n\nimport numpy as np\n\nimport numdifftools as ndt\n\nfrom statsmodels.base.model import LikelihoodModel\n\n#copied from sandbox/regression/mle.py\n#TODO: I take it this is only a stub and should be included in another\n# model class?\nclass TSMLEModel(LikelihoodModel):\n \"\"\"\n univariate time series model for estimation with maximum likelihood\n\n Note: This is not working yet\n \"\"\"\n\n def __init__(self, endog, exog=None):\n #need to override p,q (nar,nma) correctly\n super(TSMLEModel, self).__init__(endog, exog)\n #set default arma(1,1)\n self.nar = 1\n self.nma = 1\n #self.initialize()\n\n def geterrors(self, params):\n raise NotImplementedError\n\n def loglike(self, params):\n \"\"\"\n Loglikelihood for timeseries model\n\n Notes\n -----\n needs to be overwritten by subclass\n \"\"\"\n raise NotImplementedError\n\n\n def score(self, params):\n \"\"\"\n Score vector for Arma model\n \"\"\"\n #return None\n #print params\n jac = ndt.Jacobian(self.loglike, stepMax=1e-4)\n return jac(params)[-1]\n\n def hessian(self, params):\n \"\"\"\n Hessian of arma model. Currently uses numdifftools\n \"\"\"\n #return None\n Hfun = ndt.Jacobian(self.score, stepMax=1e-4)\n return Hfun(params)[-1]\n\n\n def fit(self, start_params=None, maxiter=5000, method='fmin', tol=1e-08):\n '''estimate model by minimizing negative loglikelihood\n\n does this need to be overwritten ?\n '''\n if start_params is None and hasattr(self, '_start_params'):\n start_params = self._start_params\n #start_params = np.concatenate((0.05*np.ones(self.nar + self.nma), [1]))\n mlefit = super(TSMLEModel, self).fit(start_params=start_params,\n maxiter=maxiter, method=method, tol=tol)\n return mlefit\n",
"path": "statsmodels/tsa/mlemodel.py"
}
] | [
{
"content": "\"\"\"Base Classes for Likelihood Models in time series analysis\n\nWarning: imports numdifftools\n\n\n\nCreated on Sun Oct 10 15:00:47 2010\n\nAuthor: josef-pktd\nLicense: BSD\n\n\"\"\"\n\nimport numpy as np\n\ntry:\n import numdifftools as ndt\nexcept:\n pass\n\nfrom statsmodels.base.model import LikelihoodModel\n\n#copied from sandbox/regression/mle.py\n#TODO: I take it this is only a stub and should be included in another\n# model class?\nclass TSMLEModel(LikelihoodModel):\n \"\"\"\n univariate time series model for estimation with maximum likelihood\n\n Note: This is not working yet\n \"\"\"\n\n def __init__(self, endog, exog=None):\n #need to override p,q (nar,nma) correctly\n super(TSMLEModel, self).__init__(endog, exog)\n #set default arma(1,1)\n self.nar = 1\n self.nma = 1\n #self.initialize()\n\n def geterrors(self, params):\n raise NotImplementedError\n\n def loglike(self, params):\n \"\"\"\n Loglikelihood for timeseries model\n\n Notes\n -----\n needs to be overwritten by subclass\n \"\"\"\n raise NotImplementedError\n\n\n def score(self, params):\n \"\"\"\n Score vector for Arma model\n \"\"\"\n #return None\n #print params\n jac = ndt.Jacobian(self.loglike, stepMax=1e-4)\n return jac(params)[-1]\n\n def hessian(self, params):\n \"\"\"\n Hessian of arma model. Currently uses numdifftools\n \"\"\"\n #return None\n Hfun = ndt.Jacobian(self.score, stepMax=1e-4)\n return Hfun(params)[-1]\n\n\n def fit(self, start_params=None, maxiter=5000, method='fmin', tol=1e-08):\n '''estimate model by minimizing negative loglikelihood\n\n does this need to be overwritten ?\n '''\n if start_params is None and hasattr(self, '_start_params'):\n start_params = self._start_params\n #start_params = np.concatenate((0.05*np.ones(self.nar + self.nma), [1]))\n mlefit = super(TSMLEModel, self).fit(start_params=start_params,\n maxiter=maxiter, method=method, tol=tol)\n return mlefit\n",
"path": "statsmodels/tsa/mlemodel.py"
}
] | diff --git a/statsmodels/tsa/mlemodel.py b/statsmodels/tsa/mlemodel.py
index 1fd064294cf..698e19f5710 100644
--- a/statsmodels/tsa/mlemodel.py
+++ b/statsmodels/tsa/mlemodel.py
@@ -13,7 +13,10 @@
import numpy as np
-import numdifftools as ndt
+try:
+ import numdifftools as ndt
+except:
+ pass
from statsmodels.base.model import LikelihoodModel
|
getsentry__sentry-python-337 | Unified sentry-sdk integration does not support adding a stack trace via the Python logger using 'stack': True in the extra dict.
Migrating from raven to the unified sentry-sdk breaks the extended Python logging functionality that raven provided: `_extra_from_record` excludes the keywords 'stack' and 'data'. Is there a known workaround?
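For context, the raven-era pattern being migrated looks roughly like this — the logger name and payload are illustrative, and plain `logging` simply carries the extras through to whatever handler is attached:
```
import logging

logging.basicConfig(level=logging.ERROR)
logger = logging.getLogger("myapp")  # hypothetical logger name

# raven's logging handler gave special meaning to these keys in `extra`:
#   stack=True  -> attach the current stack trace to the event
#   data={...}  -> attach structured extra data
logger.error("operation failed", extra={"stack": True, "data": {"order_id": 42}})
```
With the unified SDK (before the change in this entry), `_extra_from_record` filters both keys out via `COMMON_RECORD_ATTRS`, so they never reach the Sentry event.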
| [
{
"content": "from __future__ import absolute_import\n\nimport logging\nimport datetime\n\nfrom sentry_sdk.hub import Hub\nfrom sentry_sdk.utils import (\n to_string,\n event_from_exception,\n current_stacktrace,\n capture_internal_exceptions,\n)\nfrom sentry_sdk.integrations import Integration\n\nif False:\n from logging import LogRecord\n from typing import Any\n from typing import Dict\n from typing import Optional\n\nDEFAULT_LEVEL = logging.INFO\nDEFAULT_EVENT_LEVEL = logging.ERROR\n\n_IGNORED_LOGGERS = set([\"sentry_sdk.errors\"])\n\n\ndef ignore_logger(name):\n # type: (str) -> None\n \"\"\"This disables the breadcrumb integration for a logger of a specific\n name. This primary use is for some integrations to disable breadcrumbs\n of this integration.\n \"\"\"\n _IGNORED_LOGGERS.add(name)\n\n\nclass LoggingIntegration(Integration):\n identifier = \"logging\"\n\n def __init__(self, level=DEFAULT_LEVEL, event_level=DEFAULT_EVENT_LEVEL):\n # type: (int, int) -> None\n self._handler = None\n self._breadcrumb_handler = None\n\n if level is not None:\n self._breadcrumb_handler = BreadcrumbHandler(level=level)\n\n if event_level is not None:\n self._handler = EventHandler(level=event_level)\n\n def _handle_record(self, record):\n # type: (LogRecord) -> None\n if self._handler is not None and record.levelno >= self._handler.level:\n self._handler.handle(record)\n\n if (\n self._breadcrumb_handler is not None\n and record.levelno >= self._breadcrumb_handler.level\n ):\n self._breadcrumb_handler.handle(record)\n\n @staticmethod\n def setup_once():\n # type: () -> None\n old_callhandlers = logging.Logger.callHandlers # type: ignore\n\n def sentry_patched_callhandlers(self, record):\n # type: (Any, LogRecord) -> Any\n try:\n return old_callhandlers(self, record)\n finally:\n # This check is done twice, once also here before we even get\n # the integration. 
Otherwise we have a high chance of getting\n # into a recursion error when the integration is resolved\n # (this also is slower).\n if record.name not in _IGNORED_LOGGERS:\n integration = Hub.current.get_integration(LoggingIntegration)\n if integration is not None:\n integration._handle_record(record)\n\n logging.Logger.callHandlers = sentry_patched_callhandlers # type: ignore\n\n\ndef _can_record(record):\n # type: (LogRecord) -> bool\n return record.name not in _IGNORED_LOGGERS\n\n\ndef _breadcrumb_from_record(record):\n # type: (LogRecord) -> Dict[str, Any]\n return {\n \"ty\": \"log\",\n \"level\": _logging_to_event_level(record.levelname),\n \"category\": record.name,\n \"message\": record.message,\n \"timestamp\": datetime.datetime.fromtimestamp(record.created),\n \"data\": _extra_from_record(record),\n }\n\n\ndef _logging_to_event_level(levelname):\n # type: (str) -> str\n return {\"critical\": \"fatal\"}.get(levelname.lower(), levelname.lower())\n\n\nCOMMON_RECORD_ATTRS = frozenset(\n (\n \"args\",\n \"created\",\n \"data\",\n \"exc_info\",\n \"exc_text\",\n \"filename\",\n \"funcName\",\n \"levelname\",\n \"levelno\",\n \"linenno\",\n \"lineno\",\n \"message\",\n \"module\",\n \"msecs\",\n \"msg\",\n \"name\",\n \"pathname\",\n \"process\",\n \"processName\",\n \"relativeCreated\",\n \"stack\",\n \"tags\",\n \"thread\",\n \"threadName\",\n )\n)\n\n\ndef _extra_from_record(record):\n # type: (LogRecord) -> Dict[str, None]\n return {\n k: v\n for k, v in vars(record).items()\n if k not in COMMON_RECORD_ATTRS and not k.startswith(\"_\")\n }\n\n\nclass EventHandler(logging.Handler, object):\n def emit(self, record):\n # type: (LogRecord) -> Any\n with capture_internal_exceptions():\n self.format(record)\n return self._emit(record)\n\n def _emit(self, record):\n # type: (LogRecord) -> None\n if not _can_record(record):\n return\n\n hub = Hub.current\n if hub.client is None:\n return\n\n hint = None # type: Optional[Dict[str, Any]]\n client_options = hub.client.options\n\n # exc_info might be None or (None, None, None)\n if record.exc_info is not None and record.exc_info[0] is not None:\n event, hint = event_from_exception(\n record.exc_info,\n client_options=client_options,\n mechanism={\"type\": \"logging\", \"handled\": True},\n )\n elif record.exc_info and record.exc_info[0] is None:\n event = {}\n hint = None\n with capture_internal_exceptions():\n event[\"threads\"] = [\n {\n \"stacktrace\": current_stacktrace(client_options[\"with_locals\"]),\n \"crashed\": False,\n \"current\": True,\n }\n ]\n else:\n event = {}\n\n event[\"level\"] = _logging_to_event_level(record.levelname)\n event[\"logger\"] = record.name\n event[\"logentry\"] = {\"message\": to_string(record.msg), \"params\": record.args}\n event[\"extra\"] = _extra_from_record(record)\n\n hub.capture_event(event, hint=hint)\n\n\n# Legacy name\nSentryHandler = EventHandler\n\n\nclass BreadcrumbHandler(logging.Handler, object):\n def emit(self, record):\n # type: (LogRecord) -> Any\n with capture_internal_exceptions():\n self.format(record)\n return self._emit(record)\n\n def _emit(self, record):\n # type: (LogRecord) -> None\n if not _can_record(record):\n return\n\n Hub.current.add_breadcrumb(\n _breadcrumb_from_record(record), hint={\"log_record\": record}\n )\n",
"path": "sentry_sdk/integrations/logging.py"
}
] | [
{
"content": "from __future__ import absolute_import\n\nimport logging\nimport datetime\n\nfrom sentry_sdk.hub import Hub\nfrom sentry_sdk.utils import (\n to_string,\n event_from_exception,\n current_stacktrace,\n capture_internal_exceptions,\n)\nfrom sentry_sdk.integrations import Integration\n\nif False:\n from logging import LogRecord\n from typing import Any\n from typing import Dict\n from typing import Optional\n\nDEFAULT_LEVEL = logging.INFO\nDEFAULT_EVENT_LEVEL = logging.ERROR\n\n_IGNORED_LOGGERS = set([\"sentry_sdk.errors\"])\n\n\ndef ignore_logger(name):\n # type: (str) -> None\n \"\"\"This disables the breadcrumb integration for a logger of a specific\n name. This primary use is for some integrations to disable breadcrumbs\n of this integration.\n \"\"\"\n _IGNORED_LOGGERS.add(name)\n\n\nclass LoggingIntegration(Integration):\n identifier = \"logging\"\n\n def __init__(self, level=DEFAULT_LEVEL, event_level=DEFAULT_EVENT_LEVEL):\n # type: (int, int) -> None\n self._handler = None\n self._breadcrumb_handler = None\n\n if level is not None:\n self._breadcrumb_handler = BreadcrumbHandler(level=level)\n\n if event_level is not None:\n self._handler = EventHandler(level=event_level)\n\n def _handle_record(self, record):\n # type: (LogRecord) -> None\n if self._handler is not None and record.levelno >= self._handler.level:\n self._handler.handle(record)\n\n if (\n self._breadcrumb_handler is not None\n and record.levelno >= self._breadcrumb_handler.level\n ):\n self._breadcrumb_handler.handle(record)\n\n @staticmethod\n def setup_once():\n # type: () -> None\n old_callhandlers = logging.Logger.callHandlers # type: ignore\n\n def sentry_patched_callhandlers(self, record):\n # type: (Any, LogRecord) -> Any\n try:\n return old_callhandlers(self, record)\n finally:\n # This check is done twice, once also here before we even get\n # the integration. 
Otherwise we have a high chance of getting\n # into a recursion error when the integration is resolved\n # (this also is slower).\n if record.name not in _IGNORED_LOGGERS:\n integration = Hub.current.get_integration(LoggingIntegration)\n if integration is not None:\n integration._handle_record(record)\n\n logging.Logger.callHandlers = sentry_patched_callhandlers # type: ignore\n\n\ndef _can_record(record):\n # type: (LogRecord) -> bool\n return record.name not in _IGNORED_LOGGERS\n\n\ndef _breadcrumb_from_record(record):\n # type: (LogRecord) -> Dict[str, Any]\n return {\n \"ty\": \"log\",\n \"level\": _logging_to_event_level(record.levelname),\n \"category\": record.name,\n \"message\": record.message,\n \"timestamp\": datetime.datetime.fromtimestamp(record.created),\n \"data\": _extra_from_record(record),\n }\n\n\ndef _logging_to_event_level(levelname):\n # type: (str) -> str\n return {\"critical\": \"fatal\"}.get(levelname.lower(), levelname.lower())\n\n\nCOMMON_RECORD_ATTRS = frozenset(\n (\n \"args\",\n \"created\",\n \"exc_info\",\n \"exc_text\",\n \"filename\",\n \"funcName\",\n \"levelname\",\n \"levelno\",\n \"linenno\",\n \"lineno\",\n \"message\",\n \"module\",\n \"msecs\",\n \"msg\",\n \"name\",\n \"pathname\",\n \"process\",\n \"processName\",\n \"relativeCreated\",\n \"stack\",\n \"tags\",\n \"thread\",\n \"threadName\",\n )\n)\n\n\ndef _extra_from_record(record):\n # type: (LogRecord) -> Dict[str, None]\n return {\n k: v\n for k, v in vars(record).items()\n if k not in COMMON_RECORD_ATTRS and not k.startswith(\"_\")\n }\n\n\nclass EventHandler(logging.Handler, object):\n def emit(self, record):\n # type: (LogRecord) -> Any\n with capture_internal_exceptions():\n self.format(record)\n return self._emit(record)\n\n def _emit(self, record):\n # type: (LogRecord) -> None\n if not _can_record(record):\n return\n\n hub = Hub.current\n if hub.client is None:\n return\n\n hint = None # type: Optional[Dict[str, Any]]\n client_options = hub.client.options\n\n # exc_info might be None or (None, None, None)\n if record.exc_info is not None and record.exc_info[0] is not None:\n event, hint = event_from_exception(\n record.exc_info,\n client_options=client_options,\n mechanism={\"type\": \"logging\", \"handled\": True},\n )\n elif record.exc_info and record.exc_info[0] is None:\n event = {}\n hint = None\n with capture_internal_exceptions():\n event[\"threads\"] = [\n {\n \"stacktrace\": current_stacktrace(client_options[\"with_locals\"]),\n \"crashed\": False,\n \"current\": True,\n }\n ]\n else:\n event = {}\n\n event[\"level\"] = _logging_to_event_level(record.levelname)\n event[\"logger\"] = record.name\n event[\"logentry\"] = {\"message\": to_string(record.msg), \"params\": record.args}\n event[\"extra\"] = _extra_from_record(record)\n\n hub.capture_event(event, hint=hint)\n\n\n# Legacy name\nSentryHandler = EventHandler\n\n\nclass BreadcrumbHandler(logging.Handler, object):\n def emit(self, record):\n # type: (LogRecord) -> Any\n with capture_internal_exceptions():\n self.format(record)\n return self._emit(record)\n\n def _emit(self, record):\n # type: (LogRecord) -> None\n if not _can_record(record):\n return\n\n Hub.current.add_breadcrumb(\n _breadcrumb_from_record(record), hint={\"log_record\": record}\n )\n",
"path": "sentry_sdk/integrations/logging.py"
}
] | diff --git a/sentry_sdk/integrations/logging.py b/sentry_sdk/integrations/logging.py
index a8b02b588e..60fba0dc74 100644
--- a/sentry_sdk/integrations/logging.py
+++ b/sentry_sdk/integrations/logging.py
@@ -106,7 +106,6 @@ def _logging_to_event_level(levelname):
(
"args",
"created",
- "data",
"exc_info",
"exc_text",
"filename",
|
zulip__zulip-18598 | Pivotal integration exception
Hi,
I've added the Pivotal integration, and from time to time I receive the two error e-mails shown below when working in Pivotal. I'm running Ubuntu 20.04.
If you need more information, I'd be happy to help.
```
Logger django.request, from module django.utils.log line 224:
Error generated by PivotalMessenger <pivotal-bot@***> (Member) on *** deployment
No stack trace available
Deployed code:
- git: None
- ZULIP_VERSION: 4.2
Request info:
- path: /api/v1/external/pivotal
- POST: {}
- REMOTE_ADDR: "35.184.18.147"
- QUERY_STRING: "api_key=******&stream=******&topic=******"
- SERVER_NAME: ""
```
```
Logger zerver.middleware.json_error_handler, from module zerver.middleware line 450:
Error generated by PivotalMessenger <pivotal-bot@***> (Member) on *** deployment
Traceback (most recent call last):
File "/usr/lib/python3.8/xml/etree/ElementTree.py", line 1693, in feed
self.parser.Parse(data, 0)
xml.parsers.expat.ExpatError: not well-formed (invalid token): line 1, column 0
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "./zerver/webhooks/pivotal/view.py", line 172, in api_pivotal_webhook
subject, content = api_pivotal_webhook_v3(request, user_profile)
File "./zerver/webhooks/pivotal/view.py", line 19, in api_pivotal_webhook_v3
payload = xml_fromstring(request.body)
File "/srv/zulip-venv-cache/9d0f5ac272f4e644b222ed65b0b5a996616a215f/zulip-py3-venv/lib/python3.8/site-packages/defusedxml/common.py", line 131, in fromstring
parser.feed(text)
File "/usr/lib/python3.8/xml/etree/ElementTree.py", line 1695, in feed
self._raiseerror(v)
File "/usr/lib/python3.8/xml/etree/ElementTree.py", line 1602, in _raiseerror
raise err
File "<string>", line None
xml.etree.ElementTree.ParseError: not well-formed (invalid token): line 1, column 0
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/srv/zulip-venv-cache/9d0f5ac272f4e644b222ed65b0b5a996616a215f/zulip-py3-venv/lib/python3.8/site-packages/django/core/handlers/base.py", line 181, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/srv/zulip-venv-cache/9d0f5ac272f4e644b222ed65b0b5a996616a215f/zulip-py3-venv/lib/python3.8/site-packages/django/views/decorators/csrf.py", line 54, in wrapped_view
return view_func(*args, **kwargs)
File "./zerver/lib/request.py", line 390, in _wrapped_view_func
return view_func(request, *args, **kwargs)
File "./zerver/decorator.py", line 354, in _wrapped_func_arguments
raise err
File "./zerver/decorator.py", line 334, in _wrapped_func_arguments
return view_func(request, user_profile, *args, **kwargs)
File "./zerver/lib/request.py", line 390, in _wrapped_view_func
return view_func(request, *args, **kwargs)
File "./zerver/webhooks/pivotal/view.py", line 175, in api_pivotal_webhook
subject, content = api_pivotal_webhook_v5(request, user_profile)
File "./zerver/webhooks/pivotal/view.py", line 87, in api_pivotal_webhook_v5
story_url = primary_resources["url"]
KeyError: 'url'
Deployed code:
- git: None
- ZULIP_VERSION: 4.2
Request info:
- path: /api/v1/external/pivotal
- POST: {}
- REMOTE_ADDR: "35.184.18.147"
- QUERY_STRING: "api_key=******&stream=******&topic=******"
- SERVER_NAME: ""
```
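The second traceback bottoms out in the v5 handler at `primary_resources["url"]`. The change merged for this entry (see the diff below) registers `label_create_activity` as a known-but-unsupported event type; independently of that, a more defensive version of the lookup itself would avoid the hard KeyError — a sketch with a hypothetical helper name, not the merged change:
```
def primary_story_fields(payload):
    # Sketch: pull the v5 story fields defensively instead of hard-indexing.
    primary = payload.get("primary_resources") or [{}]
    resource = primary[0]
    return (
        resource.get("url", ""),             # "" instead of raising KeyError: 'url'
        resource.get("id", "unknown"),
        resource.get("name", "Unknown story"),
    )
```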
| [
{
"content": "\"\"\"Webhooks for external integrations.\"\"\"\nimport re\nfrom typing import Any, Dict, List, Optional, Tuple\n\nimport orjson\nfrom defusedxml.ElementTree import fromstring as xml_fromstring\nfrom django.http import HttpRequest, HttpResponse\nfrom django.utils.translation import gettext as _\n\nfrom zerver.decorator import webhook_view\nfrom zerver.lib.exceptions import UnsupportedWebhookEventType\nfrom zerver.lib.request import has_request_variables\nfrom zerver.lib.response import json_error, json_success\nfrom zerver.lib.webhooks.common import check_send_webhook_message\nfrom zerver.models import UserProfile\n\n\ndef api_pivotal_webhook_v3(request: HttpRequest, user_profile: UserProfile) -> Tuple[str, str]:\n payload = xml_fromstring(request.body)\n\n def get_text(attrs: List[str]) -> str:\n start = payload\n try:\n for attr in attrs:\n start = start.find(attr)\n return start.text\n except AttributeError:\n return \"\"\n\n event_type = payload.find(\"event_type\").text\n description = payload.find(\"description\").text\n project_id = payload.find(\"project_id\").text\n story_id = get_text([\"stories\", \"story\", \"id\"])\n # Ugh, the URL in the XML data is not a clickable URL that works for the user\n # so we try to build one that the user can actually click on\n url = f\"https://www.pivotaltracker.com/s/projects/{project_id}/stories/{story_id}\"\n\n # Pivotal doesn't tell us the name of the story, but it's usually in the\n # description in quotes as the first quoted string\n name_re = re.compile(r'[^\"]+\"([^\"]+)\".*')\n match = name_re.match(description)\n if match and len(match.groups()):\n name = match.group(1)\n else:\n name = \"Story changed\" # Failed for an unknown reason, show something\n more_info = f\" [(view)]({url}).\"\n\n if event_type == \"story_update\":\n subject = name\n content = description + more_info\n elif event_type == \"note_create\":\n subject = \"Comment added\"\n content = description + more_info\n elif event_type == \"story_create\":\n issue_desc = get_text([\"stories\", \"story\", \"description\"])\n issue_type = get_text([\"stories\", \"story\", \"story_type\"])\n issue_status = get_text([\"stories\", \"story\", \"current_state\"])\n estimate = get_text([\"stories\", \"story\", \"estimate\"])\n if estimate != \"\":\n estimate = f\" worth {estimate} story points\"\n subject = name\n content = f\"{description} ({issue_status} {issue_type}{estimate}):\\n\\n~~~ quote\\n{issue_desc}\\n~~~\\n\\n{more_info}\"\n return subject, content\n\n\nUNSUPPORTED_EVENT_TYPES = [\n \"task_create_activity\",\n \"comment_delete_activity\",\n \"task_delete_activity\",\n \"task_update_activity\",\n \"story_move_from_project_activity\",\n \"story_delete_activity\",\n \"story_move_into_project_activity\",\n \"epic_update_activity\",\n]\n\n\ndef api_pivotal_webhook_v5(request: HttpRequest, user_profile: UserProfile) -> Tuple[str, str]:\n payload = orjson.loads(request.body)\n\n event_type = payload[\"kind\"]\n\n project_name = payload[\"project\"][\"name\"]\n project_id = payload[\"project\"][\"id\"]\n\n primary_resources = payload[\"primary_resources\"][0]\n story_url = primary_resources[\"url\"]\n story_type = primary_resources.get(\"story_type\")\n story_id = primary_resources[\"id\"]\n story_name = primary_resources[\"name\"]\n\n performed_by = payload.get(\"performed_by\", {}).get(\"name\", \"\")\n\n story_info = f\"[{project_name}](https://www.pivotaltracker.com/s/projects/{project_id}): [{story_name}]({story_url})\"\n\n changes = payload.get(\"changes\", 
[])\n\n content = \"\"\n subject = f\"#{story_id}: {story_name}\"\n\n def extract_comment(change: Dict[str, Any]) -> Optional[str]:\n if change.get(\"kind\") == \"comment\":\n return change.get(\"new_values\", {}).get(\"text\", None)\n return None\n\n if event_type == \"story_update_activity\":\n # Find the changed valued and build a message\n content += f\"{performed_by} updated {story_info}:\\n\"\n for change in changes:\n old_values = change.get(\"original_values\", {})\n new_values = change[\"new_values\"]\n\n if \"current_state\" in old_values and \"current_state\" in new_values:\n content += \"* state changed from **{}** to **{}**\\n\".format(\n old_values[\"current_state\"], new_values[\"current_state\"]\n )\n if \"estimate\" in old_values and \"estimate\" in new_values:\n old_estimate = old_values.get(\"estimate\", None)\n if old_estimate is None:\n estimate = \"is now\"\n else:\n estimate = f\"changed from {old_estimate} to\"\n new_estimate = new_values[\"estimate\"] if new_values[\"estimate\"] is not None else \"0\"\n content += f\"* estimate {estimate} **{new_estimate} points**\\n\"\n if \"story_type\" in old_values and \"story_type\" in new_values:\n content += \"* type changed from **{}** to **{}**\\n\".format(\n old_values[\"story_type\"], new_values[\"story_type\"]\n )\n\n comment = extract_comment(change)\n if comment is not None:\n content += f\"* Comment added:\\n~~~quote\\n{comment}\\n~~~\\n\"\n\n elif event_type == \"comment_create_activity\":\n for change in changes:\n comment = extract_comment(change)\n if comment is not None:\n content += (\n f\"{performed_by} added a comment to {story_info}:\\n~~~quote\\n{comment}\\n~~~\"\n )\n elif event_type == \"story_create_activity\":\n content += f\"{performed_by} created {story_type}: {story_info}\\n\"\n for change in changes:\n new_values = change.get(\"new_values\", {})\n if \"current_state\" in new_values:\n content += \"* State is **{}**\\n\".format(new_values[\"current_state\"])\n if \"description\" in new_values:\n content += \"* Description is\\n\\n> {}\".format(new_values[\"description\"])\n elif event_type == \"story_move_activity\":\n content = f\"{performed_by} moved {story_info}\"\n for change in changes:\n old_values = change.get(\"original_values\", {})\n new_values = change[\"new_values\"]\n if \"current_state\" in old_values and \"current_state\" in new_values:\n content += \" from **{}** to **{}**.\".format(\n old_values[\"current_state\"], new_values[\"current_state\"]\n )\n elif event_type in UNSUPPORTED_EVENT_TYPES:\n # Known but unsupported Pivotal event types\n pass\n else:\n raise UnsupportedWebhookEventType(event_type)\n\n return subject, content\n\n\n@webhook_view(\"Pivotal\")\n@has_request_variables\ndef api_pivotal_webhook(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:\n subject = content = None\n try:\n subject, content = api_pivotal_webhook_v3(request, user_profile)\n except Exception:\n # Attempt to parse v5 JSON payload\n subject, content = api_pivotal_webhook_v5(request, user_profile)\n\n if not content:\n return json_error(_(\"Unable to handle Pivotal payload\"))\n\n check_send_webhook_message(request, user_profile, subject, content)\n return json_success()\n",
"path": "zerver/webhooks/pivotal/view.py"
}
] | [
{
"content": "\"\"\"Webhooks for external integrations.\"\"\"\nimport re\nfrom typing import Any, Dict, List, Optional, Tuple\n\nimport orjson\nfrom defusedxml.ElementTree import fromstring as xml_fromstring\nfrom django.http import HttpRequest, HttpResponse\nfrom django.utils.translation import gettext as _\n\nfrom zerver.decorator import webhook_view\nfrom zerver.lib.exceptions import UnsupportedWebhookEventType\nfrom zerver.lib.request import has_request_variables\nfrom zerver.lib.response import json_error, json_success\nfrom zerver.lib.webhooks.common import check_send_webhook_message\nfrom zerver.models import UserProfile\n\n\ndef api_pivotal_webhook_v3(request: HttpRequest, user_profile: UserProfile) -> Tuple[str, str]:\n payload = xml_fromstring(request.body)\n\n def get_text(attrs: List[str]) -> str:\n start = payload\n try:\n for attr in attrs:\n start = start.find(attr)\n return start.text\n except AttributeError:\n return \"\"\n\n event_type = payload.find(\"event_type\").text\n description = payload.find(\"description\").text\n project_id = payload.find(\"project_id\").text\n story_id = get_text([\"stories\", \"story\", \"id\"])\n # Ugh, the URL in the XML data is not a clickable URL that works for the user\n # so we try to build one that the user can actually click on\n url = f\"https://www.pivotaltracker.com/s/projects/{project_id}/stories/{story_id}\"\n\n # Pivotal doesn't tell us the name of the story, but it's usually in the\n # description in quotes as the first quoted string\n name_re = re.compile(r'[^\"]+\"([^\"]+)\".*')\n match = name_re.match(description)\n if match and len(match.groups()):\n name = match.group(1)\n else:\n name = \"Story changed\" # Failed for an unknown reason, show something\n more_info = f\" [(view)]({url}).\"\n\n if event_type == \"story_update\":\n subject = name\n content = description + more_info\n elif event_type == \"note_create\":\n subject = \"Comment added\"\n content = description + more_info\n elif event_type == \"story_create\":\n issue_desc = get_text([\"stories\", \"story\", \"description\"])\n issue_type = get_text([\"stories\", \"story\", \"story_type\"])\n issue_status = get_text([\"stories\", \"story\", \"current_state\"])\n estimate = get_text([\"stories\", \"story\", \"estimate\"])\n if estimate != \"\":\n estimate = f\" worth {estimate} story points\"\n subject = name\n content = f\"{description} ({issue_status} {issue_type}{estimate}):\\n\\n~~~ quote\\n{issue_desc}\\n~~~\\n\\n{more_info}\"\n return subject, content\n\n\nUNSUPPORTED_EVENT_TYPES = [\n \"task_create_activity\",\n \"comment_delete_activity\",\n \"task_delete_activity\",\n \"task_update_activity\",\n \"story_move_from_project_activity\",\n \"story_delete_activity\",\n \"story_move_into_project_activity\",\n \"epic_update_activity\",\n \"label_create_activity\",\n]\n\n\ndef api_pivotal_webhook_v5(request: HttpRequest, user_profile: UserProfile) -> Tuple[str, str]:\n payload = orjson.loads(request.body)\n\n event_type = payload[\"kind\"]\n\n project_name = payload[\"project\"][\"name\"]\n project_id = payload[\"project\"][\"id\"]\n\n primary_resources = payload[\"primary_resources\"][0]\n story_url = primary_resources[\"url\"]\n story_type = primary_resources.get(\"story_type\")\n story_id = primary_resources[\"id\"]\n story_name = primary_resources[\"name\"]\n\n performed_by = payload.get(\"performed_by\", {}).get(\"name\", \"\")\n\n story_info = f\"[{project_name}](https://www.pivotaltracker.com/s/projects/{project_id}): [{story_name}]({story_url})\"\n\n changes = 
payload.get(\"changes\", [])\n\n content = \"\"\n subject = f\"#{story_id}: {story_name}\"\n\n def extract_comment(change: Dict[str, Any]) -> Optional[str]:\n if change.get(\"kind\") == \"comment\":\n return change.get(\"new_values\", {}).get(\"text\", None)\n return None\n\n if event_type == \"story_update_activity\":\n # Find the changed valued and build a message\n content += f\"{performed_by} updated {story_info}:\\n\"\n for change in changes:\n old_values = change.get(\"original_values\", {})\n new_values = change[\"new_values\"]\n\n if \"current_state\" in old_values and \"current_state\" in new_values:\n content += \"* state changed from **{}** to **{}**\\n\".format(\n old_values[\"current_state\"], new_values[\"current_state\"]\n )\n if \"estimate\" in old_values and \"estimate\" in new_values:\n old_estimate = old_values.get(\"estimate\", None)\n if old_estimate is None:\n estimate = \"is now\"\n else:\n estimate = f\"changed from {old_estimate} to\"\n new_estimate = new_values[\"estimate\"] if new_values[\"estimate\"] is not None else \"0\"\n content += f\"* estimate {estimate} **{new_estimate} points**\\n\"\n if \"story_type\" in old_values and \"story_type\" in new_values:\n content += \"* type changed from **{}** to **{}**\\n\".format(\n old_values[\"story_type\"], new_values[\"story_type\"]\n )\n\n comment = extract_comment(change)\n if comment is not None:\n content += f\"* Comment added:\\n~~~quote\\n{comment}\\n~~~\\n\"\n\n elif event_type == \"comment_create_activity\":\n for change in changes:\n comment = extract_comment(change)\n if comment is not None:\n content += (\n f\"{performed_by} added a comment to {story_info}:\\n~~~quote\\n{comment}\\n~~~\"\n )\n elif event_type == \"story_create_activity\":\n content += f\"{performed_by} created {story_type}: {story_info}\\n\"\n for change in changes:\n new_values = change.get(\"new_values\", {})\n if \"current_state\" in new_values:\n content += \"* State is **{}**\\n\".format(new_values[\"current_state\"])\n if \"description\" in new_values:\n content += \"* Description is\\n\\n> {}\".format(new_values[\"description\"])\n elif event_type == \"story_move_activity\":\n content = f\"{performed_by} moved {story_info}\"\n for change in changes:\n old_values = change.get(\"original_values\", {})\n new_values = change[\"new_values\"]\n if \"current_state\" in old_values and \"current_state\" in new_values:\n content += \" from **{}** to **{}**.\".format(\n old_values[\"current_state\"], new_values[\"current_state\"]\n )\n elif event_type in UNSUPPORTED_EVENT_TYPES:\n # Known but unsupported Pivotal event types\n pass\n else:\n raise UnsupportedWebhookEventType(event_type)\n\n return subject, content\n\n\n@webhook_view(\"Pivotal\")\n@has_request_variables\ndef api_pivotal_webhook(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:\n subject = content = None\n try:\n subject, content = api_pivotal_webhook_v3(request, user_profile)\n except Exception:\n # Attempt to parse v5 JSON payload\n subject, content = api_pivotal_webhook_v5(request, user_profile)\n\n if not content:\n return json_error(_(\"Unable to handle Pivotal payload\"))\n\n check_send_webhook_message(request, user_profile, subject, content)\n return json_success()\n",
"path": "zerver/webhooks/pivotal/view.py"
}
] | diff --git a/zerver/webhooks/pivotal/view.py b/zerver/webhooks/pivotal/view.py
index 8109449cb2c0e..8c372ba4e1ac4 100644
--- a/zerver/webhooks/pivotal/view.py
+++ b/zerver/webhooks/pivotal/view.py
@@ -72,6 +72,7 @@ def get_text(attrs: List[str]) -> str:
"story_delete_activity",
"story_move_into_project_activity",
"epic_update_activity",
+ "label_create_activity",
]
|
DataDog__dd-trace-py-1724 | botocore gets monkey patched before gevent when using pynamoDB
In [0.43.0 ssl libs are patched on import](https://github.com/DataDog/dd-trace-py/pull/1629) to allow `ddtrace-run` and `gevent` to exist in harmony.
`pynamodb` imports `botocore`, and PynamoDB is patched by default. As a result, `ddtrace-run` ends up monkey-patching `botocore` before `gevent` does.
I believe PynamoDB should be listed in the SSL libs that only get patched on import.
### Which version of dd-trace-py are you using?
0.43.0
### Which version of the libraries are you using?
ddtrace==0.43.0
gevent==20.9.0
greenlet==0.4.17
gunicorn==20.0.4
pynamodb==4.3.3
### How can we reproduce your problem?
1. Create new virtualenv
```
$ mkdir temp
$ cd temp
$ virtualenv .
$ . ./bin/activate
```
2. Install libs
```
pip install ddtrace gunicorn[gevent] pynamodb
```
3. Create a minimal `app.py`:
```
import time
while True:
    time.sleep(1)
```
4. Run the failing command:
```
ddtrace-run gunicorn -k gevent app
```
The following warning is displayed, which will turn into a SSL recursion error if you try and use urllib3.
```
$ ddtrace-run gunicorn -k gevent app
- DATADOG TRACER DIAGNOSTIC - Agent not reachable. Exception raised: [Errno 61] Connection refused
- DATADOG TRACER DIAGNOSTIC - Agent not reachable. Exception raised: [Errno 61] Connection refused
[2020-10-12 16:46:09 +1100] [69996] [INFO] Starting gunicorn 20.0.4
[2020-10-12 16:46:09 +1100] [69996] [INFO] Listening at: http://127.0.0.1:8000 (69996)
[2020-10-12 16:46:09 +1100] [69996] [INFO] Using worker: gevent
[2020-10-12 16:46:09 +1100] [70004] [INFO] Booting worker with pid: 70004
/private/tmp/venv/lib/python3.7/site-packages/gunicorn/workers/ggevent.py:53: MonkeyPatchWarning: Monkey-patching ssl after ssl has already been imported may lead to errors, including RecursionError on Python 3.6. It may also silently lead to incorrect behaviour on Python 3.7. Please monkey-patch earlier. See https://github.com/gevent/gevent/issues/1016. Modules that had direct imports (NOT patched): ['botocore.httpsession (/private/tmp/venv/lib/python3.7/site-packages/botocore/httpsession.py)', 'urllib3.util.ssl_ (/private/tmp/venv/lib/python3.7/site-packages/urllib3/util/ssl_.py)', 'urllib3.util (/private/tmp/venv/lib/python3.7/site-packages/urllib3/util/__init__.py)'].
monkey.patch_all()
```
Disabling pynamodb tracing works around the problem:
```
DD_TRACE_PYNAMODB_ENABLED=False ddtrace-run gunicorn -k gevent app
```
Which gives the following output
```
$ DD_TRACE_PYNAMODB_ENABLED=False ddtrace-run gunicorn -k gevent app
- DATADOG TRACER DIAGNOSTIC - Agent not reachable. Exception raised: [Errno 61] Connection refused
- DATADOG TRACER DIAGNOSTIC - Agent not reachable. Exception raised: [Errno 61] Connection refused
[2020-10-12 16:48:11 +1100] [70038] [INFO] Starting gunicorn 20.0.4
[2020-10-12 16:48:11 +1100] [70038] [INFO] Listening at: http://127.0.0.1:8000 (70038)
[2020-10-12 16:48:11 +1100] [70038] [INFO] Using worker: gevent
[2020-10-12 16:48:11 +1100] [70046] [INFO] Booting worker with pid: 70046
```
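The direction suggested above — deferring the pynamodb patch until the module is actually imported — amounts to registering it in ddtrace's patch-on-import table, which is what the diff below does. A condensed sketch of the relevant change in `ddtrace/monkey.py`:
```
# ddtrace/monkey.py (condensed): patch pynamodb lazily, on first import, so that
# ddtrace-run does not import botocore before gevent has monkey-patched ssl.
_PATCH_ON_IMPORT = {
    # ... existing entries ...
    "botocore": ("botocore",),
    "pynamodb": ("pynamodb",),  # contrib name -> module names that trigger the patch
}
```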
| [
{
"content": "\"\"\"Patch libraries to be automatically instrumented.\n\nIt can monkey patch supported standard libraries and third party modules.\nA patched module will automatically report spans with its default configuration.\n\nA library instrumentation can be configured (for instance, to report as another service)\nusing Pin. For that, check its documentation.\n\"\"\"\nimport importlib\nimport os\nimport sys\nimport threading\n\nfrom ddtrace.vendor.wrapt.importer import when_imported\n\nfrom .internal.logger import get_logger\nfrom .settings import config\nfrom .utils import formats\n\n\nlog = get_logger(__name__)\n\n# Default set of modules to automatically patch or not\nPATCH_MODULES = {\n \"asyncio\": True,\n \"boto\": True,\n \"botocore\": True,\n \"bottle\": False,\n \"cassandra\": True,\n \"celery\": True,\n \"consul\": True,\n \"django\": True,\n \"elasticsearch\": True,\n \"algoliasearch\": True,\n \"futures\": False, # experimental propagation\n \"grpc\": True,\n \"mongoengine\": True,\n \"mysql\": True,\n \"mysqldb\": True,\n \"pymysql\": True,\n \"psycopg\": True,\n \"pylibmc\": True,\n \"pymemcache\": True,\n \"pymongo\": True,\n \"redis\": True,\n \"rediscluster\": True,\n \"requests\": True,\n \"sanic\": True,\n \"sqlalchemy\": False, # Prefer DB client instrumentation\n \"sqlite3\": True,\n \"aiohttp\": True, # requires asyncio (Python 3.4+)\n \"aiopg\": True,\n \"aiobotocore\": False,\n \"httplib\": False,\n \"vertica\": True,\n \"molten\": True,\n \"jinja2\": True,\n \"mako\": True,\n \"flask\": True,\n \"kombu\": False,\n \"starlette\": True,\n # Ignore some web framework integrations that might be configured explicitly in code\n \"falcon\": False,\n \"pylons\": False,\n \"pyramid\": False,\n # Auto-enable logging if the environment variable DD_LOGS_INJECTION is true\n \"logging\": config.logs_injection,\n \"pynamodb\": True,\n}\n\n_LOCK = threading.Lock()\n_PATCHED_MODULES = set()\n\n# Modules which are patched on first use\n# DEV: These modules are patched when the user first imports them, rather than\n# explicitly importing and patching them on application startup `ddtrace.patch_all(module=True)`\n# DEV: This ensures we do not patch a module until it is needed\n# DEV: <contrib name> => <list of module names that trigger a patch>\n_PATCH_ON_IMPORT = {\n \"aiohttp\": (\"aiohttp\",),\n \"aiobotocore\": (\"aiobotocore\",),\n \"celery\": (\"celery\",),\n \"flask\": (\"flask, \"),\n \"gevent\": (\"gevent\",),\n \"requests\": (\"requests\",),\n \"botocore\": (\"botocore\",),\n \"elasticsearch\": (\"elasticsearch\",),\n}\n\n\nclass PatchException(Exception):\n \"\"\"Wraps regular `Exception` class when patching modules\"\"\"\n\n pass\n\n\nclass ModuleNotFoundException(PatchException):\n pass\n\n\ndef _on_import_factory(module, raise_errors=True):\n \"\"\"Factory to create an import hook for the provided module name\"\"\"\n\n def on_import(hook):\n # Import and patch module\n path = \"ddtrace.contrib.%s\" % module\n imported_module = importlib.import_module(path)\n imported_module.patch()\n\n return on_import\n\n\ndef patch_all(**patch_modules):\n \"\"\"Automatically patches all available modules.\n\n In addition to ``patch_modules``, an override can be specified via an\n environment variable, ``DD_TRACE_<module>_ENABLED`` for each module.\n\n ``patch_modules`` have the highest precedence for overriding.\n\n :param dict patch_modules: Override whether particular modules are patched or not.\n\n >>> patch_all(redis=False, cassandra=False)\n \"\"\"\n modules = 
PATCH_MODULES.copy()\n\n # The enabled setting can be overridden by environment variables\n for module, enabled in modules.items():\n env_var = \"DD_TRACE_%s_ENABLED\" % module.upper()\n if env_var not in os.environ:\n continue\n\n override_enabled = formats.asbool(os.environ[env_var])\n modules[module] = override_enabled\n\n # Arguments take precedence over the environment and the defaults.\n modules.update(patch_modules)\n\n patch(raise_errors=False, **modules)\n\n\ndef patch(raise_errors=True, **patch_modules):\n \"\"\"Patch only a set of given modules.\n\n :param bool raise_errors: Raise error if one patch fail.\n :param dict patch_modules: List of modules to patch.\n\n >>> patch(psycopg=True, elasticsearch=True)\n \"\"\"\n modules = [m for (m, should_patch) in patch_modules.items() if should_patch]\n for module in modules:\n if module in _PATCH_ON_IMPORT:\n # If the module has already been imported then patch immediately\n if module in sys.modules:\n patch_module(module, raise_errors=raise_errors)\n\n # Otherwise, add a hook to patch when it is imported for the first time\n else:\n # Use factory to create handler to close over `module` and `raise_errors` values from this loop\n when_imported(module)(_on_import_factory(module, raise_errors))\n\n # manually add module to patched modules\n with _LOCK:\n _PATCHED_MODULES.add(module)\n else:\n patch_module(module, raise_errors=raise_errors)\n\n patched_modules = get_patched_modules()\n log.info(\n \"patched %s/%s modules (%s)\",\n len(patched_modules),\n len(modules),\n \",\".join(patched_modules),\n )\n\n\ndef patch_module(module, raise_errors=True):\n \"\"\"Patch a single module\n\n Returns if the module got properly patched.\n \"\"\"\n try:\n return _patch_module(module)\n except ModuleNotFoundException:\n if raise_errors:\n raise\n return False\n except Exception:\n if raise_errors:\n raise\n log.debug(\"failed to patch %s\", module, exc_info=True)\n return False\n\n\ndef get_patched_modules():\n \"\"\"Get the list of patched modules\"\"\"\n with _LOCK:\n return sorted(_PATCHED_MODULES)\n\n\ndef _patch_module(module):\n \"\"\"_patch_module will attempt to monkey patch the module.\n\n Returns if the module got patched.\n Can also raise errors if it fails.\n \"\"\"\n path = \"ddtrace.contrib.%s\" % module\n with _LOCK:\n if module in _PATCHED_MODULES and module not in _PATCH_ON_IMPORT:\n log.debug(\"already patched: %s\", path)\n return False\n\n try:\n imported_module = importlib.import_module(path)\n except ImportError:\n # if the import fails, the integration is not available\n raise PatchException(\"integration '%s' not available\" % path)\n else:\n # if patch() is not available in the module, it means\n # that the library is not installed in the environment\n if not hasattr(imported_module, \"patch\"):\n raise ModuleNotFoundException(\"module '%s' not installed\" % module)\n\n imported_module.patch()\n _PATCHED_MODULES.add(module)\n return True\n",
"path": "ddtrace/monkey.py"
}
] | [
{
"content": "\"\"\"Patch libraries to be automatically instrumented.\n\nIt can monkey patch supported standard libraries and third party modules.\nA patched module will automatically report spans with its default configuration.\n\nA library instrumentation can be configured (for instance, to report as another service)\nusing Pin. For that, check its documentation.\n\"\"\"\nimport importlib\nimport os\nimport sys\nimport threading\n\nfrom ddtrace.vendor.wrapt.importer import when_imported\n\nfrom .internal.logger import get_logger\nfrom .settings import config\nfrom .utils import formats\n\n\nlog = get_logger(__name__)\n\n# Default set of modules to automatically patch or not\nPATCH_MODULES = {\n \"asyncio\": True,\n \"boto\": True,\n \"botocore\": True,\n \"bottle\": False,\n \"cassandra\": True,\n \"celery\": True,\n \"consul\": True,\n \"django\": True,\n \"elasticsearch\": True,\n \"algoliasearch\": True,\n \"futures\": False, # experimental propagation\n \"grpc\": True,\n \"mongoengine\": True,\n \"mysql\": True,\n \"mysqldb\": True,\n \"pymysql\": True,\n \"psycopg\": True,\n \"pylibmc\": True,\n \"pymemcache\": True,\n \"pymongo\": True,\n \"redis\": True,\n \"rediscluster\": True,\n \"requests\": True,\n \"sanic\": True,\n \"sqlalchemy\": False, # Prefer DB client instrumentation\n \"sqlite3\": True,\n \"aiohttp\": True, # requires asyncio (Python 3.4+)\n \"aiopg\": True,\n \"aiobotocore\": False,\n \"httplib\": False,\n \"vertica\": True,\n \"molten\": True,\n \"jinja2\": True,\n \"mako\": True,\n \"flask\": True,\n \"kombu\": False,\n \"starlette\": True,\n # Ignore some web framework integrations that might be configured explicitly in code\n \"falcon\": False,\n \"pylons\": False,\n \"pyramid\": False,\n # Auto-enable logging if the environment variable DD_LOGS_INJECTION is true\n \"logging\": config.logs_injection,\n \"pynamodb\": True,\n}\n\n_LOCK = threading.Lock()\n_PATCHED_MODULES = set()\n\n# Modules which are patched on first use\n# DEV: These modules are patched when the user first imports them, rather than\n# explicitly importing and patching them on application startup `ddtrace.patch_all(module=True)`\n# DEV: This ensures we do not patch a module until it is needed\n# DEV: <contrib name> => <list of module names that trigger a patch>\n_PATCH_ON_IMPORT = {\n \"aiohttp\": (\"aiohttp\",),\n \"aiobotocore\": (\"aiobotocore\",),\n \"celery\": (\"celery\",),\n \"flask\": (\"flask, \"),\n \"gevent\": (\"gevent\",),\n \"requests\": (\"requests\",),\n \"botocore\": (\"botocore\",),\n \"elasticsearch\": (\"elasticsearch\",),\n \"pynamodb\": (\"pynamodb\",),\n}\n\n\nclass PatchException(Exception):\n \"\"\"Wraps regular `Exception` class when patching modules\"\"\"\n\n pass\n\n\nclass ModuleNotFoundException(PatchException):\n pass\n\n\ndef _on_import_factory(module, raise_errors=True):\n \"\"\"Factory to create an import hook for the provided module name\"\"\"\n\n def on_import(hook):\n # Import and patch module\n path = \"ddtrace.contrib.%s\" % module\n imported_module = importlib.import_module(path)\n imported_module.patch()\n\n return on_import\n\n\ndef patch_all(**patch_modules):\n \"\"\"Automatically patches all available modules.\n\n In addition to ``patch_modules``, an override can be specified via an\n environment variable, ``DD_TRACE_<module>_ENABLED`` for each module.\n\n ``patch_modules`` have the highest precedence for overriding.\n\n :param dict patch_modules: Override whether particular modules are patched or not.\n\n >>> patch_all(redis=False, cassandra=False)\n 
\"\"\"\n modules = PATCH_MODULES.copy()\n\n # The enabled setting can be overridden by environment variables\n for module, enabled in modules.items():\n env_var = \"DD_TRACE_%s_ENABLED\" % module.upper()\n if env_var not in os.environ:\n continue\n\n override_enabled = formats.asbool(os.environ[env_var])\n modules[module] = override_enabled\n\n # Arguments take precedence over the environment and the defaults.\n modules.update(patch_modules)\n\n patch(raise_errors=False, **modules)\n\n\ndef patch(raise_errors=True, **patch_modules):\n \"\"\"Patch only a set of given modules.\n\n :param bool raise_errors: Raise error if one patch fail.\n :param dict patch_modules: List of modules to patch.\n\n >>> patch(psycopg=True, elasticsearch=True)\n \"\"\"\n modules = [m for (m, should_patch) in patch_modules.items() if should_patch]\n for module in modules:\n if module in _PATCH_ON_IMPORT:\n # If the module has already been imported then patch immediately\n if module in sys.modules:\n patch_module(module, raise_errors=raise_errors)\n\n # Otherwise, add a hook to patch when it is imported for the first time\n else:\n # Use factory to create handler to close over `module` and `raise_errors` values from this loop\n when_imported(module)(_on_import_factory(module, raise_errors))\n\n # manually add module to patched modules\n with _LOCK:\n _PATCHED_MODULES.add(module)\n else:\n patch_module(module, raise_errors=raise_errors)\n\n patched_modules = get_patched_modules()\n log.info(\n \"patched %s/%s modules (%s)\",\n len(patched_modules),\n len(modules),\n \",\".join(patched_modules),\n )\n\n\ndef patch_module(module, raise_errors=True):\n \"\"\"Patch a single module\n\n Returns if the module got properly patched.\n \"\"\"\n try:\n return _patch_module(module)\n except ModuleNotFoundException:\n if raise_errors:\n raise\n return False\n except Exception:\n if raise_errors:\n raise\n log.debug(\"failed to patch %s\", module, exc_info=True)\n return False\n\n\ndef get_patched_modules():\n \"\"\"Get the list of patched modules\"\"\"\n with _LOCK:\n return sorted(_PATCHED_MODULES)\n\n\ndef _patch_module(module):\n \"\"\"_patch_module will attempt to monkey patch the module.\n\n Returns if the module got patched.\n Can also raise errors if it fails.\n \"\"\"\n path = \"ddtrace.contrib.%s\" % module\n with _LOCK:\n if module in _PATCHED_MODULES and module not in _PATCH_ON_IMPORT:\n log.debug(\"already patched: %s\", path)\n return False\n\n try:\n imported_module = importlib.import_module(path)\n except ImportError:\n # if the import fails, the integration is not available\n raise PatchException(\"integration '%s' not available\" % path)\n else:\n # if patch() is not available in the module, it means\n # that the library is not installed in the environment\n if not hasattr(imported_module, \"patch\"):\n raise ModuleNotFoundException(\"module '%s' not installed\" % module)\n\n imported_module.patch()\n _PATCHED_MODULES.add(module)\n return True\n",
"path": "ddtrace/monkey.py"
}
] | diff --git a/ddtrace/monkey.py b/ddtrace/monkey.py
index 747bdbc599f..bafc5dca323 100644
--- a/ddtrace/monkey.py
+++ b/ddtrace/monkey.py
@@ -85,6 +85,7 @@
"requests": ("requests",),
"botocore": ("botocore",),
"elasticsearch": ("elasticsearch",),
+ "pynamodb": ("pynamodb",),
}
diff --git a/releasenotes/notes/gevent-pynamodb-fix-72ac7017e51fd4f9.yaml b/releasenotes/notes/gevent-pynamodb-fix-72ac7017e51fd4f9.yaml
new file mode 100644
index 00000000000..b37d453e767
--- /dev/null
+++ b/releasenotes/notes/gevent-pynamodb-fix-72ac7017e51fd4f9.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - |
+ Patch pynamodb on import to prevent patching conflicts with gevent.
diff --git a/tests/contrib/gevent/test_tracer.py b/tests/contrib/gevent/test_tracer.py
index ae285ee1494..c4e9166e771 100644
--- a/tests/contrib/gevent/test_tracer.py
+++ b/tests/contrib/gevent/test_tracer.py
@@ -457,6 +457,7 @@ def test_ddtracerun(self):
import botocore # noqa
import requests # noqa
import elasticsearch # noqa
+ import pynamodb # noqa
p = subprocess.Popen(
["ddtrace-run", "python", "tests/contrib/gevent/monkeypatch.py"],
diff --git a/tox.ini b/tox.ini
index 82344b98752..dca7302d4f7 100644
--- a/tox.ini
+++ b/tox.ini
@@ -471,6 +471,7 @@ deps =
sslmodules: botocore
sslmodules: requests
sslmodules: elasticsearch
+ sslmodules: pynamodb
starlette_contrib: httpx
starlette_contrib: pytest-asyncio
starlette_contrib: requests
|
zulip__zulip-13077 | Upgrade pip from 19.1.1 and pip-tools from 3.8.0
Follow-up issue from #13067. pip-tools 3.9.0 or 4.0.0 fails to resolve dependencies from Git URLs (jazzband/pip-tools#851):
`pip._internal.exceptions.DistributionNotFound: No matching distribution found for zulip==0.6.1_git (from -r requirements/common.in (line 135))`
while pip 19.2 breaks pip-tools 3.8.0 (jazzband/pip-tools#853):
`TypeError: __init__() got an unexpected keyword argument 'find_links'`
| [
{
"content": "import os\n\nZULIP_VERSION = \"2.0.4+git\"\n# Add information on number of commits and commit hash to version, if available\nzulip_git_version_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'zulip-git-version')\nif os.path.exists(zulip_git_version_file):\n with open(zulip_git_version_file) as f:\n version = f.read().strip()\n if version:\n ZULIP_VERSION = version\n\nLATEST_MAJOR_VERSION = \"2.0\"\nLATEST_RELEASE_VERSION = \"2.0.4\"\nLATEST_RELEASE_ANNOUNCEMENT = \"https://blog.zulip.org/2019/03/01/zulip-2-0-released/\"\n\n# Bump the minor PROVISION_VERSION to indicate that folks should provision\n# only when going from an old version of the code to a newer version. Bump\n# the major version to indicate that folks should provision in both\n# directions.\n\n# Typically,\n# * adding a dependency only requires a minor version bump;\n# * removing a dependency requires a major version bump;\n# * upgrading a dependency requires a major version bump, unless the\n# upgraded dependency is backwards compatible with all of our\n# historical commits sharing the same major version, in which case a\n# minor version bump suffices.\n\nPROVISION_VERSION = '49.2'\n",
"path": "version.py"
}
] | [
{
"content": "import os\n\nZULIP_VERSION = \"2.0.4+git\"\n# Add information on number of commits and commit hash to version, if available\nzulip_git_version_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'zulip-git-version')\nif os.path.exists(zulip_git_version_file):\n with open(zulip_git_version_file) as f:\n version = f.read().strip()\n if version:\n ZULIP_VERSION = version\n\nLATEST_MAJOR_VERSION = \"2.0\"\nLATEST_RELEASE_VERSION = \"2.0.4\"\nLATEST_RELEASE_ANNOUNCEMENT = \"https://blog.zulip.org/2019/03/01/zulip-2-0-released/\"\n\n# Bump the minor PROVISION_VERSION to indicate that folks should provision\n# only when going from an old version of the code to a newer version. Bump\n# the major version to indicate that folks should provision in both\n# directions.\n\n# Typically,\n# * adding a dependency only requires a minor version bump;\n# * removing a dependency requires a major version bump;\n# * upgrading a dependency requires a major version bump, unless the\n# upgraded dependency is backwards compatible with all of our\n# historical commits sharing the same major version, in which case a\n# minor version bump suffices.\n\nPROVISION_VERSION = '49.3'\n",
"path": "version.py"
}
] | diff --git a/requirements/dev.in b/requirements/dev.in
index 196613baae52b..6b6a824744952 100644
--- a/requirements/dev.in
+++ b/requirements/dev.in
@@ -50,7 +50,7 @@ transifex-client==0.12.5
python-digitalocean==1.14.0
# Needed for updating the locked pip dependencies
-pip-tools==3.8.0
+pip-tools==4.1.0
# zulip's linting framework - zulint
git+https://github.com/zulip/zulint@aaed679f1ad38b230090eadd3870b7682500f60c#egg=zulint==0.0.1
diff --git a/requirements/dev.txt b/requirements/dev.txt
index 7d63ba635e3c5..9bc6dd787082d 100644
--- a/requirements/dev.txt
+++ b/requirements/dev.txt
@@ -102,7 +102,7 @@ phonenumberslite==8.10.15
pickleshare==0.7.5 # via ipython
pika==0.13.0
pillow==6.1.0
-pip-tools==3.8.0
+pip-tools==4.1.0
polib==1.1.0
premailer==3.5.0
prompt-toolkit==1.0.16 # via ipython
@@ -191,5 +191,5 @@ git+https://github.com/zulip/python-zulip-api.git@804501610b6a205334e71b4e441fca
git+https://github.com/zulip/python-zulip-api.git@804501610b6a205334e71b4e441fca60acf650da#egg=zulip_bots==0.6.1+git&subdirectory=zulip_bots
# The following packages are considered to be unsafe in a requirements file:
-pip==19.1.1
+pip==19.2.3
setuptools==41.0.1 # via cfn-lint, ipython, jsonschema, markdown, pyhamcrest, sphinx, zope.interface
diff --git a/requirements/pip.txt b/requirements/pip.txt
index 38d68afa0dded..0678617c8c5c4 100644
--- a/requirements/pip.txt
+++ b/requirements/pip.txt
@@ -1,4 +1,4 @@
# Dependencies for setting up pip to install our requirements.txt file.
-pip==19.1.1
+pip==19.2.3
setuptools==41.0.1
wheel==0.33.4
diff --git a/requirements/prod.txt b/requirements/prod.txt
index c10df59d0d595..3ca42a3212dca 100644
--- a/requirements/prod.txt
+++ b/requirements/prod.txt
@@ -122,5 +122,5 @@ git+https://github.com/zulip/python-zulip-api.git@804501610b6a205334e71b4e441fca
git+https://github.com/zulip/python-zulip-api.git@804501610b6a205334e71b4e441fca60acf650da#egg=zulip_bots==0.6.1+git&subdirectory=zulip_bots
# The following packages are considered to be unsafe in a requirements file:
-pip==19.1.1
+pip==19.2.3
setuptools==41.0.1 # via ipython, markdown
diff --git a/requirements/unupgradable.json b/requirements/unupgradable.json
index bbd10d0a0d749..0e67dfc624a65 100644
--- a/requirements/unupgradable.json
+++ b/requirements/unupgradable.json
@@ -14,12 +14,6 @@
"transifex-client": {
"issue": "https://github.com/zulip/zulip/issues/8914"
},
- "pip": {
- "issue": "https://github.com/zulip/zulip/issues/13067"
- },
- "pip-tools": {
- "issue": "https://github.com/zulip/zulip/issues/13067"
- },
"defusedxml": {
"issue": "https://github.com/zulip/zulip/issues/12191"
},
diff --git a/version.py b/version.py
index 5c3901c63af96..9b2112eac63e3 100644
--- a/version.py
+++ b/version.py
@@ -26,4 +26,4 @@
# historical commits sharing the same major version, in which case a
# minor version bump suffices.
-PROVISION_VERSION = '49.2'
+PROVISION_VERSION = '49.3'
|
wemake-services__wemake-python-styleguide-195 | Fix documentation main page's header
The header is gone:
<img width="1032" alt="2018-10-03 0 18 01" src="https://user-images.githubusercontent.com/4660275/46377643-d0ce1080-c6a1-11e8-950b-d2d0c515dee1.png">
| [
{
"content": "# -*- coding: utf-8 -*-\n\nimport ast\nfrom typing import Optional\n\nfrom wemake_python_styleguide.constants import MAGIC_NUMBERS_WHITELIST\nfrom wemake_python_styleguide.violations.best_practices import (\n MagicNumberViolation,\n)\nfrom wemake_python_styleguide.visitors.base import BaseNodeVisitor\n\n\nclass MagicNumberVisitor(BaseNodeVisitor):\n \"\"\"Checks magic numbers used in the code.\"\"\"\n\n _ALLOWED_PARENTS = (\n ast.Assign,\n\n # Constructor usages:\n ast.FunctionDef,\n ast.arguments,\n\n # Primitives:\n ast.List,\n ast.Dict,\n ast.Set,\n ast.Tuple,\n )\n\n _PROXY_PARENTS = (\n ast.UnaryOp,\n )\n\n def _get_real_parent(self, node: Optional[ast.AST]) -> Optional[ast.AST]:\n \"\"\"\n Returns real number's parent.\n\n What can go wrong?\n\n 1. Number can be negative: ``x = -1``,\n so ``1`` has ``UnaryOp`` as parent, but should return ``Assign``\n\n \"\"\"\n parent = getattr(node, 'parent', None)\n if isinstance(parent, self._PROXY_PARENTS):\n return self._get_real_parent(parent)\n return parent\n\n def _check_is_magic(self, node: ast.Num) -> None:\n parent = self._get_real_parent(node)\n if isinstance(parent, self._ALLOWED_PARENTS):\n return\n\n if node.n in MAGIC_NUMBERS_WHITELIST:\n return\n\n if isinstance(node.n, int) and node.n <= 10:\n return\n\n self.add_violation(MagicNumberViolation(node, text=str(node.n)))\n\n def visit_Num(self, node: ast.Num) -> None:\n \"\"\"\n Checks numbers not to be magic constants inside the code.\n\n Raises:\n MagicNumberViolation\n\n \"\"\"\n self._check_is_magic(node)\n self.generic_visit(node)\n",
"path": "wemake_python_styleguide/visitors/ast/numbers.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n\nimport ast\nfrom typing import Optional\n\nfrom wemake_python_styleguide.constants import MAGIC_NUMBERS_WHITELIST\nfrom wemake_python_styleguide.violations.best_practices import (\n MagicNumberViolation,\n)\nfrom wemake_python_styleguide.visitors.base import BaseNodeVisitor\n\n\nclass MagicNumberVisitor(BaseNodeVisitor):\n \"\"\"Checks magic numbers used in the code.\"\"\"\n\n _ALLOWED_PARENTS = (\n ast.Assign,\n\n # Constructor usages:\n ast.FunctionDef,\n ast.arguments,\n\n # Primitives:\n ast.List,\n ast.Dict,\n ast.Set,\n ast.Tuple,\n )\n\n # TODO: make consistent naming rules for class attributes:\n _PROXY_PARENTS = (\n ast.UnaryOp,\n )\n\n def _get_real_parent(self, node: Optional[ast.AST]) -> Optional[ast.AST]:\n \"\"\"\n Returns real number's parent.\n\n What can go wrong?\n\n 1. Number can be negative: ``x = -1``,\n so ``1`` has ``UnaryOp`` as parent, but should return ``Assign``\n\n \"\"\"\n parent = getattr(node, 'parent', None)\n if isinstance(parent, self._PROXY_PARENTS):\n return self._get_real_parent(parent)\n return parent\n\n def _check_is_magic(self, node: ast.Num) -> None:\n parent = self._get_real_parent(node)\n if isinstance(parent, self._ALLOWED_PARENTS):\n return\n\n if node.n in MAGIC_NUMBERS_WHITELIST:\n return\n\n if isinstance(node.n, int) and node.n <= 10:\n return\n\n self.add_violation(MagicNumberViolation(node, text=str(node.n)))\n\n def visit_Num(self, node: ast.Num) -> None:\n \"\"\"\n Checks numbers not to be magic constants inside the code.\n\n Raises:\n MagicNumberViolation\n\n \"\"\"\n self._check_is_magic(node)\n self.generic_visit(node)\n",
"path": "wemake_python_styleguide/visitors/ast/numbers.py"
}
] | diff --git a/CHANGELOG.md b/CHANGELOG.md
index ec404f83a..94e94b25f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,6 +9,10 @@ We used to have incremental versioning before `0.1.0`.
### Features
- Now we are counting `async` function as a module member
+- We now forbid to use `credits()` builtin function
+- We now check for `async with` and `async for` nesting level
+- We now count `async` methods as method for classes complexity check
+- We now count `async` functions as functions for module complexity check
### Misc
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 1d82e9987..f2a01e85c 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -77,9 +77,10 @@ Before submitting your code please do the following steps:
2. Add any changes you want
3. Adds tests for the new changes
4. Edit documentation if you have changed something significant
-5. Run `pytest` again to make sure it is still working
-6. Run `mypy` to ensure that types are correct
-7. Run `doc8` to ensure that docs are correct
+5. Update `CHANGELOG.md` with a quick summary of your changes
+6. Run `pytest` again to make sure it is still working
+7. Run `mypy` to ensure that types are correct
+8. Run `doc8` to ensure that docs are correct
## Other help
@@ -87,4 +88,4 @@ Before submitting your code please do the following steps:
You can contribute by spreading a word about this library.
It would also be a huge contribution to write
a short article on how you are using this project.
-What are your best-practices?
+You can also share your best practices with us.
diff --git a/README.md b/README.md
index d199b9498..aa6043abb 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,7 @@
-# wemake-python-styleguide [](https://travis-ci.org/wemake-services/wemake-python-styleguide) [](https://ci.appveyor.com/project/wemake-services/wemake-python-styleguide)
+# wemake-python-styleguide
[](https://wemake.services)
+[](https://travis-ci.org/wemake-services/wemake-python-styleguide) [](https://ci.appveyor.com/project/wemake-services/wemake-python-styleguide)
[](https://coveralls.io/github/wemake-services/wemake-python-styleguide?branch=master)
[](https://badge.fury.io/py/wemake-python-styleguide)
[](https://pypi.org/project/wemake-python-styleguide/)
@@ -88,7 +89,7 @@ We are here not to:
## Contributing
-See [CONTRIBUTING.md](https://github.com/wemake-services/wemake-python-styleguide/blob/master/CONTRIBUTING.md) file if you want to contribute.
+See ["Contributing" section](https://wemake-python-styleguide.readthedocs.io/en/latest/_pages/contributing.html) file in the docs if you want to contribute.
You can also check which [issues need some help](https://github.com/wemake-services/wemake-python-styleguide/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) right now.
diff --git a/docs/_pages/api.rst b/docs/_pages/api.rst
index ebd29cd4a..ec7007c03 100644
--- a/docs/_pages/api.rst
+++ b/docs/_pages/api.rst
@@ -1,13 +1,12 @@
-API Reference
+Internal Docs
=============
-Internal documentation.
-
Here you can find:
1. How our development process works
2. How to contribute to the project
3. How to write new rules
+4. How our internal API looks like
This information will also be helpful
if you would like to create our own ``flake8`` plugin.
@@ -23,16 +22,20 @@ where we specify all technical details about our workflow and tools.
And finally you will need to go through the API reference.
+Contributing
+------------
+
.. toctree::
:maxdepth: 2
- :caption: Meta:
glossary.rst
contributing.rst
+API Reference
+-------------
+
.. toctree::
:maxdepth: 2
- :caption: API Reference:
checker.rst
visitors/base.rst
diff --git a/docs/_pages/glossary.rst b/docs/_pages/glossary.rst
index cbba02a65..c0275b33d 100644
--- a/docs/_pages/glossary.rst
+++ b/docs/_pages/glossary.rst
@@ -3,6 +3,9 @@
Glossary
========
+First of all, we should speak the same language.
+Here we collect all the specific terms that are used in this project.
+
.. glossary::
plugin
diff --git a/docs/index.rst b/docs/index.rst
index d1ee9cebd..310b5d31b 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -1,9 +1,5 @@
.. mdinclude:: ../README.md
-
-User guide
------------
-
.. toctree::
:maxdepth: 2
:caption: Userguide:
@@ -13,21 +9,13 @@ User guide
_pages/constants.rst
_pages/options/config.rst
-
-Internal docs
--------------
-
.. toctree::
- :maxdepth: 1
+ :maxdepth: 2
:caption: API:
:hidden:
_pages/api.rst
-
-Changelog
----------
-
.. toctree::
:maxdepth: 1
:caption: Changelog:
diff --git a/wemake_python_styleguide/visitors/ast/numbers.py b/wemake_python_styleguide/visitors/ast/numbers.py
index 30a8859b9..789055b5e 100644
--- a/wemake_python_styleguide/visitors/ast/numbers.py
+++ b/wemake_python_styleguide/visitors/ast/numbers.py
@@ -27,6 +27,7 @@ class MagicNumberVisitor(BaseNodeVisitor):
ast.Tuple,
)
+ # TODO: make consistent naming rules for class attributes:
_PROXY_PARENTS = (
ast.UnaryOp,
)
|
streamlit__streamlit-7454 | A header with Japanese text has no anchor link.
### Summary
I found that a header with Japanese text has no anchor link.
### Steps to reproduce
Code snippet:
```
import streamlit as st
st.header("セクション")
```
1. Run the code snippet above.
2. Check whether the header has an anchor link.
**Expected behavior:**
The header ("セクション") has an anchor link.
**Actual behavior:**
The header ("セクション") has no anchor link.
### Is this a regression?
No
### Debug info
- Streamlit version: Streamlit, version 1.10.0
- Python version: Python 3.8.10
- Using Conda
- OS version: Ubuntu 20.04.4 LTS
- Browser version: Chrome / Version 104.0.5112.101 (Official Build) (x86_64)
### Additional information
A header with Korean text or Chinese text also has no anchor link.
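A rough Python sketch of the idea behind the fix: keep the readable slug for ASCII headers and fall back to a stable hash for non-ASCII ones. The real change lives in the frontend's `createAnchorFromText` (TypeScript) and uses `xxhash`; the `hashlib` call below is only a stand-in.
```python
# Illustrative only: the actual fix is TypeScript in StreamlitMarkdown.tsx.
import hashlib
import re

def create_anchor_from_text(text: str) -> str:
    if text.isascii():
        # ASCII headers keep the human-readable slug behaviour.
        return "-".join(filter(None, re.split(r"[^A-Za-z0-9]+", text.lower())))
    # Non-ASCII headers (Japanese, Korean, Chinese, ...) previously produced an
    # empty slug and therefore no anchor; a stable hash gives them a usable id.
    return hashlib.md5(text.encode("utf-8")).hexdigest()[:8]

# create_anchor_from_text("セクション") now returns a non-empty 8-character hex id.
```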
| [
{
"content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport streamlit as st\n\nst.title(\"This title is awesome!\")\nst.title(\"This title is awesome too!\", anchor=\"awesome-title\")\n",
"path": "e2e/scripts/st_title.py"
}
] | [
{
"content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport streamlit as st\n\nst.title(\"This title is awesome!\")\nst.title(\"This title is awesome too!\", anchor=\"awesome-title\")\n\nst.title(\"日本語タイトル\")\nst.title(\"その他の邦題\", anchor=\"アンカー\")\n",
"path": "e2e/scripts/st_title.py"
}
] | diff --git a/e2e/scripts/st_title.py b/e2e/scripts/st_title.py
index d926ffed0a25..6013875100ae 100644
--- a/e2e/scripts/st_title.py
+++ b/e2e/scripts/st_title.py
@@ -16,3 +16,6 @@
st.title("This title is awesome!")
st.title("This title is awesome too!", anchor="awesome-title")
+
+st.title("日本語タイトル")
+st.title("その他の邦題", anchor="アンカー")
diff --git a/e2e/specs/st_title.spec.js b/e2e/specs/st_title.spec.js
index 7b57c4704d71..e6c1a68c7b4e 100644
--- a/e2e/specs/st_title.spec.js
+++ b/e2e/specs/st_title.spec.js
@@ -19,14 +19,17 @@ describe("st.title", () => {
cy.loadApp("http://localhost:3000/");
});
- it("displays correct number of elements", () => {
- cy.get(".element-container .stMarkdown h1").should("have.length", 2);
+ it("displays correct number of elements & anchor links", () => {
+ cy.get(".element-container .stMarkdown h1").should("have.length", 4);
+ cy.get(".element-container .stMarkdown h1 a").should("have.length", 4);
});
it("displays a title", () => {
cy.get(".element-container .stMarkdown h1").then(els => {
expect(els[0].textContent).to.eq("This title is awesome!");
expect(els[1].textContent).to.eq("This title is awesome too!");
+ expect(els[2].textContent).to.eq("日本語タイトル");
+ expect(els[3].textContent).to.eq("その他の邦題");
});
});
@@ -34,6 +37,8 @@ describe("st.title", () => {
cy.get(".element-container .stMarkdown h1").then(els => {
cy.wrap(els[0]).should("have.attr", "id", "this-title-is-awesome");
cy.wrap(els[1]).should("have.attr", "id", "awesome-title");
+ cy.wrap(els[2]).should("have.attr", "id", "d3b04b7a");
+ cy.wrap(els[3]).should("have.attr", "id", "アンカー");
});
});
});
diff --git a/frontend/lib/src/components/shared/StreamlitMarkdown/StreamlitMarkdown.tsx b/frontend/lib/src/components/shared/StreamlitMarkdown/StreamlitMarkdown.tsx
index 33529d0a9416..d033f28b6f38 100644
--- a/frontend/lib/src/components/shared/StreamlitMarkdown/StreamlitMarkdown.tsx
+++ b/frontend/lib/src/components/shared/StreamlitMarkdown/StreamlitMarkdown.tsx
@@ -53,6 +53,7 @@ import {
} from "./styled-components"
import "katex/dist/katex.min.css"
+import xxhash from "xxhashjs"
import StreamlitSyntaxHighlighter from "@streamlit/lib/src/components/elements/CodeBlock/StreamlitSyntaxHighlighter"
export enum Tags {
@@ -101,12 +102,21 @@ export interface Props {
* Splits the string on non-alphanumeric characters, and joins with a dash.
*/
export function createAnchorFromText(text: string | null): string {
- const newAnchor = text
- ?.toLowerCase()
- .split(/[^A-Za-z0-9]/)
- .filter(Boolean)
- .join("-")
- return newAnchor || ""
+ let newAnchor = ""
+ // Check if the text is valid ASCII characters - necessary for fully functional anchors (issue #5291)
+ const isASCII = text && /^[\x00-\x7F]*$/.test(text)
+
+ if (isASCII) {
+ newAnchor = text
+ ?.toLowerCase()
+ .split(/[^\p{L}\p{N}]+/gu) // split on non-alphanumeric characters
+ .filter(Boolean) // filter out falsy values using Boolean constructor
+ .join("-")
+ } else if (text) {
+ // if the text is not valid ASCII, use a hash of the text
+ newAnchor = xxhash.h32(text, 0xabcd).toString(16)
+ }
+ return newAnchor
}
// Note: React markdown limits hrefs to specific protocols ('http', 'https',
|
pwndbg__pwndbg-628 | Don't activate the IDA view when stepping in the pwndbg
### Description
By default, pwndbg executes the `Jump(ea)` function when we step to a new instruction. However, `idc.Jump(ea)` internally calls
`inline bool jumpto(ea_t ea, int opnum=-1, int uijmp_flags=UIJMP_ACTIVATE)`
with its default parameters. As you can see, `UIJMP_ACTIVATE` is the default flag, and it instructs IDA to activate the target view, which grabs the focus (at least on Windows).
Because of this, every time I step in pwndbg, the focus shifts to IDA and I have to focus back on pwndbg manually, which is quite annoying.
### Steps to reproduce
Use pwndbg together with IDA Pro, then step through the program in pwndbg.
### My setup
newest pwndbg with IDA Pro 7.2 on Windows
### Possible Fix
We can change line 230 of pwndbg/ida.py
from
```return _ida.Jump(addr)```
to
```return _ida.jumpto(addr, -1, 0)```
which manually sets the flag to 0 instead of UIJMP_ACTIVATE.
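For context, the patched wrapper in `pwndbg/ida.py` ends up looking like this (the decorators and the `_ida` proxy are the existing ones from that module):
```python
@withIDA
@takes_address
def Jump(addr):
    # jumpto(ea, opnum, uijmp_flags): passing 0 instead of the default
    # UIJMP_ACTIVATE keeps IDA from stealing focus on every step.
    return _ida.jumpto(addr, -1, 0)
```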
| [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nTalks to an XMLRPC server running inside of an active IDA Pro instance,\nin order to query it about the database. Allows symbol resolution and\ninteractive debugging.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport errno\nimport functools\nimport socket\nimport sys\nimport time\nimport traceback\n\nimport gdb\nimport six\n\nimport pwndbg.arch\nimport pwndbg.config\nimport pwndbg.decorators\nimport pwndbg.elf\nimport pwndbg.events\nimport pwndbg.memoize\nimport pwndbg.memory\nimport pwndbg.regs\nfrom pwndbg.color import message\n\ntry:\n import xmlrpc.client as xmlrpclib\nexcept:\n import xmlrpclib\n\n\nida_rpc_host = pwndbg.config.Parameter('ida-rpc-host', '127.0.0.1', 'ida xmlrpc server address')\nida_rpc_port = pwndbg.config.Parameter('ida-rpc-port', 31337, 'ida xmlrpc server port')\nida_enabled = pwndbg.config.Parameter('ida-enabled', True, 'whether to enable ida integration')\nida_timeout = pwndbg.config.Parameter('ida-timeout', 2, 'time to wait for ida xmlrpc in seconds')\n\nxmlrpclib.Marshaller.dispatch[int] = lambda _, v, w: w(\"<value><i8>%d</i8></value>\" % v)\n\nif six.PY2:\n xmlrpclib.Marshaller.dispatch[long] = lambda _, v, w: w(\"<value><i8>%d</i8></value>\" % v)\n\nxmlrpclib.Marshaller.dispatch[type(0)] = lambda _, v, w: w(\"<value><i8>%d</i8></value>\" % v)\n\n_ida = None\n\n# to avoid printing the same exception multiple times, we store the last exception here\n_ida_last_exception = None\n\n# to avoid checking the connection multiple times with no delay, we store the last time we checked it\n_ida_last_connection_check = 0\n\n\[email protected]_after_first_prompt()\[email protected]([ida_rpc_host, ida_rpc_port, ida_enabled, ida_timeout])\ndef init_ida_rpc_client():\n global _ida, _ida_last_exception, _ida_last_connection_check\n\n if not ida_enabled:\n return\n\n now = time.time()\n if _ida is None and (now - _ida_last_connection_check) < int(ida_timeout) + 5:\n return\n\n addr = 'http://{host}:{port}'.format(host=ida_rpc_host, port=ida_rpc_port)\n\n _ida = xmlrpclib.ServerProxy(addr)\n socket.setdefaulttimeout(int(ida_timeout))\n\n exception = None # (type, value, traceback)\n try:\n _ida.here()\n print(message.success(\"Pwndbg successfully connected to Ida Pro xmlrpc: %s\" % addr))\n except socket.error as e:\n if e.errno != errno.ECONNREFUSED:\n exception = sys.exc_info()\n _ida = None\n except socket.timeout:\n exception = sys.exc_info()\n _ida = None\n except xmlrpclib.ProtocolError:\n exception = sys.exc_info()\n _ida = None\n\n if exception:\n if not isinstance(_ida_last_exception, exception[0]) or _ida_last_exception.args != exception[1].args:\n if hasattr(pwndbg.config, \"exception_verbose\") and pwndbg.config.exception_verbose:\n print(message.error(\"[!] 
Ida Pro xmlrpc error\"))\n traceback.print_exception(*exception)\n else:\n exc_type, exc_value, _ = exception\n print(message.error('Failed to connect to IDA Pro ({}: {})'.format(exc_type.__qualname__, exc_value)))\n if exc_type is socket.timeout:\n print(message.notice('To increase the time to wait for IDA Pro use `') + message.hint('set ida-timeout <new-timeout-in-seconds>') + message.notice('`'))\n else:\n print(message.notice('For more info invoke `') + message.hint('set exception-verbose on') + message.notice('`'))\n print(message.notice('To disable IDA Pro integration invoke `') + message.hint('set ida-enabled off') + message.notice('`'))\n\n _ida_last_exception = exception and exception[1]\n _ida_last_connection_check = now\n\n\nclass withIDA(object):\n def __init__(self, fn):\n self.fn = fn\n functools.update_wrapper(self, fn)\n\n def __call__(self, *args, **kwargs):\n if _ida is None:\n init_ida_rpc_client()\n if _ida is not None:\n return self.fn(*args, **kwargs)\n return None\n\n\ndef withHexrays(f):\n @withIDA\n @functools.wraps(f)\n def wrapper(*a, **kw):\n if _ida.init_hexrays_plugin():\n return f(*a, **kw)\n\n return wrapper\n\n\ndef takes_address(function):\n @functools.wraps(function)\n def wrapper(address, *args, **kwargs):\n return function(l2r(address), *args, **kwargs)\n\n return wrapper\n\n\ndef returns_address(function):\n @functools.wraps(function)\n def wrapper(*args, **kwargs):\n return r2l(function(*args, **kwargs))\n\n return wrapper\n\n\[email protected]_on_stop\ndef available():\n if not ida_enabled:\n return False\n return can_connect()\n\n\n@withIDA\ndef can_connect():\n return True\n\n\ndef l2r(addr):\n exe = pwndbg.elf.exe()\n if not exe:\n raise Exception(\"Can't find EXE base\")\n result = (addr - int(exe.address) + base()) & pwndbg.arch.ptrmask\n return result\n\n\ndef r2l(addr):\n exe = pwndbg.elf.exe()\n if not exe:\n raise Exception(\"Can't find EXE base\")\n result = (addr - base() + int(exe.address)) & pwndbg.arch.ptrmask\n return result\n\n\ndef remote(function):\n \"\"\"Runs the provided function in IDA's interpreter.\n\n The function must be self-contained and not reference any\n global variables.\"\"\"\n\n\[email protected]_on_objfile\ndef base():\n segaddr = _ida.NextSeg(0)\n base = _ida.get_fileregion_offset(segaddr)\n\n return segaddr - base\n\n\n@withIDA\n@takes_address\ndef Comment(addr):\n return _ida.GetCommentEx(addr, 0) or _ida.GetCommentEx(addr)\n\n\n@withIDA\n@takes_address\[email protected]_on_objfile\ndef Name(addr):\n return _ida.Name(addr)\n\n\n@withIDA\n@takes_address\[email protected]_on_objfile\ndef GetFuncOffset(addr):\n rv = _ida.GetFuncOffset(addr)\n return rv\n\n\n@withIDA\n@takes_address\[email protected]_on_objfile\ndef GetType(addr):\n rv = _ida.GetType(addr)\n return rv\n\n\n@withIDA\n@returns_address\ndef here():\n return _ida.here()\n\n\n@withIDA\n@takes_address\ndef Jump(addr):\n return _ida.Jump(addr)\n\n\n@withIDA\n@takes_address\[email protected]_on_objfile\ndef Anterior(addr):\n hexrays_prefix = '\\x01\\x04; '\n lines = []\n for i in range(10):\n r = _ida.LineA(addr, i)\n if not r: break\n if r.startswith(hexrays_prefix):\n r = r[len(hexrays_prefix):]\n lines.append(r)\n return '\\n'.join(lines)\n\n\n@withIDA\ndef GetBreakpoints():\n for i in range(GetBptQty()):\n yield GetBptEA(i)\n\n\n@withIDA\ndef GetBptQty():\n return _ida.GetBptQty()\n\n\n@withIDA\n@returns_address\ndef GetBptEA(i):\n return _ida.GetBptEA(i)\n\n\n_breakpoints = []\n\n\[email protected]\[email protected]\n@withIDA\ndef UpdateBreakpoints():\n 
# XXX: Remove breakpoints from IDA when the user removes them.\n current = set(eval(b.location.lstrip('*')) for b in _breakpoints)\n want = set(GetBreakpoints())\n\n # print(want)\n\n for addr in current - want:\n for bp in _breakpoints:\n if int(bp.location.lstrip('*'), 0) == addr:\n # print(\"delete\", addr)\n bp.delete()\n break\n _breakpoints.remove(bp)\n\n for bp in want - current:\n if not pwndbg.memory.peek(bp):\n continue\n\n bp = gdb.Breakpoint('*' + hex(int(bp)))\n _breakpoints.append(bp)\n # print(_breakpoints)\n\n\n@withIDA\n@takes_address\ndef SetColor(pc, color):\n return _ida.SetColor(pc, 1, color)\n\n\ncolored_pc = None\n\n\[email protected]\n@withIDA\ndef Auto_Color_PC():\n global colored_pc\n colored_pc = pwndbg.regs.pc\n SetColor(colored_pc, 0x7f7fff)\n\n\[email protected]\n@withIDA\ndef Auto_UnColor_PC():\n global colored_pc\n if colored_pc:\n SetColor(colored_pc, 0xffffff)\n colored_pc = None\n\n\n@withIDA\n@returns_address\[email protected]_on_objfile\ndef LocByName(name):\n return _ida.LocByName(str(name))\n\n\n@withIDA\n@takes_address\n@returns_address\[email protected]_on_objfile\ndef PrevHead(addr):\n return _ida.PrevHead(addr)\n\n\n@withIDA\n@takes_address\n@returns_address\[email protected]_on_objfile\ndef NextHead(addr):\n return _ida.NextHead(addr)\n\n\n@withIDA\n@takes_address\[email protected]_on_objfile\ndef GetFunctionName(addr):\n return _ida.GetFunctionName(addr)\n\n\n@withIDA\n@takes_address\[email protected]_on_objfile\ndef GetFlags(addr):\n return _ida.GetFlags(addr)\n\n\n@withIDA\[email protected]_on_objfile\ndef isASCII(flags):\n return _ida.isASCII(flags)\n\n\n@withIDA\n@takes_address\[email protected]_on_objfile\ndef ArgCount(address):\n pass\n\n\n@withIDA\ndef SaveBase(path):\n return _ida.SaveBase(path)\n\n\n@withIDA\ndef GetIdbPath():\n return _ida.GetIdbPath()\n\n\n@takes_address\[email protected]_on_stop\ndef has_cached_cfunc(addr):\n return _ida.has_cached_cfunc(addr)\n\n\n@withHexrays\n@takes_address\[email protected]_on_stop\ndef decompile(addr):\n return _ida.decompile(addr)\n\n\n@withIDA\[email protected]\ndef get_ida_versions():\n return _ida.versions()\n\n\n@withIDA\[email protected]_on_stop\ndef GetStrucQty():\n return _ida.GetStrucQty()\n\n\n@withIDA\[email protected]_on_stop\ndef GetStrucId(idx):\n return _ida.GetStrucId(idx)\n\n\n@withIDA\[email protected]_on_stop\ndef GetStrucName(sid):\n return _ida.GetStrucName(sid)\n\n\n@withIDA\[email protected]_on_stop\ndef GetStrucSize(sid):\n return _ida.GetStrucSize(sid)\n\n\n@withIDA\[email protected]_on_stop\ndef GetMemberQty(sid):\n return _ida.GetMemberQty(sid)\n\n\n@withIDA\[email protected]_on_stop\ndef GetMemberSize(sid, offset):\n return _ida.GetMemberSize(sid, offset)\n\n\n@withIDA\[email protected]_on_stop\ndef GetMemberId(sid, offset):\n return _ida.GetMemberId(sid, offset)\n\n\n@withIDA\[email protected]_on_stop\ndef GetMemberName(sid, offset):\n return _ida.GetMemberName(sid, offset)\n\n\n@withIDA\[email protected]_on_stop\ndef GetMemberFlag(sid, offset):\n return _ida.GetMemberFlag(sid, offset)\n\n\n@withIDA\[email protected]_on_stop\ndef GetStrucNextOff(sid, offset):\n return _ida.GetStrucNextOff(sid, offset)\n\n\nclass IDC(object):\n query = \"{k:v for k,v in globals()['idc'].__dict__.items() if type(v) in (int,long)}\"\n\n def __init__(self):\n if available():\n data = _ida.eval(self.query)\n self.__dict__.update(data)\n\n\nidc = IDC()\n\n\ndef print_member(sid, offset):\n mid = GetMemberId(sid, offset)\n mname = GetMemberName(sid, offset) or '(no name)'\n msize = 
GetMemberSize(sid, offset) or 0\n mflag = GetMemberFlag(sid, offset) or 0\n print(\" +%#x - %s [%#x bytes]\" % (offset, mname, msize))\n\n\ndef print_structs():\n for i in range(GetStrucQty() or 0):\n sid = GetStrucId(i)\n\n name = GetStrucName(sid)\n size = GetStrucSize(sid)\n\n print(\"%s - %#x bytes\" % (name, size))\n\n offset = 0\n while offset < size:\n print_member(sid, offset)\n offset = GetStrucNextOff(sid, offset)\n",
"path": "pwndbg/ida.py"
}
] | [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nTalks to an XMLRPC server running inside of an active IDA Pro instance,\nin order to query it about the database. Allows symbol resolution and\ninteractive debugging.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport errno\nimport functools\nimport socket\nimport sys\nimport time\nimport traceback\n\nimport gdb\nimport six\n\nimport pwndbg.arch\nimport pwndbg.config\nimport pwndbg.decorators\nimport pwndbg.elf\nimport pwndbg.events\nimport pwndbg.memoize\nimport pwndbg.memory\nimport pwndbg.regs\nfrom pwndbg.color import message\n\ntry:\n import xmlrpc.client as xmlrpclib\nexcept:\n import xmlrpclib\n\n\nida_rpc_host = pwndbg.config.Parameter('ida-rpc-host', '127.0.0.1', 'ida xmlrpc server address')\nida_rpc_port = pwndbg.config.Parameter('ida-rpc-port', 31337, 'ida xmlrpc server port')\nida_enabled = pwndbg.config.Parameter('ida-enabled', True, 'whether to enable ida integration')\nida_timeout = pwndbg.config.Parameter('ida-timeout', 2, 'time to wait for ida xmlrpc in seconds')\n\nxmlrpclib.Marshaller.dispatch[int] = lambda _, v, w: w(\"<value><i8>%d</i8></value>\" % v)\n\nif six.PY2:\n xmlrpclib.Marshaller.dispatch[long] = lambda _, v, w: w(\"<value><i8>%d</i8></value>\" % v)\n\nxmlrpclib.Marshaller.dispatch[type(0)] = lambda _, v, w: w(\"<value><i8>%d</i8></value>\" % v)\n\n_ida = None\n\n# to avoid printing the same exception multiple times, we store the last exception here\n_ida_last_exception = None\n\n# to avoid checking the connection multiple times with no delay, we store the last time we checked it\n_ida_last_connection_check = 0\n\n\[email protected]_after_first_prompt()\[email protected]([ida_rpc_host, ida_rpc_port, ida_enabled, ida_timeout])\ndef init_ida_rpc_client():\n global _ida, _ida_last_exception, _ida_last_connection_check\n\n if not ida_enabled:\n return\n\n now = time.time()\n if _ida is None and (now - _ida_last_connection_check) < int(ida_timeout) + 5:\n return\n\n addr = 'http://{host}:{port}'.format(host=ida_rpc_host, port=ida_rpc_port)\n\n _ida = xmlrpclib.ServerProxy(addr)\n socket.setdefaulttimeout(int(ida_timeout))\n\n exception = None # (type, value, traceback)\n try:\n _ida.here()\n print(message.success(\"Pwndbg successfully connected to Ida Pro xmlrpc: %s\" % addr))\n except socket.error as e:\n if e.errno != errno.ECONNREFUSED:\n exception = sys.exc_info()\n _ida = None\n except socket.timeout:\n exception = sys.exc_info()\n _ida = None\n except xmlrpclib.ProtocolError:\n exception = sys.exc_info()\n _ida = None\n\n if exception:\n if not isinstance(_ida_last_exception, exception[0]) or _ida_last_exception.args != exception[1].args:\n if hasattr(pwndbg.config, \"exception_verbose\") and pwndbg.config.exception_verbose:\n print(message.error(\"[!] 
Ida Pro xmlrpc error\"))\n traceback.print_exception(*exception)\n else:\n exc_type, exc_value, _ = exception\n print(message.error('Failed to connect to IDA Pro ({}: {})'.format(exc_type.__qualname__, exc_value)))\n if exc_type is socket.timeout:\n print(message.notice('To increase the time to wait for IDA Pro use `') + message.hint('set ida-timeout <new-timeout-in-seconds>') + message.notice('`'))\n else:\n print(message.notice('For more info invoke `') + message.hint('set exception-verbose on') + message.notice('`'))\n print(message.notice('To disable IDA Pro integration invoke `') + message.hint('set ida-enabled off') + message.notice('`'))\n\n _ida_last_exception = exception and exception[1]\n _ida_last_connection_check = now\n\n\nclass withIDA(object):\n def __init__(self, fn):\n self.fn = fn\n functools.update_wrapper(self, fn)\n\n def __call__(self, *args, **kwargs):\n if _ida is None:\n init_ida_rpc_client()\n if _ida is not None:\n return self.fn(*args, **kwargs)\n return None\n\n\ndef withHexrays(f):\n @withIDA\n @functools.wraps(f)\n def wrapper(*a, **kw):\n if _ida.init_hexrays_plugin():\n return f(*a, **kw)\n\n return wrapper\n\n\ndef takes_address(function):\n @functools.wraps(function)\n def wrapper(address, *args, **kwargs):\n return function(l2r(address), *args, **kwargs)\n\n return wrapper\n\n\ndef returns_address(function):\n @functools.wraps(function)\n def wrapper(*args, **kwargs):\n return r2l(function(*args, **kwargs))\n\n return wrapper\n\n\[email protected]_on_stop\ndef available():\n if not ida_enabled:\n return False\n return can_connect()\n\n\n@withIDA\ndef can_connect():\n return True\n\n\ndef l2r(addr):\n exe = pwndbg.elf.exe()\n if not exe:\n raise Exception(\"Can't find EXE base\")\n result = (addr - int(exe.address) + base()) & pwndbg.arch.ptrmask\n return result\n\n\ndef r2l(addr):\n exe = pwndbg.elf.exe()\n if not exe:\n raise Exception(\"Can't find EXE base\")\n result = (addr - base() + int(exe.address)) & pwndbg.arch.ptrmask\n return result\n\n\ndef remote(function):\n \"\"\"Runs the provided function in IDA's interpreter.\n\n The function must be self-contained and not reference any\n global variables.\"\"\"\n\n\[email protected]_on_objfile\ndef base():\n segaddr = _ida.NextSeg(0)\n base = _ida.get_fileregion_offset(segaddr)\n\n return segaddr - base\n\n\n@withIDA\n@takes_address\ndef Comment(addr):\n return _ida.GetCommentEx(addr, 0) or _ida.GetCommentEx(addr)\n\n\n@withIDA\n@takes_address\[email protected]_on_objfile\ndef Name(addr):\n return _ida.Name(addr)\n\n\n@withIDA\n@takes_address\[email protected]_on_objfile\ndef GetFuncOffset(addr):\n rv = _ida.GetFuncOffset(addr)\n return rv\n\n\n@withIDA\n@takes_address\[email protected]_on_objfile\ndef GetType(addr):\n rv = _ida.GetType(addr)\n return rv\n\n\n@withIDA\n@returns_address\ndef here():\n return _ida.here()\n\n\n@withIDA\n@takes_address\ndef Jump(addr):\n # uses C++ api instead of idc one to avoid activating the IDA window\n return _ida.jumpto(addr, -1, 0)\n\n\n@withIDA\n@takes_address\[email protected]_on_objfile\ndef Anterior(addr):\n hexrays_prefix = '\\x01\\x04; '\n lines = []\n for i in range(10):\n r = _ida.LineA(addr, i)\n if not r: break\n if r.startswith(hexrays_prefix):\n r = r[len(hexrays_prefix):]\n lines.append(r)\n return '\\n'.join(lines)\n\n\n@withIDA\ndef GetBreakpoints():\n for i in range(GetBptQty()):\n yield GetBptEA(i)\n\n\n@withIDA\ndef GetBptQty():\n return _ida.GetBptQty()\n\n\n@withIDA\n@returns_address\ndef GetBptEA(i):\n return _ida.GetBptEA(i)\n\n\n_breakpoints = 
[]\n\n\[email protected]\[email protected]\n@withIDA\ndef UpdateBreakpoints():\n # XXX: Remove breakpoints from IDA when the user removes them.\n current = set(eval(b.location.lstrip('*')) for b in _breakpoints)\n want = set(GetBreakpoints())\n\n # print(want)\n\n for addr in current - want:\n for bp in _breakpoints:\n if int(bp.location.lstrip('*'), 0) == addr:\n # print(\"delete\", addr)\n bp.delete()\n break\n _breakpoints.remove(bp)\n\n for bp in want - current:\n if not pwndbg.memory.peek(bp):\n continue\n\n bp = gdb.Breakpoint('*' + hex(int(bp)))\n _breakpoints.append(bp)\n # print(_breakpoints)\n\n\n@withIDA\n@takes_address\ndef SetColor(pc, color):\n return _ida.SetColor(pc, 1, color)\n\n\ncolored_pc = None\n\n\[email protected]\n@withIDA\ndef Auto_Color_PC():\n global colored_pc\n colored_pc = pwndbg.regs.pc\n SetColor(colored_pc, 0x7f7fff)\n\n\[email protected]\n@withIDA\ndef Auto_UnColor_PC():\n global colored_pc\n if colored_pc:\n SetColor(colored_pc, 0xffffff)\n colored_pc = None\n\n\n@withIDA\n@returns_address\[email protected]_on_objfile\ndef LocByName(name):\n return _ida.LocByName(str(name))\n\n\n@withIDA\n@takes_address\n@returns_address\[email protected]_on_objfile\ndef PrevHead(addr):\n return _ida.PrevHead(addr)\n\n\n@withIDA\n@takes_address\n@returns_address\[email protected]_on_objfile\ndef NextHead(addr):\n return _ida.NextHead(addr)\n\n\n@withIDA\n@takes_address\[email protected]_on_objfile\ndef GetFunctionName(addr):\n return _ida.GetFunctionName(addr)\n\n\n@withIDA\n@takes_address\[email protected]_on_objfile\ndef GetFlags(addr):\n return _ida.GetFlags(addr)\n\n\n@withIDA\[email protected]_on_objfile\ndef isASCII(flags):\n return _ida.isASCII(flags)\n\n\n@withIDA\n@takes_address\[email protected]_on_objfile\ndef ArgCount(address):\n pass\n\n\n@withIDA\ndef SaveBase(path):\n return _ida.SaveBase(path)\n\n\n@withIDA\ndef GetIdbPath():\n return _ida.GetIdbPath()\n\n\n@takes_address\[email protected]_on_stop\ndef has_cached_cfunc(addr):\n return _ida.has_cached_cfunc(addr)\n\n\n@withHexrays\n@takes_address\[email protected]_on_stop\ndef decompile(addr):\n return _ida.decompile(addr)\n\n\n@withIDA\[email protected]\ndef get_ida_versions():\n return _ida.versions()\n\n\n@withIDA\[email protected]_on_stop\ndef GetStrucQty():\n return _ida.GetStrucQty()\n\n\n@withIDA\[email protected]_on_stop\ndef GetStrucId(idx):\n return _ida.GetStrucId(idx)\n\n\n@withIDA\[email protected]_on_stop\ndef GetStrucName(sid):\n return _ida.GetStrucName(sid)\n\n\n@withIDA\[email protected]_on_stop\ndef GetStrucSize(sid):\n return _ida.GetStrucSize(sid)\n\n\n@withIDA\[email protected]_on_stop\ndef GetMemberQty(sid):\n return _ida.GetMemberQty(sid)\n\n\n@withIDA\[email protected]_on_stop\ndef GetMemberSize(sid, offset):\n return _ida.GetMemberSize(sid, offset)\n\n\n@withIDA\[email protected]_on_stop\ndef GetMemberId(sid, offset):\n return _ida.GetMemberId(sid, offset)\n\n\n@withIDA\[email protected]_on_stop\ndef GetMemberName(sid, offset):\n return _ida.GetMemberName(sid, offset)\n\n\n@withIDA\[email protected]_on_stop\ndef GetMemberFlag(sid, offset):\n return _ida.GetMemberFlag(sid, offset)\n\n\n@withIDA\[email protected]_on_stop\ndef GetStrucNextOff(sid, offset):\n return _ida.GetStrucNextOff(sid, offset)\n\n\nclass IDC(object):\n query = \"{k:v for k,v in globals()['idc'].__dict__.items() if type(v) in (int,long)}\"\n\n def __init__(self):\n if available():\n data = _ida.eval(self.query)\n self.__dict__.update(data)\n\n\nidc = IDC()\n\n\ndef print_member(sid, offset):\n mid = 
GetMemberId(sid, offset)\n mname = GetMemberName(sid, offset) or '(no name)'\n msize = GetMemberSize(sid, offset) or 0\n mflag = GetMemberFlag(sid, offset) or 0\n print(\" +%#x - %s [%#x bytes]\" % (offset, mname, msize))\n\n\ndef print_structs():\n for i in range(GetStrucQty() or 0):\n sid = GetStrucId(i)\n\n name = GetStrucName(sid)\n size = GetStrucSize(sid)\n\n print(\"%s - %#x bytes\" % (name, size))\n\n offset = 0\n while offset < size:\n print_member(sid, offset)\n offset = GetStrucNextOff(sid, offset)\n",
"path": "pwndbg/ida.py"
}
] | diff --git a/pwndbg/ida.py b/pwndbg/ida.py
index 514a98ad14f..775b340329f 100644
--- a/pwndbg/ida.py
+++ b/pwndbg/ida.py
@@ -227,7 +227,8 @@ def here():
@withIDA
@takes_address
def Jump(addr):
- return _ida.Jump(addr)
+ # uses C++ api instead of idc one to avoid activating the IDA window
+ return _ida.jumpto(addr, -1, 0)
@withIDA
|
certbot__certbot-9331 | Remove the third-party `mock` dependency
>Functions in certbot.tests.util were previously using the third-party mock module if it was available for backwards compatibility. This behavior will be removed and unittest.mock from the standard library will always be used instead.
e.g. https://github.com/certbot/certbot/blob/5c111d0bd1206d864d7cb93754e101f6073bc669/certbot/certbot/tests/util.py#L38-L50
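Concretely, the cleanup replaces the import shim below, which appears throughout the test modules in the diff, with a plain standard-library import:
```python
# Old pattern, kept for backwards compatibility with the third-party mock package:
try:
    import mock
except ImportError:  # pragma: no cover
    from unittest import mock  # type: ignore

# New pattern, always using the standard library:
from unittest import mock
```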
| [
{
"content": "import codecs\nimport os\nimport re\nimport sys\n\nfrom pkg_resources import parse_version\nfrom setuptools import __version__ as setuptools_version\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nmin_setuptools_version='41.6.0'\n# This conditional isn't necessary, but it provides better error messages to\n# people who try to install this package with older versions of setuptools.\nif parse_version(setuptools_version) < parse_version(min_setuptools_version):\n raise RuntimeError(f'setuptools {min_setuptools_version}+ is required')\n\n# Workaround for https://bugs.python.org/issue8876, see\n# https://bugs.python.org/issue8876#msg208792\n# This can be removed when using Python 2.7.9 or later:\n# https://hg.python.org/cpython/raw-file/v2.7.9/Misc/NEWS\nif os.path.abspath(__file__).split(os.path.sep)[1] == 'vagrant':\n del os.link\n\n\ndef read_file(filename, encoding='utf8'):\n \"\"\"Read unicode from given file.\"\"\"\n with codecs.open(filename, encoding=encoding) as fd:\n return fd.read()\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n# read version number (and other metadata) from package init\ninit_fn = os.path.join(here, 'certbot', '__init__.py')\nmeta = dict(re.findall(r\"\"\"__([a-z]+)__ = '([^']+)\"\"\", read_file(init_fn)))\n\nreadme = read_file(os.path.join(here, 'README.rst'))\nversion = meta['version']\n\n# This package relies on PyOpenSSL and requests, however, it isn't specified\n# here to avoid masking the more specific request requirements in acme. See\n# https://github.com/pypa/pip/issues/988 for more info.\ninstall_requires = [\n # We specify the minimum acme version as the current Certbot version for\n # simplicity. See https://github.com/certbot/certbot/issues/8761 for more\n # info.\n f'acme>={version}',\n # We technically need ConfigArgParse 0.10.0 for Python 2.6 support, but\n # saying so here causes a runtime error against our temporary fork of 0.9.3\n # in which we added 2.6 support (see #2243), so we relax the requirement.\n 'ConfigArgParse>=0.9.3',\n 'configobj>=5.0.6',\n 'cryptography>=2.5.0',\n 'distro>=1.0.1',\n 'josepy>=1.13.0',\n 'parsedatetime>=2.4',\n 'pyrfc3339',\n 'pytz>=2019.3',\n # This dependency needs to be added using environment markers to avoid its\n # installation on Linux.\n 'pywin32>=300 ; sys_platform == \"win32\"',\n f'setuptools>={min_setuptools_version}',\n]\n\ndev_extras = [\n 'azure-devops',\n 'ipdb',\n # poetry 1.2.0+ is required for it to pin pip, setuptools, and wheel. 
See\n # https://github.com/python-poetry/poetry/issues/1584.\n 'poetry>=1.2.0a1',\n 'twine',\n]\n\ndocs_extras = [\n # If you have Sphinx<1.5.1, you need docutils<0.13.1\n # https://github.com/sphinx-doc/sphinx/issues/3212\n 'Sphinx>=1.2', # Annotation support\n 'sphinx_rtd_theme',\n]\n\n# Tools like pip, wheel, and tox are listed here to ensure they are properly\n# pinned and installed during automated testing.\ntest_extras = [\n 'coverage',\n 'mypy',\n 'pip',\n 'pylint',\n 'pytest',\n 'pytest-cov',\n 'pytest-xdist',\n 'setuptools',\n 'tox',\n 'types-mock',\n 'types-pyOpenSSL',\n 'types-pyRFC3339',\n 'types-pytz',\n 'types-requests',\n 'types-setuptools',\n 'types-six',\n # typing-extensions is required to import typing.Protocol and make the mypy checks\n # pass (along with pylint about non-existent objects) on Python 3.7\n 'typing-extensions',\n 'wheel',\n]\n\n\nall_extras = dev_extras + docs_extras + test_extras\n\nsetup(\n name='certbot',\n version=version,\n description=\"ACME client\",\n long_description=readme,\n url='https://github.com/letsencrypt/letsencrypt',\n author=\"Certbot Project\",\n author_email='[email protected]',\n license='Apache License 2.0',\n python_requires='>=3.7',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Environment :: Console :: Curses',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Security',\n 'Topic :: System :: Installation/Setup',\n 'Topic :: System :: Networking',\n 'Topic :: System :: Systems Administration',\n 'Topic :: Utilities',\n ],\n\n packages=find_packages(exclude=['docs', 'examples', 'tests', 'venv']),\n include_package_data=True,\n\n install_requires=install_requires,\n extras_require={\n 'all': all_extras,\n 'dev': dev_extras,\n 'docs': docs_extras,\n 'test': test_extras,\n },\n\n entry_points={\n 'console_scripts': [\n 'certbot = certbot.main:main',\n ],\n 'certbot.plugins': [\n 'manual = certbot._internal.plugins.manual:Authenticator',\n 'null = certbot._internal.plugins.null:Installer',\n 'standalone = certbot._internal.plugins.standalone:Authenticator',\n 'webroot = certbot._internal.plugins.webroot:Authenticator',\n ],\n },\n)\n",
"path": "certbot/setup.py"
}
] | [
{
"content": "import codecs\nimport os\nimport re\nimport sys\n\nfrom pkg_resources import parse_version\nfrom setuptools import __version__ as setuptools_version\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nmin_setuptools_version='41.6.0'\n# This conditional isn't necessary, but it provides better error messages to\n# people who try to install this package with older versions of setuptools.\nif parse_version(setuptools_version) < parse_version(min_setuptools_version):\n raise RuntimeError(f'setuptools {min_setuptools_version}+ is required')\n\n# Workaround for https://bugs.python.org/issue8876, see\n# https://bugs.python.org/issue8876#msg208792\n# This can be removed when using Python 2.7.9 or later:\n# https://hg.python.org/cpython/raw-file/v2.7.9/Misc/NEWS\nif os.path.abspath(__file__).split(os.path.sep)[1] == 'vagrant':\n del os.link\n\n\ndef read_file(filename, encoding='utf8'):\n \"\"\"Read unicode from given file.\"\"\"\n with codecs.open(filename, encoding=encoding) as fd:\n return fd.read()\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n# read version number (and other metadata) from package init\ninit_fn = os.path.join(here, 'certbot', '__init__.py')\nmeta = dict(re.findall(r\"\"\"__([a-z]+)__ = '([^']+)\"\"\", read_file(init_fn)))\n\nreadme = read_file(os.path.join(here, 'README.rst'))\nversion = meta['version']\n\n# This package relies on PyOpenSSL and requests, however, it isn't specified\n# here to avoid masking the more specific request requirements in acme. See\n# https://github.com/pypa/pip/issues/988 for more info.\ninstall_requires = [\n # We specify the minimum acme version as the current Certbot version for\n # simplicity. See https://github.com/certbot/certbot/issues/8761 for more\n # info.\n f'acme>={version}',\n # We technically need ConfigArgParse 0.10.0 for Python 2.6 support, but\n # saying so here causes a runtime error against our temporary fork of 0.9.3\n # in which we added 2.6 support (see #2243), so we relax the requirement.\n 'ConfigArgParse>=0.9.3',\n 'configobj>=5.0.6',\n 'cryptography>=2.5.0',\n 'distro>=1.0.1',\n 'josepy>=1.13.0',\n 'parsedatetime>=2.4',\n 'pyrfc3339',\n 'pytz>=2019.3',\n # This dependency needs to be added using environment markers to avoid its\n # installation on Linux.\n 'pywin32>=300 ; sys_platform == \"win32\"',\n f'setuptools>={min_setuptools_version}',\n]\n\ndev_extras = [\n 'azure-devops',\n 'ipdb',\n # poetry 1.2.0+ is required for it to pin pip, setuptools, and wheel. 
See\n # https://github.com/python-poetry/poetry/issues/1584.\n 'poetry>=1.2.0a1',\n 'twine',\n]\n\ndocs_extras = [\n # If you have Sphinx<1.5.1, you need docutils<0.13.1\n # https://github.com/sphinx-doc/sphinx/issues/3212\n 'Sphinx>=1.2', # Annotation support\n 'sphinx_rtd_theme',\n]\n\n# Tools like pip, wheel, and tox are listed here to ensure they are properly\n# pinned and installed during automated testing.\ntest_extras = [\n 'coverage',\n 'mypy',\n 'pip',\n 'pylint',\n 'pytest',\n 'pytest-cov',\n 'pytest-xdist',\n 'setuptools',\n 'tox',\n 'types-pyOpenSSL',\n 'types-pyRFC3339',\n 'types-pytz',\n 'types-requests',\n 'types-setuptools',\n 'types-six',\n # typing-extensions is required to import typing.Protocol and make the mypy checks\n # pass (along with pylint about non-existent objects) on Python 3.7\n 'typing-extensions',\n 'wheel',\n]\n\n\nall_extras = dev_extras + docs_extras + test_extras\n\nsetup(\n name='certbot',\n version=version,\n description=\"ACME client\",\n long_description=readme,\n url='https://github.com/letsencrypt/letsencrypt',\n author=\"Certbot Project\",\n author_email='[email protected]',\n license='Apache License 2.0',\n python_requires='>=3.7',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Environment :: Console :: Curses',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Security',\n 'Topic :: System :: Installation/Setup',\n 'Topic :: System :: Networking',\n 'Topic :: System :: Systems Administration',\n 'Topic :: Utilities',\n ],\n\n packages=find_packages(exclude=['docs', 'examples', 'tests', 'venv']),\n include_package_data=True,\n\n install_requires=install_requires,\n extras_require={\n 'all': all_extras,\n 'dev': dev_extras,\n 'docs': docs_extras,\n 'test': test_extras,\n },\n\n entry_points={\n 'console_scripts': [\n 'certbot = certbot.main:main',\n ],\n 'certbot.plugins': [\n 'manual = certbot._internal.plugins.manual:Authenticator',\n 'null = certbot._internal.plugins.null:Installer',\n 'standalone = certbot._internal.plugins.standalone:Authenticator',\n 'webroot = certbot._internal.plugins.webroot:Authenticator',\n ],\n },\n)\n",
"path": "certbot/setup.py"
}
] | diff --git a/.azure-pipelines/templates/jobs/extended-tests-jobs.yml b/.azure-pipelines/templates/jobs/extended-tests-jobs.yml
index 7c586ee5bca..0f732e6c909 100644
--- a/.azure-pipelines/templates/jobs/extended-tests-jobs.yml
+++ b/.azure-pipelines/templates/jobs/extended-tests-jobs.yml
@@ -18,8 +18,6 @@ jobs:
PYTHON_VERSION: 3.7
TOXENV: py37
CERTBOT_NO_PIN: 1
- linux-external-mock:
- TOXENV: external-mock
linux-boulder-v2-integration-certbot-oldest:
PYTHON_VERSION: 3.7
TOXENV: integration-certbot-oldest
diff --git a/certbot-apache/tests/augeasnode_test.py b/certbot-apache/tests/augeasnode_test.py
index 1e11b5eb3f4..591634d359c 100644
--- a/certbot-apache/tests/augeasnode_test.py
+++ b/certbot-apache/tests/augeasnode_test.py
@@ -1,13 +1,9 @@
"""Tests for AugeasParserNode classes"""
from typing import List
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock # type: ignore
-
import os
import util
+from unittest import mock
from certbot import errors
diff --git a/certbot-apache/tests/autohsts_test.py b/certbot-apache/tests/autohsts_test.py
index 664d791bd78..70ed2ca1a6a 100644
--- a/certbot-apache/tests/autohsts_test.py
+++ b/certbot-apache/tests/autohsts_test.py
@@ -2,11 +2,7 @@
"""Test for certbot_apache._internal.configurator AutoHSTS functionality"""
import re
import unittest
-
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock # type: ignore
+from unittest import mock
from certbot import errors
from certbot_apache._internal import constants
diff --git a/certbot-apache/tests/centos_test.py b/certbot-apache/tests/centos_test.py
index c9a82046629..cc295266fd6 100644
--- a/certbot-apache/tests/centos_test.py
+++ b/certbot-apache/tests/centos_test.py
@@ -1,10 +1,6 @@
"""Test for certbot_apache._internal.configurator for Centos overrides"""
import unittest
-
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock # type: ignore
+from unittest import mock
from certbot import errors
from certbot.compat import filesystem
diff --git a/certbot-apache/tests/configurator_reverter_test.py b/certbot-apache/tests/configurator_reverter_test.py
index 72b8fe2bd0d..fe0dfb39dff 100644
--- a/certbot-apache/tests/configurator_reverter_test.py
+++ b/certbot-apache/tests/configurator_reverter_test.py
@@ -1,11 +1,7 @@
"""Test for certbot_apache._internal.configurator implementations of reverter"""
import shutil
import unittest
-
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock # type: ignore
+from unittest import mock
from certbot import errors
import util
diff --git a/certbot-apache/tests/configurator_test.py b/certbot-apache/tests/configurator_test.py
index 566907506e3..0978b302e29 100644
--- a/certbot-apache/tests/configurator_test.py
+++ b/certbot-apache/tests/configurator_test.py
@@ -5,11 +5,7 @@
import socket
import tempfile
import unittest
-
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock # type: ignore
+from unittest import mock
from acme import challenges
from certbot import achallenges
diff --git a/certbot-apache/tests/debian_test.py b/certbot-apache/tests/debian_test.py
index 2bbf403124f..facc6510798 100644
--- a/certbot-apache/tests/debian_test.py
+++ b/certbot-apache/tests/debian_test.py
@@ -1,11 +1,7 @@
"""Test for certbot_apache._internal.configurator for Debian overrides"""
import shutil
import unittest
-
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock # type: ignore
+from unittest import mock
from certbot import errors
from certbot.compat import os
diff --git a/certbot-apache/tests/display_ops_test.py b/certbot-apache/tests/display_ops_test.py
index 50ab6bfc723..26927ffadba 100644
--- a/certbot-apache/tests/display_ops_test.py
+++ b/certbot-apache/tests/display_ops_test.py
@@ -1,10 +1,6 @@
"""Test certbot_apache._internal.display_ops."""
import unittest
-
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock # type: ignore
+from unittest import mock
from certbot import errors
from certbot.display import util as display_util
diff --git a/certbot-apache/tests/dualnode_test.py b/certbot-apache/tests/dualnode_test.py
index 83a5729a55f..a3e28d09e34 100644
--- a/certbot-apache/tests/dualnode_test.py
+++ b/certbot-apache/tests/dualnode_test.py
@@ -1,10 +1,6 @@
"""Tests for DualParserNode implementation"""
import unittest
-
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock # type: ignore
+from unittest import mock
from certbot_apache._internal import assertions
from certbot_apache._internal import augeasparser
diff --git a/certbot-apache/tests/entrypoint_test.py b/certbot-apache/tests/entrypoint_test.py
index 2a269441535..0b9644f09a4 100644
--- a/certbot-apache/tests/entrypoint_test.py
+++ b/certbot-apache/tests/entrypoint_test.py
@@ -1,10 +1,6 @@
"""Test for certbot_apache._internal.entrypoint for override class resolution"""
import unittest
-
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock # type: ignore
+from unittest import mock
from certbot_apache._internal import configurator
from certbot_apache._internal import entrypoint
diff --git a/certbot-apache/tests/fedora_test.py b/certbot-apache/tests/fedora_test.py
index fca3c4ba451..4ff704aafe1 100644
--- a/certbot-apache/tests/fedora_test.py
+++ b/certbot-apache/tests/fedora_test.py
@@ -1,10 +1,6 @@
"""Test for certbot_apache._internal.configurator for Fedora 29+ overrides"""
import unittest
-
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock # type: ignore
+from unittest import mock
from certbot import errors
from certbot.compat import filesystem
diff --git a/certbot-apache/tests/gentoo_test.py b/certbot-apache/tests/gentoo_test.py
index 25f9e929bd2..4df46e70f9f 100644
--- a/certbot-apache/tests/gentoo_test.py
+++ b/certbot-apache/tests/gentoo_test.py
@@ -1,10 +1,6 @@
"""Test for certbot_apache._internal.configurator for Gentoo overrides"""
import unittest
-
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock # type: ignore
+from unittest import mock
from certbot import errors
from certbot.compat import filesystem
diff --git a/certbot-apache/tests/http_01_test.py b/certbot-apache/tests/http_01_test.py
index 65dfb6344b4..fe5b69b33e9 100644
--- a/certbot-apache/tests/http_01_test.py
+++ b/certbot-apache/tests/http_01_test.py
@@ -2,11 +2,7 @@
import unittest
import errno
from typing import List
-
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock # type: ignore
+from unittest import mock
from acme import challenges
from certbot import achallenges
diff --git a/certbot-apache/tests/parser_test.py b/certbot-apache/tests/parser_test.py
index 31a769ddd0d..89633ae4774 100644
--- a/certbot-apache/tests/parser_test.py
+++ b/certbot-apache/tests/parser_test.py
@@ -1,11 +1,7 @@
"""Tests for certbot_apache._internal.parser."""
import shutil
import unittest
-
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock # type: ignore
+from unittest import mock
from certbot import errors
from certbot.compat import os
diff --git a/certbot-apache/tests/parsernode_configurator_test.py b/certbot-apache/tests/parsernode_configurator_test.py
index ebeda3c37ba..6c153acc4f8 100644
--- a/certbot-apache/tests/parsernode_configurator_test.py
+++ b/certbot-apache/tests/parsernode_configurator_test.py
@@ -1,10 +1,6 @@
"""Tests for ApacheConfigurator for AugeasParserNode classes"""
import unittest
-
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock # type: ignore
+from unittest import mock
import util
diff --git a/certbot-apache/tests/util.py b/certbot-apache/tests/util.py
index a4191b3fec8..cf97d9e7a18 100644
--- a/certbot-apache/tests/util.py
+++ b/certbot-apache/tests/util.py
@@ -4,11 +4,7 @@
import augeas
import josepy as jose
-
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock # type: ignore
+from unittest import mock
from certbot.compat import os
from certbot.plugins import common
diff --git a/certbot-dns-cloudflare/tests/dns_cloudflare_test.py b/certbot-dns-cloudflare/tests/dns_cloudflare_test.py
index 2b182783103..cd73adc8fa0 100644
--- a/certbot-dns-cloudflare/tests/dns_cloudflare_test.py
+++ b/certbot-dns-cloudflare/tests/dns_cloudflare_test.py
@@ -1,12 +1,9 @@
"""Tests for certbot_dns_cloudflare._internal.dns_cloudflare."""
import unittest
+from unittest import mock
import CloudFlare
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock # type: ignore
from certbot import errors
from certbot.compat import os
diff --git a/certbot-dns-digitalocean/tests/dns_digitalocean_test.py b/certbot-dns-digitalocean/tests/dns_digitalocean_test.py
index 4683893e80e..8fdee38f37d 100644
--- a/certbot-dns-digitalocean/tests/dns_digitalocean_test.py
+++ b/certbot-dns-digitalocean/tests/dns_digitalocean_test.py
@@ -1,12 +1,9 @@
"""Tests for certbot_dns_digitalocean._internal.dns_digitalocean."""
import unittest
+from unittest import mock
import digitalocean
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock # type: ignore
from certbot import errors
from certbot.compat import os
diff --git a/certbot-dns-dnsimple/tests/dns_dnsimple_test.py b/certbot-dns-dnsimple/tests/dns_dnsimple_test.py
index fc3dc5b1f26..0e28f43b2cf 100644
--- a/certbot-dns-dnsimple/tests/dns_dnsimple_test.py
+++ b/certbot-dns-dnsimple/tests/dns_dnsimple_test.py
@@ -1,11 +1,8 @@
"""Tests for certbot_dns_dnsimple._internal.dns_dnsimple."""
import unittest
+from unittest import mock
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock # type: ignore
from requests.exceptions import HTTPError
from certbot.compat import os
diff --git a/certbot-dns-dnsmadeeasy/tests/dns_dnsmadeeasy_test.py b/certbot-dns-dnsmadeeasy/tests/dns_dnsmadeeasy_test.py
index a04716d95f1..46f5895a871 100644
--- a/certbot-dns-dnsmadeeasy/tests/dns_dnsmadeeasy_test.py
+++ b/certbot-dns-dnsmadeeasy/tests/dns_dnsmadeeasy_test.py
@@ -1,11 +1,8 @@
"""Tests for certbot_dns_dnsmadeeasy._internal.dns_dnsmadeeasy."""
import unittest
+from unittest import mock
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock # type: ignore
from requests.exceptions import HTTPError
from certbot.compat import os
diff --git a/certbot-dns-gehirn/tests/dns_gehirn_test.py b/certbot-dns-gehirn/tests/dns_gehirn_test.py
index 1310f74ca9b..b982e3e1b7a 100644
--- a/certbot-dns-gehirn/tests/dns_gehirn_test.py
+++ b/certbot-dns-gehirn/tests/dns_gehirn_test.py
@@ -1,11 +1,8 @@
"""Tests for certbot_dns_gehirn._internal.dns_gehirn."""
import unittest
+from unittest import mock
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock # type: ignore
from requests.exceptions import HTTPError
from certbot.compat import os
diff --git a/certbot-dns-google/tests/dns_google_test.py b/certbot-dns-google/tests/dns_google_test.py
index b6f63a937f9..27e8b1a653c 100644
--- a/certbot-dns-google/tests/dns_google_test.py
+++ b/certbot-dns-google/tests/dns_google_test.py
@@ -6,10 +6,8 @@
from googleapiclient.errors import Error
from googleapiclient.http import HttpMock
from httplib2 import ServerNotFoundError
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock # type: ignore
+
+from unittest import mock
from certbot import errors
from certbot.compat import os
diff --git a/certbot-dns-linode/tests/dns_linode_test.py b/certbot-dns-linode/tests/dns_linode_test.py
index d0d6ceb039f..c227ef4b5db 100644
--- a/certbot-dns-linode/tests/dns_linode_test.py
+++ b/certbot-dns-linode/tests/dns_linode_test.py
@@ -1,11 +1,7 @@
"""Tests for certbot_dns_linode._internal.dns_linode."""
import unittest
-
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock # type: ignore
+from unittest import mock
from certbot import errors
from certbot.compat import os
diff --git a/certbot-dns-luadns/tests/dns_luadns_test.py b/certbot-dns-luadns/tests/dns_luadns_test.py
index 7592e2323bd..3c1ac68418d 100644
--- a/certbot-dns-luadns/tests/dns_luadns_test.py
+++ b/certbot-dns-luadns/tests/dns_luadns_test.py
@@ -1,11 +1,8 @@
"""Tests for certbot_dns_luadns._internal.dns_luadns."""
import unittest
+from unittest import mock
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock # type: ignore
from requests.exceptions import HTTPError
from certbot.compat import os
diff --git a/certbot-dns-nsone/tests/dns_nsone_test.py b/certbot-dns-nsone/tests/dns_nsone_test.py
index 3754f98114b..13ea09b3df8 100644
--- a/certbot-dns-nsone/tests/dns_nsone_test.py
+++ b/certbot-dns-nsone/tests/dns_nsone_test.py
@@ -1,11 +1,8 @@
"""Tests for certbot_dns_nsone._internal.dns_nsone."""
import unittest
+from unittest import mock
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock # type: ignore
from requests.exceptions import HTTPError
from certbot.compat import os
diff --git a/certbot-dns-ovh/tests/dns_ovh_test.py b/certbot-dns-ovh/tests/dns_ovh_test.py
index 7f93967eb59..7eb767b7007 100644
--- a/certbot-dns-ovh/tests/dns_ovh_test.py
+++ b/certbot-dns-ovh/tests/dns_ovh_test.py
@@ -1,11 +1,8 @@
"""Tests for certbot_dns_ovh._internal.dns_ovh."""
import unittest
+from unittest import mock
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock # type: ignore
from requests.exceptions import HTTPError
from certbot.compat import os
diff --git a/certbot-dns-rfc2136/tests/dns_rfc2136_test.py b/certbot-dns-rfc2136/tests/dns_rfc2136_test.py
index d0434aef558..1f91d3cb666 100644
--- a/certbot-dns-rfc2136/tests/dns_rfc2136_test.py
+++ b/certbot-dns-rfc2136/tests/dns_rfc2136_test.py
@@ -1,14 +1,11 @@
"""Tests for certbot_dns_rfc2136._internal.dns_rfc2136."""
import unittest
+from unittest import mock
import dns.flags
import dns.rcode
import dns.tsig
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock # type: ignore
from certbot import errors
from certbot.compat import os
diff --git a/certbot-dns-route53/tests/dns_route53_test.py b/certbot-dns-route53/tests/dns_route53_test.py
index 69b6b115d4c..bdc70e04887 100644
--- a/certbot-dns-route53/tests/dns_route53_test.py
+++ b/certbot-dns-route53/tests/dns_route53_test.py
@@ -1,13 +1,10 @@
"""Tests for certbot_dns_route53._internal.dns_route53.Authenticator"""
import unittest
+from unittest import mock
from botocore.exceptions import ClientError
from botocore.exceptions import NoCredentialsError
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock # type: ignore
from certbot import errors
from certbot.compat import os
diff --git a/certbot-dns-sakuracloud/tests/dns_sakuracloud_test.py b/certbot-dns-sakuracloud/tests/dns_sakuracloud_test.py
index 1c64df3729b..a1abf7b783a 100644
--- a/certbot-dns-sakuracloud/tests/dns_sakuracloud_test.py
+++ b/certbot-dns-sakuracloud/tests/dns_sakuracloud_test.py
@@ -1,11 +1,8 @@
"""Tests for certbot_dns_sakuracloud._internal.dns_sakuracloud."""
import unittest
+from unittest import mock
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock # type: ignore
from requests.exceptions import HTTPError
from certbot.compat import os
diff --git a/certbot-nginx/tests/configurator_test.py b/certbot-nginx/tests/configurator_test.py
index a182f789a5c..916dfe3f54d 100644
--- a/certbot-nginx/tests/configurator_test.py
+++ b/certbot-nginx/tests/configurator_test.py
@@ -1,10 +1,7 @@
"""Test for certbot_nginx._internal.configurator."""
import unittest
+from unittest import mock
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock # type: ignore
import OpenSSL
from acme import challenges
diff --git a/certbot-nginx/tests/http_01_test.py b/certbot-nginx/tests/http_01_test.py
index b9917af3577..05be062029e 100644
--- a/certbot-nginx/tests/http_01_test.py
+++ b/certbot-nginx/tests/http_01_test.py
@@ -1,11 +1,8 @@
"""Tests for certbot_nginx._internal.http_01"""
import unittest
+from unittest import mock
import josepy as jose
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock # type: ignore
from acme import challenges
from certbot import achallenges
diff --git a/certbot-nginx/tests/parser_obj_test.py b/certbot-nginx/tests/parser_obj_test.py
index 4d1f2527743..60ff1c97550 100644
--- a/certbot-nginx/tests/parser_obj_test.py
+++ b/certbot-nginx/tests/parser_obj_test.py
@@ -1,11 +1,7 @@
""" Tests for functions and classes in parser_obj.py """
import unittest
-
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock # type: ignore
+from unittest import mock
from certbot_nginx._internal.parser_obj import COMMENT_BLOCK
from certbot_nginx._internal.parser_obj import parse_raw
diff --git a/certbot-nginx/tests/test_util.py b/certbot-nginx/tests/test_util.py
index 6cc701f42c8..1ac649318bf 100644
--- a/certbot-nginx/tests/test_util.py
+++ b/certbot-nginx/tests/test_util.py
@@ -4,10 +4,7 @@
import tempfile
import josepy as jose
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock # type: ignore
+from unittest import mock
import pkg_resources
from certbot import util
diff --git a/certbot/certbot/plugins/dns_test_common.py b/certbot/certbot/plugins/dns_test_common.py
index a2ab84dcb4e..65c9cc2c81d 100644
--- a/certbot/certbot/plugins/dns_test_common.py
+++ b/certbot/certbot/plugins/dns_test_common.py
@@ -5,6 +5,7 @@
import configobj
import josepy as jose
+from unittest import mock
from acme import challenges
from certbot import achallenges
@@ -19,12 +20,6 @@
Protocol = object
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock # type: ignore
-
-
DOMAIN = 'example.com'
KEY = jose.JWKRSA.load(test_util.load_vector("rsa512_key.pem"))
diff --git a/certbot/certbot/plugins/dns_test_common_lexicon.py b/certbot/certbot/plugins/dns_test_common_lexicon.py
index 01f4c6d619f..3710404046b 100644
--- a/certbot/certbot/plugins/dns_test_common_lexicon.py
+++ b/certbot/certbot/plugins/dns_test_common_lexicon.py
@@ -1,6 +1,7 @@
"""Base test class for DNS authenticators built on Lexicon."""
from typing import Any
from typing import TYPE_CHECKING
+from unittest import mock
from unittest.mock import MagicMock
import josepy as jose
@@ -14,10 +15,6 @@
from certbot.plugins.dns_test_common import _AuthenticatorCallableTestCase
from certbot.tests import util as test_util
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock # type: ignore
if TYPE_CHECKING:
from typing_extensions import Protocol
else:
diff --git a/certbot/certbot/tests/util.py b/certbot/certbot/tests/util.py
index dbff31a14de..0ef5f654c14 100644
--- a/certbot/certbot/tests/util.py
+++ b/certbot/certbot/tests/util.py
@@ -16,7 +16,7 @@
from typing import List
from typing import Optional
import unittest
-import warnings
+from unittest import mock
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
@@ -35,20 +35,6 @@
from certbot.display import util as display_util
from certbot.plugins import common
-try:
- # When we remove this deprecated import, we should also remove the
- # "external-mock" test environment and the mock dependency listed in
- # tools/pinning/pyproject.toml.
- import mock
- warnings.warn(
- "The external mock module is being used for backwards compatibility "
- "since it is available, however, future versions of Certbot's tests will "
- "use unittest.mock. Be sure to update your code accordingly.",
- PendingDeprecationWarning
- )
-except ImportError: # pragma: no cover
- from unittest import mock # type: ignore
-
class DummyInstaller(common.Installer):
"""Dummy installer plugin for test purpose."""
diff --git a/certbot/setup.py b/certbot/setup.py
index 600087d7f27..8ea0b0e6f30 100644
--- a/certbot/setup.py
+++ b/certbot/setup.py
@@ -90,7 +90,6 @@ def read_file(filename, encoding='utf8'):
'pytest-xdist',
'setuptools',
'tox',
- 'types-mock',
'types-pyOpenSSL',
'types-pyRFC3339',
'types-pytz',
diff --git a/certbot/tests/account_test.py b/certbot/tests/account_test.py
index e034c5f32f2..0037de31e74 100644
--- a/certbot/tests/account_test.py
+++ b/certbot/tests/account_test.py
@@ -4,10 +4,7 @@
import unittest
import josepy as jose
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock
+from unittest import mock
import pytz
from acme import messages
diff --git a/certbot/tests/auth_handler_test.py b/certbot/tests/auth_handler_test.py
index ba0323c3e58..23d5b2ae2e6 100644
--- a/certbot/tests/auth_handler_test.py
+++ b/certbot/tests/auth_handler_test.py
@@ -4,10 +4,7 @@
import unittest
from josepy import b64encode
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock
+from unittest import mock
from acme import challenges
from acme import client as acme_client
diff --git a/certbot/tests/cert_manager_test.py b/certbot/tests/cert_manager_test.py
index 0ed09eccddb..157d45b5512 100644
--- a/certbot/tests/cert_manager_test.py
+++ b/certbot/tests/cert_manager_test.py
@@ -7,10 +7,7 @@
import unittest
import configobj
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock
+from unittest import mock
from certbot import errors, configuration
from certbot._internal.storage import ALL_FOUR
diff --git a/certbot/tests/cli_test.py b/certbot/tests/cli_test.py
index 82138f52d59..54abe2594c5 100644
--- a/certbot/tests/cli_test.py
+++ b/certbot/tests/cli_test.py
@@ -5,6 +5,7 @@
import io
import tempfile
import unittest
+from unittest import mock
from acme import challenges
from certbot import errors
@@ -16,11 +17,6 @@
import certbot.tests.util as test_util
from certbot.tests.util import TempDirTestCase
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock
-
PLUGINS = disco.PluginsRegistry.find_all()
diff --git a/certbot/tests/client_test.py b/certbot/tests/client_test.py
index 70f46aee79a..6b430831f89 100644
--- a/certbot/tests/client_test.py
+++ b/certbot/tests/client_test.py
@@ -5,6 +5,7 @@
import shutil
import tempfile
import unittest
+from unittest import mock
from unittest.mock import MagicMock
from josepy import interfaces
@@ -17,11 +18,6 @@
from certbot.compat import os
import certbot.tests.util as test_util
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock
-
KEY = test_util.load_vector("rsa512_key.pem")
CSR_SAN = test_util.load_vector("csr-san_512.pem")
diff --git a/certbot/tests/compat/filesystem_test.py b/certbot/tests/compat/filesystem_test.py
index 9aab49c34fa..a9a258ba290 100644
--- a/certbot/tests/compat/filesystem_test.py
+++ b/certbot/tests/compat/filesystem_test.py
@@ -2,11 +2,7 @@
import contextlib
import errno
import unittest
-
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock
+from unittest import mock
from certbot import util
from certbot._internal import lock
diff --git a/certbot/tests/compat/misc_test.py b/certbot/tests/compat/misc_test.py
index 2155bd5a049..5cb8167b6ba 100644
--- a/certbot/tests/compat/misc_test.py
+++ b/certbot/tests/compat/misc_test.py
@@ -1,10 +1,6 @@
"""Tests for certbot.compat.misc"""
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock # type: ignore
import unittest
-import warnings
+from unittest import mock
from certbot.compat import os
diff --git a/certbot/tests/configuration_test.py b/certbot/tests/configuration_test.py
index 1c122615bd5..61c902bc912 100644
--- a/certbot/tests/configuration_test.py
+++ b/certbot/tests/configuration_test.py
@@ -1,10 +1,6 @@
"""Tests for certbot.configuration."""
import unittest
-
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock
+from unittest import mock
from certbot import errors
from certbot._internal import constants
diff --git a/certbot/tests/crypto_util_test.py b/certbot/tests/crypto_util_test.py
index 9a111a0ebf3..3031cf531f9 100644
--- a/certbot/tests/crypto_util_test.py
+++ b/certbot/tests/crypto_util_test.py
@@ -1,11 +1,8 @@
"""Tests for certbot.crypto_util."""
import logging
import unittest
+from unittest import mock
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock
import OpenSSL
from certbot import errors
diff --git a/certbot/tests/display/completer_test.py b/certbot/tests/display/completer_test.py
index a6ada8b9acd..73722151a4f 100644
--- a/certbot/tests/display/completer_test.py
+++ b/certbot/tests/display/completer_test.py
@@ -9,17 +9,12 @@
import string
import sys
import unittest
+from unittest import mock
from certbot.compat import filesystem
from certbot.compat import os
import certbot.tests.util as test_util
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock
-
-
class CompleterTest(test_util.TempDirTestCase):
"""Test certbot._internal.display.completer.Completer."""
diff --git a/certbot/tests/display/internal_util_test.py b/certbot/tests/display/internal_util_test.py
index 86489b6a51b..b29396c415b 100644
--- a/certbot/tests/display/internal_util_test.py
+++ b/certbot/tests/display/internal_util_test.py
@@ -3,15 +3,11 @@
import socket
import tempfile
import unittest
+from unittest import mock
from acme import messages as acme_messages
from certbot import errors
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock
-
class WrapLinesTest(unittest.TestCase):
def test_wrap_lines(self):
diff --git a/certbot/tests/display/ops_test.py b/certbot/tests/display/ops_test.py
index e00eeb08620..1235190a7f7 100644
--- a/certbot/tests/display/ops_test.py
+++ b/certbot/tests/display/ops_test.py
@@ -2,6 +2,7 @@
"""Test certbot.display.ops."""
import sys
import unittest
+from unittest import mock
import josepy as jose
@@ -15,11 +16,6 @@
from certbot.display import util as display_util
import certbot.tests.util as test_util
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock
-
KEY = jose.JWKRSA.load(test_util.load_vector("rsa512_key.pem"))
diff --git a/certbot/tests/display/util_test.py b/certbot/tests/display/util_test.py
index 7985de753c7..7eb45653c5e 100644
--- a/certbot/tests/display/util_test.py
+++ b/certbot/tests/display/util_test.py
@@ -3,15 +3,11 @@
import socket
import tempfile
import unittest
+from unittest import mock
from certbot import errors
import certbot.tests.util as test_util
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock
-
class NotifyTest(unittest.TestCase):
"""Tests for certbot.display.util.notify"""
diff --git a/certbot/tests/eff_test.py b/certbot/tests/eff_test.py
index c61f183cb11..6a8ac2c6110 100644
--- a/certbot/tests/eff_test.py
+++ b/certbot/tests/eff_test.py
@@ -1,11 +1,8 @@
"""Tests for certbot._internal.eff."""
import datetime
import unittest
+from unittest import mock
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock
import josepy
import pytz
import requests
diff --git a/certbot/tests/error_handler_test.py b/certbot/tests/error_handler_test.py
index 010a756c12b..d6d506956bc 100644
--- a/certbot/tests/error_handler_test.py
+++ b/certbot/tests/error_handler_test.py
@@ -6,15 +6,10 @@
from typing import Dict
from typing import Union
import unittest
+from unittest import mock
from certbot.compat import os
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock
-
-
def get_signals(signums):
"""Get the handlers for an iterable of signums."""
diff --git a/certbot/tests/errors_test.py b/certbot/tests/errors_test.py
index 792868df0c2..d05f2b43ea9 100644
--- a/certbot/tests/errors_test.py
+++ b/certbot/tests/errors_test.py
@@ -1,10 +1,6 @@
"""Tests for certbot.errors."""
import unittest
-
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock
+from unittest import mock
from acme import messages
from certbot import achallenges
diff --git a/certbot/tests/helpful_test.py b/certbot/tests/helpful_test.py
index 0abe277bf54..c67211a43fa 100644
--- a/certbot/tests/helpful_test.py
+++ b/certbot/tests/helpful_test.py
@@ -1,10 +1,6 @@
"""Tests for certbot.helpful_parser"""
import unittest
-
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock
+from unittest import mock
from certbot import errors
from certbot._internal.cli import HelpfulArgumentParser
diff --git a/certbot/tests/hook_test.py b/certbot/tests/hook_test.py
index fad18dc9f8c..8cd8e663157 100644
--- a/certbot/tests/hook_test.py
+++ b/certbot/tests/hook_test.py
@@ -1,10 +1,6 @@
"""Tests for certbot._internal.hooks."""
import unittest
-
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock
+from unittest import mock
from certbot import errors
from certbot import util
diff --git a/certbot/tests/lock_test.py b/certbot/tests/lock_test.py
index b45eb8f7a99..1e752578208 100644
--- a/certbot/tests/lock_test.py
+++ b/certbot/tests/lock_test.py
@@ -2,11 +2,7 @@
import functools
import multiprocessing
import unittest
-
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock
+from unittest import mock
from certbot import errors
from certbot.compat import os
diff --git a/certbot/tests/log_test.py b/certbot/tests/log_test.py
index aec3ac65a96..855582591f8 100644
--- a/certbot/tests/log_test.py
+++ b/certbot/tests/log_test.py
@@ -6,6 +6,7 @@
import time
from typing import Optional
import unittest
+from unittest import mock
from acme import messages
from certbot import errors
@@ -15,11 +16,6 @@
from certbot.compat import os
from certbot.tests import util as test_util
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock
-
class PreArgParseSetupTest(unittest.TestCase):
diff --git a/certbot/tests/main_test.py b/certbot/tests/main_test.py
index e26b19357bd..579682bdfbb 100644
--- a/certbot/tests/main_test.py
+++ b/certbot/tests/main_test.py
@@ -12,6 +12,7 @@
import traceback
from typing import List
import unittest
+from unittest import mock
import josepy as jose
import pytz
@@ -34,11 +35,6 @@
from certbot.plugins import enhancements
import certbot.tests.util as test_util
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock
-
CERT_PATH = test_util.vector_path('cert_512.pem')
diff --git a/certbot/tests/ocsp_test.py b/certbot/tests/ocsp_test.py
index c102667bc6b..802787e020d 100644
--- a/certbot/tests/ocsp_test.py
+++ b/certbot/tests/ocsp_test.py
@@ -4,6 +4,7 @@
from datetime import datetime
from datetime import timedelta
import unittest
+from unittest import mock
from cryptography import x509
from cryptography.exceptions import InvalidSignature
@@ -16,11 +17,6 @@
from certbot import errors
from certbot.tests import util as test_util
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock
-
out = """Missing = in header key=value
ocsp: Use -help for summary.
diff --git a/certbot/tests/plugins/common_test.py b/certbot/tests/plugins/common_test.py
index 46d766bcfaf..215faaea3bb 100644
--- a/certbot/tests/plugins/common_test.py
+++ b/certbot/tests/plugins/common_test.py
@@ -2,12 +2,9 @@
import functools
import shutil
import unittest
+from unittest import mock
import josepy as jose
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock
from acme import challenges
from certbot import achallenges
diff --git a/certbot/tests/plugins/disco_test.py b/certbot/tests/plugins/disco_test.py
index 6b599f561db..c564cebce06 100644
--- a/certbot/tests/plugins/disco_test.py
+++ b/certbot/tests/plugins/disco_test.py
@@ -3,6 +3,7 @@
import string
from typing import List
import unittest
+from unittest import mock
import pkg_resources
@@ -12,11 +13,6 @@
from certbot._internal.plugins import standalone
from certbot._internal.plugins import webroot
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock
-
EP_SA = pkg_resources.EntryPoint(
"sa", "certbot._internal.plugins.standalone",
diff --git a/certbot/tests/plugins/dns_common_lexicon_test.py b/certbot/tests/plugins/dns_common_lexicon_test.py
index 40afd107bac..4634c205726 100644
--- a/certbot/tests/plugins/dns_common_lexicon_test.py
+++ b/certbot/tests/plugins/dns_common_lexicon_test.py
@@ -1,11 +1,7 @@
"""Tests for certbot.plugins.dns_common_lexicon."""
import unittest
-
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock
+from unittest import mock
from certbot.plugins import dns_common_lexicon
from certbot.plugins import dns_test_common_lexicon
diff --git a/certbot/tests/plugins/dns_common_test.py b/certbot/tests/plugins/dns_common_test.py
index f68d36137ad..97bc5dea62a 100644
--- a/certbot/tests/plugins/dns_common_test.py
+++ b/certbot/tests/plugins/dns_common_test.py
@@ -3,11 +3,7 @@
import collections
import logging
import unittest
-
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock
+from unittest import mock
from certbot import errors
from certbot import util
diff --git a/certbot/tests/plugins/enhancements_test.py b/certbot/tests/plugins/enhancements_test.py
index 62289d95bf7..903d3e0950e 100644
--- a/certbot/tests/plugins/enhancements_test.py
+++ b/certbot/tests/plugins/enhancements_test.py
@@ -1,10 +1,6 @@
"""Tests for new style enhancements"""
import unittest
-
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock
+from unittest import mock
from certbot._internal.plugins import null
from certbot.plugins import enhancements
diff --git a/certbot/tests/plugins/manual_test.py b/certbot/tests/plugins/manual_test.py
index cfe2f60fa8e..a5dc69c32f3 100644
--- a/certbot/tests/plugins/manual_test.py
+++ b/certbot/tests/plugins/manual_test.py
@@ -2,11 +2,7 @@
import sys
import textwrap
import unittest
-
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock
+from unittest import mock
from acme import challenges
from certbot import errors
diff --git a/certbot/tests/plugins/null_test.py b/certbot/tests/plugins/null_test.py
index dfdd0a7deb7..ce3440e5bc4 100644
--- a/certbot/tests/plugins/null_test.py
+++ b/certbot/tests/plugins/null_test.py
@@ -1,10 +1,6 @@
"""Tests for certbot._internal.plugins.null."""
import unittest
-
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock
+from unittest import mock
class InstallerTest(unittest.TestCase):
diff --git a/certbot/tests/plugins/selection_test.py b/certbot/tests/plugins/selection_test.py
index b2f38754262..6aed9ec8d22 100644
--- a/certbot/tests/plugins/selection_test.py
+++ b/certbot/tests/plugins/selection_test.py
@@ -2,7 +2,7 @@
import sys
from typing import List
import unittest
-
+from unittest import mock
from certbot import errors
from certbot import interfaces
@@ -11,11 +11,6 @@
from certbot.display import util as display_util
from certbot.tests import util as test_util
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock
-
class ConveniencePickPluginTest(unittest.TestCase):
"""Tests for certbot._internal.plugins.selection.pick_*."""
diff --git a/certbot/tests/plugins/standalone_test.py b/certbot/tests/plugins/standalone_test.py
index 2649abae929..39454570e7e 100644
--- a/certbot/tests/plugins/standalone_test.py
+++ b/certbot/tests/plugins/standalone_test.py
@@ -5,6 +5,7 @@
from typing import Set
from typing import Tuple
import unittest
+from unittest import mock
import josepy as jose
import OpenSSL.crypto
@@ -16,11 +17,6 @@
from certbot.tests import acme_util
from certbot.tests import util as test_util
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock
-
class ServerManagerTest(unittest.TestCase):
"""Tests for certbot._internal.plugins.standalone.ServerManager."""
diff --git a/certbot/tests/plugins/storage_test.py b/certbot/tests/plugins/storage_test.py
index 66034b09ec5..a63ef779566 100644
--- a/certbot/tests/plugins/storage_test.py
+++ b/certbot/tests/plugins/storage_test.py
@@ -4,17 +4,13 @@
from typing import List
from typing import Optional
import unittest
+from unittest import mock
from certbot import errors
from certbot.compat import filesystem
from certbot.compat import os
from certbot.tests import util as test_util
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock
-
class PluginStorageTest(test_util.ConfigTestCase):
diff --git a/certbot/tests/plugins/util_test.py b/certbot/tests/plugins/util_test.py
index 1b4fcd6529f..faac0116532 100644
--- a/certbot/tests/plugins/util_test.py
+++ b/certbot/tests/plugins/util_test.py
@@ -1,10 +1,6 @@
"""Tests for certbot.plugins.util."""
import unittest
-
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock
+from unittest import mock
from certbot.compat import os
diff --git a/certbot/tests/plugins/webroot_test.py b/certbot/tests/plugins/webroot_test.py
index d7e96159658..d5ccc4b4f7b 100644
--- a/certbot/tests/plugins/webroot_test.py
+++ b/certbot/tests/plugins/webroot_test.py
@@ -8,12 +8,9 @@
import shutil
import tempfile
import unittest
+from unittest import mock
import josepy as jose
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock
from acme import challenges
from certbot import achallenges
diff --git a/certbot/tests/renewal_test.py b/certbot/tests/renewal_test.py
index d6e2866dc56..f3968944f6f 100644
--- a/certbot/tests/renewal_test.py
+++ b/certbot/tests/renewal_test.py
@@ -1,18 +1,13 @@
"""Tests for certbot._internal.renewal"""
import copy
import unittest
+from unittest import mock
from acme import challenges
from certbot import errors, configuration
from certbot._internal import storage
import certbot.tests.util as test_util
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock
-
-
class RenewalTest(test_util.ConfigTestCase):
@mock.patch('certbot._internal.cli.set_by_cli')
diff --git a/certbot/tests/renewupdater_test.py b/certbot/tests/renewupdater_test.py
index f086e3cf31f..30a7b0f46e7 100644
--- a/certbot/tests/renewupdater_test.py
+++ b/certbot/tests/renewupdater_test.py
@@ -1,10 +1,6 @@
"""Tests for renewal updater interfaces"""
import unittest
-
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock
+from unittest import mock
from certbot import interfaces
from certbot._internal import main
diff --git a/certbot/tests/reverter_test.py b/certbot/tests/reverter_test.py
index e8d85d4d1cd..5124c7d9f9c 100644
--- a/certbot/tests/reverter_test.py
+++ b/certbot/tests/reverter_test.py
@@ -4,11 +4,7 @@
import shutil
import tempfile
import unittest
-
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock
+from unittest import mock
from certbot import errors
from certbot.compat import os
diff --git a/certbot/tests/storage_test.py b/certbot/tests/storage_test.py
index c4e42ec379f..3a1f2b7b421 100644
--- a/certbot/tests/storage_test.py
+++ b/certbot/tests/storage_test.py
@@ -4,12 +4,9 @@
import shutil
import stat
import unittest
+from unittest import mock
import configobj
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock
import pytz
import certbot
diff --git a/certbot/tests/util_test.py b/certbot/tests/util_test.py
index 0da0976b8eb..e9b5ddef2b2 100644
--- a/certbot/tests/util_test.py
+++ b/certbot/tests/util_test.py
@@ -5,18 +5,13 @@
import io
import sys
import unittest
+from unittest import mock
from certbot import errors
from certbot.compat import filesystem
from certbot.compat import os
import certbot.tests.util as test_util
-try:
- import mock
-except ImportError: # pragma: no cover
- from unittest import mock
-
-
class EnvNoSnapForExternalCallsTest(unittest.TestCase):
"""Tests for certbot.util.env_no_snap_for_external_calls."""
diff --git a/pytest.ini b/pytest.ini
index 42853f26be1..704912685da 100644
--- a/pytest.ini
+++ b/pytest.ini
@@ -11,14 +11,12 @@
# we release breaking changes.
#
# The current warnings being ignored are:
-# 1) The warning raised when importing certbot.tests.util and the external mock
-# library is installed.
-# 2) A deprecation warning is raised in dnspython==1.15.0 in the oldest tests for
+# 1) A deprecation warning is raised in dnspython==1.15.0 in the oldest tests for
# certbot-dns-rfc2136.
-# 3) botocore is currently using deprecated urllib3 functionality. See
-# https://github.com/boto/botocore/issues/2744.
+# 2) botocore's default TLS settings raise deprecation warnings in Python
+# 3.10+, but their values are sane from a security perspective. See
+# https://github.com/boto/botocore/issues/2550.
filterwarnings =
error
- ignore:The external mock module:PendingDeprecationWarning
ignore:decodestring\(\) is a deprecated alias:DeprecationWarning:dns
ignore:'urllib3.contrib.pyopenssl:DeprecationWarning:botocore
diff --git a/tools/oldest_constraints.txt b/tools/oldest_constraints.txt
index bbbbc579ade..2e723bd8151 100644
--- a/tools/oldest_constraints.txt
+++ b/tools/oldest_constraints.txt
@@ -45,7 +45,6 @@ jsonschema==3.2.0; python_version >= "3.7"
lazy-object-proxy==1.7.1; python_version >= "3.7"
logger==1.4; python_version >= "3.7"
mccabe==0.7.0; python_version >= "3.7"
-mock==1.0.1
mypy-extensions==0.4.3; python_version >= "3.7"
mypy==0.971; python_version >= "3.7"
ndg-httpsclient==0.3.2
@@ -92,7 +91,6 @@ tomli==2.0.1; python_version < "3.11" and python_version >= "3.7" or python_full
tox==1.9.2; python_version >= "3.7"
typed-ast==1.5.4; python_version >= "3.7" and python_version < "3.8" or implementation_name == "cpython" and python_version < "3.8" and python_version >= "3.7"
types-cryptography==3.3.21; python_version >= "3.7"
-types-mock==4.0.15; python_version >= "3.7"
types-pyopenssl==22.0.9; python_version >= "3.7"
types-pyrfc3339==1.1.1; python_version >= "3.7"
types-python-dateutil==2.8.19; python_version >= "3.7"
diff --git a/tools/pinning/current/pyproject.toml b/tools/pinning/current/pyproject.toml
index eb42ecd342a..77b4fdf8d07 100644
--- a/tools/pinning/current/pyproject.toml
+++ b/tools/pinning/current/pyproject.toml
@@ -50,13 +50,6 @@ awscli = ">=1.22.76"
# as a dependency here to ensure a version of cython is pinned for extra
# stability.
cython = "*"
-# We install mock in our "external-mock" tox environment to test that we didn't
-# break Certbot's test API which used to always use mock objects from the 3rd
-# party mock library. We list the mock dependency here so that is pinned, but
-# we don't depend on it in Certbot to avoid installing mock when it's not
-# needed. This dependency can be removed here once Certbot's support for the
-# 3rd party mock library has been dropped.
-mock = "*"
# poetry 1.2.0+ is required for it to pin pip, setuptools, and wheel. See
# https://github.com/python-poetry/poetry/issues/1584. This version is required
# here in addition to certbot/setup.py because otherwise the pre-release
diff --git a/tools/pinning/oldest/pyproject.toml b/tools/pinning/oldest/pyproject.toml
index b6aed9dc07e..67b835551ab 100644
--- a/tools/pinning/oldest/pyproject.toml
+++ b/tools/pinning/oldest/pyproject.toml
@@ -61,7 +61,6 @@ google-api-python-client = "1.5.5"
httplib2 = "0.9.2"
idna = "2.6"
ipaddress = "1.0.16"
-mock = "1.0.1"
ndg-httpsclient = "0.3.2"
oauth2client = "4.0.0"
parsedatetime = "2.4"
diff --git a/tools/requirements.txt b/tools/requirements.txt
index e25e7f565b5..36b0f512893 100644
--- a/tools/requirements.txt
+++ b/tools/requirements.txt
@@ -84,7 +84,6 @@ lockfile==0.12.2
markupsafe==2.1.1; python_version >= "3.7"
matplotlib-inline==0.1.3; python_version >= "3.7"
mccabe==0.7.0; python_version >= "3.7"
-mock==4.0.3
msgpack==1.0.4; python_version >= "3.7" and python_version < "4.0"
msrest==0.6.21; python_version >= "3.7"
mypy-extensions==0.4.3; python_version >= "3.7"
@@ -170,7 +169,6 @@ traitlets==5.3.0; python_version >= "3.7"
twine==3.3.0; python_version >= "3.7"
typed-ast==1.5.4; python_version >= "3.7" and python_version < "3.8" or implementation_name == "cpython" and python_version < "3.8" and python_version >= "3.7"
types-cryptography==3.3.21; python_version >= "3.7"
-types-mock==4.0.15; python_version >= "3.7"
types-pyopenssl==22.0.9; python_version >= "3.7"
types-pyrfc3339==1.1.1; python_version >= "3.7"
types-python-dateutil==2.8.19; python_version >= "3.7"
diff --git a/tox.ini b/tox.ini
index f4776328d99..8125304f716 100644
--- a/tox.ini
+++ b/tox.ini
@@ -112,11 +112,6 @@ commands =
setenv =
{[testenv:oldest]setenv}
-[testenv:external-mock]
-commands =
- python {toxinidir}/tools/pip_install.py mock
- {[base]install_and_test} {[base]all_packages}
-
[testenv:lint{,-win,-posix}]
basepython = python3
# separating into multiple invocations disables cross package
|
NVIDIA-Merlin__NVTabular-1139 | [BUG] Problem on writing to_parquet after transforming
```python
#######################################
# transforming Code:
import glob
import pandas as pd
import nvtabular as nvt
from nvtabular.ops import Operator
class Processjson(Operator):
def transform(self, columns, gdf):
col = gdf['event_properties']
gdf['item_id'] = col.str.extract('\'product_id\'\s*:\s*\'([^\']+)\'')
gdf['event_time'] = (gdf['event_time'] - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s')
gdf['device_brand'] = gdf['device_brand'].fillna('Apple')
return gdf
def output_column_names(self, columns):
return [i for i in columns if (i != 'event_properties')] + ['item_id']
def dependencies(self):
return None
filtered = COLUMNS >> nvt.ops.Filter(lambda df: df['event_type'].isin(['Value']))
filtered = filtered >> nvt.ops.JoinExternal(df_ext=fid_map,on='user_id',columns_ext=['user_id','memberID'])
filtered = filtered>>Processjson() >> nvt.ops.Dropna()
workflow = nvt.Workflow(filtered)
dataset_file = glob.glob('raw/*')
subdataset_file = dataset_file[6:8]
dataset = nvt.Dataset(subdataset_file, part_size="500MB")
workflow.transform(dataset).to_parquet(f'processed/test')
############################################
```
I followed the example and edited it to serve my data.
When I set multiple files as input for the Dataset class, there are some specific files for which this exception is thrown when I load them.
It might be because there is no handling for the case where, after the filtering, some files still have data and some have none.
This doesn't happen if I load a single file and process it separately.
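A per-file workaround sketch built on that observation (not part of the original report; it reuses the `workflow` and `dataset_file` objects defined above, and the output paths are only illustrative):

```python
import os

# Hypothetical workaround: transform and write each raw file on its own,
# mirroring the observation that single-file runs succeed.
for path in dataset_file:
    single = nvt.Dataset([path], part_size="500MB")
    out_dir = os.path.join("processed", os.path.basename(path))
    workflow.transform(single).to_parquet(out_dir)
```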
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-60-2a116bd489a4> in <module>
2 # for i in dataset_file:
3 dataset = nvt.Dataset(subdataset_file, part_size="500MB")
----> 4 workflow.transform(dataset).to_parquet(f'processed/test')
5
/usr/local/lib/python3.8/dist-packages/nvtabular/io/dataset.py in to_parquet(self, output_path, shuffle, preserve_files, output_files, out_files_per_proc, num_threads, dtypes, cats, conts, labels, suffix, partition_on)
763
764 # Output dask_cudf DataFrame to dataset
--> 765 _ddf_to_dataset(
766 ddf,
767 fs,
/usr/local/lib/python3.8/dist-packages/nvtabular/io/dask.py in _ddf_to_dataset(ddf, fs, output_path, shuffle, file_partition_map, out_files_per_proc, cat_names, cont_names, label_names, output_format, client, num_threads, cpu, suffix, partition_on)
364 out = client.compute(out).result()
365 else:
--> 366 out = dask.compute(out, scheduler="synchronous")[0]
367
368 if cached_writers:
/usr/local/lib/python3.8/dist-packages/dask/base.py in compute(*args, **kwargs)
564 postcomputes.append(x.__dask_postcompute__())
565
--> 566 results = schedule(dsk, keys, **kwargs)
567 return repack([f(r, *a) for r, (f, a) in zip(results, postcomputes)])
568
/usr/local/lib/python3.8/dist-packages/dask/local.py in get_sync(dsk, keys, **kwargs)
558 """
559 kwargs.pop("num_workers", None) # if num_workers present, remove it
--> 560 return get_async(
561 synchronous_executor.submit,
562 synchronous_executor._max_workers,
/usr/local/lib/python3.8/dist-packages/dask/local.py in get_async(submit, num_workers, dsk, result, cache, get_id, rerun_exceptions_locally, pack_exception, raise_exception, callbacks, dumps, loads, chunksize, **kwargs)
501 while state["waiting"] or state["ready"] or state["running"]:
502 fire_tasks(chunksize)
--> 503 for key, res_info, failed in queue_get(queue).result():
504 if failed:
505 exc, tb = loads(res_info)
/usr/lib/python3.8/concurrent/futures/_base.py in result(self, timeout)
430 raise CancelledError()
431 elif self._state == FINISHED:
--> 432 return self.__get_result()
433
434 self._condition.wait(timeout)
/usr/lib/python3.8/concurrent/futures/_base.py in __get_result(self)
386 def __get_result(self):
387 if self._exception:
--> 388 raise self._exception
389 else:
390 return self._result
/usr/local/lib/python3.8/dist-packages/dask/local.py in submit(self, fn, *args, **kwargs)
543 fut = Future()
544 try:
--> 545 fut.set_result(fn(*args, **kwargs))
546 except BaseException as e:
547 fut.set_exception(e)
/usr/local/lib/python3.8/dist-packages/dask/local.py in batch_execute_tasks(it)
235 Batch computing of multiple tasks with `execute_task`
236 """
--> 237 return [execute_task(*a) for a in it]
238
239
/usr/local/lib/python3.8/dist-packages/dask/local.py in <listcomp>(.0)
235 Batch computing of multiple tasks with `execute_task`
236 """
--> 237 return [execute_task(*a) for a in it]
238
239
/usr/local/lib/python3.8/dist-packages/dask/local.py in execute_task(key, task_info, dumps, loads, get_id, pack_exception)
226 failed = False
227 except BaseException as e:
--> 228 result = pack_exception(e, dumps)
229 failed = True
230 return key, result, failed
/usr/local/lib/python3.8/dist-packages/dask/local.py in execute_task(key, task_info, dumps, loads, get_id, pack_exception)
221 try:
222 task, data = loads(task_info)
--> 223 result = _execute_task(task, data)
224 id = get_id()
225 result = dumps((result, id))
/usr/local/lib/python3.8/dist-packages/dask/core.py in _execute_task(arg, cache, dsk)
119 # temporaries by their reference count and can execute certain
120 # operations in-place.
--> 121 return func(*(_execute_task(a, cache) for a in args))
122 elif not ishashable(arg):
123 return arg
/usr/lib/python3.8/contextlib.py in inner(*args, **kwds)
73 def inner(*args, **kwds):
74 with self._recreate_cm():
---> 75 return func(*args, **kwds)
76 return inner
77
/usr/local/lib/python3.8/dist-packages/nvtabular/io/dask.py in _write_output_partition(df, processed_path, shuffle, out_files_per_proc, fs, cat_names, cont_names, label_names, output_format, num_threads, cpu, suffix)
92
93 # Add data
---> 94 writer.add_data(df)
95
96 return df_size
/usr/lib/python3.8/contextlib.py in inner(*args, **kwds)
73 def inner(*args, **kwds):
74 with self._recreate_cm():
---> 75 return func(*args, **kwds)
76 return inner
77
/usr/local/lib/python3.8/dist-packages/nvtabular/io/writer.py in add_data(self, df)
137 # Only writing to a single file. No need to
138 # scatter or slice the data before writing
--> 139 self._add_single_file(df)
140 else:
141 # Use different mechanism to decompose and write each df
/usr/local/lib/python3.8/dist-packages/nvtabular/io/writer.py in _add_single_file(self, df)
224 self.queue.put((0, df))
225 else:
--> 226 self._write_table(0, df)
227
228 def package_general_metadata(self):
/usr/local/lib/python3.8/dist-packages/nvtabular/io/parquet.py in _write_table(self, idx, data)
788 def _write_table(self, idx, data):
789 writer = self._get_or_create_writer(idx)
--> 790 writer.write_table(data)
791
792 @classmethod
cudf/_lib/parquet.pyx in cudf._lib.parquet.ParquetWriter.write_table()
RuntimeError: cuDF failure at: /workspace/build-env/cpp/src/io/parquet/writer_impl.cu:462: Leaf column's corresponding metadata cannot have children
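If the empty-after-filtering hypothesis above is correct, a small diagnostic sketch (again not from the report; it assumes the `workflow`, `dataset_file`, and `nvt` names from the snippet at the top) can count how many rows each raw file keeps before any write is attempted:

```python
# Hypothetical check: count the rows each raw file retains after the
# Filter/Dropna steps; files that report 0 are the likely culprits.
for path in dataset_file:
    transformed = workflow.transform(nvt.Dataset([path], part_size="500MB"))
    print(path, len(transformed.to_ddf()))
```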
| [
{
"content": "#\n# Copyright (c) 2021, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport json\nimport logging\nimport os\nimport sys\nimport time\nimport warnings\nfrom typing import TYPE_CHECKING, Optional\n\nimport cloudpickle\n\ntry:\n import cudf\nexcept ImportError:\n cudf = None\nimport dask\nimport pandas as pd\nfrom dask.core import flatten\n\nimport nvtabular\nfrom nvtabular.columns import Schema\nfrom nvtabular.dispatch import _concat_columns\nfrom nvtabular.io.dataset import Dataset\nfrom nvtabular.ops import StatOperator\nfrom nvtabular.utils import _ensure_optimize_dataframe_graph, global_dask_client\nfrom nvtabular.worker import clean_worker_cache\nfrom nvtabular.workflow.node import WorkflowNode, iter_nodes\n\nLOG = logging.getLogger(\"nvtabular\")\n\n\nif TYPE_CHECKING:\n import distributed\n\n\nclass Workflow:\n \"\"\"\n The Workflow class applies a graph of operations onto a dataset, letting you transform\n datasets to do feature engineering and preprocessing operations. This class follows an API\n similar to Transformers in sklearn: we first ``fit`` the workflow by calculating statistics\n on the dataset, and then once fit we can ``transform`` datasets by applying these statistics.\n\n Example usage::\n\n # define a graph of operations\n cat_features = CAT_COLUMNS >> nvtabular.ops.Categorify()\n cont_features = CONT_COLUMNS >> nvtabular.ops.FillMissing() >> nvtabular.ops.Normalize()\n workflow = nvtabular.Workflow(cat_features + cont_features + \"label\")\n\n # calculate statistics on the training dataset\n workflow.fit(nvtabular.io.Dataset(TRAIN_PATH))\n\n # transform the training and validation datasets and write out as parquet\n workflow.transform(nvtabular.io.Dataset(TRAIN_PATH)).to_parquet(output_path=TRAIN_OUT_PATH)\n workflow.transform(nvtabular.io.Dataset(VALID_PATH)).to_parquet(output_path=VALID_OUT_PATH)\n\n Parameters\n ----------\n output_node: WorkflowNode\n The last node in the graph of operators this workflow should apply\n client: distributed.Client, optional\n The Dask distributed client to use for multi-gpu processing and multi-node processing\n \"\"\"\n\n def __init__(self, output_node: WorkflowNode, client: Optional[\"distributed.Client\"] = None):\n self.output_node = output_node\n self.client = client\n self.input_dtypes = None\n self.output_dtypes = None\n self.output_schema = None\n\n # Warn user if there is an unused global\n # Dask client available\n if global_dask_client(self.client):\n warnings.warn(\n \"A global dask.distributed client has been detected, but the \"\n \"single-threaded scheduler will be used for execution. Please \"\n \"use the `client` argument to initialize a `Workflow` object \"\n \"with distributed-execution enabled.\"\n )\n\n def transform(self, dataset: Dataset) -> Dataset:\n \"\"\"Transforms the dataset by applying the graph of operators to it. 
Requires the ``fit``\n method to have already been called, or calculated statistics to be loaded from disk\n\n This method returns a Dataset object, with the transformations lazily loaded. None\n of the actual computation will happen until the produced Dataset is consumed, or\n written out to disk.\n\n Parameters\n -----------\n dataset: Dataset\n\n Returns\n -------\n Dataset\n \"\"\"\n self._clear_worker_cache()\n\n if not self.output_schema:\n self.fit_schema(dataset.schema)\n\n ddf = dataset.to_ddf(columns=self._input_columns())\n return Dataset(\n _transform_ddf(ddf, self.output_node, self.output_dtypes),\n client=self.client,\n cpu=dataset.cpu,\n base_dataset=dataset.base_dataset,\n schema=self.output_schema,\n )\n\n def fit_schema(self, input_schema: Schema) -> \"Workflow\":\n schemaless_nodes = {\n node: _get_schemaless_nodes(node.parents_with_dep_nodes)\n for node in _get_schemaless_nodes([self.output_node])\n }\n\n while schemaless_nodes:\n # get all the Operators with no outstanding dependencies\n current_phase = [\n node for node, dependencies in schemaless_nodes.items() if not dependencies\n ]\n if not current_phase:\n # this shouldn't happen, but lets not infinite loop just in case\n raise RuntimeError(\"failed to find dependency-free Operator to compute schema for\")\n\n processed_nodes = []\n for node in current_phase:\n if not node.parents:\n node.compute_schemas(input_schema)\n else:\n combined_schema = sum(\n [parent.output_schema for parent in node.parents if parent.output_schema],\n Schema(),\n )\n combined_schema += input_schema\n node.compute_schemas(combined_schema)\n\n processed_nodes.append(node)\n\n # Remove all the operators we processed in this phase, and remove\n # from the dependencies of other ops too\n for schemaless_node in current_phase:\n schemaless_nodes.pop(schemaless_node)\n for dependencies in schemaless_nodes.values():\n dependencies.difference_update(current_phase)\n\n self.output_schema = self.output_node.output_schema\n\n return self\n\n def fit(self, dataset: Dataset) -> \"Workflow\":\n \"\"\"Calculates statistics for this workflow on the input dataset\n\n Parameters\n -----------\n dataset: Dataset\n The input dataset to calculate statistics for. 
If there is a train/test split this\n data should be the training dataset only.\n \"\"\"\n self._clear_worker_cache()\n\n if not self.output_schema:\n self.fit_schema(dataset.schema)\n\n ddf = dataset.to_ddf(columns=self._input_columns())\n\n # Get a dictionary mapping all StatOperators we need to fit to a set of any dependant\n # StatOperators (having StatOperators that depend on the output of other StatOperators\n # means that will have multiple phases in the fit cycle here)\n stat_ops = {\n op: _get_stat_ops(op.parents_with_dep_nodes) for op in _get_stat_ops([self.output_node])\n }\n\n while stat_ops:\n # get all the StatOperators that we can currently call fit on (no outstanding\n # dependencies)\n current_phase = [op for op, dependencies in stat_ops.items() if not dependencies]\n if not current_phase:\n # this shouldn't happen, but lets not infinite loop just in case\n raise RuntimeError(\"failed to find dependency-free StatOperator to fit\")\n\n stats, ops = [], []\n for workflow_node in current_phase:\n # Check for additional input columns that aren't generated by parents\n addl_input_cols = set()\n if workflow_node.parents:\n parent_output_cols = sum(\n [parent.output_columns for parent in workflow_node.parents],\n nvtabular.ColumnSelector(),\n )\n addl_input_cols = set(workflow_node.input_columns.names) - set(\n parent_output_cols.names\n )\n\n # apply transforms necessary for the inputs to the current column group, ignoring\n # the transforms from the statop itself\n transformed_ddf = _ensure_optimize_dataframe_graph(\n ddf=_transform_ddf(\n ddf, workflow_node.parents, additional_columns=addl_input_cols\n )\n )\n\n op = workflow_node.op\n try:\n stats.append(op.fit(workflow_node.input_columns, transformed_ddf))\n ops.append(op)\n except Exception:\n LOG.exception(\"Failed to fit operator %s\", workflow_node.op)\n raise\n\n if self.client:\n results = [r.result() for r in self.client.compute(stats)]\n else:\n results = dask.compute(stats, scheduler=\"synchronous\")[0]\n\n for computed_stats, op in zip(results, ops):\n op.fit_finalize(computed_stats)\n\n # Remove all the operators we processed in this phase, and remove\n # from the dependencies of other ops too\n for stat_op in current_phase:\n stat_ops.pop(stat_op)\n for dependencies in stat_ops.values():\n dependencies.difference_update(current_phase)\n\n # hack: store input/output dtypes here. We should have complete dtype\n # information for each operator (like we do for column names), but as\n # an interim solution this gets us what we need.\n input_dtypes = dataset.to_ddf()[self._input_columns()].dtypes\n self.input_dtypes = dict(zip(input_dtypes.index, input_dtypes))\n output_dtypes = self.transform(dataset).sample_dtypes()\n self.output_dtypes = dict(zip(output_dtypes.index, output_dtypes))\n\n self._zero_output_schemas()\n self.fit_schema(dataset.schema)\n return self\n\n def fit_transform(self, dataset: Dataset) -> Dataset:\n \"\"\"Convenience method to both fit the workflow and transform the dataset in a single\n call. 
Equivalent to calling ``workflow.fit(dataset)`` followed by\n ``workflow.transform(dataset)``\n\n Parameters\n -----------\n dataset: Dataset\n\n Returns\n -------\n Dataset\n \"\"\"\n self.fit(dataset)\n return self.transform(dataset)\n\n def save(self, path):\n \"\"\"Save this workflow to disk\n\n Parameters\n ----------\n path: str\n The path to save the workflow to\n \"\"\"\n # avoid a circular import getting the version\n from nvtabular import __version__ as nvt_version\n\n os.makedirs(path, exist_ok=True)\n\n # point all stat ops to store intermediate output (parquet etc) at the path\n # this lets us easily bundle\n for stat in _get_stat_ops([self.output_node]):\n stat.op.set_storage_path(path, copy=True)\n\n # generate a file of all versions used to generate this bundle\n lib = cudf if cudf else pd\n with open(os.path.join(path, \"metadata.json\"), \"w\") as o:\n json.dump(\n {\n \"versions\": {\n \"nvtabular\": nvt_version,\n lib.__name__: lib.__version__,\n \"python\": sys.version,\n },\n \"generated_timestamp\": int(time.time()),\n },\n o,\n )\n\n # dump out the full workflow (graph/stats/operators etc) using cloudpickle\n with open(os.path.join(path, \"workflow.pkl\"), \"wb\") as o:\n cloudpickle.dump(self, o)\n\n @classmethod\n def load(cls, path, client=None):\n \"\"\"Load up a saved workflow object from disk\n\n Parameters\n ----------\n path: str\n The path to load the workflow from\n client: distributed.Client, optional\n The Dask distributed client to use for multi-gpu processing and multi-node processing\n\n Returns\n -------\n Workflow\n \"\"\"\n # avoid a circular import getting the version\n from nvtabular import __version__ as nvt_version\n\n # check version information from the metadata blob, and warn if we have a mismatch\n meta = json.load(open(os.path.join(path, \"metadata.json\")))\n\n def parse_version(version):\n return version.split(\".\")[:2]\n\n def check_version(stored, current, name):\n if parse_version(stored) != parse_version(current):\n warnings.warn(\n f\"Loading workflow generated with {name} version {stored} \"\n f\"- but we are running {name} {current}. 
This might cause issues\"\n )\n\n # make sure we don't have any major/minor version conflicts between the stored worklflow\n # and the current environment\n lib = cudf if cudf else pd\n versions = meta[\"versions\"]\n check_version(versions[\"nvtabular\"], nvt_version, \"nvtabular\")\n check_version(versions[\"python\"], sys.version, \"python\")\n\n if lib.__name__ in versions:\n check_version(versions[lib.__name__], lib.__version__, lib.__name__)\n else:\n expected = \"GPU\" if \"cudf\" in versions else \"CPU\"\n warnings.warn(f\"Loading workflow generated on {expected}\")\n\n # load up the workflow object di\n workflow = cloudpickle.load(open(os.path.join(path, \"workflow.pkl\"), \"rb\"))\n workflow.client = client\n\n # we might have been copied since saving, update all the stat ops\n # with the new path to their storage locations\n for stat in _get_stat_ops([workflow.output_node]):\n stat.op.set_storage_path(path, copy=False)\n\n return workflow\n\n def __getstate__(self):\n # dask client objects aren't picklable - exclude from saved representation\n return {k: v for k, v in self.__dict__.items() if k != \"client\"}\n\n def clear_stats(self):\n for stat in _get_stat_ops([self.output_node]):\n stat.op.clear()\n\n def _input_columns(self):\n input_cols = []\n for node in iter_nodes([self.output_node]):\n upstream_output_cols = []\n\n for upstream_node in node.parents_with_dep_nodes:\n upstream_output_cols += upstream_node.output_columns.names\n\n for upstream_selector in node.dependency_selectors:\n upstream_output_cols += upstream_selector.names\n\n upstream_output_cols = _get_unique(upstream_output_cols)\n input_cols += list(set(node.input_columns.names) - set(upstream_output_cols))\n input_cols += node.dependency_columns.names\n\n return _get_unique(input_cols)\n\n def _clear_worker_cache(self):\n # Clear worker caches to be \"safe\"\n if self.client:\n self.client.run(clean_worker_cache)\n else:\n clean_worker_cache()\n\n def _zero_output_schemas(self):\n \"\"\"\n Zero out all schemas in order to rerun fit schema after operators\n have run fit and have stats to add to schema.\n \"\"\"\n for node in iter_nodes([self.output_node]):\n node.output_schema = None\n node.input_schema = None\n\n\ndef _transform_ddf(ddf, workflow_nodes, meta=None, additional_columns=None):\n # Check if we are only selecting columns (no transforms).\n # If so, we should perform column selection at the ddf level.\n # Otherwise, Dask will not push the column selection into the\n # IO function.\n if not workflow_nodes:\n return ddf[_get_unique(additional_columns)] if additional_columns else ddf\n\n if isinstance(workflow_nodes, WorkflowNode):\n workflow_nodes = [workflow_nodes]\n\n columns = list(flatten(wfn.output_columns.names for wfn in workflow_nodes))\n columns += additional_columns if additional_columns else []\n\n if isinstance(meta, dict) and isinstance(ddf._meta, pd.DataFrame):\n dtypes = meta\n meta = type(ddf._meta)({k: [] for k in columns})\n for column, dtype in dtypes.items():\n meta[column] = meta[column].astype(dtype)\n\n elif not meta:\n # TODO: constructing meta like this loses dtype information on the ddf\n # and sets it all to 'float64'. We should propogate dtype information along\n # with column names in the columngroup graph. 
This currently only\n # happesn during intermediate 'fit' transforms, so as long as statoperators\n # don't require dtype information on the DDF this doesn't matter all that much\n meta = type(ddf._meta)({k: [] for k in columns})\n\n return ddf.map_partitions(\n _transform_partition,\n workflow_nodes,\n additional_columns=additional_columns,\n meta=meta,\n )\n\n\ndef _get_stat_ops(nodes):\n return set(node for node in iter_nodes(nodes) if isinstance(node.op, StatOperator))\n\n\ndef _get_schemaless_nodes(nodes):\n return set(node for node in iter_nodes(nodes) if node.input_schema is None)\n\n\ndef _get_unique(cols):\n # Need to preserve order in unique-column list\n return list({x: x for x in cols}.keys())\n\n\ndef _transform_partition(root_df, workflow_nodes, additional_columns=None):\n \"\"\"Transforms a single partition by appyling all operators in a WorkflowNode\"\"\"\n output = None\n\n for node in workflow_nodes:\n node_input_cols = _get_unique(node.input_columns.names)\n node_output_cols = _get_unique(node.output_columns.names)\n addl_input_cols = set(node.dependency_columns.names)\n\n # Build input dataframe\n if node.parents_with_dep_nodes:\n # If there are parents, collect their outputs\n # to build the current node's input\n input_df = None\n seen_columns = None\n\n for parent in node.parents_with_dep_nodes:\n parent_output_cols = _get_unique(parent.output_columns.names)\n parent_df = _transform_partition(root_df, [parent])\n if input_df is None or not len(input_df):\n input_df = parent_df[parent_output_cols]\n seen_columns = set(parent_output_cols)\n else:\n new_columns = set(parent_output_cols) - seen_columns\n input_df = _concat_columns([input_df, parent_df[list(new_columns)]])\n seen_columns.update(new_columns)\n\n # Check for additional input columns that aren't generated by parents\n # and fetch them from the root dataframe\n unseen_columns = set(node.input_columns.names) - seen_columns\n addl_input_cols = addl_input_cols.union(unseen_columns)\n\n # TODO: Find a better way to remove dupes\n addl_input_cols = addl_input_cols - set(input_df.columns)\n\n if addl_input_cols:\n input_df = _concat_columns([input_df, root_df[list(addl_input_cols)]])\n else:\n # If there are no parents, this is an input node,\n # so pull columns directly from root df\n input_df = root_df[node_input_cols + list(addl_input_cols)]\n\n # Compute the node's output\n if node.op:\n try:\n output_df = node.op.transform(node.input_columns, input_df)\n except Exception:\n LOG.exception(\"Failed to transform operator %s\", node.op)\n raise\n if output_df is None:\n raise RuntimeError(\"Operator %s didn't return a value during transform\" % node.op)\n else:\n output_df = input_df\n\n # Combine output across node loop iterations\n\n # dask needs output to be in the same order defined as meta, reorder partitions here\n # this also selects columns (handling the case of removing columns from the output using\n # \"-\" overload)\n if output is None:\n output = output_df[node_output_cols]\n else:\n output = _concat_columns([output, output_df[node_output_cols]])\n\n if additional_columns:\n output = _concat_columns([output, root_df[_get_unique(additional_columns)]])\n\n return output\n",
"path": "nvtabular/workflow/workflow.py"
}
] | [
{
"content": "#\n# Copyright (c) 2021, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport json\nimport logging\nimport os\nimport sys\nimport time\nimport warnings\nfrom typing import TYPE_CHECKING, Optional\n\nimport cloudpickle\n\ntry:\n import cudf\nexcept ImportError:\n cudf = None\nimport dask\nimport pandas as pd\nfrom dask.core import flatten\n\nimport nvtabular\nfrom nvtabular.columns import Schema\nfrom nvtabular.dispatch import _concat_columns\nfrom nvtabular.io.dataset import Dataset\nfrom nvtabular.ops import StatOperator\nfrom nvtabular.utils import _ensure_optimize_dataframe_graph, global_dask_client\nfrom nvtabular.worker import clean_worker_cache\nfrom nvtabular.workflow.node import WorkflowNode, iter_nodes\n\nLOG = logging.getLogger(\"nvtabular\")\n\n\nif TYPE_CHECKING:\n import distributed\n\n\nclass Workflow:\n \"\"\"\n The Workflow class applies a graph of operations onto a dataset, letting you transform\n datasets to do feature engineering and preprocessing operations. This class follows an API\n similar to Transformers in sklearn: we first ``fit`` the workflow by calculating statistics\n on the dataset, and then once fit we can ``transform`` datasets by applying these statistics.\n\n Example usage::\n\n # define a graph of operations\n cat_features = CAT_COLUMNS >> nvtabular.ops.Categorify()\n cont_features = CONT_COLUMNS >> nvtabular.ops.FillMissing() >> nvtabular.ops.Normalize()\n workflow = nvtabular.Workflow(cat_features + cont_features + \"label\")\n\n # calculate statistics on the training dataset\n workflow.fit(nvtabular.io.Dataset(TRAIN_PATH))\n\n # transform the training and validation datasets and write out as parquet\n workflow.transform(nvtabular.io.Dataset(TRAIN_PATH)).to_parquet(output_path=TRAIN_OUT_PATH)\n workflow.transform(nvtabular.io.Dataset(VALID_PATH)).to_parquet(output_path=VALID_OUT_PATH)\n\n Parameters\n ----------\n output_node: WorkflowNode\n The last node in the graph of operators this workflow should apply\n client: distributed.Client, optional\n The Dask distributed client to use for multi-gpu processing and multi-node processing\n \"\"\"\n\n def __init__(self, output_node: WorkflowNode, client: Optional[\"distributed.Client\"] = None):\n self.output_node = output_node\n self.client = client\n self.input_dtypes = None\n self.output_dtypes = None\n self.output_schema = None\n\n # Warn user if there is an unused global\n # Dask client available\n if global_dask_client(self.client):\n warnings.warn(\n \"A global dask.distributed client has been detected, but the \"\n \"single-threaded scheduler will be used for execution. Please \"\n \"use the `client` argument to initialize a `Workflow` object \"\n \"with distributed-execution enabled.\"\n )\n\n def transform(self, dataset: Dataset) -> Dataset:\n \"\"\"Transforms the dataset by applying the graph of operators to it. 
Requires the ``fit``\n method to have already been called, or calculated statistics to be loaded from disk\n\n This method returns a Dataset object, with the transformations lazily loaded. None\n of the actual computation will happen until the produced Dataset is consumed, or\n written out to disk.\n\n Parameters\n -----------\n dataset: Dataset\n\n Returns\n -------\n Dataset\n \"\"\"\n self._clear_worker_cache()\n\n if not self.output_schema:\n self.fit_schema(dataset.schema)\n\n ddf = dataset.to_ddf(columns=self._input_columns())\n return Dataset(\n _transform_ddf(ddf, self.output_node, self.output_dtypes),\n client=self.client,\n cpu=dataset.cpu,\n base_dataset=dataset.base_dataset,\n schema=self.output_schema,\n )\n\n def fit_schema(self, input_schema: Schema) -> \"Workflow\":\n schemaless_nodes = {\n node: _get_schemaless_nodes(node.parents_with_dep_nodes)\n for node in _get_schemaless_nodes([self.output_node])\n }\n\n while schemaless_nodes:\n # get all the Operators with no outstanding dependencies\n current_phase = [\n node for node, dependencies in schemaless_nodes.items() if not dependencies\n ]\n if not current_phase:\n # this shouldn't happen, but lets not infinite loop just in case\n raise RuntimeError(\"failed to find dependency-free Operator to compute schema for\")\n\n processed_nodes = []\n for node in current_phase:\n if not node.parents:\n node.compute_schemas(input_schema)\n else:\n combined_schema = sum(\n [parent.output_schema for parent in node.parents if parent.output_schema],\n Schema(),\n )\n combined_schema += input_schema\n node.compute_schemas(combined_schema)\n\n processed_nodes.append(node)\n\n # Remove all the operators we processed in this phase, and remove\n # from the dependencies of other ops too\n for schemaless_node in current_phase:\n schemaless_nodes.pop(schemaless_node)\n for dependencies in schemaless_nodes.values():\n dependencies.difference_update(current_phase)\n\n self.output_schema = self.output_node.output_schema\n\n return self\n\n def fit(self, dataset: Dataset) -> \"Workflow\":\n \"\"\"Calculates statistics for this workflow on the input dataset\n\n Parameters\n -----------\n dataset: Dataset\n The input dataset to calculate statistics for. 
If there is a train/test split this\n data should be the training dataset only.\n \"\"\"\n self._clear_worker_cache()\n\n if not self.output_schema:\n self.fit_schema(dataset.schema)\n\n ddf = dataset.to_ddf(columns=self._input_columns())\n\n # Get a dictionary mapping all StatOperators we need to fit to a set of any dependant\n # StatOperators (having StatOperators that depend on the output of other StatOperators\n # means that will have multiple phases in the fit cycle here)\n stat_ops = {\n op: _get_stat_ops(op.parents_with_dep_nodes) for op in _get_stat_ops([self.output_node])\n }\n\n while stat_ops:\n # get all the StatOperators that we can currently call fit on (no outstanding\n # dependencies)\n current_phase = [op for op, dependencies in stat_ops.items() if not dependencies]\n if not current_phase:\n # this shouldn't happen, but lets not infinite loop just in case\n raise RuntimeError(\"failed to find dependency-free StatOperator to fit\")\n\n stats, ops = [], []\n for workflow_node in current_phase:\n # Check for additional input columns that aren't generated by parents\n addl_input_cols = set()\n if workflow_node.parents:\n parent_output_cols = sum(\n [parent.output_columns for parent in workflow_node.parents],\n nvtabular.ColumnSelector(),\n )\n addl_input_cols = set(workflow_node.input_columns.names) - set(\n parent_output_cols.names\n )\n\n # apply transforms necessary for the inputs to the current column group, ignoring\n # the transforms from the statop itself\n transformed_ddf = _ensure_optimize_dataframe_graph(\n ddf=_transform_ddf(\n ddf, workflow_node.parents, additional_columns=addl_input_cols\n )\n )\n\n op = workflow_node.op\n try:\n stats.append(op.fit(workflow_node.input_columns, transformed_ddf))\n ops.append(op)\n except Exception:\n LOG.exception(\"Failed to fit operator %s\", workflow_node.op)\n raise\n\n if self.client:\n results = [r.result() for r in self.client.compute(stats)]\n else:\n results = dask.compute(stats, scheduler=\"synchronous\")[0]\n\n for computed_stats, op in zip(results, ops):\n op.fit_finalize(computed_stats)\n\n # Remove all the operators we processed in this phase, and remove\n # from the dependencies of other ops too\n for stat_op in current_phase:\n stat_ops.pop(stat_op)\n for dependencies in stat_ops.values():\n dependencies.difference_update(current_phase)\n\n # hack: store input/output dtypes here. We should have complete dtype\n # information for each operator (like we do for column names), but as\n # an interim solution this gets us what we need.\n input_dtypes = dataset.to_ddf()[self._input_columns()].dtypes\n self.input_dtypes = dict(zip(input_dtypes.index, input_dtypes))\n output_dtypes = self.transform(dataset).sample_dtypes()\n self.output_dtypes = dict(zip(output_dtypes.index, output_dtypes))\n\n self._zero_output_schemas()\n self.fit_schema(dataset.schema)\n return self\n\n def fit_transform(self, dataset: Dataset) -> Dataset:\n \"\"\"Convenience method to both fit the workflow and transform the dataset in a single\n call. 
Equivalent to calling ``workflow.fit(dataset)`` followed by\n ``workflow.transform(dataset)``\n\n Parameters\n -----------\n dataset: Dataset\n\n Returns\n -------\n Dataset\n \"\"\"\n self.fit(dataset)\n return self.transform(dataset)\n\n def save(self, path):\n \"\"\"Save this workflow to disk\n\n Parameters\n ----------\n path: str\n The path to save the workflow to\n \"\"\"\n # avoid a circular import getting the version\n from nvtabular import __version__ as nvt_version\n\n os.makedirs(path, exist_ok=True)\n\n # point all stat ops to store intermediate output (parquet etc) at the path\n # this lets us easily bundle\n for stat in _get_stat_ops([self.output_node]):\n stat.op.set_storage_path(path, copy=True)\n\n # generate a file of all versions used to generate this bundle\n lib = cudf if cudf else pd\n with open(os.path.join(path, \"metadata.json\"), \"w\") as o:\n json.dump(\n {\n \"versions\": {\n \"nvtabular\": nvt_version,\n lib.__name__: lib.__version__,\n \"python\": sys.version,\n },\n \"generated_timestamp\": int(time.time()),\n },\n o,\n )\n\n # dump out the full workflow (graph/stats/operators etc) using cloudpickle\n with open(os.path.join(path, \"workflow.pkl\"), \"wb\") as o:\n cloudpickle.dump(self, o)\n\n @classmethod\n def load(cls, path, client=None):\n \"\"\"Load up a saved workflow object from disk\n\n Parameters\n ----------\n path: str\n The path to load the workflow from\n client: distributed.Client, optional\n The Dask distributed client to use for multi-gpu processing and multi-node processing\n\n Returns\n -------\n Workflow\n \"\"\"\n # avoid a circular import getting the version\n from nvtabular import __version__ as nvt_version\n\n # check version information from the metadata blob, and warn if we have a mismatch\n meta = json.load(open(os.path.join(path, \"metadata.json\")))\n\n def parse_version(version):\n return version.split(\".\")[:2]\n\n def check_version(stored, current, name):\n if parse_version(stored) != parse_version(current):\n warnings.warn(\n f\"Loading workflow generated with {name} version {stored} \"\n f\"- but we are running {name} {current}. 
This might cause issues\"\n )\n\n # make sure we don't have any major/minor version conflicts between the stored worklflow\n # and the current environment\n lib = cudf if cudf else pd\n versions = meta[\"versions\"]\n check_version(versions[\"nvtabular\"], nvt_version, \"nvtabular\")\n check_version(versions[\"python\"], sys.version, \"python\")\n\n if lib.__name__ in versions:\n check_version(versions[lib.__name__], lib.__version__, lib.__name__)\n else:\n expected = \"GPU\" if \"cudf\" in versions else \"CPU\"\n warnings.warn(f\"Loading workflow generated on {expected}\")\n\n # load up the workflow object di\n workflow = cloudpickle.load(open(os.path.join(path, \"workflow.pkl\"), \"rb\"))\n workflow.client = client\n\n # we might have been copied since saving, update all the stat ops\n # with the new path to their storage locations\n for stat in _get_stat_ops([workflow.output_node]):\n stat.op.set_storage_path(path, copy=False)\n\n return workflow\n\n def __getstate__(self):\n # dask client objects aren't picklable - exclude from saved representation\n return {k: v for k, v in self.__dict__.items() if k != \"client\"}\n\n def clear_stats(self):\n for stat in _get_stat_ops([self.output_node]):\n stat.op.clear()\n\n def _input_columns(self):\n input_cols = []\n for node in iter_nodes([self.output_node]):\n upstream_output_cols = []\n\n for upstream_node in node.parents_with_dep_nodes:\n upstream_output_cols += upstream_node.output_columns.names\n\n for upstream_selector in node.dependency_selectors:\n upstream_output_cols += upstream_selector.names\n\n upstream_output_cols = _get_unique(upstream_output_cols)\n input_cols += list(set(node.input_columns.names) - set(upstream_output_cols))\n input_cols += node.dependency_columns.names\n\n return _get_unique(input_cols)\n\n def _clear_worker_cache(self):\n # Clear worker caches to be \"safe\"\n if self.client:\n self.client.run(clean_worker_cache)\n else:\n clean_worker_cache()\n\n def _zero_output_schemas(self):\n \"\"\"\n Zero out all schemas in order to rerun fit schema after operators\n have run fit and have stats to add to schema.\n \"\"\"\n for node in iter_nodes([self.output_node]):\n node.output_schema = None\n node.input_schema = None\n\n\ndef _transform_ddf(ddf, workflow_nodes, meta=None, additional_columns=None):\n # Check if we are only selecting columns (no transforms).\n # If so, we should perform column selection at the ddf level.\n # Otherwise, Dask will not push the column selection into the\n # IO function.\n if not workflow_nodes:\n return ddf[_get_unique(additional_columns)] if additional_columns else ddf\n\n if isinstance(workflow_nodes, WorkflowNode):\n workflow_nodes = [workflow_nodes]\n\n columns = list(flatten(wfn.output_columns.names for wfn in workflow_nodes))\n columns += additional_columns if additional_columns else []\n\n if isinstance(meta, dict) and isinstance(ddf._meta, pd.DataFrame):\n dtypes = meta\n meta = type(ddf._meta)({k: [] for k in columns})\n for column, dtype in dtypes.items():\n meta[column] = meta[column].astype(dtype)\n\n elif not meta:\n # TODO: constructing meta like this loses dtype information on the ddf\n # and sets it all to 'float64'. We should propogate dtype information along\n # with column names in the columngroup graph. 
This currently only\n # happesn during intermediate 'fit' transforms, so as long as statoperators\n # don't require dtype information on the DDF this doesn't matter all that much\n meta = type(ddf._meta)({k: [] for k in columns})\n\n return ddf.map_partitions(\n _transform_partition,\n workflow_nodes,\n additional_columns=additional_columns,\n meta=meta,\n enforce_metadata=False,\n )\n\n\ndef _get_stat_ops(nodes):\n return set(node for node in iter_nodes(nodes) if isinstance(node.op, StatOperator))\n\n\ndef _get_schemaless_nodes(nodes):\n return set(node for node in iter_nodes(nodes) if node.input_schema is None)\n\n\ndef _get_unique(cols):\n # Need to preserve order in unique-column list\n return list({x: x for x in cols}.keys())\n\n\ndef _transform_partition(root_df, workflow_nodes, additional_columns=None):\n \"\"\"Transforms a single partition by appyling all operators in a WorkflowNode\"\"\"\n output = None\n\n for node in workflow_nodes:\n node_input_cols = _get_unique(node.input_columns.names)\n node_output_cols = _get_unique(node.output_columns.names)\n addl_input_cols = set(node.dependency_columns.names)\n\n # Build input dataframe\n if node.parents_with_dep_nodes:\n # If there are parents, collect their outputs\n # to build the current node's input\n input_df = None\n seen_columns = None\n\n for parent in node.parents_with_dep_nodes:\n parent_output_cols = _get_unique(parent.output_columns.names)\n parent_df = _transform_partition(root_df, [parent])\n if input_df is None or not len(input_df):\n input_df = parent_df[parent_output_cols]\n seen_columns = set(parent_output_cols)\n else:\n new_columns = set(parent_output_cols) - seen_columns\n input_df = _concat_columns([input_df, parent_df[list(new_columns)]])\n seen_columns.update(new_columns)\n\n # Check for additional input columns that aren't generated by parents\n # and fetch them from the root dataframe\n unseen_columns = set(node.input_columns.names) - seen_columns\n addl_input_cols = addl_input_cols.union(unseen_columns)\n\n # TODO: Find a better way to remove dupes\n addl_input_cols = addl_input_cols - set(input_df.columns)\n\n if addl_input_cols:\n input_df = _concat_columns([input_df, root_df[list(addl_input_cols)]])\n else:\n # If there are no parents, this is an input node,\n # so pull columns directly from root df\n input_df = root_df[node_input_cols + list(addl_input_cols)]\n\n # Compute the node's output\n if node.op:\n try:\n output_df = node.op.transform(node.input_columns, input_df)\n except Exception:\n LOG.exception(\"Failed to transform operator %s\", node.op)\n raise\n if output_df is None:\n raise RuntimeError(\"Operator %s didn't return a value during transform\" % node.op)\n else:\n output_df = input_df\n\n # Combine output across node loop iterations\n\n # dask needs output to be in the same order defined as meta, reorder partitions here\n # this also selects columns (handling the case of removing columns from the output using\n # \"-\" overload)\n if output is None:\n output = output_df[node_output_cols]\n else:\n output = _concat_columns([output, output_df[node_output_cols]])\n\n if additional_columns:\n output = _concat_columns([output, root_df[_get_unique(additional_columns)]])\n\n return output\n",
"path": "nvtabular/workflow/workflow.py"
}
] | diff --git a/nvtabular/workflow/workflow.py b/nvtabular/workflow/workflow.py
index cf433cceffd..87aba3071d6 100644
--- a/nvtabular/workflow/workflow.py
+++ b/nvtabular/workflow/workflow.py
@@ -433,6 +433,7 @@ def _transform_ddf(ddf, workflow_nodes, meta=None, additional_columns=None):
workflow_nodes,
additional_columns=additional_columns,
meta=meta,
+ enforce_metadata=False,
)
diff --git a/tests/unit/test_dask_nvt.py b/tests/unit/test_dask_nvt.py
index c08b7c8e1cc..de35101c31b 100644
--- a/tests/unit/test_dask_nvt.py
+++ b/tests/unit/test_dask_nvt.py
@@ -20,8 +20,10 @@
import cudf
import dask_cudf
+import pandas as pd
import pytest
from dask.dataframe import assert_eq
+from dask.dataframe import from_pandas as dd_from_pandas
from dask.dataframe import read_parquet as dd_read_parquet
from nvtabular import ColumnSelector, Dataset, Workflow, ops
@@ -276,3 +278,18 @@ def test_dask_preproc_cpu(client, tmpdir, datasets, engine, shuffle, cpu):
df_disk.sort_values(["id", "x"])[["name-string", "label"]],
check_index=False,
)
+
+
[email protected]("cpu", [None, True])
+def test_filtered_partition(tmpdir, cpu):
+ # Toy DataFrame example
+ df = pd.DataFrame({"col": range(100)})
+ ddf = dd_from_pandas(df, npartitions=5)
+ dataset = Dataset(ddf, cpu=cpu)
+
+ # Workflow
+ filtered = ["col"] >> ops.Filter(lambda df: df["col"] < 75)
+ workflow = Workflow(filtered)
+
+ # Write result to disk
+ workflow.transform(dataset).to_parquet(str(tmpdir))
|
zulip__zulip-20788 | "Pan and zoom" cuts off images instead of using the available space
If you have a tall image and a wide monitor (and wide browser viewport), and you try to zoom… the image stays trapped inside the same box it occupied before you even tried to zoom. If the image is super wide instead of tall, the same thing happens the other way around.
This leads to a lot of frustrating panning around, to look at the different parts of the image through this narrow keyhole, while tons of screen space next to it doesn't get used.
This is the biggest of the issues described by @vanclute in #18939. It was reported again by @alexanderglueck as #19837, and I just ran into it myself ([chat](https://chat.zulip.org/#narrow/stream/6-frontend/topic/pan.2Fzoom/near/1308717)). Here's a nice illustration from #19837:

Instead, when zooming we should use the full space available. This may be bigger than the area the image occupied when it was scaled down to fit completely in the space available, because the available box may have a different aspect ratio from the image.
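To make the geometry concrete, here is a minimal Python sketch (the numbers and the helper are invented for illustration, not taken from the report or from the Zulip code) of the "fit inside the box" scaling that produces the keyhole effect when the image and viewport aspect ratios differ:

```python
def contain_scale(img_w, img_h, box_w, box_h):
    # "object-fit: contain" style scaling: the whole image must fit in the box.
    return min(box_w / img_w, box_h / img_h)

# Hypothetical tall screenshot viewed in a wide viewport.
img_w, img_h = 800, 2000
box_w, box_h = 2400, 1200

s = contain_scale(img_w, img_h, box_w, box_h)  # limited by height -> 0.6
print(s * img_w, s * img_h)                    # 480.0 1200.0
# Only 480 of the 2400 px of available width is used; the reported behavior is
# that zooming stays confined to that 480 px column instead of letting the
# zoomed image grow into the full viewport.
```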
| [
{
"content": "import os\n\nZULIP_VERSION = \"5.0-dev+git\"\n\n# Add information on number of commits and commit hash to version, if available\nzulip_git_version_file = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"zulip-git-version\"\n)\nlines = [ZULIP_VERSION, \"\"]\nif os.path.exists(zulip_git_version_file):\n with open(zulip_git_version_file) as f:\n lines = f.readlines() + [\"\", \"\"]\nZULIP_VERSION = lines.pop(0).strip()\nZULIP_MERGE_BASE = lines.pop(0).strip()\n\nLATEST_MAJOR_VERSION = \"4.0\"\nLATEST_RELEASE_VERSION = \"4.9\"\nLATEST_RELEASE_ANNOUNCEMENT = \"https://blog.zulip.com/2021/05/13/zulip-4-0-released/\"\n\n# Versions of the desktop app below DESKTOP_MINIMUM_VERSION will be\n# prevented from connecting to the Zulip server. Versions above\n# DESKTOP_MINIMUM_VERSION but below DESKTOP_WARNING_VERSION will have\n# a banner at the top of the page asking the user to upgrade.\nDESKTOP_MINIMUM_VERSION = \"5.2.0\"\nDESKTOP_WARNING_VERSION = \"5.4.3\"\n\n# Bump the API_FEATURE_LEVEL whenever an API change is made\n# that clients might want to condition on. If we forget at\n# the time we make the change, then bump it later as soon\n# as we notice; clients using API_FEATURE_LEVEL will just not\n# use the new feature/API until the bump.\n#\n# Changes should be accompanied by documentation explaining what the\n# new level means in templates/zerver/api/changelog.md, as well as\n# \"**Changes**\" entries in the endpoint's documentation in `zulip.yaml`.\nAPI_FEATURE_LEVEL = 115\n\n# Bump the minor PROVISION_VERSION to indicate that folks should provision\n# only when going from an old version of the code to a newer version. Bump\n# the major version to indicate that folks should provision in both\n# directions.\n\n# Typically,\n# * adding a dependency only requires a minor version bump;\n# * removing a dependency requires a major version bump;\n# * upgrading a dependency requires a major version bump, unless the\n# upgraded dependency is backwards compatible with all of our\n# historical commits sharing the same major version, in which case a\n# minor version bump suffices.\n\nPROVISION_VERSION = \"173.3\"\n",
"path": "version.py"
}
] | [
{
"content": "import os\n\nZULIP_VERSION = \"5.0-dev+git\"\n\n# Add information on number of commits and commit hash to version, if available\nzulip_git_version_file = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"zulip-git-version\"\n)\nlines = [ZULIP_VERSION, \"\"]\nif os.path.exists(zulip_git_version_file):\n with open(zulip_git_version_file) as f:\n lines = f.readlines() + [\"\", \"\"]\nZULIP_VERSION = lines.pop(0).strip()\nZULIP_MERGE_BASE = lines.pop(0).strip()\n\nLATEST_MAJOR_VERSION = \"4.0\"\nLATEST_RELEASE_VERSION = \"4.9\"\nLATEST_RELEASE_ANNOUNCEMENT = \"https://blog.zulip.com/2021/05/13/zulip-4-0-released/\"\n\n# Versions of the desktop app below DESKTOP_MINIMUM_VERSION will be\n# prevented from connecting to the Zulip server. Versions above\n# DESKTOP_MINIMUM_VERSION but below DESKTOP_WARNING_VERSION will have\n# a banner at the top of the page asking the user to upgrade.\nDESKTOP_MINIMUM_VERSION = \"5.2.0\"\nDESKTOP_WARNING_VERSION = \"5.4.3\"\n\n# Bump the API_FEATURE_LEVEL whenever an API change is made\n# that clients might want to condition on. If we forget at\n# the time we make the change, then bump it later as soon\n# as we notice; clients using API_FEATURE_LEVEL will just not\n# use the new feature/API until the bump.\n#\n# Changes should be accompanied by documentation explaining what the\n# new level means in templates/zerver/api/changelog.md, as well as\n# \"**Changes**\" entries in the endpoint's documentation in `zulip.yaml`.\nAPI_FEATURE_LEVEL = 115\n\n# Bump the minor PROVISION_VERSION to indicate that folks should provision\n# only when going from an old version of the code to a newer version. Bump\n# the major version to indicate that folks should provision in both\n# directions.\n\n# Typically,\n# * adding a dependency only requires a minor version bump;\n# * removing a dependency requires a major version bump;\n# * upgrading a dependency requires a major version bump, unless the\n# upgraded dependency is backwards compatible with all of our\n# historical commits sharing the same major version, in which case a\n# minor version bump suffices.\n\nPROVISION_VERSION = \"173.4\"\n",
"path": "version.py"
}
] | diff --git a/frontend_tests/node_tests/lightbox.js b/frontend_tests/node_tests/lightbox.js
index 5a2b09837b38b..3a132d6dcd0d0 100644
--- a/frontend_tests/node_tests/lightbox.js
+++ b/frontend_tests/node_tests/lightbox.js
@@ -54,8 +54,8 @@ test("pan_and_zoom", ({override_rewire}) => {
};
override_rewire(lightbox, "render_lightbox_list_images", () => {});
-
- lightbox.open(img);
+ const open_image = lightbox.build_open_image_function();
+ open_image(img);
assert.equal(fetched_zid, 1234);
});
@@ -88,6 +88,7 @@ test("youtube", ({override_rewire}) => {
override_rewire(lightbox, "render_lightbox_list_images", () => {});
- lightbox.open(img);
+ const open_image = lightbox.build_open_image_function();
+ open_image(img);
assert.equal($(".image-actions .open").attr("href"), href);
});
diff --git a/package.json b/package.json
index b30b2fd67185b..3b71f14edef90 100644
--- a/package.json
+++ b/package.json
@@ -14,6 +14,7 @@
"@formatjs/intl": "^1.9.7",
"@giphy/js-components": "^5.0.5",
"@giphy/js-fetch-api": "^4.0.1",
+ "@panzoom/panzoom": "^4.4.3",
"@uppy/core": "^1.7.1",
"@uppy/progress-bar": "^1.3.4",
"@uppy/xhr-upload": "^1.4.2",
diff --git a/static/js/lightbox.js b/static/js/lightbox.js
index 84d6db7cb33e3..04496efcc6cbb 100644
--- a/static/js/lightbox.js
+++ b/static/js/lightbox.js
@@ -1,9 +1,9 @@
+import panzoom from "@panzoom/panzoom";
import $ from "jquery";
import render_lightbox_overlay from "../templates/lightbox_overlay.hbs";
import * as blueslip from "./blueslip";
-import {LightboxCanvas} from "./lightbox_canvas";
import * as message_store from "./message_store";
import * as overlays from "./overlays";
import * as people from "./people";
@@ -15,6 +15,81 @@ let is_open = false;
// memoized instead of being looked up multiple times.
const asset_map = new Map();
+export class PanZoomControl {
+ // Class for both initializing and controlling the
+ // the pan/zoom functionality.
+ constructor(container) {
+ this.container = container;
+ this.panzoom = panzoom(this.container, {
+ disablePan: true,
+ disableZoom: true,
+ cursor: "auto",
+ });
+
+ // The following events are necessary to prevent the click event
+ // firing where the user "unclicks" at the end of the drag, which
+ // was causing accidental overlay closes in some situations.
+ this.container.addEventListener("panzoomstart", () => {
+ // Marks this overlay as needing to stay open.
+ $("#lightbox_overlay").data("noclose", true);
+ });
+
+ this.container.addEventListener("panzoomend", () => {
+ // Don't remove the noclose attribute on this overlay until after paint,
+ // otherwise it will be removed too early and close the lightbox
+ // unintentionally.
+ setTimeout(() => {
+ $("#lightbox_overlay").data("noclose", false);
+ }, 0);
+ });
+
+ // keybinds
+ document.addEventListener("keydown", (e) => {
+ if (!overlays.lightbox_open()) {
+ return;
+ }
+ switch (e.key) {
+ case "Z":
+ case "+":
+ this.zoomIn();
+ break;
+ case "z":
+ case "-":
+ this.zoomOut();
+ break;
+ case "v":
+ overlays.close_overlay("lightbox");
+ break;
+ }
+ e.preventDefault();
+ e.stopPropagation();
+ });
+ }
+
+ reset() {
+ this.panzoom.reset();
+ }
+
+ disablePanZoom() {
+ this.container.removeEventListener("wheel", this.panzoom.zoomWithWheel);
+ this.panzoom.setOptions({disableZoom: true, disablePan: true, cursor: "auto"});
+ this.reset();
+ }
+
+ enablePanZoom() {
+ this.panzoom.setOptions({disableZoom: false, disablePan: false, cursor: "move"});
+ this.container.addEventListener("wheel", this.panzoom.zoomWithWheel);
+ }
+
+ zoomIn() {
+ this.panzoom.zoomIn();
+ }
+
+ zoomOut() {
+ this.panzoom.zoomOut();
+ }
+}
+
export function clear_for_testing() {
is_open = false;
asset_map.clear();
@@ -51,21 +126,10 @@ function display_image(payload) {
$(".player-container").hide();
$(".image-actions, .image-description, .download, .lightbox-canvas-trigger").show();
- const lightbox_canvas = $(".lightbox-canvas-trigger").hasClass("enabled");
-
- if (lightbox_canvas === true) {
- const canvas = document.createElement("canvas");
- canvas.dataset.src = payload.source;
-
- $("#lightbox_overlay .image-preview").html(canvas).show();
- const photo = new LightboxCanvas(canvas);
- photo.speed(2.3);
- } else {
- const img = new Image();
- img.src = payload.source;
-
- $("#lightbox_overlay .image-preview").html(img).show();
- }
+ const img_container = $("#lightbox_overlay .image-preview > .zoom-element");
+ const img = new Image();
+ img.src = payload.source;
+ img_container.html(img).show();
$(".image-description .title").text(payload.title || "N/A");
$(".image-description .user").text(payload.user);
@@ -112,55 +176,59 @@ function display_video(payload) {
$(".image-actions .open").attr("href", payload.url);
}
-export function open($image) {
- // if the asset_map already contains the metadata required to display the
- // asset, just recall that metadata.
- let $preview_src = $image.attr("src");
- let payload = asset_map.get($preview_src);
- if (payload === undefined) {
- if ($preview_src.endsWith("&size=full")) {
- // while fetching an image for canvas, `src` attribute supplies
- // full-sized image instead of thumbnail, so we have to replace
- // `size=full` with `size=thumbnail`.
- //
- // TODO: This is a hack to work around the fact that for
- // the lightbox canvas, the `src` is the data-fullsize-src
- // for the image, not the original thumbnail used to open
- // the lightbox. A better fix will be to check a
- // `data-thumbnail-src` attribute that we add to the
- // canvas elements.
- $preview_src = $preview_src.replace(/.{4}$/, "thumbnail");
- payload = asset_map.get($preview_src);
- }
- if (payload === undefined) {
- payload = parse_image_data($image);
- }
+export function build_open_image_function(on_close) {
+ if (on_close === undefined) {
+ on_close = function () {
+ $(".player-container iframe").remove();
+ is_open = false;
+ document.activeElement.blur();
+ };
}
- if (payload.type.match("-video")) {
- display_video(payload);
- } else if (payload.type === "image") {
- display_image(payload);
- }
+ return function ($image) {
+ // if the asset_map already contains the metadata required to display the
+ // asset, just recall that metadata.
+ let $preview_src = $image.attr("src");
+ let payload = asset_map.get($preview_src);
+ if (payload === undefined) {
+ if ($preview_src.endsWith("&size=full")) {
+ // while fetching an image for canvas, `src` attribute supplies
+ // full-sized image instead of thumbnail, so we have to replace
+ // `size=full` with `size=thumbnail`.
+ //
+ // TODO: This is a hack to work around the fact that for
+ // the lightbox canvas, the `src` is the data-fullsize-src
+ // for the image, not the original thumbnail used to open
+ // the lightbox. A better fix will be to check a
+ // `data-thumbnail-src` attribute that we add to the
+ // canvas elements.
+ $preview_src = $preview_src.replace(/.{4}$/, "thumbnail");
+ payload = asset_map.get($preview_src);
+ }
+ if (payload === undefined) {
+ payload = parse_image_data($image);
+ }
+ }
- if (is_open) {
- return;
- }
+ if (payload.type.match("-video")) {
+ display_video(payload);
+ } else if (payload.type === "image") {
+ display_image(payload);
+ }
- function lightbox_close_overlay() {
- $(".player-container iframe").remove();
- is_open = false;
- document.activeElement.blur();
- }
+ if (is_open) {
+ return;
+ }
- overlays.open_overlay({
- name: "lightbox",
- overlay: $("#lightbox_overlay"),
- on_close: lightbox_close_overlay,
- });
+ overlays.open_overlay({
+ name: "lightbox",
+ overlay: $("#lightbox_overlay"),
+ on_close,
+ });
- popovers.hide_all();
- is_open = true;
+ popovers.hide_all();
+ is_open = true;
+ };
}
export function show_from_selected_message() {
@@ -202,7 +270,8 @@ export function show_from_selected_message() {
}
if ($image.length !== 0) {
- open($image);
+ const open_image = build_open_image_function();
+ open_image($image);
}
}
@@ -282,16 +351,32 @@ export function next() {
// this is a block of events that are required for the lightbox to work.
export function initialize() {
+ // Renders the DOM for the lightbox.
const rendered_lightbox_overlay = render_lightbox_overlay();
$("body").append(rendered_lightbox_overlay);
+ // Bind the pan/zoom control the newly created element.
+ const pan_zoom_control = new PanZoomControl(
+ $("#lightbox_overlay .image-preview > .zoom-element")[0],
+ );
+
+ const reset_lightbox_state = function () {
+ $(".player-container iframe").remove();
+ is_open = false;
+ document.activeElement.blur();
+ pan_zoom_control.disablePanZoom();
+ $(".lightbox-canvas-trigger").removeClass("enabled");
+ };
+
+ const open_image = build_open_image_function(reset_lightbox_state);
+
$("#main_div, #compose .preview_content").on("click", ".message_inline_image a", function (e) {
// prevent the link from opening in a new page.
e.preventDefault();
// prevent the message compose dialog from happening.
e.stopPropagation();
const $img = $(this).find("img");
- open($img);
+ open_image($img);
});
$("#lightbox_overlay .download").on("click", function () {
@@ -304,10 +389,11 @@ export function initialize() {
`.message_row img[src='${CSS.escape($(this).attr("data-src"))}']`,
);
- open($original_image);
+ open_image($original_image);
$(".image-list .image.selected").removeClass("selected");
$(this).addClass("selected");
+ pan_zoom_control.reset();
const parentOffset = this.parentNode.clientWidth + this.parentNode.scrollLeft;
// this is the left and right of the image compared to its parent.
@@ -341,18 +427,15 @@ export function initialize() {
});
$("#lightbox_overlay").on("click", ".lightbox-canvas-trigger", function () {
- let $img = $("#lightbox_overlay").find(".image-preview img");
-
- if ($img.length) {
- $(this).addClass("enabled");
- // the `lightbox.open` function will see the enabled class and
- // enable the `LightboxCanvas` class.
- open($img);
- } else {
- $img = $($("#lightbox_overlay").find(".image-preview canvas")[0].image);
+ const $img = $("#lightbox_overlay").find(".image-preview img");
+ open_image($img);
+ if ($(this).hasClass("enabled")) {
+ pan_zoom_control.disablePanZoom();
$(this).removeClass("enabled");
- open($img);
+ } else {
+ pan_zoom_control.enablePanZoom();
+ $(this).addClass("enabled");
}
});
@@ -363,12 +446,14 @@ export function initialize() {
$("#lightbox_overlay .player-container").on("click", function () {
if ($(this).is(".player-container")) {
+ reset_lightbox_state();
overlays.close_active();
}
});
$("#lightbox_overlay").on("click", ".image-info-wrapper, .center", (e) => {
if ($(e.target).is(".image-info-wrapper, .center")) {
+ reset_lightbox_state();
overlays.close_overlay("lightbox");
}
});
diff --git a/static/js/lightbox_canvas.js b/static/js/lightbox_canvas.js
deleted file mode 100644
index 5550d9a0a94f2..0000000000000
--- a/static/js/lightbox_canvas.js
+++ /dev/null
@@ -1,279 +0,0 @@
-import * as blueslip from "./blueslip";
-import * as overlays from "./overlays";
-
-const funcs = {
- setZoom(meta, zoom) {
- // condition to handle zooming event by zoom hotkeys
- if (zoom === "+") {
- zoom = meta.zoom * 1.2;
- } else if (zoom === "-") {
- zoom = meta.zoom / 1.2;
- }
- // make sure the zoom is above 1 and below the maxZoom.
- meta.zoom = Math.min(Math.max(zoom, 1), meta.maxZoom);
- },
-
- // this is a function given a canvas that attaches all of the events
- // required to pan and zoom.
- attachEvents(canvas, context, meta) {
- let mousedown = false;
-
- // wheelEvent.deltaMode is a value that describes what the unit is
- // for the `deltaX`, `deltaY`, and `deltaZ` properties.
- const DELTA_MODE = {
- PIXEL: 0,
- LINE: 1,
- PAGE: 2,
- };
-
- // use the wheel event rather than scroll because this isn't
- // actually an element that can scroll. The wheel event will
- // detect the *gesture* of scrolling over an element, without actually
- // worrying about scrollable content.
- canvas.addEventListener("wheel", (e) => {
- e.preventDefault();
-
- // this is to reverse scrolling directions for the image.
- let delta = meta.direction * e.deltaY;
-
- if (e.deltaMode === DELTA_MODE.LINE) {
- // the vertical height in pixels of an approximate line.
- delta *= 15;
- }
-
- if (e.deltaMode === DELTA_MODE.PAGE) {
- // the vertical height in pixels of an approximate page.
- delta *= 300;
- }
-
- // this is calculated as the user defined speed times the normalizer
- // (which just is what it takes to take the raw delta and transform
- // it to a normal speed), multiply it against the current zoom.
- // Example:
- // delta = 8
- // normalizedDelta = delta * (1 / 20) * 1 = 0.4
- // zoom = zoom * (0.4 / 100) + 1
- const zoom =
- meta.zoom * ((meta.speed * meta.internalSpeedMultiplier * delta) / 100 + 1);
-
- funcs.setZoom(meta, zoom);
- funcs.displayImage(canvas, context, meta);
-
- return false;
- });
-
- // the only valid mousedown events should originate inside of the
- // canvas.
- canvas.addEventListener("mousedown", () => {
- mousedown = true;
- });
-
- // on mousemove, actually run the pan events.
- canvas.addEventListener("mousemove", (e) => {
- // to pan, there must be mousedown and mousemove, check if valid.
- if (mousedown === true) {
- // find the percent of movement relative to the canvas width
- // since e.movementX, e.movementY are in px.
- const percentMovement = {
- x: e.movementX / canvas.width,
- y: e.movementY / canvas.height,
- };
-
- // add the percentMovement to the meta coordinates but divide
- // out by the zoom ratio because when zoomed in 10x for example
- // moving the photo by 1% will appear like 10% on the <canvas>.
- meta.coords.x += (percentMovement.x * 2) / meta.zoom;
- meta.coords.y += (percentMovement.y * 2) / meta.zoom;
-
- // redraw the image.
- funcs.displayImage(canvas, context, meta);
- }
- });
-
- // event listener to handle zoom in and out from using keyboard keys z/Z and +/-
- // in the canvas
- // these hotkeys are not implemented in static/js/hotkey.js as the code in
- // static/js/lightbox_canvas.js and static/js/lightbox.js isn't written a way
- // that the LightboxCanvas instance created in lightbox.js can be
- // accessed from hotkey.js. Major code refactoring is required in lightbox.js
- // to implement these keyboard shortcuts in hotkey.js
- document.addEventListener("keydown", (e) => {
- if (!overlays.lightbox_open()) {
- return;
- }
- switch (e.key) {
- case "Z":
- case "+":
- funcs.setZoom(meta, "+");
- funcs.displayImage(canvas, context, meta);
- break;
- case "z":
- case "-":
- funcs.setZoom(meta, "-");
- funcs.displayImage(canvas, context, meta);
- break;
- case "v":
- overlays.close_overlay("lightbox");
- break;
- }
- e.preventDefault();
- e.stopPropagation();
- });
-
- // make sure that when the mousedown is lifted on <canvas>to prevent
- // panning events.
- canvas.addEventListener("mouseup", () => {
- mousedown = false;
- });
-
- // do so on the document.body as well, though depending on the infra,
- // these are less reliable as preventDefault may prevent these events
- // from propagating all the way to the <body>.
- document.body.addEventListener("mouseup", function body_mouseup() {
- if (document.body.contains(canvas)) {
- mousedown = false;
- } else {
- document.body.removeEventListener("mouseup", body_mouseup);
- }
- });
-
- window.addEventListener("resize", function window_resize() {
- if (document.body.contains(canvas)) {
- funcs.sizeCanvas(canvas, meta);
- funcs.displayImage(canvas, context, meta);
- } else {
- window.removeEventListener("resize", window_resize);
- }
- });
- },
-
- imageRatio(image) {
- return image.naturalWidth / image.naturalHeight;
- },
-
- displayImage(canvas, context, meta) {
- meta.coords.x = Math.max(1 / (meta.zoom * 2), meta.coords.x);
- meta.coords.x = Math.min(1 - 1 / (meta.zoom * 2), meta.coords.x);
-
- meta.coords.y = Math.max(1 / (meta.zoom * 2), meta.coords.y);
- meta.coords.y = Math.min(1 - 1 / (meta.zoom * 2), meta.coords.y);
-
- const c = {
- x: meta.coords.x - 1,
- y: meta.coords.y - 1,
- };
-
- const x = meta.zoom * c.x * canvas.width + canvas.width / 2;
- const y = meta.zoom * c.y * canvas.height + canvas.height / 2;
- const w = canvas.width * meta.zoom;
- const h = canvas.height * meta.zoom;
-
- context.clearRect(0, 0, canvas.width, canvas.height);
- context.imageSmoothingEnabled = false;
-
- context.drawImage(meta.image, x, y, w, h);
- },
-
- // the `sizeCanvas` method figures out the appropriate bounding box for
- // the canvas given a parent that has constraints.
- // for example, if a photo has a ration of 1.5:1 (w:h), and the parent
- // box is 1:1 respectively, we want to stretch the photo to be as large
- // as we can, which means that we check if having the photo width = 100%
- // means that the height is less than 100% of the parent height. If so,
- // then we size the photo as w = 100%, h = 100% / 1.5.
- sizeCanvas(canvas, meta) {
- if (canvas.parentNode === null) {
- return;
- }
-
- if (typeof meta.resize_handler === "function") {
- meta.resize_handler(canvas);
- }
-
- const parent = {
- width: canvas.parentNode.clientWidth,
- height: canvas.parentNode.clientHeight,
- };
-
- if (parent.height * meta.ratio > parent.width) {
- canvas.width = parent.width * 2;
- canvas.style.width = parent.width + "px";
-
- canvas.height = (parent.width / meta.ratio) * 2;
- canvas.style.height = parent.width / meta.ratio + "px";
- } else {
- canvas.height = parent.height * 2;
- canvas.style.height = parent.height + "px";
-
- canvas.width = parent.height * meta.ratio * 2;
- canvas.style.width = parent.height * meta.ratio + "px";
- }
-
- blueslip.warn("Please specify a 'data-width' or 'data-height' argument for canvas.");
- },
-};
-
-export class LightboxCanvas {
- meta = {
- direction: -1,
- zoom: 1,
- image: null,
- coords: {
- x: 0.5,
- y: 0.5,
- },
- speed: 1,
- // this is to normalize the speed to what I would consider to be
- // "standard" zoom speed.
- internalSpeedMultiplier: 0.05,
- maxZoom: 10,
- };
-
- constructor(el) {
- if (el instanceof Node) {
- this.canvas = el;
- } else if (typeof el === "string") {
- this.canvas = document.querySelector(el);
- } else {
- throw new TypeError("'LightboxCanvas' accepts either string selector or node.");
- }
-
- this.context = this.canvas.getContext("2d");
-
- this.meta.image = new Image();
- this.meta.image.src = this.canvas.dataset.src;
- this.meta.image.addEventListener("load", () => {
- this.meta.ratio = funcs.imageRatio(this.meta.image);
-
- funcs.sizeCanvas(this.canvas, this.meta);
- funcs.displayImage(this.canvas, this.context, this.meta);
- });
-
- this.canvas.image = this.meta.image;
-
- funcs.attachEvents(this.canvas, this.context, this.meta);
- }
-
- // set the speed at which scrolling zooms in on a photo.
- speed(speed) {
- this.meta.speed = speed;
- }
-
- // set the max zoom of the `LightboxCanvas` canvas as a mult of the total width.
- maxZoom(maxZoom) {
- this.meta.maxZoom = maxZoom;
- }
-
- reverseScrollDirection() {
- this.meta.direction = 1;
- }
-
- setZoom(zoom) {
- funcs.setZoom(this.meta, zoom);
- funcs.displayImage(this.canvas, this.context, this.meta);
- }
-
- resize(callback) {
- this.meta.resize_handler = callback;
- }
-}
diff --git a/static/js/overlays.js b/static/js/overlays.js
index 1cee48eecc798..3d0674d067eae 100644
--- a/static/js/overlays.js
+++ b/static/js/overlays.js
@@ -337,6 +337,11 @@ export function initialize() {
return;
}
+ if ($target.data("noclose")) {
+ // This overlay has been marked explicitly to not be closed.
+ return;
+ }
+
const target_name = $target.attr("data-overlay");
close_overlay(target_name);
diff --git a/static/styles/lightbox.css b/static/styles/lightbox.css
index 80383d14f933d..937572900d1c7 100644
--- a/static/styles/lightbox.css
+++ b/static/styles/lightbox.css
@@ -36,6 +36,14 @@
canvas {
cursor: pointer;
}
+
+ .zoom-element {
+ width: 100%;
+ height: 100%;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ }
}
.exit {
@@ -141,9 +149,6 @@
}
.image-description {
- width: 100%;
- /* approx width of screen minus action buttons on the side. */
- max-width: calc(100% - 450px);
/* add some extra margin top and remove some bottom to keep the
height the same. and vertically center the text with the buttons. */
margin-top: 25px;
diff --git a/static/templates/lightbox_overlay.hbs b/static/templates/lightbox_overlay.hbs
index 69d54a452de45..20fde407d3f99 100644
--- a/static/templates/lightbox_overlay.hbs
+++ b/static/templates/lightbox_overlay.hbs
@@ -1,4 +1,4 @@
-<div id="lightbox_overlay" class="overlay new-style" data-overlay="lightbox">
+<div id="lightbox_overlay" class="overlay new-style" data-overlay="lightbox" data-noclose="false">
<div class="image-info-wrapper">
<div class="image-description">
<div class="title"></div>
@@ -16,7 +16,9 @@
<div class="clear-float"></div>
</div>
- <div class="image-preview overlay-content no-select"></div>
+ <div class="image-preview no-select">
+ <div class="zoom-element no-select"></div>
+ </div>
<div class="player-container"></div>
<div class="center">
<div class="arrow no-select" data-direction="prev"><</div>
diff --git a/tools/test-js-with-node b/tools/test-js-with-node
index b2260bd1e2b70..52647a7ce12c2 100755
--- a/tools/test-js-with-node
+++ b/tools/test-js-with-node
@@ -93,7 +93,6 @@ EXEMPT_FILES = make_set(
"static/js/info_overlay.js",
"static/js/invite.js",
"static/js/lightbox.js",
- "static/js/lightbox_canvas.js",
"static/js/list_util.ts",
"static/js/loading.ts",
"static/js/local_message.js",
diff --git a/version.py b/version.py
index 71905ba4258ab..7cd446a79cc88 100644
--- a/version.py
+++ b/version.py
@@ -48,4 +48,4 @@
# historical commits sharing the same major version, in which case a
# minor version bump suffices.
-PROVISION_VERSION = "173.3"
+PROVISION_VERSION = "173.4"
diff --git a/yarn.lock b/yarn.lock
index bc2e63651d2d4..5d8188ce790a0 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -1377,6 +1377,11 @@
mkdirp "^1.0.4"
rimraf "^3.0.2"
+"@panzoom/panzoom@^4.4.3":
+ version "4.4.3"
+ resolved "https://registry.yarnpkg.com/@panzoom/panzoom/-/panzoom-4.4.3.tgz#439ef0c3eba1cba0ad9b661fda5961aa2e2eec64"
+ integrity sha512-fTAr7/bc9ukvWKxxqdoAuIhKhvu6TwuNiGcA0N3lrSj5OZGlISGLXcSZZyN7kgqH/6icYS7b18UT/Iq/W2rTOA==
+
"@plotly/[email protected]":
version "0.33.1"
resolved "https://registry.yarnpkg.com/@plotly/d3-sankey-circular/-/d3-sankey-circular-0.33.1.tgz#15d1e0337e0e4b1135bdf0e2195c88adacace1a7"
|
huggingface__diffusers-6507 | StableVideoDiffusionPipeline returns a list instead of np.ndarray for output_type="np"
### Describe the bug
The [comments](https://github.com/huggingface/diffusers/blob/4497b3ec982978eca99895ed1429addde4a84fff/src/diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py#L64) for StableVideoDiffusionPipelineOutput (which is returned by the `__call__()` function of StableVideoDiffusionPipeline) indicate that `np.ndarray` is one of the possible return types for the `frames` field but when I set `output_type="np"` for `__call__()` the `frames` field is a list of `np.ndarray`.
I think the problem is that the output of the `tensor2vid()` call [here](https://github.com/huggingface/diffusers/blob/4497b3ec982978eca99895ed1429addde4a84fff/src/diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py#L534) is a list and not a `np.ndarray`. I have a local commit that uses `np.stack()` to convert the list to a `np.ndarray` that I plan to push up as a PR shortly.
### Reproduction
```
from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import load_image
import torch
import time
repo_id = "stabilityai/stable-video-diffusion-img2vid-xt"
cache_dir = "./cache"
pipeline = StableVideoDiffusionPipeline.from_pretrained(
repo_id, cache_dir=cache_dir, variant="fp16", torch_dtype=torch.float16
)
pipeline.to("cuda")
# Load the conditioning image
image = load_image(
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/svd/rocket.png"
)
image = [image.resize((1024, 576))]
image *= 2
generator = torch.manual_seed(42)
frames = pipeline(
image,
decode_chunk_size=8,
generator=generator,
output_type="np",
).frames
print(type(frames))
print(frames.shape)
```
### Logs
```shell
Loading pipeline components...: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 5/5 [00:00<00:00, 11.34it/s]
100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 25/25 [01:33<00:00, 3.72s/it]
<class 'list'>
Traceback (most recent call last):
File "/home/user/ai-worker/jobs/containers/svd-xt-film/test_svd.py", line 33, in <module>
print(frames.shape)
^^^^^^^^^^^^
AttributeError: 'list' object has no attribute 'shape'
```
### System Info
- `diffusers` version: 0.25.0
- Platform: Linux-5.4.0-166-generic-x86_64-with-glibc2.31
- Python version: 3.11.7
- PyTorch version (GPU?): 2.1.2+cu121 (True)
- Huggingface_hub version: 0.20.2
- Transformers version: 4.36.2
- Accelerate version: 0.25.0
- xFormers version: 0.0.23.post1
- Using GPU in script?: Nvidia RTX 4090
### Who can help?
@patil-suraj @patrick
| [
{
"content": "# Copyright 2023 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport inspect\nfrom dataclasses import dataclass\nfrom typing import Callable, Dict, List, Optional, Union\n\nimport numpy as np\nimport PIL.Image\nimport torch\nfrom transformers import CLIPImageProcessor, CLIPVisionModelWithProjection\n\nfrom ...image_processor import VaeImageProcessor\nfrom ...models import AutoencoderKLTemporalDecoder, UNetSpatioTemporalConditionModel\nfrom ...schedulers import EulerDiscreteScheduler\nfrom ...utils import BaseOutput, logging\nfrom ...utils.torch_utils import is_compiled_module, randn_tensor\nfrom ..pipeline_utils import DiffusionPipeline\n\n\nlogger = logging.get_logger(__name__) # pylint: disable=invalid-name\n\n\ndef _append_dims(x, target_dims):\n \"\"\"Appends dimensions to the end of a tensor until it has target_dims dimensions.\"\"\"\n dims_to_append = target_dims - x.ndim\n if dims_to_append < 0:\n raise ValueError(f\"input has {x.ndim} dims but target_dims is {target_dims}, which is less\")\n return x[(...,) + (None,) * dims_to_append]\n\n\ndef tensor2vid(video: torch.Tensor, processor, output_type=\"np\"):\n # Based on:\n # https://github.com/modelscope/modelscope/blob/1509fdb973e5871f37148a4b5e5964cafd43e64d/modelscope/pipelines/multi_modal/text_to_video_synthesis_pipeline.py#L78\n\n batch_size, channels, num_frames, height, width = video.shape\n outputs = []\n for batch_idx in range(batch_size):\n batch_vid = video[batch_idx].permute(1, 0, 2, 3)\n batch_output = processor.postprocess(batch_vid, output_type)\n\n outputs.append(batch_output)\n\n return outputs\n\n\n@dataclass\nclass StableVideoDiffusionPipelineOutput(BaseOutput):\n r\"\"\"\n Output class for zero-shot text-to-video pipeline.\n\n Args:\n frames (`[List[PIL.Image.Image]`, `np.ndarray`]):\n List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width,\n num_channels)`.\n \"\"\"\n\n frames: Union[List[PIL.Image.Image], np.ndarray]\n\n\nclass StableVideoDiffusionPipeline(DiffusionPipeline):\n r\"\"\"\n Pipeline to generate video from an input image using Stable Video Diffusion.\n\n This model inherits from [`DiffusionPipeline`]. 
Check the superclass documentation for the generic methods\n implemented for all pipelines (downloading, saving, running on a particular device, etc.).\n\n Args:\n vae ([`AutoencoderKLTemporalDecoder`]):\n Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.\n image_encoder ([`~transformers.CLIPVisionModelWithProjection`]):\n Frozen CLIP image-encoder ([laion/CLIP-ViT-H-14-laion2B-s32B-b79K](https://huggingface.co/laion/CLIP-ViT-H-14-laion2B-s32B-b79K)).\n unet ([`UNetSpatioTemporalConditionModel`]):\n A `UNetSpatioTemporalConditionModel` to denoise the encoded image latents.\n scheduler ([`EulerDiscreteScheduler`]):\n A scheduler to be used in combination with `unet` to denoise the encoded image latents.\n feature_extractor ([`~transformers.CLIPImageProcessor`]):\n A `CLIPImageProcessor` to extract features from generated images.\n \"\"\"\n\n model_cpu_offload_seq = \"image_encoder->unet->vae\"\n _callback_tensor_inputs = [\"latents\"]\n\n def __init__(\n self,\n vae: AutoencoderKLTemporalDecoder,\n image_encoder: CLIPVisionModelWithProjection,\n unet: UNetSpatioTemporalConditionModel,\n scheduler: EulerDiscreteScheduler,\n feature_extractor: CLIPImageProcessor,\n ):\n super().__init__()\n\n self.register_modules(\n vae=vae,\n image_encoder=image_encoder,\n unet=unet,\n scheduler=scheduler,\n feature_extractor=feature_extractor,\n )\n self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)\n self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)\n\n def _encode_image(self, image, device, num_videos_per_prompt, do_classifier_free_guidance):\n dtype = next(self.image_encoder.parameters()).dtype\n\n if not isinstance(image, torch.Tensor):\n image = self.image_processor.pil_to_numpy(image)\n image = self.image_processor.numpy_to_pt(image)\n\n # We normalize the image before resizing to match with the original implementation.\n # Then we unnormalize it after resizing.\n image = image * 2.0 - 1.0\n image = _resize_with_antialiasing(image, (224, 224))\n image = (image + 1.0) / 2.0\n\n # Normalize the image with for CLIP input\n image = self.feature_extractor(\n images=image,\n do_normalize=True,\n do_center_crop=False,\n do_resize=False,\n do_rescale=False,\n return_tensors=\"pt\",\n ).pixel_values\n\n image = image.to(device=device, dtype=dtype)\n image_embeddings = self.image_encoder(image).image_embeds\n image_embeddings = image_embeddings.unsqueeze(1)\n\n # duplicate image embeddings for each generation per prompt, using mps friendly method\n bs_embed, seq_len, _ = image_embeddings.shape\n image_embeddings = image_embeddings.repeat(1, num_videos_per_prompt, 1)\n image_embeddings = image_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1)\n\n if do_classifier_free_guidance:\n negative_image_embeddings = torch.zeros_like(image_embeddings)\n\n # For classifier free guidance, we need to do two forward passes.\n # Here we concatenate the unconditional and text embeddings into a single batch\n # to avoid doing two forward passes\n image_embeddings = torch.cat([negative_image_embeddings, image_embeddings])\n\n return image_embeddings\n\n def _encode_vae_image(\n self,\n image: torch.Tensor,\n device,\n num_videos_per_prompt,\n do_classifier_free_guidance,\n ):\n image = image.to(device=device)\n image_latents = self.vae.encode(image).latent_dist.mode()\n\n if do_classifier_free_guidance:\n negative_image_latents = torch.zeros_like(image_latents)\n\n # For classifier free guidance, we need to do 
two forward passes.\n # Here we concatenate the unconditional and text embeddings into a single batch\n # to avoid doing two forward passes\n image_latents = torch.cat([negative_image_latents, image_latents])\n\n # duplicate image_latents for each generation per prompt, using mps friendly method\n image_latents = image_latents.repeat(num_videos_per_prompt, 1, 1, 1)\n\n return image_latents\n\n def _get_add_time_ids(\n self,\n fps,\n motion_bucket_id,\n noise_aug_strength,\n dtype,\n batch_size,\n num_videos_per_prompt,\n do_classifier_free_guidance,\n ):\n add_time_ids = [fps, motion_bucket_id, noise_aug_strength]\n\n passed_add_embed_dim = self.unet.config.addition_time_embed_dim * len(add_time_ids)\n expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features\n\n if expected_add_embed_dim != passed_add_embed_dim:\n raise ValueError(\n f\"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`.\"\n )\n\n add_time_ids = torch.tensor([add_time_ids], dtype=dtype)\n add_time_ids = add_time_ids.repeat(batch_size * num_videos_per_prompt, 1)\n\n if do_classifier_free_guidance:\n add_time_ids = torch.cat([add_time_ids, add_time_ids])\n\n return add_time_ids\n\n def decode_latents(self, latents, num_frames, decode_chunk_size=14):\n # [batch, frames, channels, height, width] -> [batch*frames, channels, height, width]\n latents = latents.flatten(0, 1)\n\n latents = 1 / self.vae.config.scaling_factor * latents\n\n forward_vae_fn = self.vae._orig_mod.forward if is_compiled_module(self.vae) else self.vae.forward\n accepts_num_frames = \"num_frames\" in set(inspect.signature(forward_vae_fn).parameters.keys())\n\n # decode decode_chunk_size frames at a time to avoid OOM\n frames = []\n for i in range(0, latents.shape[0], decode_chunk_size):\n num_frames_in = latents[i : i + decode_chunk_size].shape[0]\n decode_kwargs = {}\n if accepts_num_frames:\n # we only pass num_frames_in if it's expected\n decode_kwargs[\"num_frames\"] = num_frames_in\n\n frame = self.vae.decode(latents[i : i + decode_chunk_size], **decode_kwargs).sample\n frames.append(frame)\n frames = torch.cat(frames, dim=0)\n\n # [batch*frames, channels, height, width] -> [batch, channels, frames, height, width]\n frames = frames.reshape(-1, num_frames, *frames.shape[1:]).permute(0, 2, 1, 3, 4)\n\n # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16\n frames = frames.float()\n return frames\n\n def check_inputs(self, image, height, width):\n if (\n not isinstance(image, torch.Tensor)\n and not isinstance(image, PIL.Image.Image)\n and not isinstance(image, list)\n ):\n raise ValueError(\n \"`image` has to be of type `torch.FloatTensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is\"\n f\" {type(image)}\"\n )\n\n if height % 8 != 0 or width % 8 != 0:\n raise ValueError(f\"`height` and `width` have to be divisible by 8 but are {height} and {width}.\")\n\n def prepare_latents(\n self,\n batch_size,\n num_frames,\n num_channels_latents,\n height,\n width,\n dtype,\n device,\n generator,\n latents=None,\n ):\n shape = (\n batch_size,\n num_frames,\n num_channels_latents // 2,\n height // self.vae_scale_factor,\n width // self.vae_scale_factor,\n )\n if isinstance(generator, list) and len(generator) != batch_size:\n raise ValueError(\n f\"You have passed a list of generators of 
length {len(generator)}, but requested an effective batch\"\n f\" size of {batch_size}. Make sure the batch size matches the length of the generators.\"\n )\n\n if latents is None:\n latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)\n else:\n latents = latents.to(device)\n\n # scale the initial noise by the standard deviation required by the scheduler\n latents = latents * self.scheduler.init_noise_sigma\n return latents\n\n @property\n def guidance_scale(self):\n return self._guidance_scale\n\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n @property\n def do_classifier_free_guidance(self):\n if isinstance(self.guidance_scale, (int, float)):\n return self.guidance_scale\n return self.guidance_scale.max() > 1\n\n @property\n def num_timesteps(self):\n return self._num_timesteps\n\n @torch.no_grad()\n def __call__(\n self,\n image: Union[PIL.Image.Image, List[PIL.Image.Image], torch.FloatTensor],\n height: int = 576,\n width: int = 1024,\n num_frames: Optional[int] = None,\n num_inference_steps: int = 25,\n min_guidance_scale: float = 1.0,\n max_guidance_scale: float = 3.0,\n fps: int = 7,\n motion_bucket_id: int = 127,\n noise_aug_strength: float = 0.02,\n decode_chunk_size: Optional[int] = None,\n num_videos_per_prompt: Optional[int] = 1,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"pil\",\n callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,\n callback_on_step_end_tensor_inputs: List[str] = [\"latents\"],\n return_dict: bool = True,\n ):\n r\"\"\"\n The call function to the pipeline for generation.\n\n Args:\n image (`PIL.Image.Image` or `List[PIL.Image.Image]` or `torch.FloatTensor`):\n Image or images to guide image generation. If you provide a tensor, it needs to be compatible with\n [`CLIPImageProcessor`](https://huggingface.co/lambdalabs/sd-image-variations-diffusers/blob/main/feature_extractor/preprocessor_config.json).\n height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):\n The height in pixels of the generated image.\n width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):\n The width in pixels of the generated image.\n num_frames (`int`, *optional*):\n The number of video frames to generate. Defaults to 14 for `stable-video-diffusion-img2vid` and to 25 for `stable-video-diffusion-img2vid-xt`\n num_inference_steps (`int`, *optional*, defaults to 25):\n The number of denoising steps. More denoising steps usually lead to a higher quality image at the\n expense of slower inference. This parameter is modulated by `strength`.\n min_guidance_scale (`float`, *optional*, defaults to 1.0):\n The minimum guidance scale. Used for the classifier free guidance with first frame.\n max_guidance_scale (`float`, *optional*, defaults to 3.0):\n The maximum guidance scale. Used for the classifier free guidance with last frame.\n fps (`int`, *optional*, defaults to 7):\n Frames per second. The rate at which the generated images shall be exported to a video after generation.\n Note that Stable Diffusion Video's UNet was micro-conditioned on fps-1 during training.\n motion_bucket_id (`int`, *optional*, defaults to 127):\n The motion bucket ID. Used as conditioning for the generation. 
The higher the number the more motion will be in the video.\n noise_aug_strength (`float`, *optional*, defaults to 0.02):\n The amount of noise added to the init image, the higher it is the less the video will look like the init image. Increase it for more motion.\n decode_chunk_size (`int`, *optional*):\n The number of frames to decode at a time. The higher the chunk size, the higher the temporal consistency\n between frames, but also the higher the memory consumption. By default, the decoder will decode all frames at once\n for maximal quality. Reduce `decode_chunk_size` to reduce memory usage.\n num_videos_per_prompt (`int`, *optional*, defaults to 1):\n The number of images to generate per prompt.\n generator (`torch.Generator` or `List[torch.Generator]`, *optional*):\n A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make\n generation deterministic.\n latents (`torch.FloatTensor`, *optional*):\n Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image\n generation. Can be used to tweak the same generation with different prompts. If not provided, a latents\n tensor is generated by sampling using the supplied random `generator`.\n output_type (`str`, *optional*, defaults to `\"pil\"`):\n The output format of the generated image. Choose between `PIL.Image` or `np.array`.\n callback_on_step_end (`Callable`, *optional*):\n A function that calls at the end of each denoising steps during the inference. The function is called\n with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,\n callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by\n `callback_on_step_end_tensor_inputs`.\n callback_on_step_end_tensor_inputs (`List`, *optional*):\n The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list\n will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the\n `._callback_tensor_inputs` attribute of your pipeline class.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a\n plain tuple.\n\n Returns:\n [`~pipelines.stable_diffusion.StableVideoDiffusionPipelineOutput`] or `tuple`:\n If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableVideoDiffusionPipelineOutput`] is returned,\n otherwise a `tuple` is returned where the first element is a list of list with the generated frames.\n\n Examples:\n\n ```py\n from diffusers import StableVideoDiffusionPipeline\n from diffusers.utils import load_image, export_to_video\n\n pipe = StableVideoDiffusionPipeline.from_pretrained(\"stabilityai/stable-video-diffusion-img2vid-xt\", torch_dtype=torch.float16, variant=\"fp16\")\n pipe.to(\"cuda\")\n\n image = load_image(\"https://lh3.googleusercontent.com/y-iFOHfLTwkuQSUegpwDdgKmOjRSTvPxat63dQLB25xkTs4lhIbRUFeNBWZzYf370g=s1200\")\n image = image.resize((1024, 576))\n\n frames = pipe(image, num_frames=25, decode_chunk_size=8).frames[0]\n export_to_video(frames, \"generated.mp4\", fps=7)\n ```\n \"\"\"\n # 0. 
Default height and width to unet\n height = height or self.unet.config.sample_size * self.vae_scale_factor\n width = width or self.unet.config.sample_size * self.vae_scale_factor\n\n num_frames = num_frames if num_frames is not None else self.unet.config.num_frames\n decode_chunk_size = decode_chunk_size if decode_chunk_size is not None else num_frames\n\n # 1. Check inputs. Raise error if not correct\n self.check_inputs(image, height, width)\n\n # 2. Define call parameters\n if isinstance(image, PIL.Image.Image):\n batch_size = 1\n elif isinstance(image, list):\n batch_size = len(image)\n else:\n batch_size = image.shape[0]\n device = self._execution_device\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n self._guidance_scale = max_guidance_scale\n\n # 3. Encode input image\n image_embeddings = self._encode_image(image, device, num_videos_per_prompt, self.do_classifier_free_guidance)\n\n # NOTE: Stable Diffusion Video was conditioned on fps - 1, which\n # is why it is reduced here.\n # See: https://github.com/Stability-AI/generative-models/blob/ed0997173f98eaf8f4edf7ba5fe8f15c6b877fd3/scripts/sampling/simple_video_sample.py#L188\n fps = fps - 1\n\n # 4. Encode input image using VAE\n image = self.image_processor.preprocess(image, height=height, width=width)\n noise = randn_tensor(image.shape, generator=generator, device=image.device, dtype=image.dtype)\n image = image + noise_aug_strength * noise\n\n needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast\n if needs_upcasting:\n self.vae.to(dtype=torch.float32)\n\n image_latents = self._encode_vae_image(image, device, num_videos_per_prompt, self.do_classifier_free_guidance)\n image_latents = image_latents.to(image_embeddings.dtype)\n\n # cast back to fp16 if needed\n if needs_upcasting:\n self.vae.to(dtype=torch.float16)\n\n # Repeat the image latents for each frame so we can concatenate them with the noise\n # image_latents [batch, channels, height, width] ->[batch, num_frames, channels, height, width]\n image_latents = image_latents.unsqueeze(1).repeat(1, num_frames, 1, 1, 1)\n\n # 5. Get Added Time IDs\n added_time_ids = self._get_add_time_ids(\n fps,\n motion_bucket_id,\n noise_aug_strength,\n image_embeddings.dtype,\n batch_size,\n num_videos_per_prompt,\n self.do_classifier_free_guidance,\n )\n added_time_ids = added_time_ids.to(device)\n\n # 4. Prepare timesteps\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n timesteps = self.scheduler.timesteps\n\n # 5. Prepare latent variables\n num_channels_latents = self.unet.config.in_channels\n latents = self.prepare_latents(\n batch_size * num_videos_per_prompt,\n num_frames,\n num_channels_latents,\n height,\n width,\n image_embeddings.dtype,\n device,\n generator,\n latents,\n )\n\n # 7. Prepare guidance scale\n guidance_scale = torch.linspace(min_guidance_scale, max_guidance_scale, num_frames).unsqueeze(0)\n guidance_scale = guidance_scale.to(device, latents.dtype)\n guidance_scale = guidance_scale.repeat(batch_size * num_videos_per_prompt, 1)\n guidance_scale = _append_dims(guidance_scale, latents.ndim)\n\n self._guidance_scale = guidance_scale\n\n # 8. 
Denoising loop\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\n self._num_timesteps = len(timesteps)\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n # Concatenate image_latents over channels dimention\n latent_model_input = torch.cat([latent_model_input, image_latents], dim=2)\n\n # predict the noise residual\n noise_pred = self.unet(\n latent_model_input,\n t,\n encoder_hidden_states=image_embeddings,\n added_time_ids=added_time_ids,\n return_dict=False,\n )[0]\n\n # perform guidance\n if self.do_classifier_free_guidance:\n noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_cond - noise_pred_uncond)\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents).prev_sample\n\n if callback_on_step_end is not None:\n callback_kwargs = {}\n for k in callback_on_step_end_tensor_inputs:\n callback_kwargs[k] = locals()[k]\n callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)\n\n latents = callback_outputs.pop(\"latents\", latents)\n\n if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n\n if not output_type == \"latent\":\n # cast back to fp16 if needed\n if needs_upcasting:\n self.vae.to(dtype=torch.float16)\n frames = self.decode_latents(latents, num_frames, decode_chunk_size)\n frames = tensor2vid(frames, self.image_processor, output_type=output_type)\n else:\n frames = latents\n\n self.maybe_free_model_hooks()\n\n if not return_dict:\n return frames\n\n return StableVideoDiffusionPipelineOutput(frames=frames)\n\n\n# resizing utils\n# TODO: clean up later\ndef _resize_with_antialiasing(input, size, interpolation=\"bicubic\", align_corners=True):\n h, w = input.shape[-2:]\n factors = (h / size[0], w / size[1])\n\n # First, we have to determine sigma\n # Taken from skimage: https://github.com/scikit-image/scikit-image/blob/v0.19.2/skimage/transform/_warps.py#L171\n sigmas = (\n max((factors[0] - 1.0) / 2.0, 0.001),\n max((factors[1] - 1.0) / 2.0, 0.001),\n )\n\n # Now kernel size. Good results are for 3 sigma, but that is kind of slow. Pillow uses 1 sigma\n # https://github.com/python-pillow/Pillow/blob/master/src/libImaging/Resample.c#L206\n # But they do it in the 2 passes, which gives better results. 
Let's try 2 sigmas for now\n ks = int(max(2.0 * 2 * sigmas[0], 3)), int(max(2.0 * 2 * sigmas[1], 3))\n\n # Make sure it is odd\n if (ks[0] % 2) == 0:\n ks = ks[0] + 1, ks[1]\n\n if (ks[1] % 2) == 0:\n ks = ks[0], ks[1] + 1\n\n input = _gaussian_blur2d(input, ks, sigmas)\n\n output = torch.nn.functional.interpolate(input, size=size, mode=interpolation, align_corners=align_corners)\n return output\n\n\ndef _compute_padding(kernel_size):\n \"\"\"Compute padding tuple.\"\"\"\n # 4 or 6 ints: (padding_left, padding_right,padding_top,padding_bottom)\n # https://pytorch.org/docs/stable/nn.html#torch.nn.functional.pad\n if len(kernel_size) < 2:\n raise AssertionError(kernel_size)\n computed = [k - 1 for k in kernel_size]\n\n # for even kernels we need to do asymmetric padding :(\n out_padding = 2 * len(kernel_size) * [0]\n\n for i in range(len(kernel_size)):\n computed_tmp = computed[-(i + 1)]\n\n pad_front = computed_tmp // 2\n pad_rear = computed_tmp - pad_front\n\n out_padding[2 * i + 0] = pad_front\n out_padding[2 * i + 1] = pad_rear\n\n return out_padding\n\n\ndef _filter2d(input, kernel):\n # prepare kernel\n b, c, h, w = input.shape\n tmp_kernel = kernel[:, None, ...].to(device=input.device, dtype=input.dtype)\n\n tmp_kernel = tmp_kernel.expand(-1, c, -1, -1)\n\n height, width = tmp_kernel.shape[-2:]\n\n padding_shape: list[int] = _compute_padding([height, width])\n input = torch.nn.functional.pad(input, padding_shape, mode=\"reflect\")\n\n # kernel and input tensor reshape to align element-wise or batch-wise params\n tmp_kernel = tmp_kernel.reshape(-1, 1, height, width)\n input = input.view(-1, tmp_kernel.size(0), input.size(-2), input.size(-1))\n\n # convolve the tensor with the kernel.\n output = torch.nn.functional.conv2d(input, tmp_kernel, groups=tmp_kernel.size(0), padding=0, stride=1)\n\n out = output.view(b, c, h, w)\n return out\n\n\ndef _gaussian(window_size: int, sigma):\n if isinstance(sigma, float):\n sigma = torch.tensor([[sigma]])\n\n batch_size = sigma.shape[0]\n\n x = (torch.arange(window_size, device=sigma.device, dtype=sigma.dtype) - window_size // 2).expand(batch_size, -1)\n\n if window_size % 2 == 0:\n x = x + 0.5\n\n gauss = torch.exp(-x.pow(2.0) / (2 * sigma.pow(2.0)))\n\n return gauss / gauss.sum(-1, keepdim=True)\n\n\ndef _gaussian_blur2d(input, kernel_size, sigma):\n if isinstance(sigma, tuple):\n sigma = torch.tensor([sigma], dtype=input.dtype)\n else:\n sigma = sigma.to(dtype=input.dtype)\n\n ky, kx = int(kernel_size[0]), int(kernel_size[1])\n bs = sigma.shape[0]\n kernel_x = _gaussian(kx, sigma[:, 1].view(bs, 1))\n kernel_y = _gaussian(ky, sigma[:, 0].view(bs, 1))\n out_x = _filter2d(input, kernel_x[..., None, :])\n out = _filter2d(out_x, kernel_y[..., None])\n\n return out\n",
"path": "src/diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py"
}
] | [
{
"content": "# Copyright 2023 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport inspect\nfrom dataclasses import dataclass\nfrom typing import Callable, Dict, List, Optional, Union\n\nimport numpy as np\nimport PIL.Image\nimport torch\nfrom transformers import CLIPImageProcessor, CLIPVisionModelWithProjection\n\nfrom ...image_processor import VaeImageProcessor\nfrom ...models import AutoencoderKLTemporalDecoder, UNetSpatioTemporalConditionModel\nfrom ...schedulers import EulerDiscreteScheduler\nfrom ...utils import BaseOutput, logging\nfrom ...utils.torch_utils import is_compiled_module, randn_tensor\nfrom ..pipeline_utils import DiffusionPipeline\n\n\nlogger = logging.get_logger(__name__) # pylint: disable=invalid-name\n\n\ndef _append_dims(x, target_dims):\n \"\"\"Appends dimensions to the end of a tensor until it has target_dims dimensions.\"\"\"\n dims_to_append = target_dims - x.ndim\n if dims_to_append < 0:\n raise ValueError(f\"input has {x.ndim} dims but target_dims is {target_dims}, which is less\")\n return x[(...,) + (None,) * dims_to_append]\n\n\ndef tensor2vid(video: torch.Tensor, processor, output_type=\"np\"):\n # Based on:\n # https://github.com/modelscope/modelscope/blob/1509fdb973e5871f37148a4b5e5964cafd43e64d/modelscope/pipelines/multi_modal/text_to_video_synthesis_pipeline.py#L78\n\n batch_size, channels, num_frames, height, width = video.shape\n outputs = []\n for batch_idx in range(batch_size):\n batch_vid = video[batch_idx].permute(1, 0, 2, 3)\n batch_output = processor.postprocess(batch_vid, output_type)\n\n outputs.append(batch_output)\n\n if output_type == \"np\":\n return np.stack(outputs)\n\n return outputs\n\n\n@dataclass\nclass StableVideoDiffusionPipelineOutput(BaseOutput):\n r\"\"\"\n Output class for zero-shot text-to-video pipeline.\n\n Args:\n frames (`[List[PIL.Image.Image]`, `np.ndarray`]):\n List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width,\n num_channels)`.\n \"\"\"\n\n frames: Union[List[PIL.Image.Image], np.ndarray]\n\n\nclass StableVideoDiffusionPipeline(DiffusionPipeline):\n r\"\"\"\n Pipeline to generate video from an input image using Stable Video Diffusion.\n\n This model inherits from [`DiffusionPipeline`]. 
Check the superclass documentation for the generic methods\n implemented for all pipelines (downloading, saving, running on a particular device, etc.).\n\n Args:\n vae ([`AutoencoderKLTemporalDecoder`]):\n Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.\n image_encoder ([`~transformers.CLIPVisionModelWithProjection`]):\n Frozen CLIP image-encoder ([laion/CLIP-ViT-H-14-laion2B-s32B-b79K](https://huggingface.co/laion/CLIP-ViT-H-14-laion2B-s32B-b79K)).\n unet ([`UNetSpatioTemporalConditionModel`]):\n A `UNetSpatioTemporalConditionModel` to denoise the encoded image latents.\n scheduler ([`EulerDiscreteScheduler`]):\n A scheduler to be used in combination with `unet` to denoise the encoded image latents.\n feature_extractor ([`~transformers.CLIPImageProcessor`]):\n A `CLIPImageProcessor` to extract features from generated images.\n \"\"\"\n\n model_cpu_offload_seq = \"image_encoder->unet->vae\"\n _callback_tensor_inputs = [\"latents\"]\n\n def __init__(\n self,\n vae: AutoencoderKLTemporalDecoder,\n image_encoder: CLIPVisionModelWithProjection,\n unet: UNetSpatioTemporalConditionModel,\n scheduler: EulerDiscreteScheduler,\n feature_extractor: CLIPImageProcessor,\n ):\n super().__init__()\n\n self.register_modules(\n vae=vae,\n image_encoder=image_encoder,\n unet=unet,\n scheduler=scheduler,\n feature_extractor=feature_extractor,\n )\n self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)\n self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)\n\n def _encode_image(self, image, device, num_videos_per_prompt, do_classifier_free_guidance):\n dtype = next(self.image_encoder.parameters()).dtype\n\n if not isinstance(image, torch.Tensor):\n image = self.image_processor.pil_to_numpy(image)\n image = self.image_processor.numpy_to_pt(image)\n\n # We normalize the image before resizing to match with the original implementation.\n # Then we unnormalize it after resizing.\n image = image * 2.0 - 1.0\n image = _resize_with_antialiasing(image, (224, 224))\n image = (image + 1.0) / 2.0\n\n # Normalize the image with for CLIP input\n image = self.feature_extractor(\n images=image,\n do_normalize=True,\n do_center_crop=False,\n do_resize=False,\n do_rescale=False,\n return_tensors=\"pt\",\n ).pixel_values\n\n image = image.to(device=device, dtype=dtype)\n image_embeddings = self.image_encoder(image).image_embeds\n image_embeddings = image_embeddings.unsqueeze(1)\n\n # duplicate image embeddings for each generation per prompt, using mps friendly method\n bs_embed, seq_len, _ = image_embeddings.shape\n image_embeddings = image_embeddings.repeat(1, num_videos_per_prompt, 1)\n image_embeddings = image_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1)\n\n if do_classifier_free_guidance:\n negative_image_embeddings = torch.zeros_like(image_embeddings)\n\n # For classifier free guidance, we need to do two forward passes.\n # Here we concatenate the unconditional and text embeddings into a single batch\n # to avoid doing two forward passes\n image_embeddings = torch.cat([negative_image_embeddings, image_embeddings])\n\n return image_embeddings\n\n def _encode_vae_image(\n self,\n image: torch.Tensor,\n device,\n num_videos_per_prompt,\n do_classifier_free_guidance,\n ):\n image = image.to(device=device)\n image_latents = self.vae.encode(image).latent_dist.mode()\n\n if do_classifier_free_guidance:\n negative_image_latents = torch.zeros_like(image_latents)\n\n # For classifier free guidance, we need to do 
two forward passes.\n # Here we concatenate the unconditional and text embeddings into a single batch\n # to avoid doing two forward passes\n image_latents = torch.cat([negative_image_latents, image_latents])\n\n # duplicate image_latents for each generation per prompt, using mps friendly method\n image_latents = image_latents.repeat(num_videos_per_prompt, 1, 1, 1)\n\n return image_latents\n\n def _get_add_time_ids(\n self,\n fps,\n motion_bucket_id,\n noise_aug_strength,\n dtype,\n batch_size,\n num_videos_per_prompt,\n do_classifier_free_guidance,\n ):\n add_time_ids = [fps, motion_bucket_id, noise_aug_strength]\n\n passed_add_embed_dim = self.unet.config.addition_time_embed_dim * len(add_time_ids)\n expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features\n\n if expected_add_embed_dim != passed_add_embed_dim:\n raise ValueError(\n f\"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`.\"\n )\n\n add_time_ids = torch.tensor([add_time_ids], dtype=dtype)\n add_time_ids = add_time_ids.repeat(batch_size * num_videos_per_prompt, 1)\n\n if do_classifier_free_guidance:\n add_time_ids = torch.cat([add_time_ids, add_time_ids])\n\n return add_time_ids\n\n def decode_latents(self, latents, num_frames, decode_chunk_size=14):\n # [batch, frames, channels, height, width] -> [batch*frames, channels, height, width]\n latents = latents.flatten(0, 1)\n\n latents = 1 / self.vae.config.scaling_factor * latents\n\n forward_vae_fn = self.vae._orig_mod.forward if is_compiled_module(self.vae) else self.vae.forward\n accepts_num_frames = \"num_frames\" in set(inspect.signature(forward_vae_fn).parameters.keys())\n\n # decode decode_chunk_size frames at a time to avoid OOM\n frames = []\n for i in range(0, latents.shape[0], decode_chunk_size):\n num_frames_in = latents[i : i + decode_chunk_size].shape[0]\n decode_kwargs = {}\n if accepts_num_frames:\n # we only pass num_frames_in if it's expected\n decode_kwargs[\"num_frames\"] = num_frames_in\n\n frame = self.vae.decode(latents[i : i + decode_chunk_size], **decode_kwargs).sample\n frames.append(frame)\n frames = torch.cat(frames, dim=0)\n\n # [batch*frames, channels, height, width] -> [batch, channels, frames, height, width]\n frames = frames.reshape(-1, num_frames, *frames.shape[1:]).permute(0, 2, 1, 3, 4)\n\n # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16\n frames = frames.float()\n return frames\n\n def check_inputs(self, image, height, width):\n if (\n not isinstance(image, torch.Tensor)\n and not isinstance(image, PIL.Image.Image)\n and not isinstance(image, list)\n ):\n raise ValueError(\n \"`image` has to be of type `torch.FloatTensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is\"\n f\" {type(image)}\"\n )\n\n if height % 8 != 0 or width % 8 != 0:\n raise ValueError(f\"`height` and `width` have to be divisible by 8 but are {height} and {width}.\")\n\n def prepare_latents(\n self,\n batch_size,\n num_frames,\n num_channels_latents,\n height,\n width,\n dtype,\n device,\n generator,\n latents=None,\n ):\n shape = (\n batch_size,\n num_frames,\n num_channels_latents // 2,\n height // self.vae_scale_factor,\n width // self.vae_scale_factor,\n )\n if isinstance(generator, list) and len(generator) != batch_size:\n raise ValueError(\n f\"You have passed a list of generators of 
length {len(generator)}, but requested an effective batch\"\n f\" size of {batch_size}. Make sure the batch size matches the length of the generators.\"\n )\n\n if latents is None:\n latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)\n else:\n latents = latents.to(device)\n\n # scale the initial noise by the standard deviation required by the scheduler\n latents = latents * self.scheduler.init_noise_sigma\n return latents\n\n @property\n def guidance_scale(self):\n return self._guidance_scale\n\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n @property\n def do_classifier_free_guidance(self):\n if isinstance(self.guidance_scale, (int, float)):\n return self.guidance_scale\n return self.guidance_scale.max() > 1\n\n @property\n def num_timesteps(self):\n return self._num_timesteps\n\n @torch.no_grad()\n def __call__(\n self,\n image: Union[PIL.Image.Image, List[PIL.Image.Image], torch.FloatTensor],\n height: int = 576,\n width: int = 1024,\n num_frames: Optional[int] = None,\n num_inference_steps: int = 25,\n min_guidance_scale: float = 1.0,\n max_guidance_scale: float = 3.0,\n fps: int = 7,\n motion_bucket_id: int = 127,\n noise_aug_strength: float = 0.02,\n decode_chunk_size: Optional[int] = None,\n num_videos_per_prompt: Optional[int] = 1,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"pil\",\n callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,\n callback_on_step_end_tensor_inputs: List[str] = [\"latents\"],\n return_dict: bool = True,\n ):\n r\"\"\"\n The call function to the pipeline for generation.\n\n Args:\n image (`PIL.Image.Image` or `List[PIL.Image.Image]` or `torch.FloatTensor`):\n Image or images to guide image generation. If you provide a tensor, it needs to be compatible with\n [`CLIPImageProcessor`](https://huggingface.co/lambdalabs/sd-image-variations-diffusers/blob/main/feature_extractor/preprocessor_config.json).\n height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):\n The height in pixels of the generated image.\n width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):\n The width in pixels of the generated image.\n num_frames (`int`, *optional*):\n The number of video frames to generate. Defaults to 14 for `stable-video-diffusion-img2vid` and to 25 for `stable-video-diffusion-img2vid-xt`\n num_inference_steps (`int`, *optional*, defaults to 25):\n The number of denoising steps. More denoising steps usually lead to a higher quality image at the\n expense of slower inference. This parameter is modulated by `strength`.\n min_guidance_scale (`float`, *optional*, defaults to 1.0):\n The minimum guidance scale. Used for the classifier free guidance with first frame.\n max_guidance_scale (`float`, *optional*, defaults to 3.0):\n The maximum guidance scale. Used for the classifier free guidance with last frame.\n fps (`int`, *optional*, defaults to 7):\n Frames per second. The rate at which the generated images shall be exported to a video after generation.\n Note that Stable Diffusion Video's UNet was micro-conditioned on fps-1 during training.\n motion_bucket_id (`int`, *optional*, defaults to 127):\n The motion bucket ID. Used as conditioning for the generation. 
The higher the number the more motion will be in the video.\n noise_aug_strength (`float`, *optional*, defaults to 0.02):\n The amount of noise added to the init image, the higher it is the less the video will look like the init image. Increase it for more motion.\n decode_chunk_size (`int`, *optional*):\n The number of frames to decode at a time. The higher the chunk size, the higher the temporal consistency\n between frames, but also the higher the memory consumption. By default, the decoder will decode all frames at once\n for maximal quality. Reduce `decode_chunk_size` to reduce memory usage.\n num_videos_per_prompt (`int`, *optional*, defaults to 1):\n The number of images to generate per prompt.\n generator (`torch.Generator` or `List[torch.Generator]`, *optional*):\n A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make\n generation deterministic.\n latents (`torch.FloatTensor`, *optional*):\n Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image\n generation. Can be used to tweak the same generation with different prompts. If not provided, a latents\n tensor is generated by sampling using the supplied random `generator`.\n output_type (`str`, *optional*, defaults to `\"pil\"`):\n The output format of the generated image. Choose between `PIL.Image` or `np.array`.\n callback_on_step_end (`Callable`, *optional*):\n A function that calls at the end of each denoising steps during the inference. The function is called\n with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,\n callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by\n `callback_on_step_end_tensor_inputs`.\n callback_on_step_end_tensor_inputs (`List`, *optional*):\n The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list\n will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the\n `._callback_tensor_inputs` attribute of your pipeline class.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a\n plain tuple.\n\n Returns:\n [`~pipelines.stable_diffusion.StableVideoDiffusionPipelineOutput`] or `tuple`:\n If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableVideoDiffusionPipelineOutput`] is returned,\n otherwise a `tuple` is returned where the first element is a list of list with the generated frames.\n\n Examples:\n\n ```py\n from diffusers import StableVideoDiffusionPipeline\n from diffusers.utils import load_image, export_to_video\n\n pipe = StableVideoDiffusionPipeline.from_pretrained(\"stabilityai/stable-video-diffusion-img2vid-xt\", torch_dtype=torch.float16, variant=\"fp16\")\n pipe.to(\"cuda\")\n\n image = load_image(\"https://lh3.googleusercontent.com/y-iFOHfLTwkuQSUegpwDdgKmOjRSTvPxat63dQLB25xkTs4lhIbRUFeNBWZzYf370g=s1200\")\n image = image.resize((1024, 576))\n\n frames = pipe(image, num_frames=25, decode_chunk_size=8).frames[0]\n export_to_video(frames, \"generated.mp4\", fps=7)\n ```\n \"\"\"\n # 0. 
Default height and width to unet\n height = height or self.unet.config.sample_size * self.vae_scale_factor\n width = width or self.unet.config.sample_size * self.vae_scale_factor\n\n num_frames = num_frames if num_frames is not None else self.unet.config.num_frames\n decode_chunk_size = decode_chunk_size if decode_chunk_size is not None else num_frames\n\n # 1. Check inputs. Raise error if not correct\n self.check_inputs(image, height, width)\n\n # 2. Define call parameters\n if isinstance(image, PIL.Image.Image):\n batch_size = 1\n elif isinstance(image, list):\n batch_size = len(image)\n else:\n batch_size = image.shape[0]\n device = self._execution_device\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n self._guidance_scale = max_guidance_scale\n\n # 3. Encode input image\n image_embeddings = self._encode_image(image, device, num_videos_per_prompt, self.do_classifier_free_guidance)\n\n # NOTE: Stable Diffusion Video was conditioned on fps - 1, which\n # is why it is reduced here.\n # See: https://github.com/Stability-AI/generative-models/blob/ed0997173f98eaf8f4edf7ba5fe8f15c6b877fd3/scripts/sampling/simple_video_sample.py#L188\n fps = fps - 1\n\n # 4. Encode input image using VAE\n image = self.image_processor.preprocess(image, height=height, width=width)\n noise = randn_tensor(image.shape, generator=generator, device=image.device, dtype=image.dtype)\n image = image + noise_aug_strength * noise\n\n needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast\n if needs_upcasting:\n self.vae.to(dtype=torch.float32)\n\n image_latents = self._encode_vae_image(image, device, num_videos_per_prompt, self.do_classifier_free_guidance)\n image_latents = image_latents.to(image_embeddings.dtype)\n\n # cast back to fp16 if needed\n if needs_upcasting:\n self.vae.to(dtype=torch.float16)\n\n # Repeat the image latents for each frame so we can concatenate them with the noise\n # image_latents [batch, channels, height, width] ->[batch, num_frames, channels, height, width]\n image_latents = image_latents.unsqueeze(1).repeat(1, num_frames, 1, 1, 1)\n\n # 5. Get Added Time IDs\n added_time_ids = self._get_add_time_ids(\n fps,\n motion_bucket_id,\n noise_aug_strength,\n image_embeddings.dtype,\n batch_size,\n num_videos_per_prompt,\n self.do_classifier_free_guidance,\n )\n added_time_ids = added_time_ids.to(device)\n\n # 4. Prepare timesteps\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n timesteps = self.scheduler.timesteps\n\n # 5. Prepare latent variables\n num_channels_latents = self.unet.config.in_channels\n latents = self.prepare_latents(\n batch_size * num_videos_per_prompt,\n num_frames,\n num_channels_latents,\n height,\n width,\n image_embeddings.dtype,\n device,\n generator,\n latents,\n )\n\n # 7. Prepare guidance scale\n guidance_scale = torch.linspace(min_guidance_scale, max_guidance_scale, num_frames).unsqueeze(0)\n guidance_scale = guidance_scale.to(device, latents.dtype)\n guidance_scale = guidance_scale.repeat(batch_size * num_videos_per_prompt, 1)\n guidance_scale = _append_dims(guidance_scale, latents.ndim)\n\n self._guidance_scale = guidance_scale\n\n # 8. 
Denoising loop\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\n self._num_timesteps = len(timesteps)\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n # Concatenate image_latents over channels dimention\n latent_model_input = torch.cat([latent_model_input, image_latents], dim=2)\n\n # predict the noise residual\n noise_pred = self.unet(\n latent_model_input,\n t,\n encoder_hidden_states=image_embeddings,\n added_time_ids=added_time_ids,\n return_dict=False,\n )[0]\n\n # perform guidance\n if self.do_classifier_free_guidance:\n noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_cond - noise_pred_uncond)\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents).prev_sample\n\n if callback_on_step_end is not None:\n callback_kwargs = {}\n for k in callback_on_step_end_tensor_inputs:\n callback_kwargs[k] = locals()[k]\n callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)\n\n latents = callback_outputs.pop(\"latents\", latents)\n\n if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n\n if not output_type == \"latent\":\n # cast back to fp16 if needed\n if needs_upcasting:\n self.vae.to(dtype=torch.float16)\n frames = self.decode_latents(latents, num_frames, decode_chunk_size)\n frames = tensor2vid(frames, self.image_processor, output_type=output_type)\n else:\n frames = latents\n\n self.maybe_free_model_hooks()\n\n if not return_dict:\n return frames\n\n return StableVideoDiffusionPipelineOutput(frames=frames)\n\n\n# resizing utils\n# TODO: clean up later\ndef _resize_with_antialiasing(input, size, interpolation=\"bicubic\", align_corners=True):\n h, w = input.shape[-2:]\n factors = (h / size[0], w / size[1])\n\n # First, we have to determine sigma\n # Taken from skimage: https://github.com/scikit-image/scikit-image/blob/v0.19.2/skimage/transform/_warps.py#L171\n sigmas = (\n max((factors[0] - 1.0) / 2.0, 0.001),\n max((factors[1] - 1.0) / 2.0, 0.001),\n )\n\n # Now kernel size. Good results are for 3 sigma, but that is kind of slow. Pillow uses 1 sigma\n # https://github.com/python-pillow/Pillow/blob/master/src/libImaging/Resample.c#L206\n # But they do it in the 2 passes, which gives better results. 
Let's try 2 sigmas for now\n ks = int(max(2.0 * 2 * sigmas[0], 3)), int(max(2.0 * 2 * sigmas[1], 3))\n\n # Make sure it is odd\n if (ks[0] % 2) == 0:\n ks = ks[0] + 1, ks[1]\n\n if (ks[1] % 2) == 0:\n ks = ks[0], ks[1] + 1\n\n input = _gaussian_blur2d(input, ks, sigmas)\n\n output = torch.nn.functional.interpolate(input, size=size, mode=interpolation, align_corners=align_corners)\n return output\n\n\ndef _compute_padding(kernel_size):\n \"\"\"Compute padding tuple.\"\"\"\n # 4 or 6 ints: (padding_left, padding_right,padding_top,padding_bottom)\n # https://pytorch.org/docs/stable/nn.html#torch.nn.functional.pad\n if len(kernel_size) < 2:\n raise AssertionError(kernel_size)\n computed = [k - 1 for k in kernel_size]\n\n # for even kernels we need to do asymmetric padding :(\n out_padding = 2 * len(kernel_size) * [0]\n\n for i in range(len(kernel_size)):\n computed_tmp = computed[-(i + 1)]\n\n pad_front = computed_tmp // 2\n pad_rear = computed_tmp - pad_front\n\n out_padding[2 * i + 0] = pad_front\n out_padding[2 * i + 1] = pad_rear\n\n return out_padding\n\n\ndef _filter2d(input, kernel):\n # prepare kernel\n b, c, h, w = input.shape\n tmp_kernel = kernel[:, None, ...].to(device=input.device, dtype=input.dtype)\n\n tmp_kernel = tmp_kernel.expand(-1, c, -1, -1)\n\n height, width = tmp_kernel.shape[-2:]\n\n padding_shape: list[int] = _compute_padding([height, width])\n input = torch.nn.functional.pad(input, padding_shape, mode=\"reflect\")\n\n # kernel and input tensor reshape to align element-wise or batch-wise params\n tmp_kernel = tmp_kernel.reshape(-1, 1, height, width)\n input = input.view(-1, tmp_kernel.size(0), input.size(-2), input.size(-1))\n\n # convolve the tensor with the kernel.\n output = torch.nn.functional.conv2d(input, tmp_kernel, groups=tmp_kernel.size(0), padding=0, stride=1)\n\n out = output.view(b, c, h, w)\n return out\n\n\ndef _gaussian(window_size: int, sigma):\n if isinstance(sigma, float):\n sigma = torch.tensor([[sigma]])\n\n batch_size = sigma.shape[0]\n\n x = (torch.arange(window_size, device=sigma.device, dtype=sigma.dtype) - window_size // 2).expand(batch_size, -1)\n\n if window_size % 2 == 0:\n x = x + 0.5\n\n gauss = torch.exp(-x.pow(2.0) / (2 * sigma.pow(2.0)))\n\n return gauss / gauss.sum(-1, keepdim=True)\n\n\ndef _gaussian_blur2d(input, kernel_size, sigma):\n if isinstance(sigma, tuple):\n sigma = torch.tensor([sigma], dtype=input.dtype)\n else:\n sigma = sigma.to(dtype=input.dtype)\n\n ky, kx = int(kernel_size[0]), int(kernel_size[1])\n bs = sigma.shape[0]\n kernel_x = _gaussian(kx, sigma[:, 1].view(bs, 1))\n kernel_y = _gaussian(ky, sigma[:, 0].view(bs, 1))\n out_x = _filter2d(input, kernel_x[..., None, :])\n out = _filter2d(out_x, kernel_y[..., None])\n\n return out\n",
"path": "src/diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py"
}
] | diff --git a/src/diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py b/src/diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py
index fa96f41cd81f..a0c3be089ece 100644
--- a/src/diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py
+++ b/src/diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py
@@ -52,6 +52,9 @@ def tensor2vid(video: torch.Tensor, processor, output_type="np"):
outputs.append(batch_output)
+ if output_type == "np":
+ return np.stack(outputs)
+
return outputs
diff --git a/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py b/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py
index 11978424368f..871266fb9c24 100644
--- a/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py
+++ b/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py
@@ -185,6 +185,23 @@ def test_inference_batch_single_identical(
def test_inference_batch_consistent(self):
pass
+ def test_np_output_type(self):
+ components = self.get_dummy_components()
+ pipe = self.pipeline_class(**components)
+ for component in pipe.components.values():
+ if hasattr(component, "set_default_attn_processor"):
+ component.set_default_attn_processor()
+
+ pipe.to(torch_device)
+ pipe.set_progress_bar_config(disable=None)
+
+ generator_device = "cpu"
+ inputs = self.get_dummy_inputs(generator_device)
+ inputs["output_type"] = "np"
+ output = pipe(**inputs).frames
+ self.assertTrue(isinstance(output, np.ndarray))
+ self.assertEqual(len(output.shape), 5)
+
def test_dict_tuple_outputs_equivalent(self, expected_max_difference=1e-4):
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
|
microsoft__ptvsd-806 | listen(0) in create_server() does not allow client to connect to linux server
## Environment data
- PTVSD version: 4.1.1
- OS and version: linux kernel 4.14.46
- Python version (& distribution if applicable, e.g. Anaconda): 2.7.13, 3.5.3
- Using VS Code or Visual Studio: N/A
## Actual behavior
The server on Linux never accepts the connection, i.e. `client, _ = sock.accept()` in `connect()` in socket.py never returns. This is due to the `listen(0)` call in `create_server()`, which was changed from `listen(1)` in 322f6946. Although `listen(0)` works correctly on macOS, it does not on Linux.
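A minimal sketch of the change being described (assuming the rest of `create_server()` in ptvsd/socket.py stays as shown below), where a non-zero backlog lets the queued connection be accepted on Linux:

```python
import socket

def create_server(host, port):
    """Return a local server socket listening on the given port."""
    if host is None:
        host = 'localhost'
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server.bind((host, port))
    server.listen(1)  # backlog of 1 instead of 0 so accept() sees the pending connection
    return server
```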
## Expected behavior
The incoming connection should be accepted.
## Steps to reproduce:
Run:
```python
ptvsd.enable_attach(address=('0.0.0.0', 9876), redirect_output=True)
ptvsd.wait_for_attach()
```
Then, from the command line, see that `telnet localhost 9876` hangs instead of connecting. Some background history is at https://bugs.python.org/issue8498
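The same check can also be scripted instead of using telnet; this is a rough sketch (the `localhost` address and port 9876 are taken from the snippet above):

```python
import socket

# Should return promptly once the backlog issue is fixed; against an affected
# Linux server it hangs until the timeout fires.
client = socket.create_connection(('localhost', 9876), timeout=5)
client.close()
```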
| [
{
"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nfrom __future__ import absolute_import\n\nfrom collections import namedtuple\nimport contextlib\nimport errno\nimport socket\ntry:\n from urllib.parse import urlparse\nexcept ImportError:\n from urlparse import urlparse\n\n\ntry:\n ConnectionError # noqa\n BrokenPipeError # noqa\n ConnectionResetError # noqa\nexcept NameError:\n class BrokenPipeError(Exception):\n # EPIPE and ESHUTDOWN\n pass\n\n class ConnectionResetError(Exception):\n # ECONNRESET\n pass\n\n\nNOT_CONNECTED = (\n errno.ENOTCONN,\n errno.EBADF,\n)\n\nCLOSED = (\n errno.EPIPE,\n errno.ESHUTDOWN,\n errno.ECONNRESET,\n # Windows\n 10038, # \"An operation was attempted on something that is not a socket\"\n 10058,\n)\n\nEOF = NOT_CONNECTED + CLOSED\n\n\[email protected]\ndef convert_eof():\n \"\"\"A context manager to convert some socket errors into EOFError.\"\"\"\n try:\n yield\n except ConnectionResetError:\n raise EOFError\n except BrokenPipeError:\n raise EOFError\n except OSError as exc:\n if exc.errno in EOF:\n raise EOFError\n raise\n\n\nclass TimeoutError(socket.timeout):\n \"\"\"A socket timeout happened.\"\"\"\n\n\ndef is_socket(sock):\n \"\"\"Return True if the object can be used as a socket.\"\"\"\n return isinstance(sock, socket.socket)\n\n\ndef create_server(host, port):\n \"\"\"Return a local server socket listening on the given port.\"\"\"\n if host is None:\n host = 'localhost'\n server = _new_sock()\n server.bind((host, port))\n server.listen(0)\n return server\n\n\ndef create_client():\n \"\"\"Return a client socket that may be connected to a remote address.\"\"\"\n return _new_sock()\n\n\ndef _new_sock():\n sock = socket.socket(socket.AF_INET,\n socket.SOCK_STREAM,\n socket.IPPROTO_TCP)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n return sock\n\n\[email protected]\ndef ignored_errno(*ignored):\n \"\"\"A context manager that ignores the given errnos.\"\"\"\n try:\n yield\n except OSError as exc:\n if exc.errno not in ignored:\n raise\n\n\nclass KeepAlive(namedtuple('KeepAlive', 'interval idle maxfails')):\n \"\"\"TCP keep-alive settings.\"\"\"\n\n INTERVAL = 3 # seconds\n IDLE = 1 # seconds after idle\n MAX_FAILS = 5\n\n @classmethod\n def from_raw(cls, raw):\n \"\"\"Return the corresponding KeepAlive.\"\"\"\n if raw is None:\n return None\n elif isinstance(raw, cls):\n return raw\n elif isinstance(raw, (str, int, float)):\n return cls(raw)\n else:\n try:\n raw = dict(raw)\n except TypeError:\n return cls(*raw)\n else:\n return cls(**raw)\n\n def __new__(cls, interval=None, idle=None, maxfails=None):\n self = super(KeepAlive, cls).__new__(\n cls,\n float(interval) if interval or interval == 0 else cls.INTERVAL,\n float(idle) if idle or idle == 0 else cls.IDLE,\n float(maxfails) if maxfails or maxfails == 0 else cls.MAX_FAILS,\n )\n return self\n\n def apply(self, sock):\n \"\"\"Set the keepalive values on the socket.\"\"\"\n sock.setsockopt(socket.SOL_SOCKET,\n socket.SO_KEEPALIVE,\n 1)\n interval = self.interval\n idle = self.idle\n maxfails = self.maxfails\n try:\n if interval > 0:\n sock.setsockopt(socket.IPPROTO_TCP,\n socket.TCP_KEEPINTVL,\n interval)\n if idle > 0:\n sock.setsockopt(socket.IPPROTO_TCP,\n socket.TCP_KEEPIDLE,\n idle)\n if maxfails >= 0:\n sock.setsockopt(socket.IPPROTO_TCP,\n socket.TCP_KEEPCNT,\n maxfails)\n except AttributeError:\n # mostly linux-only\n pass\n\n\ndef connect(sock, addr, 
keepalive=None):\n \"\"\"Return the client socket for the next connection.\"\"\"\n if addr is None:\n if keepalive is None or keepalive is True:\n keepalive = KeepAlive()\n elif keepalive:\n keepalive = KeepAlive.from_raw(keepalive)\n client, _ = sock.accept()\n if keepalive:\n keepalive.apply(client)\n return client\n else:\n if keepalive:\n raise NotImplementedError\n sock.connect(addr)\n return sock\n\n\ndef shut_down(sock, how=socket.SHUT_RDWR, ignored=NOT_CONNECTED):\n \"\"\"Shut down the given socket.\"\"\"\n with ignored_errno(*ignored or ()):\n sock.shutdown(how)\n\n\ndef close_socket(sock):\n \"\"\"Shutdown and close the socket.\"\"\"\n try:\n shut_down(sock)\n except Exception:\n # TODO: Log errors?\n pass\n sock.close()\n\n\nclass Address(namedtuple('Address', 'host port')):\n \"\"\"An IP address to use for sockets.\"\"\"\n\n @classmethod\n def from_raw(cls, raw, defaultport=None):\n \"\"\"Return an address corresponding to the given data.\"\"\"\n if isinstance(raw, cls):\n return raw\n elif isinstance(raw, int):\n return cls(None, raw)\n elif isinstance(raw, str):\n if raw == '':\n return cls('', defaultport)\n parsed = urlparse(raw)\n if not parsed.netloc:\n if parsed.scheme:\n raise ValueError('invalid address {!r}'.format(raw))\n return cls.from_raw('x://' + raw, defaultport=defaultport)\n return cls(\n parsed.hostname or '',\n parsed.port if parsed.port else defaultport,\n )\n elif not raw:\n return cls(None, defaultport)\n else:\n try:\n kwargs = dict(**raw)\n except TypeError:\n return cls(*raw)\n else:\n kwargs.setdefault('host', None)\n kwargs.setdefault('port', defaultport)\n return cls(**kwargs)\n\n @classmethod\n def as_server(cls, host, port):\n \"\"\"Return an address to use as a server address.\"\"\"\n return cls(host, port, isserver=True)\n\n @classmethod\n def as_client(cls, host, port):\n \"\"\"Return an address to use as a server address.\"\"\"\n return cls(host, port, isserver=False)\n\n def __new__(cls, host, port, **kwargs):\n if host == '*':\n host = ''\n isserver = kwargs.pop('isserver', None)\n if isserver is None:\n isserver = (host is None or host == '')\n else:\n isserver = bool(isserver)\n if host is None:\n host = 'localhost'\n self = super(Address, cls).__new__(\n cls,\n str(host),\n int(port) if port is not None else None,\n **kwargs\n )\n self._isserver = isserver\n return self\n\n def __init__(self, *args, **kwargs):\n if self.port is None:\n raise TypeError('missing port')\n if self.port <= 0 or self.port > 65535:\n raise ValueError('port must be positive int < 65535')\n\n def __repr__(self):\n orig = super(Address, self).__repr__()\n return '{}, isserver={})'.format(orig[:-1], self._isserver)\n\n def __eq__(self, other):\n if not super(Address, self).__eq__(other):\n return False\n try:\n other = self.from_raw(other)\n except Exception:\n return False\n return self._isserver == other._isserver\n\n @property\n def isserver(self):\n return self._isserver\n",
"path": "ptvsd/socket.py"
}
] | [
{
"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nfrom __future__ import absolute_import\n\nfrom collections import namedtuple\nimport contextlib\nimport errno\nimport socket\ntry:\n from urllib.parse import urlparse\nexcept ImportError:\n from urlparse import urlparse\n\n\ntry:\n ConnectionError # noqa\n BrokenPipeError # noqa\n ConnectionResetError # noqa\nexcept NameError:\n class BrokenPipeError(Exception):\n # EPIPE and ESHUTDOWN\n pass\n\n class ConnectionResetError(Exception):\n # ECONNRESET\n pass\n\n\nNOT_CONNECTED = (\n errno.ENOTCONN,\n errno.EBADF,\n)\n\nCLOSED = (\n errno.EPIPE,\n errno.ESHUTDOWN,\n errno.ECONNRESET,\n # Windows\n 10038, # \"An operation was attempted on something that is not a socket\"\n 10058,\n)\n\nEOF = NOT_CONNECTED + CLOSED\n\n\[email protected]\ndef convert_eof():\n \"\"\"A context manager to convert some socket errors into EOFError.\"\"\"\n try:\n yield\n except ConnectionResetError:\n raise EOFError\n except BrokenPipeError:\n raise EOFError\n except OSError as exc:\n if exc.errno in EOF:\n raise EOFError\n raise\n\n\nclass TimeoutError(socket.timeout):\n \"\"\"A socket timeout happened.\"\"\"\n\n\ndef is_socket(sock):\n \"\"\"Return True if the object can be used as a socket.\"\"\"\n return isinstance(sock, socket.socket)\n\n\ndef create_server(host, port):\n \"\"\"Return a local server socket listening on the given port.\"\"\"\n if host is None:\n host = 'localhost'\n server = _new_sock()\n server.bind((host, port))\n server.listen(1)\n return server\n\n\ndef create_client():\n \"\"\"Return a client socket that may be connected to a remote address.\"\"\"\n return _new_sock()\n\n\ndef _new_sock():\n sock = socket.socket(socket.AF_INET,\n socket.SOCK_STREAM,\n socket.IPPROTO_TCP)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n return sock\n\n\[email protected]\ndef ignored_errno(*ignored):\n \"\"\"A context manager that ignores the given errnos.\"\"\"\n try:\n yield\n except OSError as exc:\n if exc.errno not in ignored:\n raise\n\n\nclass KeepAlive(namedtuple('KeepAlive', 'interval idle maxfails')):\n \"\"\"TCP keep-alive settings.\"\"\"\n\n INTERVAL = 3 # seconds\n IDLE = 1 # seconds after idle\n MAX_FAILS = 5\n\n @classmethod\n def from_raw(cls, raw):\n \"\"\"Return the corresponding KeepAlive.\"\"\"\n if raw is None:\n return None\n elif isinstance(raw, cls):\n return raw\n elif isinstance(raw, (str, int, float)):\n return cls(raw)\n else:\n try:\n raw = dict(raw)\n except TypeError:\n return cls(*raw)\n else:\n return cls(**raw)\n\n def __new__(cls, interval=None, idle=None, maxfails=None):\n self = super(KeepAlive, cls).__new__(\n cls,\n float(interval) if interval or interval == 0 else cls.INTERVAL,\n float(idle) if idle or idle == 0 else cls.IDLE,\n float(maxfails) if maxfails or maxfails == 0 else cls.MAX_FAILS,\n )\n return self\n\n def apply(self, sock):\n \"\"\"Set the keepalive values on the socket.\"\"\"\n sock.setsockopt(socket.SOL_SOCKET,\n socket.SO_KEEPALIVE,\n 1)\n interval = self.interval\n idle = self.idle\n maxfails = self.maxfails\n try:\n if interval > 0:\n sock.setsockopt(socket.IPPROTO_TCP,\n socket.TCP_KEEPINTVL,\n interval)\n if idle > 0:\n sock.setsockopt(socket.IPPROTO_TCP,\n socket.TCP_KEEPIDLE,\n idle)\n if maxfails >= 0:\n sock.setsockopt(socket.IPPROTO_TCP,\n socket.TCP_KEEPCNT,\n maxfails)\n except AttributeError:\n # mostly linux-only\n pass\n\n\ndef connect(sock, addr, 
keepalive=None):\n \"\"\"Return the client socket for the next connection.\"\"\"\n if addr is None:\n if keepalive is None or keepalive is True:\n keepalive = KeepAlive()\n elif keepalive:\n keepalive = KeepAlive.from_raw(keepalive)\n client, _ = sock.accept()\n if keepalive:\n keepalive.apply(client)\n return client\n else:\n if keepalive:\n raise NotImplementedError\n sock.connect(addr)\n return sock\n\n\ndef shut_down(sock, how=socket.SHUT_RDWR, ignored=NOT_CONNECTED):\n \"\"\"Shut down the given socket.\"\"\"\n with ignored_errno(*ignored or ()):\n sock.shutdown(how)\n\n\ndef close_socket(sock):\n \"\"\"Shutdown and close the socket.\"\"\"\n try:\n shut_down(sock)\n except Exception:\n # TODO: Log errors?\n pass\n sock.close()\n\n\nclass Address(namedtuple('Address', 'host port')):\n \"\"\"An IP address to use for sockets.\"\"\"\n\n @classmethod\n def from_raw(cls, raw, defaultport=None):\n \"\"\"Return an address corresponding to the given data.\"\"\"\n if isinstance(raw, cls):\n return raw\n elif isinstance(raw, int):\n return cls(None, raw)\n elif isinstance(raw, str):\n if raw == '':\n return cls('', defaultport)\n parsed = urlparse(raw)\n if not parsed.netloc:\n if parsed.scheme:\n raise ValueError('invalid address {!r}'.format(raw))\n return cls.from_raw('x://' + raw, defaultport=defaultport)\n return cls(\n parsed.hostname or '',\n parsed.port if parsed.port else defaultport,\n )\n elif not raw:\n return cls(None, defaultport)\n else:\n try:\n kwargs = dict(**raw)\n except TypeError:\n return cls(*raw)\n else:\n kwargs.setdefault('host', None)\n kwargs.setdefault('port', defaultport)\n return cls(**kwargs)\n\n @classmethod\n def as_server(cls, host, port):\n \"\"\"Return an address to use as a server address.\"\"\"\n return cls(host, port, isserver=True)\n\n @classmethod\n def as_client(cls, host, port):\n \"\"\"Return an address to use as a server address.\"\"\"\n return cls(host, port, isserver=False)\n\n def __new__(cls, host, port, **kwargs):\n if host == '*':\n host = ''\n isserver = kwargs.pop('isserver', None)\n if isserver is None:\n isserver = (host is None or host == '')\n else:\n isserver = bool(isserver)\n if host is None:\n host = 'localhost'\n self = super(Address, cls).__new__(\n cls,\n str(host),\n int(port) if port is not None else None,\n **kwargs\n )\n self._isserver = isserver\n return self\n\n def __init__(self, *args, **kwargs):\n if self.port is None:\n raise TypeError('missing port')\n if self.port <= 0 or self.port > 65535:\n raise ValueError('port must be positive int < 65535')\n\n def __repr__(self):\n orig = super(Address, self).__repr__()\n return '{}, isserver={})'.format(orig[:-1], self._isserver)\n\n def __eq__(self, other):\n if not super(Address, self).__eq__(other):\n return False\n try:\n other = self.from_raw(other)\n except Exception:\n return False\n return self._isserver == other._isserver\n\n @property\n def isserver(self):\n return self._isserver\n",
"path": "ptvsd/socket.py"
}
] | diff --git a/ptvsd/socket.py b/ptvsd/socket.py
index 49739b292..f5226bb52 100644
--- a/ptvsd/socket.py
+++ b/ptvsd/socket.py
@@ -75,7 +75,7 @@ def create_server(host, port):
host = 'localhost'
server = _new_sock()
server.bind((host, port))
- server.listen(0)
+ server.listen(1)
return server
|
jazzband__pip-tools-1871 | Convert the README from rst to md
This PR converts the documentation from README.rst to README.md.
Related: https://github.com/jazzband/pip-tools/issues/1856
##### Contributor checklist
- [ ] Provided the tests for the changes.
- [x] Assure PR title is short, clear, and good to be included in the user-oriented changelog
##### Maintainer checklist
- [ ] Assure one of these labels is present: `backwards incompatible`, `feature`, `enhancement`, `deprecation`, `bug`, `dependency`, `docs` or `skip-changelog` as they determine changelog listing.
- [ ] Assign the PR to an existing or new milestone for the target version (following [Semantic Versioning](https://blog.versioneye.com/2014/01/16/semantic-versioning/)).
| [
{
"content": "# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\"\"\"Configuration file for the Sphinx documentation builder.\"\"\"\n\nfrom __future__ import annotations\n\nfrom functools import partial\nfrom pathlib import Path\n\nfrom setuptools_scm import get_version\n\n# -- Path setup --------------------------------------------------------------\n\nPROJECT_ROOT_DIR = Path(__file__).parents[1].resolve()\nget_scm_version = partial(get_version, root=PROJECT_ROOT_DIR)\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"pip-tools\"\nauthor = f\"{project} Contributors\"\ncopyright = f\"The {author}\"\n\n# The short X.Y version\nversion = \".\".join(\n get_scm_version(\n local_scheme=\"no-local-version\",\n ).split(\n \".\"\n )[:3],\n)\n\n# The full version, including alpha/beta/rc tags\nrelease = get_scm_version()\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\"myst_parser\"]\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"furo\"\n\n\n# -------------------------------------------------------------------------\ndefault_role = \"any\"\nnitpicky = True\n",
"path": "docs/conf.py"
}
] | [
{
"content": "# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\"\"\"Configuration file for the Sphinx documentation builder.\"\"\"\n\nfrom __future__ import annotations\n\nfrom functools import partial\nfrom pathlib import Path\n\nfrom setuptools_scm import get_version\n\n# -- Path setup --------------------------------------------------------------\n\nPROJECT_ROOT_DIR = Path(__file__).parents[1].resolve()\nget_scm_version = partial(get_version, root=PROJECT_ROOT_DIR)\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"pip-tools\"\nauthor = f\"{project} Contributors\"\ncopyright = f\"The {author}\"\n\n# The short X.Y version\nversion = \".\".join(\n get_scm_version(\n local_scheme=\"no-local-version\",\n ).split(\n \".\"\n )[:3],\n)\n\n# The full version, including alpha/beta/rc tags\nrelease = get_scm_version()\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\"myst_parser\"]\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"furo\"\n\n\n# -------------------------------------------------------------------------\ndefault_role = \"any\"\nnitpicky = True\nsuppress_warnings = [\"myst.xref_missing\"]\n",
"path": "docs/conf.py"
}
] | diff --git a/README.md b/README.md
new file mode 100644
index 000000000..c6267daae
--- /dev/null
+++ b/README.md
@@ -0,0 +1,583 @@
+[![jazzband-image]][jazzband]
+[![pypi][pypi-image]][pypi]
+[![pyversions][pyversions-image]][pyversions]
+[![pre-commit][pre-commit-image]][pre-commit]
+[![buildstatus-gha][buildstatus-gha-image]][buildstatus-gha]
+[![codecov][codecov-image]][codecov]
+
+# pip-tools = pip-compile + pip-sync
+
+A set of command line tools to help you keep your `pip`-based packages fresh,
+even when you've pinned them. You do pin them, right? (In building your Python application and its dependencies for production, you want to make sure that your builds are predictable and deterministic.)
+
+[![pip-tools overview for phase II][pip-tools-overview]][pip-tools-overview]
+
+## Installation
+
+Similar to `pip`, `pip-tools` must be installed in each of your project's
+[virtual environments](https://packaging.python.org/tutorials/installing-packages/#creating-virtual-environments):
+
+```console
+$ source /path/to/venv/bin/activate
+(venv) $ python -m pip install pip-tools
+```
+
+**Note**: all of the remaining example commands assume you've activated your
+project's virtual environment.
+
+## Example usage for `pip-compile`
+
+The `pip-compile` command lets you compile a `requirements.txt` file from
+your dependencies, specified in either `pyproject.toml`, `setup.cfg`,
+`setup.py`, or `requirements.in`.
+
+Run it with `pip-compile` or `python -m piptools compile`. If you use
+multiple Python versions, you can also run `py -X.Y -m piptools compile` on
+Windows and `pythonX.Y -m piptools compile` on other systems.
+
+`pip-compile` should be run from the same virtual environment as your
+project so conditional dependencies that require a specific Python version,
+or other environment markers, resolve relative to your project's
+environment.
+
+**Note**: If `pip-compile` finds an existing `requirements.txt` file that
+fulfils the dependencies then no changes will be made, even if updates are
+available. To compile from scratch, first delete the existing
+`requirements.txt` file, or see
+[Updating requirements](#updating-requirements)
+for alternative approaches.
+
+### Requirements from `pyproject.toml`
+
+The `pyproject.toml` file is the
+[latest standard](https://peps.python.org/pep-0621/) for configuring
+packages and applications, and is recommended for new projects. `pip-compile`
+supports both installing your `project.dependencies` as well as your
+`project.optional-dependencies`. Thanks to the fact that this is an
+official standard, you can use `pip-compile` to pin the dependencies
+in projects that use modern standards-adhering packaging tools like
+[Setuptools](https://setuptools.pypa.io), [Hatch](https://hatch.pypa.io/)
+or [flit](https://flit.pypa.io/).
+
+Suppose you have a 'foobar' Python application that is packaged using `Setuptools`,
+and you want to pin it for production. You can declare the project metadata as:
+
+```toml
+[build-system]
+requires = ["setuptools", "setuptools-scm"]
+build-backend = "setuptools.build_meta"
+
+[project]
+requires-python = ">=3.9"
+name = "foobar"
+dynamic = ["dependencies", "optional-dependencies"]
+
+[tool.setuptools.dynamic]
+dependencies = { file = ["requirements.in"] }
+optional-dependencies.test = { file = ["requirements-test.txt"] }
+
+```
+
+If you have a Django application that is packaged using `Hatch`, and you
+want to pin it for production. You also want to pin your development tools
+in a separate pin file. You declare `django` as a dependency and create an
+optional dependency `dev` that includes `pytest`:
+
+```toml
+[build-system]
+requires = ["hatchling"]
+build-backend = "hatchling.build"
+
+[project]
+name = "my-cool-django-app"
+version = "42"
+dependencies = ["django"]
+
+[project.optional-dependencies]
+dev = ["pytest"]
+
+```
+
+You can produce your pin files as easily as:
+
+```console
+$ pip-compile -o requirements.txt pyproject.toml
+#
+# This file is autogenerated by pip-compile with Python 3.10
+# by the following command:
+#
+# pip-compile --output-file=requirements.txt pyproject.toml
+#
+asgiref==3.6.0
+ # via django
+django==4.1.7
+ # via my-cool-django-app (pyproject.toml)
+sqlparse==0.4.3
+ # via django
+
+$ pip-compile --extra dev -o dev-requirements.txt pyproject.toml
+#
+# This file is autogenerated by pip-compile with Python 3.10
+# by the following command:
+#
+# pip-compile --extra=dev --output-file=dev-requirements.txt pyproject.toml
+#
+asgiref==3.6.0
+ # via django
+attrs==22.2.0
+ # via pytest
+django==4.1.7
+ # via my-cool-django-app (pyproject.toml)
+exceptiongroup==1.1.1
+ # via pytest
+iniconfig==2.0.0
+ # via pytest
+packaging==23.0
+ # via pytest
+pluggy==1.0.0
+ # via pytest
+pytest==7.2.2
+ # via my-cool-django-app (pyproject.toml)
+sqlparse==0.4.3
+ # via django
+tomli==2.0.1
+ # via pytest
+```
+
+This is great for both pinning your applications, but also to keep the CI
+of your open-source Python package stable.
+
+### Requirements from `setup.py` and `setup.cfg`
+
+`pip-compile` has also full support for `setup.py`- and
+`setup.cfg`-based projects that use `setuptools`.
+
+Just define your dependencies and extras as usual and run
+`pip-compile` as above.
+
+### Requirements from `requirements.in`
+
+You can also use plain text files for your requirements (e.g. if you don't
+want your application to be a package). To use a `requirements.in` file to
+declare the Django dependency:
+
+```
+# requirements.in
+django
+```
+
+Now, run `pip-compile requirements.in`:
+
+```console
+$ pip-compile requirements.in
+#
+# This file is autogenerated by pip-compile with Python 3.10
+# by the following command:
+#
+# pip-compile requirements.in
+#
+asgiref==3.6.0
+ # via django
+django==4.1.7
+ # via -r requirements.in
+sqlparse==0.4.3
+ # via django
+```
+
+And it will produce your `requirements.txt`, with all the Django dependencies
+(and all underlying dependencies) pinned.
+
+(updating-requirements)=
+
+### Updating requirements
+
+`pip-compile` generates a `requirements.txt` file using the latest versions
+that fulfil the dependencies you specify in the supported files.
+
+If `pip-compile` finds an existing `requirements.txt` file that fulfils the
+dependencies then no changes will be made, even if updates are available.
+
+To force `pip-compile` to update all packages in an existing
+`requirements.txt`, run `pip-compile --upgrade`.
+
+To update a specific package to the latest or a specific version use the
+`--upgrade-package` or `-P` flag:
+
+```console
+# only update the django package
+$ pip-compile --upgrade-package django
+
+# update both the django and requests packages
+$ pip-compile --upgrade-package django --upgrade-package requests
+
+# update the django package to the latest, and requests to v2.0.0
+$ pip-compile --upgrade-package django --upgrade-package requests==2.0.0
+```
+
+You can combine `--upgrade` and `--upgrade-package` in one command, to
+provide constraints on the allowed upgrades. For example to upgrade all
+packages whilst constraining requests to the latest version less than 3.0:
+
+```console
+$ pip-compile --upgrade --upgrade-package 'requests<3.0'
+```
+
+### Using hashes
+
+If you would like to use _Hash-Checking Mode_ available in `pip` since
+version 8.0, `pip-compile` offers `--generate-hashes` flag:
+
+```console
+$ pip-compile --generate-hashes requirements.in
+#
+# This file is autogenerated by pip-compile with Python 3.10
+# by the following command:
+#
+# pip-compile --generate-hashes requirements.in
+#
+asgiref==3.6.0 \
+ --hash=sha256:71e68008da809b957b7ee4b43dbccff33d1b23519fb8344e33f049897077afac \
+ --hash=sha256:9567dfe7bd8d3c8c892227827c41cce860b368104c3431da67a0c5a65a949506
+ # via django
+django==4.1.7 \
+ --hash=sha256:44f714b81c5f190d9d2ddad01a532fe502fa01c4cb8faf1d081f4264ed15dcd8 \
+ --hash=sha256:f2f431e75adc40039ace496ad3b9f17227022e8b11566f4b363da44c7e44761e
+ # via -r requirements.in
+sqlparse==0.4.3 \
+ --hash=sha256:0323c0ec29cd52bceabc1b4d9d579e311f3e4961b98d174201d5622a23b85e34 \
+ --hash=sha256:69ca804846bb114d2ec380e4360a8a340db83f0ccf3afceeb1404df028f57268
+ # via django
+```
+
+### Output File
+
+To output the pinned requirements in a filename other than
+`requirements.txt`, use `--output-file`. This might be useful for compiling
+multiple files, for example with different constraints on django to test a
+library with both versions using [tox](https://tox.readthedocs.io/en/latest/):
+
+```console
+$ pip-compile --upgrade-package 'django<1.0' --output-file requirements-django0x.txt
+$ pip-compile --upgrade-package 'django<2.0' --output-file requirements-django1x.txt
+```
+
+Or to output to standard output, use `--output-file=-`:
+
+```console
+$ pip-compile --output-file=- > requirements.txt
+$ pip-compile - --output-file=- < requirements.in > requirements.txt
+```
+
+### Forwarding options to `pip`
+
+Any valid `pip` flags or arguments may be passed on with `pip-compile`'s
+`--pip-args` option, e.g.
+
+```console
+$ pip-compile requirements.in --pip-args "--retries 10 --timeout 30"
+```
+
+### Configuration
+
+You might be wrapping the `pip-compile` command in another script. To avoid
+confusing consumers of your custom script you can override the update command
+generated at the top of requirements files by setting the
+`CUSTOM_COMPILE_COMMAND` environment variable.
+
+```console
+$ CUSTOM_COMPILE_COMMAND="./pipcompilewrapper" pip-compile requirements.in
+#
+# This file is autogenerated by pip-compile with Python 3.10
+# by the following command:
+#
+# ./pipcompilewrapper
+#
+asgiref==3.6.0
+ # via django
+django==4.1.7
+ # via -r requirements.in
+sqlparse==0.4.3
+ # via django
+```
+
+### Workflow for layered requirements
+
+If you have different environments that you need to install different but
+compatible packages for, then you can create layered requirements files and use
+one layer to constrain the other.
+
+For example, if you have a Django project where you want the newest `2.1`
+release in production and when developing you want to use the Django debug
+toolbar, then you can create two `*.in` files, one for each layer:
+
+```
+# requirements.in
+django<2.2
+```
+
+At the top of the development requirements `dev-requirements.in` you use `-c
+requirements.txt` to constrain the dev requirements to packages already
+selected for production in `requirements.txt`.
+
+```
+# dev-requirements.in
+-c requirements.txt
+django-debug-toolbar<2.2
+```
+
+First, compile `requirements.txt` as usual:
+
+```
+$ pip-compile
+#
+# This file is autogenerated by pip-compile with Python 3.10
+# by the following command:
+#
+# pip-compile
+#
+django==2.1.15
+ # via -r requirements.in
+pytz==2023.3
+ # via django
+```
+
+Now compile the dev requirements and the `requirements.txt` file is used as
+a constraint:
+
+```console
+$ pip-compile dev-requirements.in
+#
+# This file is autogenerated by pip-compile with Python 3.10
+# by the following command:
+#
+# pip-compile dev-requirements.in
+#
+django==2.1.15
+ # via
+ # -c requirements.txt
+ # django-debug-toolbar
+django-debug-toolbar==2.1
+ # via -r dev-requirements.in
+pytz==2023.3
+ # via
+ # -c requirements.txt
+ # django
+sqlparse==0.4.3
+ # via django-debug-toolbar
+```
+
+As you can see above, even though a `2.2` release of Django is available, the
+dev requirements only include a `2.1` version of Django because they were
+constrained. Now both compiled requirements files can be installed safely in
+the dev environment.
+
+To install requirements in production stage use:
+
+```console
+$ pip-sync
+```
+
+You can install requirements in development stage by:
+
+```console
+$ pip-sync requirements.txt dev-requirements.txt
+```
+
+### Version control integration
+
+You might use `pip-compile` as a hook for the [pre-commit](https://github.com/pre-commit/pre-commit).
+See [pre-commit docs](https://pre-commit.com/) for instructions.
+Sample `.pre-commit-config.yaml`:
+
+```yaml
+repos:
+ - repo: https://github.com/jazzband/pip-tools
+ rev: 6.13.0
+ hooks:
+ - id: pip-compile
+```
+
+You might want to customize `pip-compile` args by configuring `args` and/or `files`, for example:
+
+```yaml
+repos:
+ - repo: https://github.com/jazzband/pip-tools
+ rev: 6.13.0
+ hooks:
+ - id: pip-compile
+ files: ^requirements/production\.(in|txt)$
+ args: [--index-url=https://example.com, requirements/production.in]
+```
+
+If you have multiple requirement files make sure you create a hook for each file.
+
+```yaml
+repos:
+ - repo: https://github.com/jazzband/pip-tools
+ rev: 6.13.0
+ hooks:
+ - id: pip-compile
+ name: pip-compile setup.py
+ files: ^(setup\.py|requirements\.txt)$
+ - id: pip-compile
+ name: pip-compile requirements-dev.in
+ args: [requirements-dev.in]
+ files: ^requirements-dev\.(in|txt)$
+ - id: pip-compile
+ name: pip-compile requirements-lint.in
+ args: [requirements-lint.in]
+ files: ^requirements-lint\.(in|txt)$
+ - id: pip-compile
+ name: pip-compile requirements.in
+ args: [requirements.in]
+ files: ^requirements\.(in|txt)$
+```
+
+### Example usage for `pip-sync`
+
+Now that you have a `requirements.txt`, you can use `pip-sync` to update
+your virtual environment to reflect exactly what's in there. This will
+install/upgrade/uninstall everything necessary to match the
+`requirements.txt` contents.
+
+Run it with `pip-sync` or `python -m piptools sync`. If you use multiple
+Python versions, you can also run `py -X.Y -m piptools sync` on Windows and
+`pythonX.Y -m piptools sync` on other systems.
+
+`pip-sync` must be installed into and run from the same virtual
+environment as your project to identify which packages to install
+or upgrade.
+
+**Be careful**: `pip-sync` is meant to be used only with a
+`requirements.txt` generated by `pip-compile`.
+
+```console
+$ pip-sync
+Uninstalling flake8-2.4.1:
+ Successfully uninstalled flake8-2.4.1
+Collecting click==4.1
+ Downloading click-4.1-py2.py3-none-any.whl (62kB)
+ 100% |................................| 65kB 1.8MB/s
+ Found existing installation: click 4.0
+ Uninstalling click-4.0:
+ Successfully uninstalled click-4.0
+Successfully installed click-4.1
+```
+
+To sync multiple `*.txt` dependency lists, just pass them in via command
+line arguments, e.g.
+
+```console
+$ pip-sync dev-requirements.txt requirements.txt
+```
+
+Passing in empty arguments would cause it to default to `requirements.txt`.
+
+Any valid `pip install` flags or arguments may be passed with `pip-sync`'s
+`--pip-args` option, e.g.
+
+```console
+$ pip-sync requirements.txt --pip-args "--no-cache-dir --no-deps"
+```
+
+**Note**: `pip-sync` will not upgrade or uninstall packaging tools like
+`setuptools`, `pip`, or `pip-tools` itself. Use `python -m pip install --upgrade`
+to upgrade those packages.
+
+### Should I commit `requirements.in` and `requirements.txt` to source control?
+
+Generally, yes. If you want a reproducible environment installation available from your source control,
+then yes, you should commit both `requirements.in` and `requirements.txt` to source control.
+
+Note that if you are deploying on multiple Python environments (read the section below),
+then you must commit a separate output file for each Python environment.
+We suggest to use the `{env}-requirements.txt` format
+(ex: `win32-py3.7-requirements.txt`, `macos-py3.10-requirements.txt`, etc.).
+
+### Cross-environment usage of `requirements.in`/`requirements.txt` and `pip-compile`
+
+The dependencies of a package can change depending on the Python environment in which it
+is installed. Here, we define a Python environment as the combination of Operating
+System, Python version (3.7, 3.8, etc.), and Python implementation (CPython, PyPy,
+etc.). For an exact definition, refer to the possible combinations of [PEP 508
+environment markers][environment-markers].
+
+As the resulting `requirements.txt` can differ for each environment, users must
+execute `pip-compile` **on each Python environment separately** to generate a
+`requirements.txt` valid for each said environment. The same `requirements.in` can
+be used as the source file for all environments, using
+[PEP 508 environment markers][environment-markers] as
+needed, the same way it would be done for regular `pip` cross-environment usage.
+
+If the generated `requirements.txt` remains exactly the same for all Python
+environments, then it can be used across Python environments safely. **But** users
+should be careful as any package update can introduce environment-dependent
+dependencies, making any newly generated `requirements.txt` environment-dependent too.
+As a general rule, it's advised that users should still always execute `pip-compile`
+on each targeted Python environment to avoid issues.
+
+### Other useful tools
+
+- [pipdeptree](https://github.com/tox-dev/pipdeptree) to print the dependency tree of the installed packages.
+- `requirements.in`/`requirements.txt` syntax highlighting:
+
+ - [requirements.txt.vim](https://github.com/raimon49/requirements.txt.vim) for Vim.
+ - [Python extension for VS Code](https://marketplace.visualstudio.com/items?itemName=ms-python.python) for VS Code.
+ - [pip-requirements.el](https://github.com/Wilfred/pip-requirements.el) for Emacs.
+
+### Deprecations
+
+This section lists `pip-tools` features that are currently deprecated.
+
+- In future versions, the `--allow-unsafe` behavior will be enabled by
+ default. Use `--no-allow-unsafe` to keep the old behavior. It is
+ recommended to pass the `--allow-unsafe` now to adapt to the upcoming
+ change.
+- Legacy resolver is deprecated and will be removed in future versions.
+ Use `--resolver=backtracking` instead.
+
+### A Note on Resolvers
+
+You can choose from either the legacy or the backtracking resolver.
+The backtracking resolver is recommended, and will become the default
+with the 7.0 release.
+
+Use it now with the `--resolver=backtracking` option to `pip-compile`.
+
+The legacy resolver will occasionally fail to resolve dependencies. The
+backtracking resolver is more robust, but can take longer to run in
+general.
+
+You can continue using the legacy resolver with `--resolver=legacy`.
+
+### Versions and compatibility
+
+The table below summarizes the latest `pip-tools` versions with the required
+`pip` and Python versions. Generally, `pip-tools` supports the same Python
+versions as the required `pip` versions.
+
+| pip-tools | pip | Python |
+| -------------- | -------------- | -------------- |
+| 4.5.\* | 8.1.3 - 20.0.2 | 2.7, 3.5 - 3.8 |
+| 5.0.0 - 5.3.0 | 20.0 - 20.1.1 | 2.7, 3.5 - 3.8 |
+| 5.4.0 | 20.1 - 20.3.\* | 2.7, 3.5 - 3.8 |
+| 5.5.0 | 20.1 - 20.3.\* | 2.7, 3.5 - 3.9 |
+| 6.0.0 - 6.3.1 | 20.3 - 21.2.\* | 3.6 - 3.9 |
+| 6.4.0 | 21.2 - 21.3.\* | 3.6 - 3.10 |
+| 6.5.0 - 6.10.0 | 21.2 - 22.3.\* | 3.7 - 3.11 |
+| 6.11.0+ | 22.2+ | 3.7 - 3.11 |
+
+[jazzband]: https://jazzband.co/
+[jazzband-image]: https://jazzband.co/static/img/badge.svg
+[pypi]: https://pypi.org/project/pip-tools/
+[pypi-image]: https://img.shields.io/pypi/v/pip-tools.svg
+[pyversions]: https://pypi.org/project/pip-tools/
+[pyversions-image]: https://img.shields.io/pypi/pyversions/pip-tools.svg
+[pre-commit]: https://results.pre-commit.ci/latest/github/jazzband/pip-tools/main
+[pre-commit-image]: https://results.pre-commit.ci/badge/github/jazzband/pip-tools/main.svg
+[buildstatus-gha]: https://github.com/jazzband/pip-tools/actions?query=workflow%3ACI
+[buildstatus-gha-image]: https://github.com/jazzband/pip-tools/workflows/CI/badge.svg
+[codecov]: https://codecov.io/gh/jazzband/pip-tools
+[codecov-image]: https://codecov.io/gh/jazzband/pip-tools/branch/main/graph/badge.svg
+[pip-tools-overview]: https://github.com/jazzband/pip-tools/raw/main/img/pip-tools-overview.svg
+[environment-markers]: https://peps.python.org/pep-0508/#environment-markers
diff --git a/README.rst b/README.rst
deleted file mode 100644
index d88b75e0b..000000000
--- a/README.rst
+++ /dev/null
@@ -1,624 +0,0 @@
-|jazzband| |pypi| |pyversions| |pre-commit| |buildstatus-gha| |codecov|
-
-==================================
-pip-tools = pip-compile + pip-sync
-==================================
-
-A set of command line tools to help you keep your ``pip``-based packages fresh,
-even when you've pinned them. You do pin them, right? (In building your Python application and its dependencies for production, you want to make sure that your builds are predictable and deterministic.)
-
-.. image:: https://github.com/jazzband/pip-tools/raw/main/img/pip-tools-overview.svg
- :alt: pip-tools overview for phase II
-
-.. |buildstatus-gha| image:: https://github.com/jazzband/pip-tools/workflows/CI/badge.svg
- :alt: GitHub Actions build status
- :target: https://github.com/jazzband/pip-tools/actions?query=workflow%3ACI
-.. |codecov| image:: https://codecov.io/gh/jazzband/pip-tools/branch/main/graph/badge.svg
- :alt: Coverage
- :target: https://codecov.io/gh/jazzband/pip-tools
-.. |jazzband| image:: https://jazzband.co/static/img/badge.svg
- :alt: Jazzband
- :target: https://jazzband.co/
-.. |pre-commit| image:: https://results.pre-commit.ci/badge/github/jazzband/pip-tools/main.svg
- :alt: pre-commit.ci status
- :target: https://results.pre-commit.ci/latest/github/jazzband/pip-tools/main
-.. |pypi| image:: https://img.shields.io/pypi/v/pip-tools.svg
- :alt: PyPI version
- :target: https://pypi.org/project/pip-tools/
-.. |pyversions| image:: https://img.shields.io/pypi/pyversions/pip-tools.svg
- :alt: Supported Python versions
- :target: https://pypi.org/project/pip-tools/
-.. _You do pin them, right?: https://nvie.com/posts/pin-your-packages/
-
-Installation
-============
-
-Similar to ``pip``, ``pip-tools`` must be installed in each of your project's
-`virtual environments`_:
-
-.. code-block:: bash
-
- $ source /path/to/venv/bin/activate
- (venv) $ python -m pip install pip-tools
-
-**Note**: all of the remaining example commands assume you've activated your
-project's virtual environment.
-
-.. _virtual environments: https://packaging.python.org/tutorials/installing-packages/#creating-virtual-environments
-
-Example usage for ``pip-compile``
-=================================
-
-The ``pip-compile`` command lets you compile a ``requirements.txt`` file from
-your dependencies, specified in either ``pyproject.toml``, ``setup.cfg``,
-``setup.py``, or ``requirements.in``.
-
-Run it with ``pip-compile`` or ``python -m piptools compile``. If you use
-multiple Python versions, you can also run ``py -X.Y -m piptools compile`` on
-Windows and ``pythonX.Y -m piptools compile`` on other systems.
-
-``pip-compile`` should be run from the same virtual environment as your
-project so conditional dependencies that require a specific Python version,
-or other environment markers, resolve relative to your project's
-environment.
-
-**Note**: If ``pip-compile`` finds an existing ``requirements.txt`` file that
-fulfils the dependencies then no changes will be made, even if updates are
-available. To compile from scratch, first delete the existing
-``requirements.txt`` file, or see `Updating requirements`_ for alternative
-approaches.
-
-Requirements from ``pyproject.toml``
-------------------------------------
-
-The ``pyproject.toml`` file is the
-`latest standard <https://peps.python.org/pep-0621/>`_ for configuring
-packages and applications, and is recommended for new projects. ``pip-compile``
-supports both installing your ``project.dependencies`` as well as your
-``project.optional-dependencies``. Thanks to the fact that this is an
-official standard, you can use ``pip-compile`` to pin the dependencies
-in projects that use modern standards-adhering packaging tools like
-`Setuptools <https://setuptools.pypa.io>`_ , `Hatch <https://hatch.pypa.io/>`_
-or `flit <https://flit.pypa.io/>`_.
-
-Suppose you have a 'foobar' Python application that is packaged using ``Setuptools``,
-and you want to pin it for production. You can declare the project metadata as:
-
-.. code-block:: toml
-
- [build-system]
- requires = ["setuptools", "setuptools-scm"]
- build-backend = "setuptools.build_meta"
-
- [project]
- requires-python = ">=3.9"
- name = "foobar"
- dynamic = ["dependencies", "optional-dependencies"]
-
- [tool.setuptools.dynamic]
- dependencies = { file = ["requirements.in"] }
- optional-dependencies.test = { file = ["requirements-test.txt"] }
-
-If you have a Django application that is packaged using ``Hatch``, and you
-want to pin it for production. You also want to pin your development tools
-in a separate pin file. You declare ``django`` as a dependency and create an
-optional dependency ``dev`` that includes ``pytest``:
-
-.. code-block:: toml
-
- [build-system]
- requires = ["hatchling"]
- build-backend = "hatchling.build"
-
- [project]
- name = "my-cool-django-app"
- version = "42"
- dependencies = ["django"]
-
- [project.optional-dependencies]
- dev = ["pytest"]
-
-You can produce your pin files as easily as:
-
-.. code-block:: console
-
- $ pip-compile -o requirements.txt pyproject.toml
- #
- # This file is autogenerated by pip-compile with Python 3.10
- # by the following command:
- #
- # pip-compile --output-file=requirements.txt pyproject.toml
- #
- asgiref==3.6.0
- # via django
- django==4.1.7
- # via my-cool-django-app (pyproject.toml)
- sqlparse==0.4.3
- # via django
-
- $ pip-compile --extra dev -o dev-requirements.txt pyproject.toml
- #
- # This file is autogenerated by pip-compile with Python 3.10
- # by the following command:
- #
- # pip-compile --extra=dev --output-file=dev-requirements.txt pyproject.toml
- #
- asgiref==3.6.0
- # via django
- attrs==22.2.0
- # via pytest
- django==4.1.7
- # via my-cool-django-app (pyproject.toml)
- exceptiongroup==1.1.1
- # via pytest
- iniconfig==2.0.0
- # via pytest
- packaging==23.0
- # via pytest
- pluggy==1.0.0
- # via pytest
- pytest==7.2.2
- # via my-cool-django-app (pyproject.toml)
- sqlparse==0.4.3
- # via django
- tomli==2.0.1
- # via pytest
-
-This is great for both pinning your applications, but also to keep the CI
-of your open-source Python package stable.
-
-Requirements from ``setup.py`` and ``setup.cfg``
-------------------------------------------------
-
-``pip-compile`` has also full support for ``setup.py``- and
-``setup.cfg``-based projects that use ``setuptools``.
-
-Just define your dependencies and extras as usual and run
-``pip-compile`` as above.
-
-Requirements from ``requirements.in``
--------------------------------------
-
-You can also use plain text files for your requirements (e.g. if you don't
-want your application to be a package). To use a ``requirements.in`` file to
-declare the Django dependency:
-
-.. code-block:: ini
-
- # requirements.in
- django
-
-Now, run ``pip-compile requirements.in``:
-
-.. code-block:: bash
-
- $ pip-compile requirements.in
- #
- # This file is autogenerated by pip-compile with Python 3.10
- # by the following command:
- #
- # pip-compile requirements.in
- #
- asgiref==3.6.0
- # via django
- django==4.1.7
- # via -r requirements.in
- sqlparse==0.4.3
- # via django
-
-And it will produce your ``requirements.txt``, with all the Django dependencies
-(and all underlying dependencies) pinned.
-
-.. _Updating requirements:
-
-Updating requirements
----------------------
-
-``pip-compile`` generates a ``requirements.txt`` file using the latest versions
-that fulfil the dependencies you specify in the supported files.
-
-If ``pip-compile`` finds an existing ``requirements.txt`` file that fulfils the
-dependencies then no changes will be made, even if updates are available.
-
-To force ``pip-compile`` to update all packages in an existing
-``requirements.txt``, run ``pip-compile --upgrade``.
-
-To update a specific package to the latest or a specific version use the
-``--upgrade-package`` or ``-P`` flag:
-
-.. code-block:: bash
-
- # only update the django package
- $ pip-compile --upgrade-package django
-
- # update both the django and requests packages
- $ pip-compile --upgrade-package django --upgrade-package requests
-
- # update the django package to the latest, and requests to v2.0.0
- $ pip-compile --upgrade-package django --upgrade-package requests==2.0.0
-
-You can combine ``--upgrade`` and ``--upgrade-package`` in one command, to
-provide constraints on the allowed upgrades. For example to upgrade all
-packages whilst constraining requests to the latest version less than 3.0:
-
-.. code-block:: bash
-
- $ pip-compile --upgrade --upgrade-package 'requests<3.0'
-
-Using hashes
-------------
-
-If you would like to use *Hash-Checking Mode* available in ``pip`` since
-version 8.0, ``pip-compile`` offers ``--generate-hashes`` flag:
-
-.. code-block:: bash
-
- $ pip-compile --generate-hashes requirements.in
- #
- # This file is autogenerated by pip-compile with Python 3.10
- # by the following command:
- #
- # pip-compile --generate-hashes requirements.in
- #
- asgiref==3.6.0 \
- --hash=sha256:71e68008da809b957b7ee4b43dbccff33d1b23519fb8344e33f049897077afac \
- --hash=sha256:9567dfe7bd8d3c8c892227827c41cce860b368104c3431da67a0c5a65a949506
- # via django
- django==4.1.7 \
- --hash=sha256:44f714b81c5f190d9d2ddad01a532fe502fa01c4cb8faf1d081f4264ed15dcd8 \
- --hash=sha256:f2f431e75adc40039ace496ad3b9f17227022e8b11566f4b363da44c7e44761e
- # via -r requirements.in
- sqlparse==0.4.3 \
- --hash=sha256:0323c0ec29cd52bceabc1b4d9d579e311f3e4961b98d174201d5622a23b85e34 \
- --hash=sha256:69ca804846bb114d2ec380e4360a8a340db83f0ccf3afceeb1404df028f57268
- # via django
-
-Output File
------------
-
-To output the pinned requirements in a filename other than
-``requirements.txt``, use ``--output-file``. This might be useful for compiling
-multiple files, for example with different constraints on django to test a
-library with both versions using `tox <https://tox.readthedocs.io/en/latest/>`__:
-
-.. code-block:: bash
-
- $ pip-compile --upgrade-package 'django<1.0' --output-file requirements-django0x.txt
- $ pip-compile --upgrade-package 'django<2.0' --output-file requirements-django1x.txt
-
-Or to output to standard output, use ``--output-file=-``:
-
-.. code-block:: bash
-
- $ pip-compile --output-file=- > requirements.txt
- $ pip-compile - --output-file=- < requirements.in > requirements.txt
-
-Forwarding options to ``pip``
------------------------------
-
-Any valid ``pip`` flags or arguments may be passed on with ``pip-compile``'s
-``--pip-args`` option, e.g.
-
-.. code-block:: bash
-
- $ pip-compile requirements.in --pip-args "--retries 10 --timeout 30"
-
-Configuration
--------------
-
-You might be wrapping the ``pip-compile`` command in another script. To avoid
-confusing consumers of your custom script you can override the update command
-generated at the top of requirements files by setting the
-``CUSTOM_COMPILE_COMMAND`` environment variable.
-
-.. code-block:: bash
-
- $ CUSTOM_COMPILE_COMMAND="./pipcompilewrapper" pip-compile requirements.in
- #
- # This file is autogenerated by pip-compile with Python 3.10
- # by the following command:
- #
- # ./pipcompilewrapper
- #
- asgiref==3.6.0
- # via django
- django==4.1.7
- # via -r requirements.in
- sqlparse==0.4.3
- # via django
-
-Workflow for layered requirements
----------------------------------
-
-If you have different environments that you need to install different but
-compatible packages for, then you can create layered requirements files and use
-one layer to constrain the other.
-
-For example, if you have a Django project where you want the newest ``2.1``
-release in production and when developing you want to use the Django debug
-toolbar, then you can create two ``*.in`` files, one for each layer:
-
-.. code-block:: ini
-
- # requirements.in
- django<2.2
-
-At the top of the development requirements ``dev-requirements.in`` you use ``-c
-requirements.txt`` to constrain the dev requirements to packages already
-selected for production in ``requirements.txt``.
-
-.. code-block:: ini
-
- # dev-requirements.in
- -c requirements.txt
- django-debug-toolbar<2.2
-
-First, compile ``requirements.txt`` as usual:
-
-.. code-block:: bash
-
- $ pip-compile
- #
- # This file is autogenerated by pip-compile with Python 3.10
- # by the following command:
- #
- # pip-compile
- #
- django==2.1.15
- # via -r requirements.in
- pytz==2023.3
- # via django
-
-
-Now compile the dev requirements and the ``requirements.txt`` file is used as
-a constraint:
-
-.. code-block:: bash
-
- $ pip-compile dev-requirements.in
- #
- # This file is autogenerated by pip-compile with Python 3.10
- # by the following command:
- #
- # pip-compile dev-requirements.in
- #
- django==2.1.15
- # via
- # -c requirements.txt
- # django-debug-toolbar
- django-debug-toolbar==2.1
- # via -r dev-requirements.in
- pytz==2023.3
- # via
- # -c requirements.txt
- # django
- sqlparse==0.4.3
- # via django-debug-toolbar
-
-As you can see above, even though a ``2.2`` release of Django is available, the
-dev requirements only include a ``2.1`` version of Django because they were
-constrained. Now both compiled requirements files can be installed safely in
-the dev environment.
-
-To install requirements in production stage use:
-
-.. code-block:: bash
-
- $ pip-sync
-
-You can install requirements in development stage by:
-
-.. code-block:: bash
-
- $ pip-sync requirements.txt dev-requirements.txt
-
-
-Version control integration
----------------------------
-
-You might use ``pip-compile`` as a hook for the `pre-commit <https://github.com/pre-commit/pre-commit>`_.
-See `pre-commit docs <https://pre-commit.com/>`_ for instructions.
-Sample ``.pre-commit-config.yaml``:
-
-.. code-block:: yaml
-
- repos:
- - repo: https://github.com/jazzband/pip-tools
- rev: 6.13.0
- hooks:
- - id: pip-compile
-
-You might want to customize ``pip-compile`` args by configuring ``args`` and/or ``files``, for example:
-
-.. code-block:: yaml
-
- repos:
- - repo: https://github.com/jazzband/pip-tools
- rev: 6.13.0
- hooks:
- - id: pip-compile
- files: ^requirements/production\.(in|txt)$
- args: [--index-url=https://example.com, requirements/production.in]
-
-If you have multiple requirement files make sure you create a hook for each file.
-
-.. code-block:: yaml
-
- repos:
- - repo: https://github.com/jazzband/pip-tools
- rev: 6.13.0
- hooks:
- - id: pip-compile
- name: pip-compile setup.py
- files: ^(setup\.py|requirements\.txt)$
- - id: pip-compile
- name: pip-compile requirements-dev.in
- args: [requirements-dev.in]
- files: ^requirements-dev\.(in|txt)$
- - id: pip-compile
- name: pip-compile requirements-lint.in
- args: [requirements-lint.in]
- files: ^requirements-lint\.(in|txt)$
- - id: pip-compile
- name: pip-compile requirements.in
- args: [requirements.in]
- files: ^requirements\.(in|txt)$
-
-
-Example usage for ``pip-sync``
-==============================
-
-Now that you have a ``requirements.txt``, you can use ``pip-sync`` to update
-your virtual environment to reflect exactly what's in there. This will
-install/upgrade/uninstall everything necessary to match the
-``requirements.txt`` contents.
-
-Run it with ``pip-sync`` or ``python -m piptools sync``. If you use multiple
-Python versions, you can also run ``py -X.Y -m piptools sync`` on Windows and
-``pythonX.Y -m piptools sync`` on other systems.
-
-``pip-sync`` must be installed into and run from the same virtual
-environment as your project to identify which packages to install
-or upgrade.
-
-**Be careful**: ``pip-sync`` is meant to be used only with a
-``requirements.txt`` generated by ``pip-compile``.
-
-.. code-block:: bash
-
- $ pip-sync
- Uninstalling flake8-2.4.1:
- Successfully uninstalled flake8-2.4.1
- Collecting click==4.1
- Downloading click-4.1-py2.py3-none-any.whl (62kB)
- 100% |................................| 65kB 1.8MB/s
- Found existing installation: click 4.0
- Uninstalling click-4.0:
- Successfully uninstalled click-4.0
- Successfully installed click-4.1
-
-To sync multiple ``*.txt`` dependency lists, just pass them in via command
-line arguments, e.g.
-
-.. code-block:: bash
-
- $ pip-sync dev-requirements.txt requirements.txt
-
-Passing in empty arguments would cause it to default to ``requirements.txt``.
-
-Any valid ``pip install`` flags or arguments may be passed with ``pip-sync``'s
-``--pip-args`` option, e.g.
-
-.. code-block:: bash
-
- $ pip-sync requirements.txt --pip-args "--no-cache-dir --no-deps"
-
-**Note**: ``pip-sync`` will not upgrade or uninstall packaging tools like
-``setuptools``, ``pip``, or ``pip-tools`` itself. Use ``python -m pip install --upgrade``
-to upgrade those packages.
-
-Should I commit ``requirements.in`` and ``requirements.txt`` to source control?
-===============================================================================
-
-Generally, yes. If you want a reproducible environment installation available from your source control,
-then yes, you should commit both ``requirements.in`` and ``requirements.txt`` to source control.
-
-Note that if you are deploying on multiple Python environments (read the section below),
-then you must commit a separate output file for each Python environment.
-We suggest to use the ``{env}-requirements.txt`` format
-(ex: ``win32-py3.7-requirements.txt``, ``macos-py3.10-requirements.txt``, etc.).
-
-
-Cross-environment usage of ``requirements.in``/``requirements.txt`` and ``pip-compile``
-=======================================================================================
-
-The dependencies of a package can change depending on the Python environment in which it
-is installed. Here, we define a Python environment as the combination of Operating
-System, Python version (3.7, 3.8, etc.), and Python implementation (CPython, PyPy,
-etc.). For an exact definition, refer to the possible combinations of `PEP 508
-environment markers`_.
-
-As the resulting ``requirements.txt`` can differ for each environment, users must
-execute ``pip-compile`` **on each Python environment separately** to generate a
-``requirements.txt`` valid for each said environment. The same ``requirements.in`` can
-be used as the source file for all environments, using `PEP 508 environment markers`_ as
-needed, the same way it would be done for regular ``pip`` cross-environment usage.
-
-If the generated ``requirements.txt`` remains exactly the same for all Python
-environments, then it can be used across Python environments safely. **But** users
-should be careful as any package update can introduce environment-dependent
-dependencies, making any newly generated ``requirements.txt`` environment-dependent too.
-As a general rule, it's advised that users should still always execute ``pip-compile``
-on each targeted Python environment to avoid issues.
-
-.. _PEP 508 environment markers: https://www.python.org/dev/peps/pep-0508/#environment-markers
-
-Other useful tools
-==================
-
-- `pipdeptree`_ to print the dependency tree of the installed packages.
-- ``requirements.in``/``requirements.txt`` syntax highlighting:
-
- * `requirements.txt.vim`_ for Vim.
- * `Python extension for VS Code`_ for VS Code.
- * `pip-requirements.el`_ for Emacs.
-
-.. _pipdeptree: https://github.com/naiquevin/pipdeptree
-.. _requirements.txt.vim: https://github.com/raimon49/requirements.txt.vim
-.. _Python extension for VS Code: https://marketplace.visualstudio.com/items?itemName=ms-python.python
-.. _pip-requirements.el: https://github.com/Wilfred/pip-requirements.el
-
-
-Deprecations
-============
-
-This section lists ``pip-tools`` features that are currently deprecated.
-
-- In future versions, the ``--allow-unsafe`` behavior will be enabled by
- default. Use ``--no-allow-unsafe`` to keep the old behavior. It is
- recommended to pass the ``--allow-unsafe`` now to adapt to the upcoming
- change.
-- Legacy resolver is deprecated and will be removed in future versions.
- Use ``--resolver=backtracking`` instead.
-
-A Note on Resolvers
-===================
-
-You can choose from either the legacy or the backtracking resolver.
-The backtracking resolver is recommended, and will become the default
-with the 7.0 release.
-
-Use it now with the ``--resolver=backtracking`` option to ``pip-compile``.
-
-The legacy resolver will occasionally fail to resolve dependencies. The
-backtracking resolver is more robust, but can take longer to run in
-general.
-
-You can continue using the legacy resolver with ``--resolver=legacy``.
-
-Versions and compatibility
-==========================
-
-The table below summarizes the latest ``pip-tools`` versions with the required
-``pip`` and Python versions. Generally, ``pip-tools`` supports the same Python
-versions as the required ``pip`` versions.
-
-+----------------+----------------+----------------+
-| pip-tools | pip | Python |
-+================+================+================+
-| 4.5.* | 8.1.3 - 20.0.2 | 2.7, 3.5 - 3.8 |
-+----------------+----------------+----------------+
-| 5.0.0 - 5.3.0 | 20.0 - 20.1.1 | 2.7, 3.5 - 3.8 |
-+----------------+----------------+----------------+
-| 5.4.0 | 20.1 - 20.3.* | 2.7, 3.5 - 3.8 |
-+----------------+----------------+----------------+
-| 5.5.0 | 20.1 - 20.3.* | 2.7, 3.5 - 3.9 |
-+----------------+----------------+----------------+
-| 6.0.0 - 6.3.1 | 20.3 - 21.2.* | 3.6 - 3.9 |
-+----------------+----------------+----------------+
-| 6.4.0 | 21.2 - 21.3.* | 3.6 - 3.10 |
-+----------------+----------------+----------------+
-| 6.5.0 - 6.10.0 | 21.2 - 22.3.* | 3.7 - 3.11 |
-+----------------+----------------+----------------+
-| 6.11.0+ | 22.2+ | 3.7 - 3.11 |
-+----------------+----------------+----------------+
diff --git a/docs/conf.py b/docs/conf.py
index e2e39ccdb..c8f91cf22 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -52,3 +52,4 @@
# -------------------------------------------------------------------------
default_role = "any"
nitpicky = True
+suppress_warnings = ["myst.xref_missing"]
diff --git a/docs/index.md b/docs/index.md
new file mode 100644
index 000000000..570ca947b
--- /dev/null
+++ b/docs/index.md
@@ -0,0 +1,14 @@
+# Welcome to pip-tools' documentation!
+
+```{include} ../README.md
+
+```
+
+```{toctree}
+:hidden:
+:maxdepth: 2
+:caption: Contents
+
+contributing
+changelog
+```
diff --git a/docs/index.rst b/docs/index.rst
deleted file mode 100644
index fb0631f5e..000000000
--- a/docs/index.rst
+++ /dev/null
@@ -1,26 +0,0 @@
-.. pip-tools documentation master file, created by
- sphinx-quickstart on Tue Jun 22 00:43:50 2021.
- You can adapt this file completely to your liking, but it should at least
- contain the root `toctree` directive.
-
-====================================
-Welcome to pip-tools' documentation!
-====================================
-
-.. include:: ../README.rst
-
-.. toctree::
- :hidden:
- :maxdepth: 2
- :caption: Contents:
-
- contributing.md
- changelog.md
-
-
-Indices and tables
-==================
-
-* :ref:`genindex`
-* :ref:`modindex`
-* :ref:`search`
diff --git a/pyproject.toml b/pyproject.toml
index 4a07c5a9f..8c061c9ad 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -8,7 +8,7 @@ requires-python = ">=3.7"
dynamic = ["version"]
name = "pip-tools"
description = "pip-tools keeps your pinned dependencies fresh."
-readme = "README.rst"
+readme = "README.md"
authors = [{ "name" = "Vincent Driessen", "email" = "[email protected]" }]
license = { text = "BSD" }
classifiers = [
|
TencentBlueKing__bk-user-164 | Department query API with ?lookup_field=name returns 404 when the department name contains "."
**Describe the problem you encountered**
Please describe the problem concisely; the clearer the description, the more efficiently it can be resolved.
**Steps to reproduce**
1. Create a directory whose name contains a dot, e.g. 广东省.深圳市
2. Query it through the API: http://{host:port}/api/v2/departments/广东省.深圳市/?lookup_field=name
The query returns 404.
Please describe how to reproduce the problem; if that is hard to put into words, screenshots or a video can help.
**Expected behavior**
The expected normal behavior
**Version**
- Provide the exact version number of the user management (bk-user) deployment
- Is this an Enterprise Edition issue?
**If this is a SaaS page issue, please provide the operating system and browser information**
- OS: [e.g. iOS]
- Browser [e.g. chrome, safari]
- Version [e.g. 22]
**Additional information**
Anything you think would help resolve the problem
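A minimal sketch of the failure mode (added for illustration, not part of the original report), assuming the department route captures the lookup value with a `[\w\-]+` pattern as in the project's URL configuration:
```python
import re

# The route's capture group accepts word characters and "-", but not ".",
# so a name such as "广东省.深圳市" never matches and the resolver returns 404.
old_capture = re.compile(r"^(?P<lookup>[\w\-]+)$")
fixed_capture = re.compile(r"^(?P<lookup>[\w\-\.]+)$")  # also accept "."

name = "广东省.深圳市"
print(bool(old_capture.match(name)))    # False -> no URL match -> 404
print(bool(fixed_capture.match(name)))  # True  -> the route resolves
```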
| [
{
"content": "# -*- coding: utf-8 -*-\n\"\"\"\nTencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available.\nCopyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.\nLicensed under the MIT License (the \"License\"); you may not use this file except in compliance with the License.\nYou may obtain a copy of the License at http://opensource.org/licenses/MIT\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on\nan \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\nspecific language governing permissions and limitations under the License.\n\"\"\"\nfrom bkuser_core.common.constants import LOOKUP_FIELD_NAME\nfrom django.conf.urls import url\n\nfrom . import views\n\nPVAR_DEPARTMENT_ID = r\"(?P<%s>[\\w\\-]+)\" % LOOKUP_FIELD_NAME\n\nurlpatterns = [\n url(\n r\"^api/v2/departments/$\",\n views.DepartmentViewSet.as_view(\n {\n \"get\": \"list\",\n \"post\": \"create\",\n }\n ),\n name=\"departments\",\n ),\n url(\n r\"^api/v2/departments/%s/$\" % PVAR_DEPARTMENT_ID,\n views.DepartmentViewSet.as_view(\n {\n \"get\": \"retrieve\",\n \"post\": \"update\",\n \"delete\": \"destroy\",\n \"patch\": \"partial_update\",\n }\n ),\n name=\"departments.action\",\n ),\n url(\n r\"^api/v2/departments/%s/restoration/$\" % PVAR_DEPARTMENT_ID,\n views.DepartmentViewSet.as_view(\n {\n \"post\": \"restoration\",\n }\n ),\n name=\"departments.restoration\",\n ),\n url(\n r\"^api/v2/departments/%s/ancestors/$\" % PVAR_DEPARTMENT_ID,\n views.DepartmentViewSet.as_view(\n {\n \"get\": \"get_ancestor\",\n }\n ),\n name=\"departments.ancestors\",\n ),\n url(\n r\"^api/v2/departments/%s/children/$\" % PVAR_DEPARTMENT_ID,\n views.DepartmentViewSet.as_view(\n {\n \"get\": \"get_children\",\n }\n ),\n name=\"departments.children\",\n ),\n url(\n r\"^api/v2/departments/%s/profiles/$\" % PVAR_DEPARTMENT_ID,\n views.DepartmentViewSet.as_view({\"get\": \"get_profiles\", \"post\": \"add_profiles\"}),\n name=\"departments.profiles\",\n ),\n #########\n # Batch #\n #########\n url(\n r\"^api/v2/batch/departments/profiles/$\",\n views.BatchDepartmentsViewSet.as_view(\n {\n \"get\": \"multiple_retrieve_profiles\",\n }\n ),\n name=\"department.batch\",\n ),\n ########\n # Edge #\n ########\n url(\n r\"^api/v2/edges/department_profile/$\",\n views.DepartmentProfileEdgeViewSet.as_view({\"get\": \"list\"}),\n name=\"edge.department_profile\",\n ),\n #############\n # shortcuts #\n #############\n url(\n r\"^api/v2/shortcuts/departments/tops/$\",\n views.DepartmentViewSet.as_view({\"get\": \"list_tops\"}),\n name=\"shortcuts.departments.list.tops\",\n ),\n]\n",
"path": "src/api/bkuser_core/departments/urls.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n\"\"\"\nTencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available.\nCopyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.\nLicensed under the MIT License (the \"License\"); you may not use this file except in compliance with the License.\nYou may obtain a copy of the License at http://opensource.org/licenses/MIT\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on\nan \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\nspecific language governing permissions and limitations under the License.\n\"\"\"\nfrom bkuser_core.common.constants import LOOKUP_FIELD_NAME\nfrom django.conf.urls import url\n\nfrom . import views\n\nPVAR_DEPARTMENT_ID = r\"(?P<%s>[\\w\\-\\.]+)\" % LOOKUP_FIELD_NAME\n\nurlpatterns = [\n url(\n r\"^api/v2/departments/$\",\n views.DepartmentViewSet.as_view(\n {\n \"get\": \"list\",\n \"post\": \"create\",\n }\n ),\n name=\"departments\",\n ),\n url(\n r\"^api/v2/departments/%s/$\" % PVAR_DEPARTMENT_ID,\n views.DepartmentViewSet.as_view(\n {\n \"get\": \"retrieve\",\n \"post\": \"update\",\n \"delete\": \"destroy\",\n \"patch\": \"partial_update\",\n }\n ),\n name=\"departments.action\",\n ),\n url(\n r\"^api/v2/departments/%s/restoration/$\" % PVAR_DEPARTMENT_ID,\n views.DepartmentViewSet.as_view(\n {\n \"post\": \"restoration\",\n }\n ),\n name=\"departments.restoration\",\n ),\n url(\n r\"^api/v2/departments/%s/ancestors/$\" % PVAR_DEPARTMENT_ID,\n views.DepartmentViewSet.as_view(\n {\n \"get\": \"get_ancestor\",\n }\n ),\n name=\"departments.ancestors\",\n ),\n url(\n r\"^api/v2/departments/%s/children/$\" % PVAR_DEPARTMENT_ID,\n views.DepartmentViewSet.as_view(\n {\n \"get\": \"get_children\",\n }\n ),\n name=\"departments.children\",\n ),\n url(\n r\"^api/v2/departments/%s/profiles/$\" % PVAR_DEPARTMENT_ID,\n views.DepartmentViewSet.as_view({\"get\": \"get_profiles\", \"post\": \"add_profiles\"}),\n name=\"departments.profiles\",\n ),\n #########\n # Batch #\n #########\n url(\n r\"^api/v2/batch/departments/profiles/$\",\n views.BatchDepartmentsViewSet.as_view(\n {\n \"get\": \"multiple_retrieve_profiles\",\n }\n ),\n name=\"department.batch\",\n ),\n ########\n # Edge #\n ########\n url(\n r\"^api/v2/edges/department_profile/$\",\n views.DepartmentProfileEdgeViewSet.as_view({\"get\": \"list\"}),\n name=\"edge.department_profile\",\n ),\n #############\n # shortcuts #\n #############\n url(\n r\"^api/v2/shortcuts/departments/tops/$\",\n views.DepartmentViewSet.as_view({\"get\": \"list_tops\"}),\n name=\"shortcuts.departments.list.tops\",\n ),\n]\n",
"path": "src/api/bkuser_core/departments/urls.py"
}
] | diff --git a/src/api/bkuser_core/departments/urls.py b/src/api/bkuser_core/departments/urls.py
index ce5aa3007..14c81ca61 100644
--- a/src/api/bkuser_core/departments/urls.py
+++ b/src/api/bkuser_core/departments/urls.py
@@ -13,7 +13,7 @@
from . import views
-PVAR_DEPARTMENT_ID = r"(?P<%s>[\w\-]+)" % LOOKUP_FIELD_NAME
+PVAR_DEPARTMENT_ID = r"(?P<%s>[\w\-\.]+)" % LOOKUP_FIELD_NAME
urlpatterns = [
url(
|
pwr-Solaar__Solaar-730 | better identification of Solaar versions
`git describe` produces
`0.9.2-339-g39791be`
Instead it should produce something based on 1.0.1.
`git describe --tags` produces
`1.0.1-58-g39791be`
which is much better.
I think all that is required is to upgrade the existing 1.0.1 tag (plain `git describe` only looks at annotated tags, while `--tags` also matches lightweight ones, which is why the two commands disagree here).
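Purely as an illustration (this is not how Solaar currently derives `__version__`; the fallback value is taken from the file below), here is a small Python sketch of building a descriptive version string from `git describe --tags`, which would give the `1.0.1-58-g39791be` style output shown above:

```python
import subprocess

def describe_version(fallback: str = "1.0.1") -> str:
    """Return the output of `git describe --tags`, falling back to a static
    version when git metadata is unavailable (e.g. a tarball install)."""
    try:
        out = subprocess.check_output(
            ["git", "describe", "--tags", "--always"],
            stderr=subprocess.DEVNULL,
        )
        return out.decode().strip()
    except (OSError, subprocess.CalledProcessError):
        return fallback

print(describe_version())  # e.g. 1.0.1-58-g39791be on the commit discussed above
```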
| [
{
"content": "# -*- python-mode -*-\n# -*- coding: UTF-8 -*-\n\n## Copyright (C) 2012-2013 Daniel Pavel\n##\n## This program is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 2 of the License, or\n## (at your option) any later version.\n##\n## This program is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n## GNU General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License along\n## with this program; if not, write to the Free Software Foundation, Inc.,\n## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\n__version__ = '1.0.1'\nNAME = 'Solaar'\n",
"path": "lib/solaar/__init__.py"
}
] | [
{
"content": "# -*- python-mode -*-\n# -*- coding: UTF-8 -*-\n\n## Copyright (C) 2012-2013 Daniel Pavel\n##\n## This program is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 2 of the License, or\n## (at your option) any later version.\n##\n## This program is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n## GNU General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License along\n## with this program; if not, write to the Free Software Foundation, Inc.,\n## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\n__version__ = '1.0.2-rc1'\nNAME = 'Solaar'\n",
"path": "lib/solaar/__init__.py"
}
] | diff --git a/docs/_config.yml b/docs/_config.yml
index c40bfa750d..fe499a7775 100644
--- a/docs/_config.yml
+++ b/docs/_config.yml
@@ -4,7 +4,7 @@ tagline: Linux Device manager for the Logitech Unifying Receiver.
owner: pwr-Solaar
owner_url: https://github.com/pwr-Solaar
repository: pwr-Solaar/Solaar
-version: 1.0.1
+version: 1.0.2-rc1
show_downloads: false
encoding: utf-8
theme: jekyll-theme-slate
\ No newline at end of file
diff --git a/lib/solaar/__init__.py b/lib/solaar/__init__.py
index 14131450ea..5b6fa80982 100644
--- a/lib/solaar/__init__.py
+++ b/lib/solaar/__init__.py
@@ -19,5 +19,5 @@
from __future__ import absolute_import, division, print_function, unicode_literals
-__version__ = '1.0.1'
+__version__ = '1.0.2-rc1'
NAME = 'Solaar'
|
apache__airflow-18209 | Upgrade `importlib-resources` version
### Description
The constraint for `importlib-resources` pins it to [v1.5.0](https://github.com/python/importlib_resources/tree/v1.5.0), which is over a year old. For compatibility's sake (for instance with packages such as Datapane) I would suggest upgrading it (see the version-specifier sketch at the end of this report).
### Use case/motivation
Upgrade an old dependency to keep the code up to date.
### Related issues
None that I am aware of; #12120 and #15991 may be somewhat related.
### Are you willing to submit a PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
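As a side note on why the old pin is restrictive: the devel extra in the files below uses the compatible-release specifier `importlib-resources~=1.4`, which excludes every release from 2.x onwards, whereas `~=5.0` admits the current 5.x line. A minimal sketch with the `packaging` library (the candidate version numbers are illustrative):

```python
from packaging.specifiers import SpecifierSet
from packaging.version import Version

old_pin = SpecifierSet("~=1.4")  # compatible release: >=1.4, <2.0
new_pin = SpecifierSet("~=5.0")  # compatible release: >=5.0, <6.0

for candidate in ("1.5.0", "5.2.0"):
    v = Version(candidate)
    print(candidate, v in old_pin, v in new_pin)
# 1.5.0 -> allowed by the old pin, rejected by the new one
# 5.2.0 -> rejected by the old pin, allowed by the new one
```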
| [
{
"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Setup.py for the Airflow project.\"\"\"\nimport glob\nimport logging\nimport os\nimport subprocess\nimport sys\nimport unittest\nfrom copy import deepcopy\nfrom distutils import log\nfrom os.path import dirname, relpath\nfrom textwrap import wrap\nfrom typing import Dict, List\n\nfrom setuptools import Command, Distribution, find_namespace_packages, setup\nfrom setuptools.command.develop import develop as develop_orig\nfrom setuptools.command.install import install as install_orig\n\n# Controls whether providers are installed from packages or directly from sources\n# It is turned on by default in case of development environments such as Breeze\n# And it is particularly useful when you add a new provider and there is no\n# PyPI version to install the provider package from\nINSTALL_PROVIDERS_FROM_SOURCES = 'INSTALL_PROVIDERS_FROM_SOURCES'\nPY39 = sys.version_info >= (3, 9)\n\nlogger = logging.getLogger(__name__)\n\nversion = '2.2.0.dev0'\n\nmy_dir = dirname(__file__)\n\n\ndef airflow_test_suite() -> unittest.TestSuite:\n \"\"\"Test suite for Airflow tests\"\"\"\n test_loader = unittest.TestLoader()\n test_suite = test_loader.discover(os.path.join(my_dir, 'tests'), pattern='test_*.py')\n return test_suite\n\n\nclass CleanCommand(Command):\n \"\"\"\n Command to tidy up the project root.\n Registered as cmdclass in setup() so it can be called with ``python setup.py extra_clean``.\n \"\"\"\n\n description = \"Tidy up the project root\"\n user_options: List[str] = []\n\n def initialize_options(self) -> None:\n \"\"\"Set default values for options.\"\"\"\n\n def finalize_options(self) -> None:\n \"\"\"Set final values for options.\"\"\"\n\n @staticmethod\n def rm_all_files(files: List[str]) -> None:\n \"\"\"Remove all files from the list\"\"\"\n for file in files:\n try:\n os.remove(file)\n except Exception as e:\n logger.warning(\"Error when removing %s: %s\", file, e)\n\n def run(self) -> None:\n \"\"\"Remove temporary files and directories.\"\"\"\n os.chdir(my_dir)\n self.rm_all_files(glob.glob('./build/*'))\n self.rm_all_files(glob.glob('./**/__pycache__/*', recursive=True))\n self.rm_all_files(glob.glob('./**/*.pyc', recursive=True))\n self.rm_all_files(glob.glob('./dist/*'))\n self.rm_all_files(glob.glob('./*.egg-info'))\n self.rm_all_files(glob.glob('./docker-context-files/*.whl'))\n self.rm_all_files(glob.glob('./docker-context-files/*.tgz'))\n\n\nclass CompileAssets(Command):\n \"\"\"\n Compile and build the frontend assets using yarn and webpack.\n Registered as cmdclass in setup() so it can be called with ``python setup.py compile_assets``.\n \"\"\"\n\n description = \"Compile and build the frontend assets\"\n user_options: List[str] = []\n\n def initialize_options(self) -> None:\n 
\"\"\"Set default values for options.\"\"\"\n\n def finalize_options(self) -> None:\n \"\"\"Set final values for options.\"\"\"\n\n def run(self) -> None:\n \"\"\"Run a command to compile and build assets.\"\"\"\n subprocess.check_call('./airflow/www/compile_assets.sh')\n\n\nclass ListExtras(Command):\n \"\"\"\n List all available extras\n Registered as cmdclass in setup() so it can be called with ``python setup.py list_extras``.\n \"\"\"\n\n description = \"List available extras\"\n user_options: List[str] = []\n\n def initialize_options(self) -> None:\n \"\"\"Set default values for options.\"\"\"\n\n def finalize_options(self) -> None:\n \"\"\"Set final values for options.\"\"\"\n\n def run(self) -> None:\n \"\"\"List extras.\"\"\"\n print(\"\\n\".join(wrap(\", \".join(EXTRAS_REQUIREMENTS.keys()), 100)))\n\n\ndef git_version(version_: str) -> str:\n \"\"\"\n Return a version to identify the state of the underlying git repo. The version will\n indicate whether the head of the current git-backed working directory is tied to a\n release tag or not : it will indicate the former with a 'release:{version}' prefix\n and the latter with a '.dev0' suffix. Following the prefix will be a sha of the current\n branch head. Finally, a \"dirty\" suffix is appended to indicate that uncommitted\n changes are present.\n\n :param str version_: Semver version\n :return: Found Airflow version in Git repo\n :rtype: str\n \"\"\"\n try:\n import git\n\n try:\n repo = git.Repo(os.path.join(*[my_dir, '.git']))\n except git.NoSuchPathError:\n logger.warning('.git directory not found: Cannot compute the git version')\n return ''\n except git.InvalidGitRepositoryError:\n logger.warning('Invalid .git directory not found: Cannot compute the git version')\n return ''\n except ImportError:\n logger.warning('gitpython not found: Cannot compute the git version.')\n return ''\n if repo:\n sha = repo.head.commit.hexsha\n if repo.is_dirty():\n return f'.dev0+{sha}.dirty'\n # commit is clean\n return f'.release:{version_}+{sha}'\n return 'no_git_version'\n\n\ndef write_version(filename: str = os.path.join(*[my_dir, \"airflow\", \"git_version\"])) -> None:\n \"\"\"\n Write the Semver version + git hash to file, e.g. 
\".dev0+2f635dc265e78db6708f59f68e8009abb92c1e65\".\n\n :param str filename: Destination file to write\n \"\"\"\n text = f\"{git_version(version)}\"\n with open(filename, 'w') as file:\n file.write(text)\n\n\n# 'Start dependencies group' and 'Start dependencies group' are mark for ./scripts/ci/check_order_setup.py\n# If you change this mark you should also change ./scripts/ci/check_order_setup.py\n# Start dependencies group\nalibaba = [\n 'oss2>=2.14.0',\n]\namazon = [\n 'boto3>=1.15.0,<1.18.0',\n 'watchtower~=1.0.6',\n 'jsonpath_ng>=1.5.3',\n]\napache_beam = [\n 'apache-beam>=2.20.0',\n]\nasana = ['asana>=0.10']\nasync_packages = [\n 'eventlet>= 0.9.7',\n 'gevent>=0.13',\n 'greenlet>=0.4.9',\n]\natlas = [\n 'atlasclient>=0.1.2',\n]\nazure = [\n 'azure-batch>=8.0.0',\n 'azure-cosmos>=3.0.1,<4',\n 'azure-datalake-store>=0.0.45',\n 'azure-identity>=1.3.1',\n 'azure-keyvault>=4.1.0',\n 'azure-kusto-data>=0.0.43,<0.1',\n 'azure-mgmt-containerinstance>=1.5.0,<2.0',\n 'azure-mgmt-datafactory>=1.0.0,<2.0',\n 'azure-mgmt-datalake-store>=0.5.0',\n 'azure-mgmt-resource>=2.2.0',\n 'azure-storage-blob>=12.7.0',\n 'azure-storage-common>=2.1.0',\n 'azure-storage-file>=2.1.0',\n]\ncassandra = [\n 'cassandra-driver>=3.13.0,<4',\n]\ncelery = [\n 'celery~=5.1,>=5.1.2',\n 'flower~=1.0.0',\n]\ncgroups = [\n 'cgroupspy>=0.1.4',\n]\ncloudant = [\n 'cloudant>=2.0',\n]\ndask = [\n 'cloudpickle>=1.4.1, <1.5.0',\n 'dask<2021.3.1;python_version<\"3.7\"', # dask stopped supporting python 3.6 in 2021.3.1 version\n 'dask>=2.9.0, <2021.6.1;python_version>=\"3.7\"', # dask 2021.6.1 does not work with `distributed`\n 'distributed>=2.11.1, <2.20',\n]\ndatabricks = [\n 'requests>=2.26.0, <3',\n]\ndatadog = [\n 'datadog>=0.14.0',\n]\ndeprecated_api = [\n 'requests>=2.26.0',\n]\ndoc = [\n 'click>=7.1,<9',\n # Sphinx is limited to < 3.5.0 because of https://github.com/sphinx-doc/sphinx/issues/8880\n 'sphinx>=2.1.2, <3.5.0',\n 'sphinx-airflow-theme',\n 'sphinx-argparse>=0.1.13',\n 'sphinx-autoapi==1.0.0',\n 'sphinx-copybutton',\n 'sphinx-jinja~=1.1',\n 'sphinx-rtd-theme>=0.1.6',\n 'sphinxcontrib-httpdomain>=1.7.0',\n 'sphinxcontrib-redoc>=1.6.0',\n 'sphinxcontrib-spelling==7.2.1',\n]\ndocker = [\n 'docker',\n]\ndrill = ['sqlalchemy-drill>=1.1.0', 'sqlparse>=0.4.1']\ndruid = [\n 'pydruid>=0.4.1',\n]\nelasticsearch = [\n 'elasticsearch>7',\n 'elasticsearch-dbapi',\n 'elasticsearch-dsl>=5.0.0',\n]\nexasol = [\n 'pyexasol>=0.5.1,<1.0.0',\n]\nfacebook = [\n 'facebook-business>=6.0.2',\n]\nflask_appbuilder_authlib = [\n 'authlib',\n]\ngoogle = [\n 'PyOpenSSL',\n 'google-ads>=12.0.0',\n # Maintainers, please do not require google-api-core>=2.x.x\n # Until this issue is closed\n # https://github.com/googleapis/google-cloud-python/issues/10566\n 'google-api-core>=1.25.1,<3.0.0',\n 'google-api-python-client>=1.6.0,<2.0.0',\n # Maintainers, please do not require google-auth>=2.x.x\n # Until this issue is closed\n # https://github.com/googleapis/google-cloud-python/issues/10566\n 'google-auth>=1.0.0,<3.0.0',\n 'google-auth-httplib2>=0.0.1',\n 'google-cloud-automl>=2.1.0,<3.0.0',\n 'google-cloud-bigquery-datatransfer>=3.0.0,<4.0.0',\n 'google-cloud-bigtable>=1.0.0,<2.0.0',\n 'google-cloud-container>=0.1.1,<2.0.0',\n 'google-cloud-datacatalog>=3.0.0,<4.0.0',\n 'google-cloud-dataproc>=2.2.0,<3.0.0',\n 'google-cloud-dlp>=0.11.0,<2.0.0',\n 'google-cloud-kms>=2.0.0,<3.0.0',\n 'google-cloud-language>=1.1.1,<2.0.0',\n 'google-cloud-logging>=2.1.1,<3.0.0',\n # 1.1.0 removed field_mask and broke import for released providers\n # We can remove the 
<1.1.0 limitation after we release new Google Provider\n 'google-cloud-memcache>=0.2.0,<1.1.0',\n 'google-cloud-monitoring>=2.0.0,<3.0.0',\n 'google-cloud-os-login>=2.0.0,<3.0.0',\n 'google-cloud-pubsub>=2.0.0,<3.0.0',\n 'google-cloud-redis>=2.0.0,<3.0.0',\n 'google-cloud-secret-manager>=0.2.0,<2.0.0',\n 'google-cloud-spanner>=1.10.0,<2.0.0',\n 'google-cloud-speech>=0.36.3,<2.0.0',\n 'google-cloud-storage>=1.30,<2.0.0',\n 'google-cloud-tasks>=2.0.0,<3.0.0',\n 'google-cloud-texttospeech>=0.4.0,<2.0.0',\n 'google-cloud-translate>=1.5.0,<2.0.0',\n 'google-cloud-videointelligence>=1.7.0,<2.0.0',\n 'google-cloud-vision>=0.35.2,<2.0.0',\n 'google-cloud-workflows>=0.1.0,<2.0.0',\n 'grpcio-gcp>=0.2.2',\n 'httpx',\n 'json-merge-patch~=0.2',\n # pandas-gbq 0.15.0 release broke google provider's bigquery import\n # _check_google_client_version (airflow/providers/google/cloud/hooks/bigquery.py:49)\n 'pandas-gbq<0.15.0',\n]\ngrpc = [\n 'google-auth>=1.0.0, <3.0.0',\n 'google-auth-httplib2>=0.0.1',\n 'grpcio>=1.15.0',\n]\nhashicorp = [\n 'hvac~=0.10',\n]\nhdfs = [\n 'snakebite-py3',\n]\nhive = [\n 'hmsclient>=0.1.0',\n 'pyhive[hive]>=0.6.0;python_version<\"3.9\"',\n 'thrift>=0.9.2',\n]\nhttp = [\n # The 2.26.0 release of requests got rid of the chardet LGPL mandatory dependency, allowing us to\n # release it as a requirement for airflow\n 'requests>=2.26.0',\n]\nhttp_provider = [\n 'apache-airflow-providers-http',\n]\njdbc = [\n 'jaydebeapi>=1.1.1',\n]\njenkins = [\n 'python-jenkins>=1.0.0',\n]\njira = [\n 'JIRA>1.0.7',\n]\nkerberos = [\n 'pykerberos>=1.1.13',\n 'requests_kerberos>=0.10.0',\n 'thrift_sasl>=0.2.0',\n]\nkubernetes = [\n 'cryptography>=2.0.0',\n 'kubernetes>=3.0.0, <12.0.0',\n]\nkylin = ['kylinpy>=2.6']\nldap = [\n 'ldap3>=2.5.1',\n 'python-ldap',\n]\nleveldb = ['plyvel']\nmongo = [\n 'dnspython>=1.13.0,<3.0.0',\n 'pymongo>=3.6.0',\n]\nmssql = [\n 'pymssql~=2.1,>=2.1.5',\n]\nmysql = [\n 'mysql-connector-python>=8.0.11, <9',\n 'mysqlclient>=1.3.6,<3',\n]\nneo4j = ['neo4j>=4.2.1']\nodbc = [\n 'pyodbc',\n]\noracle = [\n 'cx_Oracle>=5.1.2',\n]\npagerduty = [\n 'pdpyras>=4.1.2,<5',\n]\npandas = [\n 'pandas>=0.17.1, <2.0',\n]\npapermill = [\n 'papermill[all]>=1.2.1',\n 'scrapbook[all]',\n]\npassword = [\n 'bcrypt>=2.0.0',\n 'flask-bcrypt>=0.7.1',\n]\npinot = [\n # pinotdb v0.1.1 may still work with older versions of Apache Pinot, but we've confirmed that it\n # causes a problem with newer versions.\n 'pinotdb>0.1.2,<1.0.0',\n]\nplexus = [\n 'arrow>=0.16.0',\n]\npostgres = [\n 'psycopg2-binary>=2.7.4',\n]\npresto = ['presto-python-client>=0.7.0,<0.8']\npsrp = [\n 'pypsrp~=0.5',\n]\nqubole = [\n 'qds-sdk>=1.10.4',\n]\nrabbitmq = [\n 'amqp',\n]\nredis = [\n 'redis~=3.2',\n]\nsalesforce = [\n 'simple-salesforce>=1.0.0',\n 'tableauserverclient',\n]\nsamba = [\n 'smbprotocol>=1.5.0',\n]\nsegment = [\n 'analytics-python>=1.2.9',\n]\nsendgrid = [\n 'sendgrid>=6.0.0,<7',\n]\nsentry = [\n 'blinker>=1.1',\n 'sentry-sdk>=0.8.0',\n]\nsingularity = ['spython>=0.0.56']\nslack = [\n 'slack_sdk>=3.0.0,<4.0.0',\n]\nsnowflake = [\n 'snowflake-connector-python>=2.4.1',\n 'snowflake-sqlalchemy>=1.1.0',\n]\nspark = [\n 'pyspark',\n]\nssh = [\n 'paramiko>=2.6.0',\n 'pysftp>=0.2.9',\n 'sshtunnel>=0.1.4,<0.2',\n]\nstatsd = [\n 'statsd>=3.3.0, <4.0',\n]\ntableau = [\n 'tableauserverclient',\n]\ntelegram = [\n 'python-telegram-bot~=13.0',\n]\ntrino = ['trino']\nvertica = [\n 'vertica-python>=0.5.1',\n]\nvirtualenv = [\n 'virtualenv',\n]\nwebhdfs = [\n 'hdfs[avro,dataframe,kerberos]>=2.0.4',\n]\nwinrm = [\n 
'pywinrm~=0.4',\n]\nyandex = [\n 'yandexcloud>=0.97.0',\n]\nzendesk = [\n 'zdesk',\n]\n# End dependencies group\n\ndevel = [\n 'aws_xray_sdk',\n 'beautifulsoup4~=4.7.1',\n 'black',\n 'blinker',\n 'bowler',\n 'click>=7.1,<9',\n 'coverage',\n 'filelock',\n 'flake8>=3.6.0',\n 'flake8-colors',\n 'flaky',\n 'freezegun',\n 'github3.py',\n 'gitpython',\n 'importlib-resources~=1.4',\n 'ipdb',\n 'jira',\n 'jsondiff',\n 'mongomock',\n 'moto~=2.2, >=2.2.1.dev9',\n 'mypy==0.770',\n 'parameterized',\n 'paramiko',\n 'pipdeptree',\n 'pre-commit',\n 'pypsrp',\n 'pygithub',\n 'pysftp',\n 'pytest~=6.0',\n 'pytest-asyncio',\n 'pytest-cov',\n 'pytest-instafail',\n 'pytest-rerunfailures~=9.1',\n 'pytest-timeouts',\n 'pytest-xdist',\n 'python-jose',\n 'pywinrm',\n 'qds-sdk>=1.9.6',\n 'pytest-httpx',\n 'requests_mock',\n 'wheel',\n 'yamllint',\n]\n\ndevel_minreq = cgroups + devel + doc + kubernetes + mysql + pandas + password\ndevel_hadoop = devel_minreq + hdfs + hive + kerberos + presto + webhdfs\n\n# Dict of all providers which are part of the Apache Airflow repository together with their requirements\nPROVIDERS_REQUIREMENTS: Dict[str, List[str]] = {\n 'airbyte': http_provider,\n 'alibaba': alibaba,\n 'amazon': amazon,\n 'apache.beam': apache_beam,\n 'apache.cassandra': cassandra,\n 'apache.drill': drill,\n 'apache.druid': druid,\n 'apache.hdfs': hdfs,\n 'apache.hive': hive,\n 'apache.kylin': kylin,\n 'apache.livy': http_provider,\n 'apache.pig': [],\n 'apache.pinot': pinot,\n 'apache.spark': spark,\n 'apache.sqoop': [],\n 'asana': asana,\n 'celery': celery,\n 'cloudant': cloudant,\n 'cncf.kubernetes': kubernetes,\n 'databricks': databricks,\n 'datadog': datadog,\n 'dingding': [],\n 'discord': [],\n 'docker': docker,\n 'elasticsearch': elasticsearch,\n 'exasol': exasol,\n 'facebook': facebook,\n 'ftp': [],\n 'google': google,\n 'grpc': grpc,\n 'hashicorp': hashicorp,\n 'http': http,\n 'imap': [],\n 'jdbc': jdbc,\n 'jenkins': jenkins,\n 'jira': jira,\n 'microsoft.azure': azure,\n 'microsoft.mssql': mssql,\n 'microsoft.psrp': psrp,\n 'microsoft.winrm': winrm,\n 'mongo': mongo,\n 'mysql': mysql,\n 'neo4j': neo4j,\n 'odbc': odbc,\n 'openfaas': [],\n 'opsgenie': http_provider,\n 'oracle': oracle,\n 'pagerduty': pagerduty,\n 'papermill': papermill,\n 'plexus': plexus,\n 'postgres': postgres,\n 'presto': presto,\n 'qubole': qubole,\n 'redis': redis,\n 'salesforce': salesforce,\n 'samba': samba,\n 'segment': segment,\n 'sendgrid': sendgrid,\n 'sftp': ssh,\n 'singularity': singularity,\n 'slack': slack,\n 'snowflake': snowflake,\n 'sqlite': [],\n 'ssh': ssh,\n 'tableau': tableau,\n 'telegram': telegram,\n 'trino': trino,\n 'vertica': vertica,\n 'yandex': yandex,\n 'zendesk': zendesk,\n}\n\n# Those are all additional extras which do not have their own 'providers'\n# The 'apache.atlas' and 'apache.webhdfs' are extras that provide additional libraries\n# but they do not have separate providers (yet?), they are merely there to add extra libraries\n# That can be used in custom python/bash operators.\nADDITIONAL_EXTRAS_REQUIREMENTS: Dict[str, List[str]] = {\n 'apache.atlas': atlas,\n 'apache.webhdfs': webhdfs,\n}\n\n\n# Those are extras that are extensions of the 'core' Airflow. They provide additional features\n# To airflow core. 
They do not have separate providers because they do not have any operators/hooks etc.\nCORE_EXTRAS_REQUIREMENTS: Dict[str, List[str]] = {\n 'async': async_packages,\n 'celery': celery, # also has provider, but it extends the core with the Celery executor\n 'cgroups': cgroups,\n 'cncf.kubernetes': kubernetes, # also has provider, but it extends the core with the KubernetesExecutor\n 'dask': dask,\n 'deprecated_api': deprecated_api,\n 'github_enterprise': flask_appbuilder_authlib,\n 'google_auth': flask_appbuilder_authlib,\n 'kerberos': kerberos,\n 'ldap': ldap,\n 'leveldb': leveldb,\n 'pandas': pandas,\n 'password': password,\n 'rabbitmq': rabbitmq,\n 'sentry': sentry,\n 'statsd': statsd,\n 'virtualenv': virtualenv,\n}\n\nEXTRAS_REQUIREMENTS: Dict[str, List[str]] = deepcopy(CORE_EXTRAS_REQUIREMENTS)\n\n\ndef add_extras_for_all_providers() -> None:\n \"\"\"\n Adds extras for all providers.\n By default all providers have the same extra name as provider id, for example\n 'apache.hive' extra has 'apache.hive' provider requirement.\n \"\"\"\n for provider_name, provider_requirement in PROVIDERS_REQUIREMENTS.items():\n EXTRAS_REQUIREMENTS[provider_name] = provider_requirement\n\n\ndef add_additional_extras() -> None:\n \"\"\"Adds extras for all additional extras.\"\"\"\n for extra_name, extra_requirement in ADDITIONAL_EXTRAS_REQUIREMENTS.items():\n EXTRAS_REQUIREMENTS[extra_name] = extra_requirement\n\n\nadd_extras_for_all_providers()\nadd_additional_extras()\n\n#############################################################################################################\n# The whole section can be removed in Airflow 3.0 as those old aliases are deprecated in 2.* series\n#############################################################################################################\n\n# Dictionary of aliases from 1.10 - deprecated in Airflow 2.*\nEXTRAS_DEPRECATED_ALIASES: Dict[str, str] = {\n 'atlas': 'apache.atlas',\n 'aws': 'amazon',\n 'azure': 'microsoft.azure',\n 'cassandra': 'apache.cassandra',\n 'crypto': '', # All crypto requirements are installation requirements of core Airflow\n 'druid': 'apache.druid',\n 'gcp': 'google',\n 'gcp_api': 'google',\n 'hdfs': 'apache.hdfs',\n 'hive': 'apache.hive',\n 'kubernetes': 'cncf.kubernetes',\n 'mssql': 'microsoft.mssql',\n 'pinot': 'apache.pinot',\n 'qds': 'qubole',\n 's3': 'amazon',\n 'spark': 'apache.spark',\n 'webhdfs': 'apache.webhdfs',\n 'winrm': 'microsoft.winrm',\n}\n\nEXTRAS_DEPRECATED_ALIASES_NOT_PROVIDERS: List[str] = [\n \"crypto\",\n \"webhdfs\",\n]\n\n\ndef add_extras_for_all_deprecated_aliases() -> None:\n \"\"\"\n Add extras for all deprecated aliases. Requirements for those deprecated aliases are the same\n as the extras they are replaced with.\n The requirements are not copies - those are the same lists as for the new extras. This is intended.\n Thanks to that if the original extras are later extended with providers, aliases are extended as well.\n \"\"\"\n for alias, extra in EXTRAS_DEPRECATED_ALIASES.items():\n requirements = EXTRAS_REQUIREMENTS.get(extra) if extra != '' else []\n if requirements is None:\n raise Exception(f\"The extra {extra} is missing for deprecated alias {alias}\")\n EXTRAS_REQUIREMENTS[alias] = requirements\n\n\ndef add_all_deprecated_provider_packages() -> None:\n \"\"\"\n For deprecated aliases that are providers, we will swap the providers requirements to instead\n be the provider itself.\n\n e.g. 
{\"kubernetes\": [\"kubernetes>=3.0.0, <12.0.0\", ...]} becomes\n {\"kubernetes\": [\"apache-airflow-provider-cncf-kubernetes\"]}\n \"\"\"\n for alias, provider in EXTRAS_DEPRECATED_ALIASES.items():\n if alias in EXTRAS_DEPRECATED_ALIASES_NOT_PROVIDERS:\n continue\n replace_extra_requirement_with_provider_packages(alias, [provider])\n\n\nadd_extras_for_all_deprecated_aliases()\n\n#############################################################################################################\n# End of deprecated section\n#############################################################################################################\n\n# This is list of all providers. It's a shortcut for anyone who would like to easily get list of\n# All providers. It is used by pre-commits.\nALL_PROVIDERS = list(PROVIDERS_REQUIREMENTS.keys())\n\nALL_DB_PROVIDERS = [\n 'apache.cassandra',\n 'apache.drill',\n 'apache.druid',\n 'apache.hdfs',\n 'apache.hive',\n 'apache.pinot',\n 'cloudant',\n 'exasol',\n 'microsoft.mssql',\n 'mongo',\n 'mysql',\n 'neo4j',\n 'postgres',\n 'presto',\n 'trino',\n 'vertica',\n]\n\n# Special requirements for all database-related providers. They are de-duplicated.\nall_dbs = list({req for db_provider in ALL_DB_PROVIDERS for req in PROVIDERS_REQUIREMENTS[db_provider]})\n\n# Requirements for all \"user\" extras (no devel). They are de-duplicated. Note that we do not need\n# to separately add providers requirements - they have been already added as 'providers' extras above\n_all_requirements = list({req for extras_reqs in EXTRAS_REQUIREMENTS.values() for req in extras_reqs})\n\n# All user extras here\nEXTRAS_REQUIREMENTS[\"all\"] = _all_requirements\n\n# All db user extras here\nEXTRAS_REQUIREMENTS[\"all_dbs\"] = all_dbs + pandas\n\n# This can be simplified to devel_hadoop + _all_requirements due to inclusions\n# but we keep it for explicit sake. 
We are de-duplicating it anyway.\ndevel_all = list(set(_all_requirements + doc + devel_minreq + devel_hadoop))\n\n# Those are packages excluded for \"all\" dependencies\nPACKAGES_EXCLUDED_FOR_ALL = []\nPACKAGES_EXCLUDED_FOR_ALL.extend(\n [\n 'snakebite',\n ]\n)\n\n\ndef is_package_excluded(package: str, exclusion_list: List[str]) -> bool:\n \"\"\"\n Checks if package should be excluded.\n\n :param package: package name (beginning of it)\n :param exclusion_list: list of excluded packages\n :return: true if package should be excluded\n \"\"\"\n return any(package.startswith(excluded_package) for excluded_package in exclusion_list)\n\n\ndevel_all = [\n package\n for package in devel_all\n if not is_package_excluded(package=package, exclusion_list=PACKAGES_EXCLUDED_FOR_ALL)\n]\n\ndevel_ci = devel_all\n\n\n# Those are extras that we have to add for development purposes\n# They can be use to install some predefined set of dependencies.\nEXTRAS_REQUIREMENTS[\"doc\"] = doc\nEXTRAS_REQUIREMENTS[\"devel\"] = devel_minreq # devel_minreq already includes doc\nEXTRAS_REQUIREMENTS[\"devel_hadoop\"] = devel_hadoop # devel_hadoop already includes devel_minreq\nEXTRAS_REQUIREMENTS[\"devel_all\"] = devel_all\nEXTRAS_REQUIREMENTS[\"devel_ci\"] = devel_ci\n\n\ndef sort_extras_requirements() -> Dict[str, List[str]]:\n \"\"\"\n For Python 3.6+ the dictionary order remains when keys() are retrieved.\n Sort both: extras and list of dependencies to make it easier to analyse problems\n external packages will be first, then if providers are added they are added at the end of the lists.\n \"\"\"\n sorted_requirements = dict(sorted(EXTRAS_REQUIREMENTS.items()))\n for extra_list in sorted_requirements.values():\n extra_list.sort()\n return sorted_requirements\n\n\nEXTRAS_REQUIREMENTS = sort_extras_requirements()\n\n# Those providers are pre-installed always when airflow is installed.\n# Those providers do not have dependency on airflow2.0 because that would lead to circular dependencies.\n# This is not a problem for PIP but some tools (pipdeptree) show those as a warning.\nPREINSTALLED_PROVIDERS = [\n 'ftp',\n 'http',\n 'imap',\n 'sqlite',\n]\n\n\ndef get_provider_package_from_package_id(package_id: str) -> str:\n \"\"\"\n Builds the name of provider package out of the package id provided/\n\n :param package_id: id of the package (like amazon or microsoft.azure)\n :return: full name of package in PyPI\n \"\"\"\n package_suffix = package_id.replace(\".\", \"-\")\n return f\"apache-airflow-providers-{package_suffix}\"\n\n\ndef get_excluded_providers() -> List[str]:\n \"\"\"\n Returns packages excluded for the current python version.\n Currently the only excluded provider is apache hive for Python 3.9.\n Until https://github.com/dropbox/PyHive/issues/380 is fixed.\n \"\"\"\n return ['apache.hive'] if PY39 else []\n\n\ndef get_all_provider_packages() -> str:\n \"\"\"Returns all provider packages configured in setup.py\"\"\"\n excluded_providers = get_excluded_providers()\n return \" \".join(\n get_provider_package_from_package_id(package)\n for package in PROVIDERS_REQUIREMENTS\n if package not in excluded_providers\n )\n\n\nclass AirflowDistribution(Distribution):\n \"\"\"The setuptools.Distribution subclass with Airflow specific behaviour\"\"\"\n\n def parse_config_files(self, *args, **kwargs) -> None:\n \"\"\"\n Ensure that when we have been asked to install providers from sources\n that we don't *also* try to install those providers from PyPI.\n Also we should make sure that in this case we copy provider.yaml files so 
that\n Providers manager can find package information.\n \"\"\"\n super().parse_config_files(*args, **kwargs)\n if os.getenv(INSTALL_PROVIDERS_FROM_SOURCES) == 'true':\n self.install_requires = [\n req for req in self.install_requires if not req.startswith('apache-airflow-providers-')\n ]\n provider_yaml_files = glob.glob(\"airflow/providers/**/provider.yaml\", recursive=True)\n for provider_yaml_file in provider_yaml_files:\n provider_relative_path = relpath(provider_yaml_file, os.path.join(my_dir, \"airflow\"))\n self.package_data['airflow'].append(provider_relative_path)\n else:\n self.install_requires.extend(\n [get_provider_package_from_package_id(package_id) for package_id in PREINSTALLED_PROVIDERS]\n )\n\n\ndef replace_extra_requirement_with_provider_packages(extra: str, providers: List[str]) -> None:\n \"\"\"\n Replaces extra requirement with provider package. The intention here is that when\n the provider is added as dependency of extra, there is no need to add the dependencies\n separately. This is not needed and even harmful, because in case of future versions of\n the provider, the requirements might change, so hard-coding requirements from the version\n that was available at the release time might cause dependency conflicts in the future.\n\n Say for example that you have salesforce provider with those deps:\n\n { 'salesforce': ['simple-salesforce>=1.0.0', 'tableauserverclient'] }\n\n Initially ['salesforce'] extra has those requirements and it works like that when you install\n it when INSTALL_PROVIDERS_FROM_SOURCES is set to `true` (during the development). However, when\n the production installation is used, The dependencies are changed:\n\n { 'salesforce': ['apache-airflow-providers-salesforce'] }\n\n And then, 'apache-airflow-providers-salesforce' package has those 'install_requires' dependencies:\n ['simple-salesforce>=1.0.0', 'tableauserverclient']\n\n So transitively 'salesforce' extra has all the requirements it needs and in case the provider\n changes it's dependencies, they will transitively change as well.\n\n In the constraint mechanism we save both - provider versions and it's dependencies\n version, which means that installation using constraints is repeatable.\n\n :param extra: Name of the extra to add providers to\n :param providers: list of provider ids\n \"\"\"\n EXTRAS_REQUIREMENTS[extra] = [\n get_provider_package_from_package_id(package_name) for package_name in providers\n ]\n\n\ndef add_provider_packages_to_extra_requirements(extra: str, providers: List[str]) -> None:\n \"\"\"\n Adds provider packages as requirements to extra. This is used to add provider packages as requirements\n to the \"bulk\" kind of extras. Those bulk extras do not have the detailed 'extra' requirements as\n initial values, so instead of replacing them (see previous function) we can extend them.\n\n :param extra: Name of the extra to add providers to\n :param providers: list of provider ids\n \"\"\"\n EXTRAS_REQUIREMENTS[extra].extend(\n [get_provider_package_from_package_id(package_name) for package_name in providers]\n )\n\n\ndef add_all_provider_packages() -> None:\n \"\"\"\n In case of regular installation (providers installed from packages), we should add extra dependencies to\n Airflow - to get the providers automatically installed when those extras are installed.\n\n For providers installed from sources we skip that step. 
That helps to test and install airflow with\n all packages in CI - for example when new providers are added, otherwise the installation would fail\n as the new provider is not yet in PyPI.\n\n \"\"\"\n for provider in ALL_PROVIDERS:\n replace_extra_requirement_with_provider_packages(provider, [provider])\n add_provider_packages_to_extra_requirements(\"all\", ALL_PROVIDERS)\n add_provider_packages_to_extra_requirements(\"devel_ci\", ALL_PROVIDERS)\n add_provider_packages_to_extra_requirements(\"devel_all\", ALL_PROVIDERS)\n add_provider_packages_to_extra_requirements(\"all_dbs\", ALL_DB_PROVIDERS)\n add_provider_packages_to_extra_requirements(\n \"devel_hadoop\", [\"apache.hdfs\", \"apache.hive\", \"presto\", \"trino\"]\n )\n add_all_deprecated_provider_packages()\n\n\nclass Develop(develop_orig):\n \"\"\"Forces removal of providers in editable mode.\"\"\"\n\n def run(self) -> None:\n self.announce('Installing in editable mode. Uninstalling provider packages!', level=log.INFO)\n # We need to run \"python3 -m pip\" because it might be that older PIP binary is in the path\n # And it results with an error when running pip directly (cannot import pip module)\n # also PIP does not have a stable API so we have to run subprocesses ¯\\_(ツ)_/¯\n try:\n installed_packages = (\n subprocess.check_output([\"python3\", \"-m\", \"pip\", \"freeze\"]).decode().splitlines()\n )\n airflow_provider_packages = [\n package_line.split(\"=\")[0]\n for package_line in installed_packages\n if package_line.startswith(\"apache-airflow-providers\")\n ]\n self.announce(f'Uninstalling ${airflow_provider_packages}!', level=log.INFO)\n subprocess.check_call([\"python3\", \"-m\", \"pip\", \"uninstall\", \"--yes\", *airflow_provider_packages])\n except subprocess.CalledProcessError as e:\n self.announce(f'Error when uninstalling airflow provider packages: {e}!', level=log.WARN)\n super().run()\n\n\nclass Install(install_orig):\n \"\"\"Forces installation of providers from sources in editable mode.\"\"\"\n\n def run(self) -> None:\n self.announce('Standard installation. Providers are installed from packages', level=log.INFO)\n super().run()\n\n\ndef do_setup() -> None:\n \"\"\"\n Perform the Airflow package setup.\n\n Most values come from setup.cfg, only the dynamically calculated ones are passed to setup\n function call. See https://setuptools.readthedocs.io/en/latest/userguide/declarative_config.html\n \"\"\"\n setup_kwargs = {}\n\n def include_provider_namespace_packages_when_installing_from_sources() -> None:\n \"\"\"\n When installing providers from sources we install all namespace packages found below airflow,\n including airflow and provider packages, otherwise defaults from setup.cfg control this.\n The kwargs in setup() call override those that are specified in setup.cfg.\n \"\"\"\n if os.getenv(INSTALL_PROVIDERS_FROM_SOURCES) == 'true':\n setup_kwargs['packages'] = find_namespace_packages(include=['airflow*'])\n\n include_provider_namespace_packages_when_installing_from_sources()\n if os.getenv(INSTALL_PROVIDERS_FROM_SOURCES) == 'true':\n print(\"Installing providers from sources. 
Skip adding providers as dependencies\")\n else:\n add_all_provider_packages()\n\n write_version()\n setup(\n distclass=AirflowDistribution,\n version=version,\n extras_require=EXTRAS_REQUIREMENTS,\n download_url=('https://archive.apache.org/dist/airflow/' + version),\n cmdclass={\n 'extra_clean': CleanCommand,\n 'compile_assets': CompileAssets,\n 'list_extras': ListExtras,\n 'install': Install,\n 'develop': Develop,\n },\n test_suite='setup.airflow_test_suite',\n **setup_kwargs,\n )\n\n\nif __name__ == \"__main__\":\n do_setup()\n",
"path": "setup.py"
}
] | [
{
"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Setup.py for the Airflow project.\"\"\"\nimport glob\nimport logging\nimport os\nimport subprocess\nimport sys\nimport unittest\nfrom copy import deepcopy\nfrom distutils import log\nfrom os.path import dirname, relpath\nfrom textwrap import wrap\nfrom typing import Dict, List\n\nfrom setuptools import Command, Distribution, find_namespace_packages, setup\nfrom setuptools.command.develop import develop as develop_orig\nfrom setuptools.command.install import install as install_orig\n\n# Controls whether providers are installed from packages or directly from sources\n# It is turned on by default in case of development environments such as Breeze\n# And it is particularly useful when you add a new provider and there is no\n# PyPI version to install the provider package from\nINSTALL_PROVIDERS_FROM_SOURCES = 'INSTALL_PROVIDERS_FROM_SOURCES'\nPY39 = sys.version_info >= (3, 9)\n\nlogger = logging.getLogger(__name__)\n\nversion = '2.2.0.dev0'\n\nmy_dir = dirname(__file__)\n\n\ndef airflow_test_suite() -> unittest.TestSuite:\n \"\"\"Test suite for Airflow tests\"\"\"\n test_loader = unittest.TestLoader()\n test_suite = test_loader.discover(os.path.join(my_dir, 'tests'), pattern='test_*.py')\n return test_suite\n\n\nclass CleanCommand(Command):\n \"\"\"\n Command to tidy up the project root.\n Registered as cmdclass in setup() so it can be called with ``python setup.py extra_clean``.\n \"\"\"\n\n description = \"Tidy up the project root\"\n user_options: List[str] = []\n\n def initialize_options(self) -> None:\n \"\"\"Set default values for options.\"\"\"\n\n def finalize_options(self) -> None:\n \"\"\"Set final values for options.\"\"\"\n\n @staticmethod\n def rm_all_files(files: List[str]) -> None:\n \"\"\"Remove all files from the list\"\"\"\n for file in files:\n try:\n os.remove(file)\n except Exception as e:\n logger.warning(\"Error when removing %s: %s\", file, e)\n\n def run(self) -> None:\n \"\"\"Remove temporary files and directories.\"\"\"\n os.chdir(my_dir)\n self.rm_all_files(glob.glob('./build/*'))\n self.rm_all_files(glob.glob('./**/__pycache__/*', recursive=True))\n self.rm_all_files(glob.glob('./**/*.pyc', recursive=True))\n self.rm_all_files(glob.glob('./dist/*'))\n self.rm_all_files(glob.glob('./*.egg-info'))\n self.rm_all_files(glob.glob('./docker-context-files/*.whl'))\n self.rm_all_files(glob.glob('./docker-context-files/*.tgz'))\n\n\nclass CompileAssets(Command):\n \"\"\"\n Compile and build the frontend assets using yarn and webpack.\n Registered as cmdclass in setup() so it can be called with ``python setup.py compile_assets``.\n \"\"\"\n\n description = \"Compile and build the frontend assets\"\n user_options: List[str] = []\n\n def initialize_options(self) -> None:\n 
\"\"\"Set default values for options.\"\"\"\n\n def finalize_options(self) -> None:\n \"\"\"Set final values for options.\"\"\"\n\n def run(self) -> None:\n \"\"\"Run a command to compile and build assets.\"\"\"\n subprocess.check_call('./airflow/www/compile_assets.sh')\n\n\nclass ListExtras(Command):\n \"\"\"\n List all available extras\n Registered as cmdclass in setup() so it can be called with ``python setup.py list_extras``.\n \"\"\"\n\n description = \"List available extras\"\n user_options: List[str] = []\n\n def initialize_options(self) -> None:\n \"\"\"Set default values for options.\"\"\"\n\n def finalize_options(self) -> None:\n \"\"\"Set final values for options.\"\"\"\n\n def run(self) -> None:\n \"\"\"List extras.\"\"\"\n print(\"\\n\".join(wrap(\", \".join(EXTRAS_REQUIREMENTS.keys()), 100)))\n\n\ndef git_version(version_: str) -> str:\n \"\"\"\n Return a version to identify the state of the underlying git repo. The version will\n indicate whether the head of the current git-backed working directory is tied to a\n release tag or not : it will indicate the former with a 'release:{version}' prefix\n and the latter with a '.dev0' suffix. Following the prefix will be a sha of the current\n branch head. Finally, a \"dirty\" suffix is appended to indicate that uncommitted\n changes are present.\n\n :param str version_: Semver version\n :return: Found Airflow version in Git repo\n :rtype: str\n \"\"\"\n try:\n import git\n\n try:\n repo = git.Repo(os.path.join(*[my_dir, '.git']))\n except git.NoSuchPathError:\n logger.warning('.git directory not found: Cannot compute the git version')\n return ''\n except git.InvalidGitRepositoryError:\n logger.warning('Invalid .git directory not found: Cannot compute the git version')\n return ''\n except ImportError:\n logger.warning('gitpython not found: Cannot compute the git version.')\n return ''\n if repo:\n sha = repo.head.commit.hexsha\n if repo.is_dirty():\n return f'.dev0+{sha}.dirty'\n # commit is clean\n return f'.release:{version_}+{sha}'\n return 'no_git_version'\n\n\ndef write_version(filename: str = os.path.join(*[my_dir, \"airflow\", \"git_version\"])) -> None:\n \"\"\"\n Write the Semver version + git hash to file, e.g. \".dev0+2f635dc265e78db6708f59f68e8009abb92c1e65\".\n\n :param str filename: Destination file to write\n \"\"\"\n text = f\"{git_version(version)}\"\n with open(filename, 'w') as file:\n file.write(text)\n\n\n# 'Start dependencies group' and 'Start dependencies group' are mark for ./scripts/ci/check_order_setup.py\n# If you change this mark you should also change ./scripts/ci/check_order_setup.py\n# Start dependencies group\nalibaba = [\n 'oss2>=2.14.0',\n]\namazon = [\n 'boto3>=1.15.0,<1.18.0',\n 'watchtower~=1.0.6',\n 'jsonpath_ng>=1.5.3',\n]\napache_beam = [\n 'apache-beam>=2.20.0',\n]\nasana = ['asana>=0.10']\nasync_packages = [\n # DNS Python 2.0.0 and above breaks building documentation on Sphinx. 
When dnspython 2.0.0 is installed\n # building documentation fails with trying to import google packages with\n # TypeError(\"unsupported operand type(s) for +: 'SSL_VERIFY_PEER' and\n # 'SSL_VERIFY_FAIL_IF_NO_PEER_CERT'\")\n # The issue is opened for it https://github.com/rthalley/dnspython/issues/681\n 'dnspython<2.0.0',\n 'eventlet>= 0.9.7',\n 'gevent>=0.13',\n 'greenlet>=0.4.9',\n]\natlas = [\n 'atlasclient>=0.1.2',\n]\nazure = [\n 'azure-batch>=8.0.0',\n 'azure-cosmos>=3.0.1,<4',\n 'azure-datalake-store>=0.0.45',\n 'azure-identity>=1.3.1',\n 'azure-keyvault>=4.1.0',\n 'azure-kusto-data>=0.0.43,<0.1',\n 'azure-mgmt-containerinstance>=1.5.0,<2.0',\n 'azure-mgmt-datafactory>=1.0.0,<2.0',\n 'azure-mgmt-datalake-store>=0.5.0',\n 'azure-mgmt-resource>=2.2.0',\n 'azure-storage-blob>=12.7.0',\n 'azure-storage-common>=2.1.0',\n 'azure-storage-file>=2.1.0',\n]\ncassandra = [\n 'cassandra-driver>=3.13.0,<4',\n]\ncelery = [\n 'celery~=5.1,>=5.1.2',\n 'flower~=1.0.0',\n]\ncgroups = [\n 'cgroupspy>=0.1.4',\n]\ncloudant = [\n 'cloudant>=2.0',\n]\ndask = [\n 'cloudpickle>=1.4.1, <1.5.0',\n 'dask<2021.3.1;python_version<\"3.7\"', # dask stopped supporting python 3.6 in 2021.3.1 version\n 'dask>=2.9.0, <2021.6.1;python_version>=\"3.7\"', # dask 2021.6.1 does not work with `distributed`\n 'distributed>=2.11.1, <2.20',\n]\ndatabricks = [\n 'requests>=2.26.0, <3',\n]\ndatadog = [\n 'datadog>=0.14.0',\n]\ndeprecated_api = [\n 'requests>=2.26.0',\n]\ndoc = [\n 'click>=7.1,<9',\n # Sphinx is limited to < 3.5.0 because of https://github.com/sphinx-doc/sphinx/issues/8880\n 'sphinx>=2.1.2, <3.5.0',\n 'sphinx-airflow-theme',\n 'sphinx-argparse>=0.1.13',\n 'sphinx-autoapi==1.0.0',\n 'sphinx-copybutton',\n 'sphinx-jinja~=1.1',\n 'sphinx-rtd-theme>=0.1.6',\n 'sphinxcontrib-httpdomain>=1.7.0',\n 'sphinxcontrib-redoc>=1.6.0',\n 'sphinxcontrib-spelling==7.2.1',\n]\ndocker = [\n 'docker',\n]\ndrill = ['sqlalchemy-drill>=1.1.0', 'sqlparse>=0.4.1']\ndruid = [\n 'pydruid>=0.4.1',\n]\nelasticsearch = [\n 'elasticsearch>7',\n 'elasticsearch-dbapi',\n 'elasticsearch-dsl>=5.0.0',\n]\nexasol = [\n 'pyexasol>=0.5.1,<1.0.0',\n]\nfacebook = [\n 'facebook-business>=6.0.2',\n]\nflask_appbuilder_authlib = [\n 'authlib',\n]\ngoogle = [\n 'PyOpenSSL',\n 'google-ads>=12.0.0',\n # Maintainers, please do not require google-api-core>=2.x.x\n # Until this issue is closed\n # https://github.com/googleapis/google-cloud-python/issues/10566\n 'google-api-core>=1.25.1,<3.0.0',\n 'google-api-python-client>=1.6.0,<2.0.0',\n # Maintainers, please do not require google-auth>=2.x.x\n # Until this issue is closed\n # https://github.com/googleapis/google-cloud-python/issues/10566\n 'google-auth>=1.0.0,<3.0.0',\n 'google-auth-httplib2>=0.0.1',\n 'google-cloud-automl>=2.1.0,<3.0.0',\n 'google-cloud-bigquery-datatransfer>=3.0.0,<4.0.0',\n 'google-cloud-bigtable>=1.0.0,<2.0.0',\n 'google-cloud-container>=0.1.1,<2.0.0',\n 'google-cloud-datacatalog>=3.0.0,<4.0.0',\n 'google-cloud-dataproc>=2.2.0,<3.0.0',\n 'google-cloud-dlp>=0.11.0,<2.0.0',\n 'google-cloud-kms>=2.0.0,<3.0.0',\n 'google-cloud-language>=1.1.1,<2.0.0',\n 'google-cloud-logging>=2.1.1,<3.0.0',\n # 1.1.0 removed field_mask and broke import for released providers\n # We can remove the <1.1.0 limitation after we release new Google Provider\n 'google-cloud-memcache>=0.2.0,<1.1.0',\n 'google-cloud-monitoring>=2.0.0,<3.0.0',\n 'google-cloud-os-login>=2.0.0,<3.0.0',\n 'google-cloud-pubsub>=2.0.0,<3.0.0',\n 'google-cloud-redis>=2.0.0,<3.0.0',\n 'google-cloud-secret-manager>=0.2.0,<2.0.0',\n 
'google-cloud-spanner>=1.10.0,<2.0.0',\n 'google-cloud-speech>=0.36.3,<2.0.0',\n 'google-cloud-storage>=1.30,<2.0.0',\n 'google-cloud-tasks>=2.0.0,<3.0.0',\n 'google-cloud-texttospeech>=0.4.0,<2.0.0',\n 'google-cloud-translate>=1.5.0,<2.0.0',\n 'google-cloud-videointelligence>=1.7.0,<2.0.0',\n 'google-cloud-vision>=0.35.2,<2.0.0',\n 'google-cloud-workflows>=0.1.0,<2.0.0',\n 'grpcio-gcp>=0.2.2',\n 'httpx',\n 'json-merge-patch~=0.2',\n # pandas-gbq 0.15.0 release broke google provider's bigquery import\n # _check_google_client_version (airflow/providers/google/cloud/hooks/bigquery.py:49)\n 'pandas-gbq<0.15.0',\n]\ngrpc = [\n 'google-auth>=1.0.0, <3.0.0',\n 'google-auth-httplib2>=0.0.1',\n 'grpcio>=1.15.0',\n]\nhashicorp = [\n 'hvac~=0.10',\n]\nhdfs = [\n 'snakebite-py3',\n]\nhive = [\n 'hmsclient>=0.1.0',\n 'pyhive[hive]>=0.6.0;python_version<\"3.9\"',\n 'thrift>=0.9.2',\n]\nhttp = [\n # The 2.26.0 release of requests got rid of the chardet LGPL mandatory dependency, allowing us to\n # release it as a requirement for airflow\n 'requests>=2.26.0',\n]\nhttp_provider = [\n 'apache-airflow-providers-http',\n]\njdbc = [\n 'jaydebeapi>=1.1.1',\n]\njenkins = [\n 'python-jenkins>=1.0.0',\n]\njira = [\n 'JIRA>1.0.7',\n]\nkerberos = [\n 'pykerberos>=1.1.13',\n 'requests_kerberos>=0.10.0',\n 'thrift_sasl>=0.2.0',\n]\nkubernetes = [\n 'cryptography>=2.0.0',\n 'kubernetes>=3.0.0, <12.0.0',\n]\nkylin = ['kylinpy>=2.6']\nldap = [\n 'ldap3>=2.5.1',\n 'python-ldap',\n]\nleveldb = ['plyvel']\nmongo = [\n 'dnspython>=1.13.0,<3.0.0',\n 'pymongo>=3.6.0',\n]\nmssql = [\n 'pymssql~=2.1,>=2.1.5',\n]\nmysql = [\n 'mysql-connector-python>=8.0.11, <9',\n 'mysqlclient>=1.3.6,<3',\n]\nneo4j = ['neo4j>=4.2.1']\nodbc = [\n 'pyodbc',\n]\noracle = [\n 'cx_Oracle>=5.1.2',\n]\npagerduty = [\n 'pdpyras>=4.1.2,<5',\n]\npandas = [\n 'pandas>=0.17.1, <2.0',\n]\npapermill = [\n 'papermill[all]>=1.2.1',\n 'scrapbook[all]',\n]\npassword = [\n 'bcrypt>=2.0.0',\n 'flask-bcrypt>=0.7.1',\n]\npinot = [\n # pinotdb v0.1.1 may still work with older versions of Apache Pinot, but we've confirmed that it\n # causes a problem with newer versions.\n 'pinotdb>0.1.2,<1.0.0',\n]\nplexus = [\n 'arrow>=0.16.0',\n]\npostgres = [\n 'psycopg2-binary>=2.7.4',\n]\npresto = ['presto-python-client>=0.7.0,<0.8']\npsrp = [\n 'pypsrp~=0.5',\n]\nqubole = [\n 'qds-sdk>=1.10.4',\n]\nrabbitmq = [\n 'amqp',\n]\nredis = [\n 'redis~=3.2',\n]\nsalesforce = [\n 'simple-salesforce>=1.0.0',\n 'tableauserverclient',\n]\nsamba = [\n 'smbprotocol>=1.5.0',\n]\nsegment = [\n 'analytics-python>=1.2.9',\n]\nsendgrid = [\n 'sendgrid>=6.0.0,<7',\n]\nsentry = [\n 'blinker>=1.1',\n 'sentry-sdk>=0.8.0',\n]\nsingularity = ['spython>=0.0.56']\nslack = [\n 'slack_sdk>=3.0.0,<4.0.0',\n]\nsnowflake = [\n 'snowflake-connector-python>=2.4.1',\n 'snowflake-sqlalchemy>=1.1.0',\n]\nspark = [\n 'pyspark',\n]\nssh = [\n 'paramiko>=2.6.0',\n 'pysftp>=0.2.9',\n 'sshtunnel>=0.1.4,<0.2',\n]\nstatsd = [\n 'statsd>=3.3.0, <4.0',\n]\ntableau = [\n 'tableauserverclient',\n]\ntelegram = [\n 'python-telegram-bot~=13.0',\n]\ntrino = ['trino']\nvertica = [\n 'vertica-python>=0.5.1',\n]\nvirtualenv = [\n 'virtualenv',\n]\nwebhdfs = [\n 'hdfs[avro,dataframe,kerberos]>=2.0.4',\n]\nwinrm = [\n 'pywinrm~=0.4',\n]\nyandex = [\n 'yandexcloud>=0.97.0',\n]\nzendesk = [\n 'zdesk',\n]\n# End dependencies group\n\ndevel = [\n 'aws_xray_sdk',\n 'beautifulsoup4~=4.7.1',\n 'black',\n 'blinker',\n 'bowler',\n 'click>=7.1,<9',\n 'coverage',\n 'filelock',\n 'flake8>=3.6.0',\n 'flake8-colors',\n 'flaky',\n 'freezegun',\n 
'github3.py',\n 'gitpython',\n 'importlib-resources~=5.0',\n 'ipdb',\n 'jira',\n 'jsondiff',\n 'mongomock',\n 'moto~=2.2, >=2.2.1.dev9',\n 'mypy==0.770',\n 'parameterized',\n 'paramiko',\n 'pipdeptree',\n 'pre-commit',\n 'pypsrp',\n 'pygithub',\n 'pysftp',\n 'pytest~=6.0',\n 'pytest-asyncio',\n 'pytest-cov',\n 'pytest-instafail',\n 'pytest-rerunfailures~=9.1',\n 'pytest-timeouts',\n 'pytest-xdist',\n 'python-jose',\n 'pywinrm',\n 'qds-sdk>=1.9.6',\n 'pytest-httpx',\n 'requests_mock',\n 'wheel',\n 'yamllint',\n]\n\ndevel_minreq = cgroups + devel + doc + kubernetes + mysql + pandas + password\ndevel_hadoop = devel_minreq + hdfs + hive + kerberos + presto + webhdfs\n\n# Dict of all providers which are part of the Apache Airflow repository together with their requirements\nPROVIDERS_REQUIREMENTS: Dict[str, List[str]] = {\n 'airbyte': http_provider,\n 'alibaba': alibaba,\n 'amazon': amazon,\n 'apache.beam': apache_beam,\n 'apache.cassandra': cassandra,\n 'apache.drill': drill,\n 'apache.druid': druid,\n 'apache.hdfs': hdfs,\n 'apache.hive': hive,\n 'apache.kylin': kylin,\n 'apache.livy': http_provider,\n 'apache.pig': [],\n 'apache.pinot': pinot,\n 'apache.spark': spark,\n 'apache.sqoop': [],\n 'asana': asana,\n 'celery': celery,\n 'cloudant': cloudant,\n 'cncf.kubernetes': kubernetes,\n 'databricks': databricks,\n 'datadog': datadog,\n 'dingding': [],\n 'discord': [],\n 'docker': docker,\n 'elasticsearch': elasticsearch,\n 'exasol': exasol,\n 'facebook': facebook,\n 'ftp': [],\n 'google': google,\n 'grpc': grpc,\n 'hashicorp': hashicorp,\n 'http': http,\n 'imap': [],\n 'jdbc': jdbc,\n 'jenkins': jenkins,\n 'jira': jira,\n 'microsoft.azure': azure,\n 'microsoft.mssql': mssql,\n 'microsoft.psrp': psrp,\n 'microsoft.winrm': winrm,\n 'mongo': mongo,\n 'mysql': mysql,\n 'neo4j': neo4j,\n 'odbc': odbc,\n 'openfaas': [],\n 'opsgenie': http_provider,\n 'oracle': oracle,\n 'pagerduty': pagerduty,\n 'papermill': papermill,\n 'plexus': plexus,\n 'postgres': postgres,\n 'presto': presto,\n 'qubole': qubole,\n 'redis': redis,\n 'salesforce': salesforce,\n 'samba': samba,\n 'segment': segment,\n 'sendgrid': sendgrid,\n 'sftp': ssh,\n 'singularity': singularity,\n 'slack': slack,\n 'snowflake': snowflake,\n 'sqlite': [],\n 'ssh': ssh,\n 'tableau': tableau,\n 'telegram': telegram,\n 'trino': trino,\n 'vertica': vertica,\n 'yandex': yandex,\n 'zendesk': zendesk,\n}\n\n# Those are all additional extras which do not have their own 'providers'\n# The 'apache.atlas' and 'apache.webhdfs' are extras that provide additional libraries\n# but they do not have separate providers (yet?), they are merely there to add extra libraries\n# That can be used in custom python/bash operators.\nADDITIONAL_EXTRAS_REQUIREMENTS: Dict[str, List[str]] = {\n 'apache.atlas': atlas,\n 'apache.webhdfs': webhdfs,\n}\n\n\n# Those are extras that are extensions of the 'core' Airflow. They provide additional features\n# To airflow core. 
They do not have separate providers because they do not have any operators/hooks etc.\nCORE_EXTRAS_REQUIREMENTS: Dict[str, List[str]] = {\n 'async': async_packages,\n 'celery': celery, # also has provider, but it extends the core with the Celery executor\n 'cgroups': cgroups,\n 'cncf.kubernetes': kubernetes, # also has provider, but it extends the core with the KubernetesExecutor\n 'dask': dask,\n 'deprecated_api': deprecated_api,\n 'github_enterprise': flask_appbuilder_authlib,\n 'google_auth': flask_appbuilder_authlib,\n 'kerberos': kerberos,\n 'ldap': ldap,\n 'leveldb': leveldb,\n 'pandas': pandas,\n 'password': password,\n 'rabbitmq': rabbitmq,\n 'sentry': sentry,\n 'statsd': statsd,\n 'virtualenv': virtualenv,\n}\n\nEXTRAS_REQUIREMENTS: Dict[str, List[str]] = deepcopy(CORE_EXTRAS_REQUIREMENTS)\n\n\ndef add_extras_for_all_providers() -> None:\n \"\"\"\n Adds extras for all providers.\n By default all providers have the same extra name as provider id, for example\n 'apache.hive' extra has 'apache.hive' provider requirement.\n \"\"\"\n for provider_name, provider_requirement in PROVIDERS_REQUIREMENTS.items():\n EXTRAS_REQUIREMENTS[provider_name] = provider_requirement\n\n\ndef add_additional_extras() -> None:\n \"\"\"Adds extras for all additional extras.\"\"\"\n for extra_name, extra_requirement in ADDITIONAL_EXTRAS_REQUIREMENTS.items():\n EXTRAS_REQUIREMENTS[extra_name] = extra_requirement\n\n\nadd_extras_for_all_providers()\nadd_additional_extras()\n\n#############################################################################################################\n# The whole section can be removed in Airflow 3.0 as those old aliases are deprecated in 2.* series\n#############################################################################################################\n\n# Dictionary of aliases from 1.10 - deprecated in Airflow 2.*\nEXTRAS_DEPRECATED_ALIASES: Dict[str, str] = {\n 'atlas': 'apache.atlas',\n 'aws': 'amazon',\n 'azure': 'microsoft.azure',\n 'cassandra': 'apache.cassandra',\n 'crypto': '', # All crypto requirements are installation requirements of core Airflow\n 'druid': 'apache.druid',\n 'gcp': 'google',\n 'gcp_api': 'google',\n 'hdfs': 'apache.hdfs',\n 'hive': 'apache.hive',\n 'kubernetes': 'cncf.kubernetes',\n 'mssql': 'microsoft.mssql',\n 'pinot': 'apache.pinot',\n 'qds': 'qubole',\n 's3': 'amazon',\n 'spark': 'apache.spark',\n 'webhdfs': 'apache.webhdfs',\n 'winrm': 'microsoft.winrm',\n}\n\nEXTRAS_DEPRECATED_ALIASES_NOT_PROVIDERS: List[str] = [\n \"crypto\",\n \"webhdfs\",\n]\n\n\ndef add_extras_for_all_deprecated_aliases() -> None:\n \"\"\"\n Add extras for all deprecated aliases. Requirements for those deprecated aliases are the same\n as the extras they are replaced with.\n The requirements are not copies - those are the same lists as for the new extras. This is intended.\n Thanks to that if the original extras are later extended with providers, aliases are extended as well.\n \"\"\"\n for alias, extra in EXTRAS_DEPRECATED_ALIASES.items():\n requirements = EXTRAS_REQUIREMENTS.get(extra) if extra != '' else []\n if requirements is None:\n raise Exception(f\"The extra {extra} is missing for deprecated alias {alias}\")\n EXTRAS_REQUIREMENTS[alias] = requirements\n\n\ndef add_all_deprecated_provider_packages() -> None:\n \"\"\"\n For deprecated aliases that are providers, we will swap the providers requirements to instead\n be the provider itself.\n\n e.g. 
{\"kubernetes\": [\"kubernetes>=3.0.0, <12.0.0\", ...]} becomes\n {\"kubernetes\": [\"apache-airflow-provider-cncf-kubernetes\"]}\n \"\"\"\n for alias, provider in EXTRAS_DEPRECATED_ALIASES.items():\n if alias in EXTRAS_DEPRECATED_ALIASES_NOT_PROVIDERS:\n continue\n replace_extra_requirement_with_provider_packages(alias, [provider])\n\n\nadd_extras_for_all_deprecated_aliases()\n\n#############################################################################################################\n# End of deprecated section\n#############################################################################################################\n\n# This is list of all providers. It's a shortcut for anyone who would like to easily get list of\n# All providers. It is used by pre-commits.\nALL_PROVIDERS = list(PROVIDERS_REQUIREMENTS.keys())\n\nALL_DB_PROVIDERS = [\n 'apache.cassandra',\n 'apache.drill',\n 'apache.druid',\n 'apache.hdfs',\n 'apache.hive',\n 'apache.pinot',\n 'cloudant',\n 'exasol',\n 'microsoft.mssql',\n 'mongo',\n 'mysql',\n 'neo4j',\n 'postgres',\n 'presto',\n 'trino',\n 'vertica',\n]\n\n# Special requirements for all database-related providers. They are de-duplicated.\nall_dbs = list({req for db_provider in ALL_DB_PROVIDERS for req in PROVIDERS_REQUIREMENTS[db_provider]})\n\n# Requirements for all \"user\" extras (no devel). They are de-duplicated. Note that we do not need\n# to separately add providers requirements - they have been already added as 'providers' extras above\n_all_requirements = list({req for extras_reqs in EXTRAS_REQUIREMENTS.values() for req in extras_reqs})\n\n# All user extras here\nEXTRAS_REQUIREMENTS[\"all\"] = _all_requirements\n\n# All db user extras here\nEXTRAS_REQUIREMENTS[\"all_dbs\"] = all_dbs + pandas\n\n# This can be simplified to devel_hadoop + _all_requirements due to inclusions\n# but we keep it for explicit sake. 
We are de-duplicating it anyway.\ndevel_all = list(set(_all_requirements + doc + devel_minreq + devel_hadoop))\n\n# Those are packages excluded for \"all\" dependencies\nPACKAGES_EXCLUDED_FOR_ALL = []\nPACKAGES_EXCLUDED_FOR_ALL.extend(\n [\n 'snakebite',\n ]\n)\n\n\ndef is_package_excluded(package: str, exclusion_list: List[str]) -> bool:\n \"\"\"\n Checks if package should be excluded.\n\n :param package: package name (beginning of it)\n :param exclusion_list: list of excluded packages\n :return: true if package should be excluded\n \"\"\"\n return any(package.startswith(excluded_package) for excluded_package in exclusion_list)\n\n\ndevel_all = [\n package\n for package in devel_all\n if not is_package_excluded(package=package, exclusion_list=PACKAGES_EXCLUDED_FOR_ALL)\n]\n\ndevel_ci = devel_all\n\n\n# Those are extras that we have to add for development purposes\n# They can be use to install some predefined set of dependencies.\nEXTRAS_REQUIREMENTS[\"doc\"] = doc\nEXTRAS_REQUIREMENTS[\"devel\"] = devel_minreq # devel_minreq already includes doc\nEXTRAS_REQUIREMENTS[\"devel_hadoop\"] = devel_hadoop # devel_hadoop already includes devel_minreq\nEXTRAS_REQUIREMENTS[\"devel_all\"] = devel_all\nEXTRAS_REQUIREMENTS[\"devel_ci\"] = devel_ci\n\n\ndef sort_extras_requirements() -> Dict[str, List[str]]:\n \"\"\"\n For Python 3.6+ the dictionary order remains when keys() are retrieved.\n Sort both: extras and list of dependencies to make it easier to analyse problems\n external packages will be first, then if providers are added they are added at the end of the lists.\n \"\"\"\n sorted_requirements = dict(sorted(EXTRAS_REQUIREMENTS.items()))\n for extra_list in sorted_requirements.values():\n extra_list.sort()\n return sorted_requirements\n\n\nEXTRAS_REQUIREMENTS = sort_extras_requirements()\n\n# Those providers are pre-installed always when airflow is installed.\n# Those providers do not have dependency on airflow2.0 because that would lead to circular dependencies.\n# This is not a problem for PIP but some tools (pipdeptree) show those as a warning.\nPREINSTALLED_PROVIDERS = [\n 'ftp',\n 'http',\n 'imap',\n 'sqlite',\n]\n\n\ndef get_provider_package_from_package_id(package_id: str) -> str:\n \"\"\"\n Builds the name of provider package out of the package id provided/\n\n :param package_id: id of the package (like amazon or microsoft.azure)\n :return: full name of package in PyPI\n \"\"\"\n package_suffix = package_id.replace(\".\", \"-\")\n return f\"apache-airflow-providers-{package_suffix}\"\n\n\ndef get_excluded_providers() -> List[str]:\n \"\"\"\n Returns packages excluded for the current python version.\n Currently the only excluded provider is apache hive for Python 3.9.\n Until https://github.com/dropbox/PyHive/issues/380 is fixed.\n \"\"\"\n return ['apache.hive'] if PY39 else []\n\n\ndef get_all_provider_packages() -> str:\n \"\"\"Returns all provider packages configured in setup.py\"\"\"\n excluded_providers = get_excluded_providers()\n return \" \".join(\n get_provider_package_from_package_id(package)\n for package in PROVIDERS_REQUIREMENTS\n if package not in excluded_providers\n )\n\n\nclass AirflowDistribution(Distribution):\n \"\"\"The setuptools.Distribution subclass with Airflow specific behaviour\"\"\"\n\n def parse_config_files(self, *args, **kwargs) -> None:\n \"\"\"\n Ensure that when we have been asked to install providers from sources\n that we don't *also* try to install those providers from PyPI.\n Also we should make sure that in this case we copy provider.yaml files so 
that\n Providers manager can find package information.\n \"\"\"\n super().parse_config_files(*args, **kwargs)\n if os.getenv(INSTALL_PROVIDERS_FROM_SOURCES) == 'true':\n self.install_requires = [\n req for req in self.install_requires if not req.startswith('apache-airflow-providers-')\n ]\n provider_yaml_files = glob.glob(\"airflow/providers/**/provider.yaml\", recursive=True)\n for provider_yaml_file in provider_yaml_files:\n provider_relative_path = relpath(provider_yaml_file, os.path.join(my_dir, \"airflow\"))\n self.package_data['airflow'].append(provider_relative_path)\n else:\n self.install_requires.extend(\n [get_provider_package_from_package_id(package_id) for package_id in PREINSTALLED_PROVIDERS]\n )\n\n\ndef replace_extra_requirement_with_provider_packages(extra: str, providers: List[str]) -> None:\n \"\"\"\n Replaces extra requirement with provider package. The intention here is that when\n the provider is added as dependency of extra, there is no need to add the dependencies\n separately. This is not needed and even harmful, because in case of future versions of\n the provider, the requirements might change, so hard-coding requirements from the version\n that was available at the release time might cause dependency conflicts in the future.\n\n Say for example that you have salesforce provider with those deps:\n\n { 'salesforce': ['simple-salesforce>=1.0.0', 'tableauserverclient'] }\n\n Initially ['salesforce'] extra has those requirements and it works like that when you install\n it when INSTALL_PROVIDERS_FROM_SOURCES is set to `true` (during the development). However, when\n the production installation is used, The dependencies are changed:\n\n { 'salesforce': ['apache-airflow-providers-salesforce'] }\n\n And then, 'apache-airflow-providers-salesforce' package has those 'install_requires' dependencies:\n ['simple-salesforce>=1.0.0', 'tableauserverclient']\n\n So transitively 'salesforce' extra has all the requirements it needs and in case the provider\n changes it's dependencies, they will transitively change as well.\n\n In the constraint mechanism we save both - provider versions and it's dependencies\n version, which means that installation using constraints is repeatable.\n\n :param extra: Name of the extra to add providers to\n :param providers: list of provider ids\n \"\"\"\n EXTRAS_REQUIREMENTS[extra] = [\n get_provider_package_from_package_id(package_name) for package_name in providers\n ]\n\n\ndef add_provider_packages_to_extra_requirements(extra: str, providers: List[str]) -> None:\n \"\"\"\n Adds provider packages as requirements to extra. This is used to add provider packages as requirements\n to the \"bulk\" kind of extras. Those bulk extras do not have the detailed 'extra' requirements as\n initial values, so instead of replacing them (see previous function) we can extend them.\n\n :param extra: Name of the extra to add providers to\n :param providers: list of provider ids\n \"\"\"\n EXTRAS_REQUIREMENTS[extra].extend(\n [get_provider_package_from_package_id(package_name) for package_name in providers]\n )\n\n\ndef add_all_provider_packages() -> None:\n \"\"\"\n In case of regular installation (providers installed from packages), we should add extra dependencies to\n Airflow - to get the providers automatically installed when those extras are installed.\n\n For providers installed from sources we skip that step. 
That helps to test and install airflow with\n all packages in CI - for example when new providers are added, otherwise the installation would fail\n as the new provider is not yet in PyPI.\n\n \"\"\"\n for provider in ALL_PROVIDERS:\n replace_extra_requirement_with_provider_packages(provider, [provider])\n add_provider_packages_to_extra_requirements(\"all\", ALL_PROVIDERS)\n add_provider_packages_to_extra_requirements(\"devel_ci\", ALL_PROVIDERS)\n add_provider_packages_to_extra_requirements(\"devel_all\", ALL_PROVIDERS)\n add_provider_packages_to_extra_requirements(\"all_dbs\", ALL_DB_PROVIDERS)\n add_provider_packages_to_extra_requirements(\n \"devel_hadoop\", [\"apache.hdfs\", \"apache.hive\", \"presto\", \"trino\"]\n )\n add_all_deprecated_provider_packages()\n\n\nclass Develop(develop_orig):\n \"\"\"Forces removal of providers in editable mode.\"\"\"\n\n def run(self) -> None:\n self.announce('Installing in editable mode. Uninstalling provider packages!', level=log.INFO)\n # We need to run \"python3 -m pip\" because it might be that older PIP binary is in the path\n # And it results with an error when running pip directly (cannot import pip module)\n # also PIP does not have a stable API so we have to run subprocesses ¯\\_(ツ)_/¯\n try:\n installed_packages = (\n subprocess.check_output([\"python3\", \"-m\", \"pip\", \"freeze\"]).decode().splitlines()\n )\n airflow_provider_packages = [\n package_line.split(\"=\")[0]\n for package_line in installed_packages\n if package_line.startswith(\"apache-airflow-providers\")\n ]\n self.announce(f'Uninstalling ${airflow_provider_packages}!', level=log.INFO)\n subprocess.check_call([\"python3\", \"-m\", \"pip\", \"uninstall\", \"--yes\", *airflow_provider_packages])\n except subprocess.CalledProcessError as e:\n self.announce(f'Error when uninstalling airflow provider packages: {e}!', level=log.WARN)\n super().run()\n\n\nclass Install(install_orig):\n \"\"\"Forces installation of providers from sources in editable mode.\"\"\"\n\n def run(self) -> None:\n self.announce('Standard installation. Providers are installed from packages', level=log.INFO)\n super().run()\n\n\ndef do_setup() -> None:\n \"\"\"\n Perform the Airflow package setup.\n\n Most values come from setup.cfg, only the dynamically calculated ones are passed to setup\n function call. See https://setuptools.readthedocs.io/en/latest/userguide/declarative_config.html\n \"\"\"\n setup_kwargs = {}\n\n def include_provider_namespace_packages_when_installing_from_sources() -> None:\n \"\"\"\n When installing providers from sources we install all namespace packages found below airflow,\n including airflow and provider packages, otherwise defaults from setup.cfg control this.\n The kwargs in setup() call override those that are specified in setup.cfg.\n \"\"\"\n if os.getenv(INSTALL_PROVIDERS_FROM_SOURCES) == 'true':\n setup_kwargs['packages'] = find_namespace_packages(include=['airflow*'])\n\n include_provider_namespace_packages_when_installing_from_sources()\n if os.getenv(INSTALL_PROVIDERS_FROM_SOURCES) == 'true':\n print(\"Installing providers from sources. 
Skip adding providers as dependencies\")\n else:\n add_all_provider_packages()\n\n write_version()\n setup(\n distclass=AirflowDistribution,\n version=version,\n extras_require=EXTRAS_REQUIREMENTS,\n download_url=('https://archive.apache.org/dist/airflow/' + version),\n cmdclass={\n 'extra_clean': CleanCommand,\n 'compile_assets': CompileAssets,\n 'list_extras': ListExtras,\n 'install': Install,\n 'develop': Develop,\n },\n test_suite='setup.airflow_test_suite',\n **setup_kwargs,\n )\n\n\nif __name__ == \"__main__\":\n do_setup()\n",
"path": "setup.py"
}
] | diff --git a/setup.cfg b/setup.cfg
index 4d2ee284f9bcf..10d7b7d713243 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -111,7 +111,7 @@ install_requires =
gunicorn>=20.1.0
httpx
importlib_metadata>=1.7;python_version<"3.9"
- importlib_resources~=1.4
+ importlib_resources~=5.0
# Required by vendored-in connexion
inflection>=0.3.1
iso8601>=0.1.12
diff --git a/setup.py b/setup.py
index e64d0ce7ff0dd..e392e288b4186 100644
--- a/setup.py
+++ b/setup.py
@@ -512,7 +512,7 @@ def write_version(filename: str = os.path.join(*[my_dir, "airflow", "git_version
'freezegun',
'github3.py',
'gitpython',
- 'importlib-resources~=1.4',
+ 'importlib-resources~=5.0',
'ipdb',
'jira',
'jsondiff',
|
readthedocs__readthedocs.org-5424 | Remove possibly unused constant
At first sight it looks like this isn't used anymore after https://github.com/rtfd/readthedocs.org/pull/5383
https://github.com/rtfd/readthedocs.org/blob/78c34c904b347110b2cd545b4b5a80ed526590f7/readthedocs/core/models.py#L13-L13
We should still double-check and make sure the tests pass after the removal.
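One quick way to do that double check before deleting the constant (a hypothetical helper snippet, not something that exists in the repository) is to scan the source tree for any remaining references and then re-run the test suite:

```python
# Hypothetical check, run from the repository root: list the files that still
# reference STANDARD_EMAIL. Only readthedocs/core/models.py (the definition
# itself) should appear before the removal.
import pathlib

hits = [
    str(p)
    for p in pathlib.Path('readthedocs').rglob('*.py')
    if 'STANDARD_EMAIL' in p.read_text(encoding='utf-8')
]
print(hits)
```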
| [
{
"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Models for the core app.\"\"\"\nimport logging\n\nfrom annoying.fields import AutoOneToOneField\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils.translation import ugettext\nfrom django.utils.translation import ugettext_lazy as _\n\n\nSTANDARD_EMAIL = '[email protected]'\n\nlog = logging.getLogger(__name__)\n\n\nclass UserProfile(models.Model):\n\n \"\"\"Additional information about a User.\"\"\"\n\n user = AutoOneToOneField(\n 'auth.User',\n verbose_name=_('User'),\n related_name='profile',\n )\n whitelisted = models.BooleanField(_('Whitelisted'), default=False)\n banned = models.BooleanField(_('Banned'), default=False)\n homepage = models.CharField(_('Homepage'), max_length=100, blank=True)\n allow_ads = models.BooleanField(\n _('See paid advertising'),\n help_text=_('If unchecked, you will still see community ads.'),\n default=True,\n )\n\n def __str__(self):\n return (\n ugettext(\"%(username)s's profile\") %\n {'username': self.user.username}\n )\n\n def get_absolute_url(self):\n return reverse(\n 'profiles_profile_detail',\n kwargs={'username': self.user.username},\n )\n",
"path": "readthedocs/core/models.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Models for the core app.\"\"\"\nimport logging\n\nfrom annoying.fields import AutoOneToOneField\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils.translation import ugettext\nfrom django.utils.translation import ugettext_lazy as _\n\n\nlog = logging.getLogger(__name__)\n\n\nclass UserProfile(models.Model):\n\n \"\"\"Additional information about a User.\"\"\"\n\n user = AutoOneToOneField(\n 'auth.User',\n verbose_name=_('User'),\n related_name='profile',\n )\n whitelisted = models.BooleanField(_('Whitelisted'), default=False)\n banned = models.BooleanField(_('Banned'), default=False)\n homepage = models.CharField(_('Homepage'), max_length=100, blank=True)\n allow_ads = models.BooleanField(\n _('See paid advertising'),\n help_text=_('If unchecked, you will still see community ads.'),\n default=True,\n )\n\n def __str__(self):\n return (\n ugettext(\"%(username)s's profile\") %\n {'username': self.user.username}\n )\n\n def get_absolute_url(self):\n return reverse(\n 'profiles_profile_detail',\n kwargs={'username': self.user.username},\n )\n",
"path": "readthedocs/core/models.py"
}
] | diff --git a/readthedocs/core/models.py b/readthedocs/core/models.py
index d129f181a5e..3fab8570b45 100644
--- a/readthedocs/core/models.py
+++ b/readthedocs/core/models.py
@@ -10,8 +10,6 @@
from django.utils.translation import ugettext_lazy as _
-STANDARD_EMAIL = '[email protected]'
-
log = logging.getLogger(__name__)
|
joke2k__faker-146 | timezone() randomly throws an exception
fake.timezone() sometimes throws an exception, possibly when a country doesn't have any timezones defined:
``` python
>>> from faker import Faker
>>> f = Faker()
>>> f.timezone()
'Africa/Mogadishu'
>>> f.timezone()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/vagrant/.python/lib/python3.3/site-packages/faker/providers/date_time.py", line 378, in timezone
return cls.random_element(cls.countries)['timezones'].pop(0)
```
This is with Python 3.3 using fake-factory 0.4.0 from PyPI.
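The frame shown in the traceback is the culprit: `timezone()` does `cls.random_element(cls.countries)['timezones'].pop(0)`, so every call permanently removes a timezone from the shared class-level `countries` table. Once a country's list has been drained, the next call that picks that country pops from an empty list and raises `IndexError`. A minimal, self-contained sketch of the failure mode and one possible non-mutating alternative (hypothetical names, not necessarily the fix merged upstream):

```python
import random

# Hypothetical stand-in for faker's BaseProvider.random_element
def random_element(elements):
    return random.choice(elements)

# Tiny stand-in for the class-level `countries` table
countries = [{'name': 'Somalia', 'timezones': ['Africa/Mogadishu']}]

def timezone_buggy():
    # Mirrors the 0.4.0 behaviour: pop(0) mutates the shared list, so a later
    # call that lands on the same country raises "IndexError: pop from empty list".
    return random_element(countries)['timezones'].pop(0)

def timezone_fixed():
    # Reads without mutating, so it can be called any number of times.
    return random_element(random_element(countries)['timezones'])

for _ in range(5):
    print(timezone_fixed())  # never exhausts the shared data
```

Selecting with `random_element` instead of `pop(0)` also means countries with several timezones return a random one on each call rather than walking the list in order.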
| [
{
"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\nfrom . import BaseProvider\nimport random\nimport re\nfrom time import time, mktime\nfrom datetime import timedelta\nfrom faker.utils.datetime_safe import date, datetime, real_date, real_datetime\nfrom faker.utils import is_string\n\n\ndef datetime_to_timestamp(dt):\n return mktime(dt.timetuple())\n\n\ntimedelta_pattern = r''\nfor name, sym in [('years', 'y'), ('weeks', 'w'), ('days', 'd'), ('hours', 'h'), ('minutes', 'm'), ('seconds', 's')]:\n timedelta_pattern += r'((?P<{0}>(?:\\+|-)\\d+?){1})?'.format(name, sym)\n\n\nclass Provider(BaseProvider):\n centuries = ['I', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII', 'IX', 'X', 'XI', 'XII', 'XIII', 'XIV', 'XV', 'XVI',\n 'XVII', 'XVIII', 'XIX', 'XX', 'XXI']\n\n countries = [\n {'timezones': ['Europe/Andorra'], 'code': 'AD', 'continent': 'Europe', 'name': 'Andorra', 'capital': 'Andorra la Vella'},\n {'timezones': ['Asia/Kabul'], 'code': 'AF', 'continent': 'Asia', 'name': 'Afghanistan', 'capital': 'Kabul'},\n {'timezones': ['America/Antigua'], 'code': 'AG', 'continent': 'North America', 'name': 'Antigua and Barbuda', 'capital': \"St. John's\"},\n {'timezones': ['Europe/Tirane'], 'code': 'AL', 'continent': 'Europe', 'name': 'Albania', 'capital': 'Tirana'},\n {'timezones': ['Asia/Yerevan'], 'code': 'AM', 'continent': 'Asia', 'name': 'Armenia', 'capital': 'Yerevan'},\n {'timezones': ['Africa/Luanda'], 'code': 'AO', 'continent': 'Africa', 'name': 'Angola', 'capital': 'Luanda'},\n {'timezones': ['America/Argentina/Buenos_Aires', 'America/Argentina/Cordoba', 'America/Argentina/Jujuy', 'America/Argentina/Tucuman', 'America/Argentina/Catamarca', 'America/Argentina/La_Rioja', 'America/Argentina/San_Juan', 'America/Argentina/Mendoza', 'America/Argentina/Rio_Gallegos', 'America/Argentina/Ushuaia'], 'code': 'AR', 'continent': 'South America', 'name': 'Argentina', 'capital': 'Buenos Aires'},\n {'timezones': ['Europe/Vienna'], 'code': 'AT', 'continent': 'Europe', 'name': 'Austria', 'capital': 'Vienna'},\n {'timezones': ['Australia/Lord_Howe', 'Australia/Hobart', 'Australia/Currie', 'Australia/Melbourne', 'Australia/Sydney', 'Australia/Broken_Hill', 'Australia/Brisbane', 'Australia/Lindeman', 'Australia/Adelaide', 'Australia/Darwin', 'Australia/Perth'], 'code': 'AU', 'continent': 'Oceania', 'name': 'Australia', 'capital': 'Canberra'},\n {'timezones': ['Asia/Baku'], 'code': 'AZ', 'continent': 'Asia', 'name': 'Azerbaijan', 'capital': 'Baku'},\n {'timezones': ['America/Barbados'], 'code': 'BB', 'continent': 'North America', 'name': 'Barbados', 'capital': 'Bridgetown'},\n {'timezones': ['Asia/Dhaka'], 'code': 'BD', 'continent': 'Asia', 'name': 'Bangladesh', 'capital': 'Dhaka'},\n {'timezones': ['Europe/Brussels'], 'code': 'BE', 'continent': 'Europe', 'name': 'Belgium', 'capital': 'Brussels'},\n {'timezones': ['Africa/Ouagadougou'], 'code': 'BF', 'continent': 'Africa', 'name': 'Burkina Faso', 'capital': 'Ouagadougou'},\n {'timezones': ['Europe/Sofia'], 'code': 'BG', 'continent': 'Europe', 'name': 'Bulgaria', 'capital': 'Sofia'},\n {'timezones': ['Asia/Bahrain'], 'code': 'BH', 'continent': 'Asia', 'name': 'Bahrain', 'capital': 'Manama'},\n {'timezones': ['Africa/Bujumbura'], 'code': 'BI', 'continent': 'Africa', 'name': 'Burundi', 'capital': 'Bujumbura'},\n {'timezones': ['Africa/Porto-Novo'], 'code': 'BJ', 'continent': 'Africa', 'name': 'Benin', 'capital': 'Porto-Novo'},\n {'timezones': ['Asia/Brunei'], 'code': 'BN', 'continent': 'Asia', 'name': 'Brunei Darussalam', 'capital': 'Bandar Seri 
Begawan'},\n {'timezones': ['America/La_Paz'], 'code': 'BO', 'continent': 'South America', 'name': 'Bolivia', 'capital': 'Sucre'},\n {'timezones': ['America/Noronha', 'America/Belem', 'America/Fortaleza', 'America/Recife', 'America/Araguaina', 'America/Maceio', 'America/Bahia', 'America/Sao_Paulo', 'America/Campo_Grande', 'America/Cuiaba', 'America/Porto_Velho', 'America/Boa_Vista', 'America/Manaus', 'America/Eirunepe', 'America/Rio_Branco'], 'code': 'BR', 'continent': 'South America', 'name': 'Brazil', 'capital': 'Bras\\xc3\\xadlia'},\n {'timezones': ['America/Nassau'], 'code': 'BS', 'continent': 'North America', 'name': 'Bahamas', 'capital': 'Nassau'},\n {'timezones': ['Asia/Thimphu'], 'code': 'BT', 'continent': 'Asia', 'name': 'Bhutan', 'capital': 'Thimphu'},\n {'timezones': ['Africa/Gaborone'], 'code': 'BW', 'continent': 'Africa', 'name': 'Botswana', 'capital': 'Gaborone'},\n {'timezones': ['Europe/Minsk'], 'code': 'BY', 'continent': 'Europe', 'name': 'Belarus', 'capital': 'Minsk'},\n {'timezones': ['America/Belize'], 'code': 'BZ', 'continent': 'North America', 'name': 'Belize', 'capital': 'Belmopan'},\n {'timezones': ['America/St_Johns', 'America/Halifax', 'America/Glace_Bay', 'America/Moncton', 'America/Goose_Bay', 'America/Blanc-Sablon', 'America/Montreal', 'America/Toronto', 'America/Nipigon', 'America/Thunder_Bay', 'America/Pangnirtung', 'America/Iqaluit', 'America/Atikokan', 'America/Rankin_Inlet', 'America/Winnipeg', 'America/Rainy_River', 'America/Cambridge_Bay', 'America/Regina', 'America/Swift_Current', 'America/Edmonton', 'America/Yellowknife', 'America/Inuvik', 'America/Dawson_Creek', 'America/Vancouver', 'America/Whitehorse', 'America/Dawson'], 'code': 'CA', 'continent': 'North America', 'name': 'Canada', 'capital': 'Ottawa'},\n {'timezones': ['Africa/Kinshasa', 'Africa/Lubumbashi'], 'code': 'CD', 'continent': 'Africa', 'name': 'Democratic Republic of the Congo', 'capital': 'Kinshasa'},\n {'timezones': ['Africa/Brazzaville'], 'code': 'CG', 'continent': 'Africa', 'name': 'Republic of the Congo', 'capital': 'Brazzaville'},\n {'timezones': ['Africa/Abidjan'], 'code': 'CI', 'continent': 'Africa', 'name': \"C\\xc3\\xb4te d'Ivoire\", 'capital': 'Yamoussoukro'},\n {'timezones': ['America/Santiago', 'Pacific/Easter'], 'code': 'CL', 'continent': 'South America', 'name': 'Chile', 'capital': 'Santiago'},\n {'timezones': ['Africa/Douala'], 'code': 'CM', 'continent': 'Africa', 'name': 'Cameroon', 'capital': 'Yaound\\xc3\\xa9'},\n {'timezones': ['Asia/Shanghai', 'Asia/Harbin', 'Asia/Chongqing', 'Asia/Urumqi', 'Asia/Kashgar'], 'code': 'CN', 'continent': 'Asia', 'name': \"People's Republic of China\", 'capital': 'Beijing'},\n {'timezones': ['America/Bogota'], 'code': 'CO', 'continent': 'South America', 'name': 'Colombia', 'capital': 'Bogot\\xc3\\xa1'},\n {'timezones': ['America/Costa_Rica'], 'code': 'CR', 'continent': 'North America', 'name': 'Costa Rica', 'capital': 'San Jos\\xc3\\xa9'},\n {'timezones': ['America/Havana'], 'code': 'CU', 'continent': 'North America', 'name': 'Cuba', 'capital': 'Havana'},\n {'timezones': ['Atlantic/Cape_Verde'], 'code': 'CV', 'continent': 'Africa', 'name': 'Cape Verde', 'capital': 'Praia'},\n {'timezones': ['Asia/Nicosia'], 'code': 'CY', 'continent': 'Asia', 'name': 'Cyprus', 'capital': 'Nicosia'},\n {'timezones': ['Europe/Prague'], 'code': 'CZ', 'continent': 'Europe', 'name': 'Czech Republic', 'capital': 'Prague'},\n {'timezones': ['Europe/Berlin'], 'code': 'DE', 'continent': 'Europe', 'name': 'Germany', 'capital': 'Berlin'},\n {'timezones': 
['Africa/Djibouti'], 'code': 'DJ', 'continent': 'Africa', 'name': 'Djibouti', 'capital': 'Djibouti City'},\n {'timezones': ['Europe/Copenhagen'], 'code': 'DK', 'continent': 'Europe', 'name': 'Denmark', 'capital': 'Copenhagen'},\n {'timezones': ['America/Dominica'], 'code': 'DM', 'continent': 'North America', 'name': 'Dominica', 'capital': 'Roseau'},\n {'timezones': ['America/Santo_Domingo'], 'code': 'DO', 'continent': 'North America', 'name': 'Dominican Republic', 'capital': 'Santo Domingo'},\n {'timezones': ['America/Guayaquil', 'Pacific/Galapagos'], 'code': 'EC', 'continent': 'South America', 'name': 'Ecuador', 'capital': 'Quito'},\n {'timezones': ['Europe/Tallinn'], 'code': 'EE', 'continent': 'Europe', 'name': 'Estonia', 'capital': 'Tallinn'},\n {'timezones': ['Africa/Cairo'], 'code': 'EG', 'continent': 'Africa', 'name': 'Egypt', 'capital': 'Cairo'},\n {'timezones': ['Africa/Asmera'], 'code': 'ER', 'continent': 'Africa', 'name': 'Eritrea', 'capital': 'Asmara'},\n {'timezones': ['Africa/Addis_Ababa'], 'code': 'ET', 'continent': 'Africa', 'name': 'Ethiopia', 'capital': 'Addis Ababa'},\n {'timezones': ['Europe/Helsinki'], 'code': 'FI', 'continent': 'Europe', 'name': 'Finland', 'capital': 'Helsinki'},\n {'timezones': ['Pacific/Fiji'], 'code': 'FJ', 'continent': 'Oceania', 'name': 'Fiji', 'capital': 'Suva'},\n {'timezones': ['Europe/Paris'], 'code': 'FR', 'continent': 'Europe', 'name': 'France', 'capital': 'Paris'},\n {'timezones': ['Africa/Libreville'], 'code': 'GA', 'continent': 'Africa', 'name': 'Gabon', 'capital': 'Libreville'},\n {'timezones': ['Asia/Tbilisi'], 'code': 'GE', 'continent': 'Asia', 'name': 'Georgia', 'capital': 'Tbilisi'},\n {'timezones': ['Africa/Accra'], 'code': 'GH', 'continent': 'Africa', 'name': 'Ghana', 'capital': 'Accra'},\n {'timezones': ['Africa/Banjul'], 'code': 'GM', 'continent': 'Africa', 'name': 'The Gambia', 'capital': 'Banjul'},\n {'timezones': ['Africa/Conakry'], 'code': 'GN', 'continent': 'Africa', 'name': 'Guinea', 'capital': 'Conakry'},\n {'timezones': ['Europe/Athens'], 'code': 'GR', 'continent': 'Europe', 'name': 'Greece', 'capital': 'Athens'},\n {'timezones': ['America/Guatemala'], 'code': 'GT', 'continent': 'North America', 'name': 'Guatemala', 'capital': 'Guatemala City'},\n {'timezones': ['America/Guatemala'], 'code': 'GT', 'continent': 'North America', 'name': 'Haiti', 'capital': 'Port-au-Prince'},\n {'timezones': ['Africa/Bissau'], 'code': 'GW', 'continent': 'Africa', 'name': 'Guinea-Bissau', 'capital': 'Bissau'},\n {'timezones': ['America/Guyana'], 'code': 'GY', 'continent': 'South America', 'name': 'Guyana', 'capital': 'Georgetown'},\n {'timezones': ['America/Tegucigalpa'], 'code': 'HN', 'continent': 'North America', 'name': 'Honduras', 'capital': 'Tegucigalpa'},\n {'timezones': ['Europe/Budapest'], 'code': 'HU', 'continent': 'Europe', 'name': 'Hungary', 'capital': 'Budapest'},\n {'timezones': ['Asia/Jakarta', 'Asia/Pontianak', 'Asia/Makassar', 'Asia/Jayapura'], 'code': 'ID', 'continent': 'Asia', 'name': 'Indonesia', 'capital': 'Jakarta'},\n {'timezones': ['Europe/Dublin'], 'code': 'IE', 'continent': 'Europe', 'name': 'Republic of Ireland', 'capital': 'Dublin'},\n {'timezones': ['Asia/Jerusalem'], 'code': 'IL', 'continent': 'Asia', 'name': 'Israel', 'capital': 'Jerusalem'},\n {'timezones': ['Asia/Calcutta'], 'code': 'IN', 'continent': 'Asia', 'name': 'India', 'capital': 'New Delhi'},\n {'timezones': ['Asia/Baghdad'], 'code': 'IQ', 'continent': 'Asia', 'name': 'Iraq', 'capital': 'Baghdad'},\n {'timezones': ['Asia/Tehran'], 'code': 'IR', 
'continent': 'Asia', 'name': 'Iran', 'capital': 'Tehran'},\n {'timezones': ['Atlantic/Reykjavik'], 'code': 'IS', 'continent': 'Europe', 'name': 'Iceland', 'capital': 'Reykjav\\xc3\\xadk'},\n {'timezones': ['Europe/Rome'], 'code': 'IT', 'continent': 'Europe', 'name': 'Italy', 'capital': 'Rome'},\n {'timezones': ['America/Jamaica'], 'code': 'JM', 'continent': 'North America', 'name': 'Jamaica', 'capital': 'Kingston'},\n {'timezones': ['Asia/Amman'], 'code': 'JO', 'continent': 'Asia', 'name': 'Jordan', 'capital': 'Amman'},\n {'timezones': ['Asia/Tokyo'], 'code': 'JP', 'continent': 'Asia', 'name': 'Japan', 'capital': 'Tokyo'},\n {'timezones': ['Africa/Nairobi'], 'code': 'KE', 'continent': 'Africa', 'name': 'Kenya', 'capital': 'Nairobi'},\n {'timezones': ['Asia/Bishkek'], 'code': 'KG', 'continent': 'Asia', 'name': 'Kyrgyzstan', 'capital': 'Bishkek'},\n {'timezones': ['Pacific/Tarawa', 'Pacific/Enderbury', 'Pacific/Kiritimati'], 'code': 'KI', 'continent': 'Oceania', 'name': 'Kiribati', 'capital': 'Tarawa'},\n {'timezones': ['Asia/Pyongyang'], 'code': 'KP', 'continent': 'Asia', 'name': 'North Korea', 'capital': 'Pyongyang'},\n {'timezones': ['Asia/Seoul'], 'code': 'KR', 'continent': 'Asia', 'name': 'South Korea', 'capital': 'Seoul'},\n {'timezones': ['Asia/Kuwait'], 'code': 'KW', 'continent': 'Asia', 'name': 'Kuwait', 'capital': 'Kuwait City'},\n {'timezones': ['Asia/Beirut'], 'code': 'LB', 'continent': 'Asia', 'name': 'Lebanon', 'capital': 'Beirut'},\n {'timezones': ['Europe/Vaduz'], 'code': 'LI', 'continent': 'Europe', 'name': 'Liechtenstein', 'capital': 'Vaduz'},\n {'timezones': ['Africa/Monrovia'], 'code': 'LR', 'continent': 'Africa', 'name': 'Liberia', 'capital': 'Monrovia'},\n {'timezones': ['Africa/Maseru'], 'code': 'LS', 'continent': 'Africa', 'name': 'Lesotho', 'capital': 'Maseru'},\n {'timezones': ['Europe/Vilnius'], 'code': 'LT', 'continent': 'Europe', 'name': 'Lithuania', 'capital': 'Vilnius'},\n {'timezones': ['Europe/Luxembourg'], 'code': 'LU', 'continent': 'Europe', 'name': 'Luxembourg', 'capital': 'Luxembourg City'},\n {'timezones': ['Europe/Riga'], 'code': 'LV', 'continent': 'Europe', 'name': 'Latvia', 'capital': 'Riga'},\n {'timezones': ['Africa/Tripoli'], 'code': 'LY', 'continent': 'Africa', 'name': 'Libya', 'capital': 'Tripoli'},\n {'timezones': ['Indian/Antananarivo'], 'code': 'MG', 'continent': 'Africa', 'name': 'Madagascar', 'capital': 'Antananarivo'},\n {'timezones': ['Pacific/Majuro', 'Pacific/Kwajalein'], 'code': 'MH', 'continent': 'Oceania', 'name': 'Marshall Islands', 'capital': 'Majuro'},\n {'timezones': ['Europe/Skopje'], 'code': 'MK', 'continent': 'Europe', 'name': 'Macedonia', 'capital': 'Skopje'},\n {'timezones': ['Africa/Bamako'], 'code': 'ML', 'continent': 'Africa', 'name': 'Mali', 'capital': 'Bamako'},\n {'timezones': ['Asia/Rangoon'], 'code': 'MM', 'continent': 'Asia', 'name': 'Myanmar', 'capital': 'Naypyidaw'},\n {'timezones': ['Asia/Ulaanbaatar', 'Asia/Hovd', 'Asia/Choibalsan'], 'code': 'MN', 'continent': 'Asia', 'name': 'Mongolia', 'capital': 'Ulaanbaatar'},\n {'timezones': ['Africa/Nouakchott'], 'code': 'MR', 'continent': 'Africa', 'name': 'Mauritania', 'capital': 'Nouakchott'},\n {'timezones': ['Europe/Malta'], 'code': 'MT', 'continent': 'Europe', 'name': 'Malta', 'capital': 'Valletta'},\n {'timezones': ['Indian/Mauritius'], 'code': 'MU', 'continent': 'Africa', 'name': 'Mauritius', 'capital': 'Port Louis'},\n {'timezones': ['Indian/Maldives'], 'code': 'MV', 'continent': 'Asia', 'name': 'Maldives', 'capital': 'Mal\\xc3\\xa9'},\n {'timezones': 
['Africa/Blantyre'], 'code': 'MW', 'continent': 'Africa', 'name': 'Malawi', 'capital': 'Lilongwe'},\n {'timezones': ['America/Mexico_City', 'America/Cancun', 'America/Merida', 'America/Monterrey', 'America/Mazatlan', 'America/Chihuahua', 'America/Hermosillo', 'America/Tijuana'], 'code': 'MX', 'continent': 'North America', 'name': 'Mexico', 'capital': 'Mexico City'},\n {'timezones': ['Asia/Kuala_Lumpur', 'Asia/Kuching'], 'code': 'MY', 'continent': 'Asia', 'name': 'Malaysia', 'capital': 'Kuala Lumpur'},\n {'timezones': ['Africa/Maputo'], 'code': 'MZ', 'continent': 'Africa', 'name': 'Mozambique', 'capital': 'Maputo'},\n {'timezones': ['Africa/Windhoek'], 'code': 'NA', 'continent': 'Africa', 'name': 'Namibia', 'capital': 'Windhoek'},\n {'timezones': ['Africa/Niamey'], 'code': 'NE', 'continent': 'Africa', 'name': 'Niger', 'capital': 'Niamey'},\n {'timezones': ['Africa/Lagos'], 'code': 'NG', 'continent': 'Africa', 'name': 'Nigeria', 'capital': 'Abuja'},\n {'timezones': ['America/Managua'], 'code': 'NI', 'continent': 'North America', 'name': 'Nicaragua', 'capital': 'Managua'},\n {'timezones': ['Europe/Amsterdam'], 'code': 'NL', 'continent': 'Europe', 'name': 'Kingdom of the Netherlands', 'capital': 'Amsterdam'},\n {'timezones': ['Europe/Oslo'], 'code': 'NO', 'continent': 'Europe', 'name': 'Norway', 'capital': 'Oslo'},\n {'timezones': ['Asia/Katmandu'], 'code': 'NP', 'continent': 'Asia', 'name': 'Nepal', 'capital': 'Kathmandu'},\n {'timezones': ['Pacific/Nauru'], 'code': 'NR', 'continent': 'Oceania', 'name': 'Nauru', 'capital': 'Yaren'},\n {'timezones': ['Pacific/Auckland', 'Pacific/Chatham'], 'code': 'NZ', 'continent': 'Oceania', 'name': 'New Zealand', 'capital': 'Wellington'},\n {'timezones': ['Asia/Muscat'], 'code': 'OM', 'continent': 'Asia', 'name': 'Oman', 'capital': 'Muscat'},\n {'timezones': ['America/Panama'], 'code': 'PA', 'continent': 'North America', 'name': 'Panama', 'capital': 'Panama City'},\n {'timezones': ['America/Lima'], 'code': 'PE', 'continent': 'South America', 'name': 'Peru', 'capital': 'Lima'},\n {'timezones': ['Pacific/Port_Moresby'], 'code': 'PG', 'continent': 'Oceania', 'name': 'Papua New Guinea', 'capital': 'Port Moresby'},\n {'timezones': ['Asia/Manila'], 'code': 'PH', 'continent': 'Asia', 'name': 'Philippines', 'capital': 'Manila'},\n {'timezones': ['Asia/Karachi'], 'code': 'PK', 'continent': 'Asia', 'name': 'Pakistan', 'capital': 'Islamabad'},\n {'timezones': ['Europe/Warsaw'], 'code': 'PL', 'continent': 'Europe', 'name': 'Poland', 'capital': 'Warsaw'},\n {'timezones': ['Europe/Lisbon', 'Atlantic/Madeira', 'Atlantic/Azores'], 'code': 'PT', 'continent': 'Europe', 'name': 'Portugal', 'capital': 'Lisbon'},\n {'timezones': ['Pacific/Palau'], 'code': 'PW', 'continent': 'Oceania', 'name': 'Palau', 'capital': 'Ngerulmud'},\n {'timezones': ['America/Asuncion'], 'code': 'PY', 'continent': 'South America', 'name': 'Paraguay', 'capital': 'Asunci\\xc3\\xb3n'},\n {'timezones': ['Asia/Qatar'], 'code': 'QA', 'continent': 'Asia', 'name': 'Qatar', 'capital': 'Doha'},\n {'timezones': ['Europe/Bucharest'], 'code': 'RO', 'continent': 'Europe', 'name': 'Romania', 'capital': 'Bucharest'},\n {'timezones': ['Europe/Kaliningrad', 'Europe/Moscow', 'Europe/Volgograd', 'Europe/Samara', 'Asia/Yekaterinburg', 'Asia/Omsk', 'Asia/Novosibirsk', 'Asia/Krasnoyarsk', 'Asia/Irkutsk', 'Asia/Yakutsk', 'Asia/Vladivostok', 'Asia/Sakhalin', 'Asia/Magadan', 'Asia/Kamchatka', 'Asia/Anadyr'], 'code': 'RU', 'continent': 'Europe', 'name': 'Russia', 'capital': 'Moscow'},\n {'timezones': ['Africa/Kigali'], 'code': 
'RW', 'continent': 'Africa', 'name': 'Rwanda', 'capital': 'Kigali'},\n {'timezones': ['Asia/Riyadh'], 'code': 'SA', 'continent': 'Asia', 'name': 'Saudi Arabia', 'capital': 'Riyadh'},\n {'timezones': ['Pacific/Guadalcanal'], 'code': 'SB', 'continent': 'Oceania', 'name': 'Solomon Islands', 'capital': 'Honiara'},\n {'timezones': ['Indian/Mahe'], 'code': 'SC', 'continent': 'Africa', 'name': 'Seychelles', 'capital': 'Victoria'},\n {'timezones': ['Africa/Khartoum'], 'code': 'SD', 'continent': 'Africa', 'name': 'Sudan', 'capital': 'Khartoum'},\n {'timezones': ['Europe/Stockholm'], 'code': 'SE', 'continent': 'Europe', 'name': 'Sweden', 'capital': 'Stockholm'},\n {'timezones': ['Asia/Singapore'], 'code': 'SG', 'continent': 'Asia', 'name': 'Singapore', 'capital': 'Singapore'},\n {'timezones': ['Europe/Ljubljana'], 'code': 'SI', 'continent': 'Europe', 'name': 'Slovenia', 'capital': 'Ljubljana'},\n {'timezones': ['Europe/Bratislava'], 'code': 'SK', 'continent': 'Europe', 'name': 'Slovakia', 'capital': 'Bratislava'},\n {'timezones': ['Africa/Freetown'], 'code': 'SL', 'continent': 'Africa', 'name': 'Sierra Leone', 'capital': 'Freetown'},\n {'timezones': ['Europe/San_Marino'], 'code': 'SM', 'continent': 'Europe', 'name': 'San Marino', 'capital': 'San Marino'},\n {'timezones': ['Africa/Dakar'], 'code': 'SN', 'continent': 'Africa', 'name': 'Senegal', 'capital': 'Dakar'},\n {'timezones': ['Africa/Mogadishu'], 'code': 'SO', 'continent': 'Africa', 'name': 'Somalia', 'capital': 'Mogadishu'},\n {'timezones': ['America/Paramaribo'], 'code': 'SR', 'continent': 'South America', 'name': 'Suriname', 'capital': 'Paramaribo'},\n {'timezones': ['Africa/Sao_Tome'], 'code': 'ST', 'continent': 'Africa', 'name': 'S\\xc3\\xa3o Tom\\xc3\\xa9 and Pr\\xc3\\xadncipe', 'capital': 'S\\xc3\\xa3o Tom\\xc3\\xa9'},\n {'timezones': ['Asia/Damascus'], 'code': 'SY', 'continent': 'Asia', 'name': 'Syria', 'capital': 'Damascus'},\n {'timezones': ['Africa/Lome'], 'code': 'TG', 'continent': 'Africa', 'name': 'Togo', 'capital': 'Lom\\xc3\\xa9'},\n {'timezones': ['Asia/Bangkok'], 'code': 'TH', 'continent': 'Asia', 'name': 'Thailand', 'capital': 'Bangkok'},\n {'timezones': ['Asia/Dushanbe'], 'code': 'TJ', 'continent': 'Asia', 'name': 'Tajikistan', 'capital': 'Dushanbe'},\n {'timezones': ['Asia/Ashgabat'], 'code': 'TM', 'continent': 'Asia', 'name': 'Turkmenistan', 'capital': 'Ashgabat'},\n {'timezones': ['Africa/Tunis'], 'code': 'TN', 'continent': 'Africa', 'name': 'Tunisia', 'capital': 'Tunis'},\n {'timezones': ['Pacific/Tongatapu'], 'code': 'TO', 'continent': 'Oceania', 'name': 'Tonga', 'capital': 'Nuku\\xca\\xbbalofa'},\n {'timezones': ['Europe/Istanbul'], 'code': 'TR', 'continent': 'Asia', 'name': 'Turkey', 'capital': 'Ankara'},\n {'timezones': ['America/Port_of_Spain'], 'code': 'TT', 'continent': 'North America', 'name': 'Trinidad and Tobago', 'capital': 'Port of Spain'},\n {'timezones': ['Pacific/Funafuti'], 'code': 'TV', 'continent': 'Oceania', 'name': 'Tuvalu', 'capital': 'Funafuti'},\n {'timezones': ['Africa/Dar_es_Salaam'], 'code': 'TZ', 'continent': 'Africa', 'name': 'Tanzania', 'capital': 'Dodoma'},\n {'timezones': ['Europe/Kiev', 'Europe/Uzhgorod', 'Europe/Zaporozhye', 'Europe/Simferopol'], 'code': 'UA', 'continent': 'Europe', 'name': 'Ukraine', 'capital': 'Kiev'},\n {'timezones': ['Africa/Kampala'], 'code': 'UG', 'continent': 'Africa', 'name': 'Uganda', 'capital': 'Kampala'},\n {'timezones': ['America/New_York', 'America/Detroit', 'America/Kentucky/Louisville', 'America/Kentucky/Monticello', 'America/Indiana/Indianapolis', 
'America/Indiana/Marengo', 'America/Indiana/Knox', 'America/Indiana/Vevay', 'America/Chicago', 'America/Indiana/Vincennes', 'America/Indiana/Petersburg', 'America/Menominee', 'America/North_Dakota/Center', 'America/North_Dakota/New_Salem', 'America/Denver', 'America/Boise', 'America/Shiprock', 'America/Phoenix', 'America/Los_Angeles', 'America/Anchorage', 'America/Juneau', 'America/Yakutat', 'America/Nome', 'America/Adak', 'Pacific/Honolulu'], 'code': 'US', 'continent': 'North America', 'name': 'United States', 'capital': 'Washington, D.C.'},\n {'timezones': ['America/Montevideo'], 'code': 'UY', 'continent': 'South America', 'name': 'Uruguay', 'capital': 'Montevideo'},\n {'timezones': ['Asia/Samarkand', 'Asia/Tashkent'], 'code': 'UZ', 'continent': 'Asia', 'name': 'Uzbekistan', 'capital': 'Tashkent'},\n {'timezones': ['Europe/Vatican'], 'code': 'VA', 'continent': 'Europe', 'name': 'Vatican City', 'capital': 'Vatican City'},\n {'timezones': ['America/Caracas'], 'code': 'VE', 'continent': 'South America', 'name': 'Venezuela', 'capital': 'Caracas'},\n {'timezones': ['Asia/Saigon'], 'code': 'VN', 'continent': 'Asia', 'name': 'Vietnam', 'capital': 'Hanoi'},\n {'timezones': ['Pacific/Efate'], 'code': 'VU', 'continent': 'Oceania', 'name': 'Vanuatu', 'capital': 'Port Vila'},\n {'timezones': ['Asia/Aden'], 'code': 'YE', 'continent': 'Asia', 'name': 'Yemen', 'capital': \"Sana'a\"},\n {'timezones': ['Africa/Lusaka'], 'code': 'ZM', 'continent': 'Africa', 'name': 'Zambia', 'capital': 'Lusaka'},\n {'timezones': ['Africa/Harare'], 'code': 'ZW', 'continent': 'Africa', 'name': 'Zimbabwe', 'capital': 'Harare'},\n {'timezones': ['Africa/Algiers'], 'code': 'DZ', 'continent': 'Africa', 'name': 'Algeria', 'capital': 'Algiers'},\n {'timezones': ['Europe/Sarajevo'], 'code': 'BA', 'continent': 'Europe', 'name': 'Bosnia and Herzegovina', 'capital': 'Sarajevo'},\n {'timezones': ['Asia/Phnom_Penh'], 'code': 'KH', 'continent': 'Asia', 'name': 'Cambodia', 'capital': 'Phnom Penh'},\n {'timezones': ['Africa/Bangui'], 'code': 'CF', 'continent': 'Africa', 'name': 'Central African Republic', 'capital': 'Bangui'},\n {'timezones': ['Africa/Ndjamena'], 'code': 'TD', 'continent': 'Africa', 'name': 'Chad', 'capital': \"N'Djamena\"},\n {'timezones': ['Indian/Comoro'], 'code': 'KM', 'continent': 'Africa', 'name': 'Comoros', 'capital': 'Moroni'},\n {'timezones': ['Europe/Zagreb'], 'code': 'HR', 'continent': 'Europe', 'name': 'Croatia', 'capital': 'Zagreb'},\n {'timezones': ['Asia/Dili'], 'code': 'TL', 'continent': 'Asia', 'name': 'East Timor', 'capital': 'Dili'},\n {'timezones': ['America/El_Salvador'], 'code': 'SV', 'continent': 'North America', 'name': 'El Salvador', 'capital': 'San Salvador'},\n {'timezones': ['Africa/Malabo'], 'code': 'GQ', 'continent': 'Africa', 'name': 'Equatorial Guinea', 'capital': 'Malabo'},\n {'timezones': ['America/Grenada'], 'code': 'GD', 'continent': 'North America', 'name': 'Grenada', 'capital': \"St. 
George's\"},\n {'timezones': ['Asia/Almaty', 'Asia/Qyzylorda', 'Asia/Aqtobe', 'Asia/Aqtau', 'Asia/Oral'], 'code': 'KZ', 'continent': 'Asia', 'name': 'Kazakhstan', 'capital': 'Astana'},\n {'timezones': ['Asia/Vientiane'], 'code': 'LA', 'continent': 'Asia', 'name': 'Laos', 'capital': 'Vientiane'},\n {'timezones': ['Pacific/Truk', 'Pacific/Ponape', 'Pacific/Kosrae'], 'code': 'FM', 'continent': 'Oceania', 'name': 'Federated States of Micronesia', 'capital': 'Palikir'},\n {'timezones': ['Europe/Chisinau'], 'code': 'MD', 'continent': 'Europe', 'name': 'Moldova', 'capital': 'Chi\\xc5\\x9fin\\xc4\\x83u'},\n {'timezones': ['Europe/Monaco'], 'code': 'MC', 'continent': 'Europe', 'name': 'Monaco', 'capital': 'Monaco'},\n {'timezones': ['Europe/Podgorica'], 'code': 'ME', 'continent': 'Europe', 'name': 'Montenegro', 'capital': 'Podgorica'},\n {'timezones': ['Africa/Casablanca'], 'code': 'MA', 'continent': 'Africa', 'name': 'Morocco', 'capital': 'Rabat'},\n {'timezones': ['America/St_Kitts'], 'code': 'KN', 'continent': 'North America', 'name': 'Saint Kitts and Nevis', 'capital': 'Basseterre'},\n {'timezones': ['America/St_Lucia'], 'code': 'LC', 'continent': 'North America', 'name': 'Saint Lucia', 'capital': 'Castries'},\n {'timezones': ['America/St_Vincent'], 'code': 'VC', 'continent': 'North America', 'name': 'Saint Vincent and the Grenadines', 'capital': 'Kingstown'},\n {'timezones': ['Pacific/Apia'], 'code': 'WS', 'continent': 'Oceania', 'name': 'Samoa', 'capital': 'Apia'},\n {'timezones': ['Europe/Belgrade'], 'code': 'RS', 'continent': 'Europe', 'name': 'Serbia', 'capital': 'Belgrade'},\n {'timezones': ['Africa/Johannesburg'], 'code': 'ZA', 'continent': 'Africa', 'name': 'South Africa', 'capital': 'Pretoria'},\n {'timezones': ['Europe/Madrid', 'Africa/Ceuta', 'Atlantic/Canary'], 'code': 'ES', 'continent': 'Europe', 'name': 'Spain', 'capital': 'Madrid'},\n {'timezones': ['Asia/Colombo'], 'code': 'LK', 'continent': 'Asia', 'name': 'Sri Lanka', 'capital': 'Sri Jayewardenepura Kotte'},\n {'timezones': ['Africa/Mbabane'], 'code': 'SZ', 'continent': 'Africa', 'name': 'Swaziland', 'capital': 'Mbabane'},\n {'timezones': ['Europe/Zurich'], 'code': 'CH', 'continent': 'Europe', 'name': 'Switzerland', 'capital': 'Bern'},\n {'timezones': ['Asia/Dubai'], 'code': 'AE', 'continent': 'Asia', 'name': 'United Arab Emirates', 'capital': 'Abu Dhabi'},\n {'timezones': ['Europe/London'], 'code': 'GB', 'continent': 'Europe', 'name': 'United Kingdom', 'capital': 'London'},\n ]\n\n regex = re.compile(timedelta_pattern)\n\n @classmethod\n def unix_time(cls):\n \"\"\"\n Get a timestamp between January 1, 1970 and now\n :example 1061306726\n \"\"\"\n return random.randint(0, int(time()))\n\n @classmethod\n def date_time(cls):\n \"\"\"\n Get a datetime object for a date between January 1, 1970 and now\n :example DateTime('2005-08-16 20:39:21')\n :return datetime\n \"\"\"\n return datetime.fromtimestamp(cls.unix_time())\n\n @classmethod\n def date_time_ad(cls):\n \"\"\"\n Get a datetime object for a date between January 1, 001 and now\n :example DateTime('1265-03-22 21:15:52')\n :return datetime\n \"\"\"\n ts = random.randint(-62135600400, int(time()))\n # NOTE: using datetime.fromtimestamp(ts) directly will raise\n # a \"ValueError: timestamp out of range for platform time_t\"\n # on some platforms due to system C functions;\n # see http://stackoverflow.com/a/10588133/2315612\n return datetime.fromtimestamp(0) + timedelta(seconds=ts)\n\n @classmethod\n def iso8601(cls):\n \"\"\"\n :example '2003-10-21T16:05:52+0000'\n \"\"\"\n 
return cls.date_time().isoformat()\n\n @classmethod\n def date(cls, pattern='%Y-%m-%d'):\n \"\"\"\n Get a date string between January 1, 1970 and now\n :param pattern format\n :example '2008-11-27'\n \"\"\"\n return cls.date_time().strftime(pattern)\n\n @classmethod\n def time(cls, pattern='%H:%M:%S'):\n \"\"\"\n Get a time string (24h format by default)\n :param pattern format\n :example '15:02:34'\n \"\"\"\n return cls.date_time().time().strftime(pattern)\n\n @classmethod\n def _parse_date_time(cls, text):\n if isinstance(text, (datetime, date, real_datetime, real_date)):\n return datetime_to_timestamp(text)\n now = datetime.now()\n if isinstance(text, timedelta):\n return datetime_to_timestamp(now - text)\n if is_string(text):\n if text == 'now':\n return datetime_to_timestamp(datetime.now())\n parts = cls.regex.match(text)\n if not parts:\n return\n parts = parts.groupdict()\n time_params = {}\n for (name, param) in parts.items():\n if param:\n time_params[name] = int(param)\n\n if 'years' in time_params:\n if 'days' not in time_params: time_params['days'] = 0\n time_params['days'] += 365.24 * time_params.pop('years')\n\n return datetime_to_timestamp(now + timedelta(**time_params))\n if isinstance(text, int):\n return datetime_to_timestamp(now + timedelta(text))\n raise ValueError(\"Invalid format for date '{0}'\".format(text))\n\n @classmethod\n def date_time_between(cls, start_date='-30y', end_date='now'):\n \"\"\"\n Get a DateTime object based on a random date between two given dates.\n Accepts date strings that can be recognized by strtotime().\n\n :param start_date Defaults to 30 years ago\n :param end_date Defaults to \"now\"\n :example DateTime('1999-02-02 11:42:52')\n :return DateTime\n \"\"\"\n start_date = cls._parse_date_time(start_date)\n end_date = cls._parse_date_time(end_date)\n timestamp = random.randint(start_date, end_date)\n return datetime.fromtimestamp(timestamp)\n\n @classmethod\n def date_time_this_century(cls):\n \"\"\"\n :example DateTime('1964-04-04 11:02:02')\n \"\"\"\n return cls.date_time_between('-100y')\n\n @classmethod\n def date_time_this_decade(cls):\n \"\"\"\n :example DateTime('2004-04-04 11:02:02')\n \"\"\"\n return cls.date_time_between('-10y')\n\n @classmethod\n def date_time_this_year(cls):\n \"\"\"\n :example DateTime('2012-04-04 11:02:02')\n \"\"\"\n return cls.date_time_between('-1y')\n\n @classmethod\n def date_time_this_month(cls):\n \"\"\"\n :example DateTime('2012-04-04 11:02:02')\n \"\"\"\n return cls.date_time_between('-30d')\n\n @classmethod\n def am_pm(cls):\n return cls.date('%p')\n\n @classmethod\n def day_of_month(cls):\n return cls.date('%d')\n\n @classmethod\n def day_of_week(cls):\n return cls.date('%A')\n\n @classmethod\n def month(cls):\n return cls.date('%m')\n\n @classmethod\n def month_name(cls):\n return cls.date('%B')\n\n @classmethod\n def year(cls):\n return cls.date('%Y')\n\n @classmethod\n def century(cls):\n \"\"\"\n :example 'XVII'\n \"\"\"\n return cls.random_element(cls.centuries)\n\n @classmethod\n def timezone(cls):\n return cls.random_element(cls.countries)['timezones'].pop(0)\n\n\n\n",
"path": "faker/providers/date_time.py"
}
] | [
{
"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\nfrom . import BaseProvider\nimport random\nimport re\nfrom time import time, mktime\nfrom datetime import timedelta\nfrom faker.utils.datetime_safe import date, datetime, real_date, real_datetime\nfrom faker.utils import is_string\n\n\ndef datetime_to_timestamp(dt):\n return mktime(dt.timetuple())\n\n\ntimedelta_pattern = r''\nfor name, sym in [('years', 'y'), ('weeks', 'w'), ('days', 'd'), ('hours', 'h'), ('minutes', 'm'), ('seconds', 's')]:\n timedelta_pattern += r'((?P<{0}>(?:\\+|-)\\d+?){1})?'.format(name, sym)\n\n\nclass Provider(BaseProvider):\n centuries = ['I', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII', 'IX', 'X', 'XI', 'XII', 'XIII', 'XIV', 'XV', 'XVI',\n 'XVII', 'XVIII', 'XIX', 'XX', 'XXI']\n\n countries = [\n {'timezones': ['Europe/Andorra'], 'code': 'AD', 'continent': 'Europe', 'name': 'Andorra', 'capital': 'Andorra la Vella'},\n {'timezones': ['Asia/Kabul'], 'code': 'AF', 'continent': 'Asia', 'name': 'Afghanistan', 'capital': 'Kabul'},\n {'timezones': ['America/Antigua'], 'code': 'AG', 'continent': 'North America', 'name': 'Antigua and Barbuda', 'capital': \"St. John's\"},\n {'timezones': ['Europe/Tirane'], 'code': 'AL', 'continent': 'Europe', 'name': 'Albania', 'capital': 'Tirana'},\n {'timezones': ['Asia/Yerevan'], 'code': 'AM', 'continent': 'Asia', 'name': 'Armenia', 'capital': 'Yerevan'},\n {'timezones': ['Africa/Luanda'], 'code': 'AO', 'continent': 'Africa', 'name': 'Angola', 'capital': 'Luanda'},\n {'timezones': ['America/Argentina/Buenos_Aires', 'America/Argentina/Cordoba', 'America/Argentina/Jujuy', 'America/Argentina/Tucuman', 'America/Argentina/Catamarca', 'America/Argentina/La_Rioja', 'America/Argentina/San_Juan', 'America/Argentina/Mendoza', 'America/Argentina/Rio_Gallegos', 'America/Argentina/Ushuaia'], 'code': 'AR', 'continent': 'South America', 'name': 'Argentina', 'capital': 'Buenos Aires'},\n {'timezones': ['Europe/Vienna'], 'code': 'AT', 'continent': 'Europe', 'name': 'Austria', 'capital': 'Vienna'},\n {'timezones': ['Australia/Lord_Howe', 'Australia/Hobart', 'Australia/Currie', 'Australia/Melbourne', 'Australia/Sydney', 'Australia/Broken_Hill', 'Australia/Brisbane', 'Australia/Lindeman', 'Australia/Adelaide', 'Australia/Darwin', 'Australia/Perth'], 'code': 'AU', 'continent': 'Oceania', 'name': 'Australia', 'capital': 'Canberra'},\n {'timezones': ['Asia/Baku'], 'code': 'AZ', 'continent': 'Asia', 'name': 'Azerbaijan', 'capital': 'Baku'},\n {'timezones': ['America/Barbados'], 'code': 'BB', 'continent': 'North America', 'name': 'Barbados', 'capital': 'Bridgetown'},\n {'timezones': ['Asia/Dhaka'], 'code': 'BD', 'continent': 'Asia', 'name': 'Bangladesh', 'capital': 'Dhaka'},\n {'timezones': ['Europe/Brussels'], 'code': 'BE', 'continent': 'Europe', 'name': 'Belgium', 'capital': 'Brussels'},\n {'timezones': ['Africa/Ouagadougou'], 'code': 'BF', 'continent': 'Africa', 'name': 'Burkina Faso', 'capital': 'Ouagadougou'},\n {'timezones': ['Europe/Sofia'], 'code': 'BG', 'continent': 'Europe', 'name': 'Bulgaria', 'capital': 'Sofia'},\n {'timezones': ['Asia/Bahrain'], 'code': 'BH', 'continent': 'Asia', 'name': 'Bahrain', 'capital': 'Manama'},\n {'timezones': ['Africa/Bujumbura'], 'code': 'BI', 'continent': 'Africa', 'name': 'Burundi', 'capital': 'Bujumbura'},\n {'timezones': ['Africa/Porto-Novo'], 'code': 'BJ', 'continent': 'Africa', 'name': 'Benin', 'capital': 'Porto-Novo'},\n {'timezones': ['Asia/Brunei'], 'code': 'BN', 'continent': 'Asia', 'name': 'Brunei Darussalam', 'capital': 'Bandar Seri 
Begawan'},\n {'timezones': ['America/La_Paz'], 'code': 'BO', 'continent': 'South America', 'name': 'Bolivia', 'capital': 'Sucre'},\n {'timezones': ['America/Noronha', 'America/Belem', 'America/Fortaleza', 'America/Recife', 'America/Araguaina', 'America/Maceio', 'America/Bahia', 'America/Sao_Paulo', 'America/Campo_Grande', 'America/Cuiaba', 'America/Porto_Velho', 'America/Boa_Vista', 'America/Manaus', 'America/Eirunepe', 'America/Rio_Branco'], 'code': 'BR', 'continent': 'South America', 'name': 'Brazil', 'capital': 'Bras\\xc3\\xadlia'},\n {'timezones': ['America/Nassau'], 'code': 'BS', 'continent': 'North America', 'name': 'Bahamas', 'capital': 'Nassau'},\n {'timezones': ['Asia/Thimphu'], 'code': 'BT', 'continent': 'Asia', 'name': 'Bhutan', 'capital': 'Thimphu'},\n {'timezones': ['Africa/Gaborone'], 'code': 'BW', 'continent': 'Africa', 'name': 'Botswana', 'capital': 'Gaborone'},\n {'timezones': ['Europe/Minsk'], 'code': 'BY', 'continent': 'Europe', 'name': 'Belarus', 'capital': 'Minsk'},\n {'timezones': ['America/Belize'], 'code': 'BZ', 'continent': 'North America', 'name': 'Belize', 'capital': 'Belmopan'},\n {'timezones': ['America/St_Johns', 'America/Halifax', 'America/Glace_Bay', 'America/Moncton', 'America/Goose_Bay', 'America/Blanc-Sablon', 'America/Montreal', 'America/Toronto', 'America/Nipigon', 'America/Thunder_Bay', 'America/Pangnirtung', 'America/Iqaluit', 'America/Atikokan', 'America/Rankin_Inlet', 'America/Winnipeg', 'America/Rainy_River', 'America/Cambridge_Bay', 'America/Regina', 'America/Swift_Current', 'America/Edmonton', 'America/Yellowknife', 'America/Inuvik', 'America/Dawson_Creek', 'America/Vancouver', 'America/Whitehorse', 'America/Dawson'], 'code': 'CA', 'continent': 'North America', 'name': 'Canada', 'capital': 'Ottawa'},\n {'timezones': ['Africa/Kinshasa', 'Africa/Lubumbashi'], 'code': 'CD', 'continent': 'Africa', 'name': 'Democratic Republic of the Congo', 'capital': 'Kinshasa'},\n {'timezones': ['Africa/Brazzaville'], 'code': 'CG', 'continent': 'Africa', 'name': 'Republic of the Congo', 'capital': 'Brazzaville'},\n {'timezones': ['Africa/Abidjan'], 'code': 'CI', 'continent': 'Africa', 'name': \"C\\xc3\\xb4te d'Ivoire\", 'capital': 'Yamoussoukro'},\n {'timezones': ['America/Santiago', 'Pacific/Easter'], 'code': 'CL', 'continent': 'South America', 'name': 'Chile', 'capital': 'Santiago'},\n {'timezones': ['Africa/Douala'], 'code': 'CM', 'continent': 'Africa', 'name': 'Cameroon', 'capital': 'Yaound\\xc3\\xa9'},\n {'timezones': ['Asia/Shanghai', 'Asia/Harbin', 'Asia/Chongqing', 'Asia/Urumqi', 'Asia/Kashgar'], 'code': 'CN', 'continent': 'Asia', 'name': \"People's Republic of China\", 'capital': 'Beijing'},\n {'timezones': ['America/Bogota'], 'code': 'CO', 'continent': 'South America', 'name': 'Colombia', 'capital': 'Bogot\\xc3\\xa1'},\n {'timezones': ['America/Costa_Rica'], 'code': 'CR', 'continent': 'North America', 'name': 'Costa Rica', 'capital': 'San Jos\\xc3\\xa9'},\n {'timezones': ['America/Havana'], 'code': 'CU', 'continent': 'North America', 'name': 'Cuba', 'capital': 'Havana'},\n {'timezones': ['Atlantic/Cape_Verde'], 'code': 'CV', 'continent': 'Africa', 'name': 'Cape Verde', 'capital': 'Praia'},\n {'timezones': ['Asia/Nicosia'], 'code': 'CY', 'continent': 'Asia', 'name': 'Cyprus', 'capital': 'Nicosia'},\n {'timezones': ['Europe/Prague'], 'code': 'CZ', 'continent': 'Europe', 'name': 'Czech Republic', 'capital': 'Prague'},\n {'timezones': ['Europe/Berlin'], 'code': 'DE', 'continent': 'Europe', 'name': 'Germany', 'capital': 'Berlin'},\n {'timezones': 
['Africa/Djibouti'], 'code': 'DJ', 'continent': 'Africa', 'name': 'Djibouti', 'capital': 'Djibouti City'},\n {'timezones': ['Europe/Copenhagen'], 'code': 'DK', 'continent': 'Europe', 'name': 'Denmark', 'capital': 'Copenhagen'},\n {'timezones': ['America/Dominica'], 'code': 'DM', 'continent': 'North America', 'name': 'Dominica', 'capital': 'Roseau'},\n {'timezones': ['America/Santo_Domingo'], 'code': 'DO', 'continent': 'North America', 'name': 'Dominican Republic', 'capital': 'Santo Domingo'},\n {'timezones': ['America/Guayaquil', 'Pacific/Galapagos'], 'code': 'EC', 'continent': 'South America', 'name': 'Ecuador', 'capital': 'Quito'},\n {'timezones': ['Europe/Tallinn'], 'code': 'EE', 'continent': 'Europe', 'name': 'Estonia', 'capital': 'Tallinn'},\n {'timezones': ['Africa/Cairo'], 'code': 'EG', 'continent': 'Africa', 'name': 'Egypt', 'capital': 'Cairo'},\n {'timezones': ['Africa/Asmera'], 'code': 'ER', 'continent': 'Africa', 'name': 'Eritrea', 'capital': 'Asmara'},\n {'timezones': ['Africa/Addis_Ababa'], 'code': 'ET', 'continent': 'Africa', 'name': 'Ethiopia', 'capital': 'Addis Ababa'},\n {'timezones': ['Europe/Helsinki'], 'code': 'FI', 'continent': 'Europe', 'name': 'Finland', 'capital': 'Helsinki'},\n {'timezones': ['Pacific/Fiji'], 'code': 'FJ', 'continent': 'Oceania', 'name': 'Fiji', 'capital': 'Suva'},\n {'timezones': ['Europe/Paris'], 'code': 'FR', 'continent': 'Europe', 'name': 'France', 'capital': 'Paris'},\n {'timezones': ['Africa/Libreville'], 'code': 'GA', 'continent': 'Africa', 'name': 'Gabon', 'capital': 'Libreville'},\n {'timezones': ['Asia/Tbilisi'], 'code': 'GE', 'continent': 'Asia', 'name': 'Georgia', 'capital': 'Tbilisi'},\n {'timezones': ['Africa/Accra'], 'code': 'GH', 'continent': 'Africa', 'name': 'Ghana', 'capital': 'Accra'},\n {'timezones': ['Africa/Banjul'], 'code': 'GM', 'continent': 'Africa', 'name': 'The Gambia', 'capital': 'Banjul'},\n {'timezones': ['Africa/Conakry'], 'code': 'GN', 'continent': 'Africa', 'name': 'Guinea', 'capital': 'Conakry'},\n {'timezones': ['Europe/Athens'], 'code': 'GR', 'continent': 'Europe', 'name': 'Greece', 'capital': 'Athens'},\n {'timezones': ['America/Guatemala'], 'code': 'GT', 'continent': 'North America', 'name': 'Guatemala', 'capital': 'Guatemala City'},\n {'timezones': ['America/Guatemala'], 'code': 'GT', 'continent': 'North America', 'name': 'Haiti', 'capital': 'Port-au-Prince'},\n {'timezones': ['Africa/Bissau'], 'code': 'GW', 'continent': 'Africa', 'name': 'Guinea-Bissau', 'capital': 'Bissau'},\n {'timezones': ['America/Guyana'], 'code': 'GY', 'continent': 'South America', 'name': 'Guyana', 'capital': 'Georgetown'},\n {'timezones': ['America/Tegucigalpa'], 'code': 'HN', 'continent': 'North America', 'name': 'Honduras', 'capital': 'Tegucigalpa'},\n {'timezones': ['Europe/Budapest'], 'code': 'HU', 'continent': 'Europe', 'name': 'Hungary', 'capital': 'Budapest'},\n {'timezones': ['Asia/Jakarta', 'Asia/Pontianak', 'Asia/Makassar', 'Asia/Jayapura'], 'code': 'ID', 'continent': 'Asia', 'name': 'Indonesia', 'capital': 'Jakarta'},\n {'timezones': ['Europe/Dublin'], 'code': 'IE', 'continent': 'Europe', 'name': 'Republic of Ireland', 'capital': 'Dublin'},\n {'timezones': ['Asia/Jerusalem'], 'code': 'IL', 'continent': 'Asia', 'name': 'Israel', 'capital': 'Jerusalem'},\n {'timezones': ['Asia/Calcutta'], 'code': 'IN', 'continent': 'Asia', 'name': 'India', 'capital': 'New Delhi'},\n {'timezones': ['Asia/Baghdad'], 'code': 'IQ', 'continent': 'Asia', 'name': 'Iraq', 'capital': 'Baghdad'},\n {'timezones': ['Asia/Tehran'], 'code': 'IR', 
'continent': 'Asia', 'name': 'Iran', 'capital': 'Tehran'},\n {'timezones': ['Atlantic/Reykjavik'], 'code': 'IS', 'continent': 'Europe', 'name': 'Iceland', 'capital': 'Reykjav\\xc3\\xadk'},\n {'timezones': ['Europe/Rome'], 'code': 'IT', 'continent': 'Europe', 'name': 'Italy', 'capital': 'Rome'},\n {'timezones': ['America/Jamaica'], 'code': 'JM', 'continent': 'North America', 'name': 'Jamaica', 'capital': 'Kingston'},\n {'timezones': ['Asia/Amman'], 'code': 'JO', 'continent': 'Asia', 'name': 'Jordan', 'capital': 'Amman'},\n {'timezones': ['Asia/Tokyo'], 'code': 'JP', 'continent': 'Asia', 'name': 'Japan', 'capital': 'Tokyo'},\n {'timezones': ['Africa/Nairobi'], 'code': 'KE', 'continent': 'Africa', 'name': 'Kenya', 'capital': 'Nairobi'},\n {'timezones': ['Asia/Bishkek'], 'code': 'KG', 'continent': 'Asia', 'name': 'Kyrgyzstan', 'capital': 'Bishkek'},\n {'timezones': ['Pacific/Tarawa', 'Pacific/Enderbury', 'Pacific/Kiritimati'], 'code': 'KI', 'continent': 'Oceania', 'name': 'Kiribati', 'capital': 'Tarawa'},\n {'timezones': ['Asia/Pyongyang'], 'code': 'KP', 'continent': 'Asia', 'name': 'North Korea', 'capital': 'Pyongyang'},\n {'timezones': ['Asia/Seoul'], 'code': 'KR', 'continent': 'Asia', 'name': 'South Korea', 'capital': 'Seoul'},\n {'timezones': ['Asia/Kuwait'], 'code': 'KW', 'continent': 'Asia', 'name': 'Kuwait', 'capital': 'Kuwait City'},\n {'timezones': ['Asia/Beirut'], 'code': 'LB', 'continent': 'Asia', 'name': 'Lebanon', 'capital': 'Beirut'},\n {'timezones': ['Europe/Vaduz'], 'code': 'LI', 'continent': 'Europe', 'name': 'Liechtenstein', 'capital': 'Vaduz'},\n {'timezones': ['Africa/Monrovia'], 'code': 'LR', 'continent': 'Africa', 'name': 'Liberia', 'capital': 'Monrovia'},\n {'timezones': ['Africa/Maseru'], 'code': 'LS', 'continent': 'Africa', 'name': 'Lesotho', 'capital': 'Maseru'},\n {'timezones': ['Europe/Vilnius'], 'code': 'LT', 'continent': 'Europe', 'name': 'Lithuania', 'capital': 'Vilnius'},\n {'timezones': ['Europe/Luxembourg'], 'code': 'LU', 'continent': 'Europe', 'name': 'Luxembourg', 'capital': 'Luxembourg City'},\n {'timezones': ['Europe/Riga'], 'code': 'LV', 'continent': 'Europe', 'name': 'Latvia', 'capital': 'Riga'},\n {'timezones': ['Africa/Tripoli'], 'code': 'LY', 'continent': 'Africa', 'name': 'Libya', 'capital': 'Tripoli'},\n {'timezones': ['Indian/Antananarivo'], 'code': 'MG', 'continent': 'Africa', 'name': 'Madagascar', 'capital': 'Antananarivo'},\n {'timezones': ['Pacific/Majuro', 'Pacific/Kwajalein'], 'code': 'MH', 'continent': 'Oceania', 'name': 'Marshall Islands', 'capital': 'Majuro'},\n {'timezones': ['Europe/Skopje'], 'code': 'MK', 'continent': 'Europe', 'name': 'Macedonia', 'capital': 'Skopje'},\n {'timezones': ['Africa/Bamako'], 'code': 'ML', 'continent': 'Africa', 'name': 'Mali', 'capital': 'Bamako'},\n {'timezones': ['Asia/Rangoon'], 'code': 'MM', 'continent': 'Asia', 'name': 'Myanmar', 'capital': 'Naypyidaw'},\n {'timezones': ['Asia/Ulaanbaatar', 'Asia/Hovd', 'Asia/Choibalsan'], 'code': 'MN', 'continent': 'Asia', 'name': 'Mongolia', 'capital': 'Ulaanbaatar'},\n {'timezones': ['Africa/Nouakchott'], 'code': 'MR', 'continent': 'Africa', 'name': 'Mauritania', 'capital': 'Nouakchott'},\n {'timezones': ['Europe/Malta'], 'code': 'MT', 'continent': 'Europe', 'name': 'Malta', 'capital': 'Valletta'},\n {'timezones': ['Indian/Mauritius'], 'code': 'MU', 'continent': 'Africa', 'name': 'Mauritius', 'capital': 'Port Louis'},\n {'timezones': ['Indian/Maldives'], 'code': 'MV', 'continent': 'Asia', 'name': 'Maldives', 'capital': 'Mal\\xc3\\xa9'},\n {'timezones': 
['Africa/Blantyre'], 'code': 'MW', 'continent': 'Africa', 'name': 'Malawi', 'capital': 'Lilongwe'},\n {'timezones': ['America/Mexico_City', 'America/Cancun', 'America/Merida', 'America/Monterrey', 'America/Mazatlan', 'America/Chihuahua', 'America/Hermosillo', 'America/Tijuana'], 'code': 'MX', 'continent': 'North America', 'name': 'Mexico', 'capital': 'Mexico City'},\n {'timezones': ['Asia/Kuala_Lumpur', 'Asia/Kuching'], 'code': 'MY', 'continent': 'Asia', 'name': 'Malaysia', 'capital': 'Kuala Lumpur'},\n {'timezones': ['Africa/Maputo'], 'code': 'MZ', 'continent': 'Africa', 'name': 'Mozambique', 'capital': 'Maputo'},\n {'timezones': ['Africa/Windhoek'], 'code': 'NA', 'continent': 'Africa', 'name': 'Namibia', 'capital': 'Windhoek'},\n {'timezones': ['Africa/Niamey'], 'code': 'NE', 'continent': 'Africa', 'name': 'Niger', 'capital': 'Niamey'},\n {'timezones': ['Africa/Lagos'], 'code': 'NG', 'continent': 'Africa', 'name': 'Nigeria', 'capital': 'Abuja'},\n {'timezones': ['America/Managua'], 'code': 'NI', 'continent': 'North America', 'name': 'Nicaragua', 'capital': 'Managua'},\n {'timezones': ['Europe/Amsterdam'], 'code': 'NL', 'continent': 'Europe', 'name': 'Kingdom of the Netherlands', 'capital': 'Amsterdam'},\n {'timezones': ['Europe/Oslo'], 'code': 'NO', 'continent': 'Europe', 'name': 'Norway', 'capital': 'Oslo'},\n {'timezones': ['Asia/Katmandu'], 'code': 'NP', 'continent': 'Asia', 'name': 'Nepal', 'capital': 'Kathmandu'},\n {'timezones': ['Pacific/Nauru'], 'code': 'NR', 'continent': 'Oceania', 'name': 'Nauru', 'capital': 'Yaren'},\n {'timezones': ['Pacific/Auckland', 'Pacific/Chatham'], 'code': 'NZ', 'continent': 'Oceania', 'name': 'New Zealand', 'capital': 'Wellington'},\n {'timezones': ['Asia/Muscat'], 'code': 'OM', 'continent': 'Asia', 'name': 'Oman', 'capital': 'Muscat'},\n {'timezones': ['America/Panama'], 'code': 'PA', 'continent': 'North America', 'name': 'Panama', 'capital': 'Panama City'},\n {'timezones': ['America/Lima'], 'code': 'PE', 'continent': 'South America', 'name': 'Peru', 'capital': 'Lima'},\n {'timezones': ['Pacific/Port_Moresby'], 'code': 'PG', 'continent': 'Oceania', 'name': 'Papua New Guinea', 'capital': 'Port Moresby'},\n {'timezones': ['Asia/Manila'], 'code': 'PH', 'continent': 'Asia', 'name': 'Philippines', 'capital': 'Manila'},\n {'timezones': ['Asia/Karachi'], 'code': 'PK', 'continent': 'Asia', 'name': 'Pakistan', 'capital': 'Islamabad'},\n {'timezones': ['Europe/Warsaw'], 'code': 'PL', 'continent': 'Europe', 'name': 'Poland', 'capital': 'Warsaw'},\n {'timezones': ['Europe/Lisbon', 'Atlantic/Madeira', 'Atlantic/Azores'], 'code': 'PT', 'continent': 'Europe', 'name': 'Portugal', 'capital': 'Lisbon'},\n {'timezones': ['Pacific/Palau'], 'code': 'PW', 'continent': 'Oceania', 'name': 'Palau', 'capital': 'Ngerulmud'},\n {'timezones': ['America/Asuncion'], 'code': 'PY', 'continent': 'South America', 'name': 'Paraguay', 'capital': 'Asunci\\xc3\\xb3n'},\n {'timezones': ['Asia/Qatar'], 'code': 'QA', 'continent': 'Asia', 'name': 'Qatar', 'capital': 'Doha'},\n {'timezones': ['Europe/Bucharest'], 'code': 'RO', 'continent': 'Europe', 'name': 'Romania', 'capital': 'Bucharest'},\n {'timezones': ['Europe/Kaliningrad', 'Europe/Moscow', 'Europe/Volgograd', 'Europe/Samara', 'Asia/Yekaterinburg', 'Asia/Omsk', 'Asia/Novosibirsk', 'Asia/Krasnoyarsk', 'Asia/Irkutsk', 'Asia/Yakutsk', 'Asia/Vladivostok', 'Asia/Sakhalin', 'Asia/Magadan', 'Asia/Kamchatka', 'Asia/Anadyr'], 'code': 'RU', 'continent': 'Europe', 'name': 'Russia', 'capital': 'Moscow'},\n {'timezones': ['Africa/Kigali'], 'code': 
'RW', 'continent': 'Africa', 'name': 'Rwanda', 'capital': 'Kigali'},\n {'timezones': ['Asia/Riyadh'], 'code': 'SA', 'continent': 'Asia', 'name': 'Saudi Arabia', 'capital': 'Riyadh'},\n {'timezones': ['Pacific/Guadalcanal'], 'code': 'SB', 'continent': 'Oceania', 'name': 'Solomon Islands', 'capital': 'Honiara'},\n {'timezones': ['Indian/Mahe'], 'code': 'SC', 'continent': 'Africa', 'name': 'Seychelles', 'capital': 'Victoria'},\n {'timezones': ['Africa/Khartoum'], 'code': 'SD', 'continent': 'Africa', 'name': 'Sudan', 'capital': 'Khartoum'},\n {'timezones': ['Europe/Stockholm'], 'code': 'SE', 'continent': 'Europe', 'name': 'Sweden', 'capital': 'Stockholm'},\n {'timezones': ['Asia/Singapore'], 'code': 'SG', 'continent': 'Asia', 'name': 'Singapore', 'capital': 'Singapore'},\n {'timezones': ['Europe/Ljubljana'], 'code': 'SI', 'continent': 'Europe', 'name': 'Slovenia', 'capital': 'Ljubljana'},\n {'timezones': ['Europe/Bratislava'], 'code': 'SK', 'continent': 'Europe', 'name': 'Slovakia', 'capital': 'Bratislava'},\n {'timezones': ['Africa/Freetown'], 'code': 'SL', 'continent': 'Africa', 'name': 'Sierra Leone', 'capital': 'Freetown'},\n {'timezones': ['Europe/San_Marino'], 'code': 'SM', 'continent': 'Europe', 'name': 'San Marino', 'capital': 'San Marino'},\n {'timezones': ['Africa/Dakar'], 'code': 'SN', 'continent': 'Africa', 'name': 'Senegal', 'capital': 'Dakar'},\n {'timezones': ['Africa/Mogadishu'], 'code': 'SO', 'continent': 'Africa', 'name': 'Somalia', 'capital': 'Mogadishu'},\n {'timezones': ['America/Paramaribo'], 'code': 'SR', 'continent': 'South America', 'name': 'Suriname', 'capital': 'Paramaribo'},\n {'timezones': ['Africa/Sao_Tome'], 'code': 'ST', 'continent': 'Africa', 'name': 'S\\xc3\\xa3o Tom\\xc3\\xa9 and Pr\\xc3\\xadncipe', 'capital': 'S\\xc3\\xa3o Tom\\xc3\\xa9'},\n {'timezones': ['Asia/Damascus'], 'code': 'SY', 'continent': 'Asia', 'name': 'Syria', 'capital': 'Damascus'},\n {'timezones': ['Africa/Lome'], 'code': 'TG', 'continent': 'Africa', 'name': 'Togo', 'capital': 'Lom\\xc3\\xa9'},\n {'timezones': ['Asia/Bangkok'], 'code': 'TH', 'continent': 'Asia', 'name': 'Thailand', 'capital': 'Bangkok'},\n {'timezones': ['Asia/Dushanbe'], 'code': 'TJ', 'continent': 'Asia', 'name': 'Tajikistan', 'capital': 'Dushanbe'},\n {'timezones': ['Asia/Ashgabat'], 'code': 'TM', 'continent': 'Asia', 'name': 'Turkmenistan', 'capital': 'Ashgabat'},\n {'timezones': ['Africa/Tunis'], 'code': 'TN', 'continent': 'Africa', 'name': 'Tunisia', 'capital': 'Tunis'},\n {'timezones': ['Pacific/Tongatapu'], 'code': 'TO', 'continent': 'Oceania', 'name': 'Tonga', 'capital': 'Nuku\\xca\\xbbalofa'},\n {'timezones': ['Europe/Istanbul'], 'code': 'TR', 'continent': 'Asia', 'name': 'Turkey', 'capital': 'Ankara'},\n {'timezones': ['America/Port_of_Spain'], 'code': 'TT', 'continent': 'North America', 'name': 'Trinidad and Tobago', 'capital': 'Port of Spain'},\n {'timezones': ['Pacific/Funafuti'], 'code': 'TV', 'continent': 'Oceania', 'name': 'Tuvalu', 'capital': 'Funafuti'},\n {'timezones': ['Africa/Dar_es_Salaam'], 'code': 'TZ', 'continent': 'Africa', 'name': 'Tanzania', 'capital': 'Dodoma'},\n {'timezones': ['Europe/Kiev', 'Europe/Uzhgorod', 'Europe/Zaporozhye', 'Europe/Simferopol'], 'code': 'UA', 'continent': 'Europe', 'name': 'Ukraine', 'capital': 'Kiev'},\n {'timezones': ['Africa/Kampala'], 'code': 'UG', 'continent': 'Africa', 'name': 'Uganda', 'capital': 'Kampala'},\n {'timezones': ['America/New_York', 'America/Detroit', 'America/Kentucky/Louisville', 'America/Kentucky/Monticello', 'America/Indiana/Indianapolis', 
'America/Indiana/Marengo', 'America/Indiana/Knox', 'America/Indiana/Vevay', 'America/Chicago', 'America/Indiana/Vincennes', 'America/Indiana/Petersburg', 'America/Menominee', 'America/North_Dakota/Center', 'America/North_Dakota/New_Salem', 'America/Denver', 'America/Boise', 'America/Shiprock', 'America/Phoenix', 'America/Los_Angeles', 'America/Anchorage', 'America/Juneau', 'America/Yakutat', 'America/Nome', 'America/Adak', 'Pacific/Honolulu'], 'code': 'US', 'continent': 'North America', 'name': 'United States', 'capital': 'Washington, D.C.'},\n {'timezones': ['America/Montevideo'], 'code': 'UY', 'continent': 'South America', 'name': 'Uruguay', 'capital': 'Montevideo'},\n {'timezones': ['Asia/Samarkand', 'Asia/Tashkent'], 'code': 'UZ', 'continent': 'Asia', 'name': 'Uzbekistan', 'capital': 'Tashkent'},\n {'timezones': ['Europe/Vatican'], 'code': 'VA', 'continent': 'Europe', 'name': 'Vatican City', 'capital': 'Vatican City'},\n {'timezones': ['America/Caracas'], 'code': 'VE', 'continent': 'South America', 'name': 'Venezuela', 'capital': 'Caracas'},\n {'timezones': ['Asia/Saigon'], 'code': 'VN', 'continent': 'Asia', 'name': 'Vietnam', 'capital': 'Hanoi'},\n {'timezones': ['Pacific/Efate'], 'code': 'VU', 'continent': 'Oceania', 'name': 'Vanuatu', 'capital': 'Port Vila'},\n {'timezones': ['Asia/Aden'], 'code': 'YE', 'continent': 'Asia', 'name': 'Yemen', 'capital': \"Sana'a\"},\n {'timezones': ['Africa/Lusaka'], 'code': 'ZM', 'continent': 'Africa', 'name': 'Zambia', 'capital': 'Lusaka'},\n {'timezones': ['Africa/Harare'], 'code': 'ZW', 'continent': 'Africa', 'name': 'Zimbabwe', 'capital': 'Harare'},\n {'timezones': ['Africa/Algiers'], 'code': 'DZ', 'continent': 'Africa', 'name': 'Algeria', 'capital': 'Algiers'},\n {'timezones': ['Europe/Sarajevo'], 'code': 'BA', 'continent': 'Europe', 'name': 'Bosnia and Herzegovina', 'capital': 'Sarajevo'},\n {'timezones': ['Asia/Phnom_Penh'], 'code': 'KH', 'continent': 'Asia', 'name': 'Cambodia', 'capital': 'Phnom Penh'},\n {'timezones': ['Africa/Bangui'], 'code': 'CF', 'continent': 'Africa', 'name': 'Central African Republic', 'capital': 'Bangui'},\n {'timezones': ['Africa/Ndjamena'], 'code': 'TD', 'continent': 'Africa', 'name': 'Chad', 'capital': \"N'Djamena\"},\n {'timezones': ['Indian/Comoro'], 'code': 'KM', 'continent': 'Africa', 'name': 'Comoros', 'capital': 'Moroni'},\n {'timezones': ['Europe/Zagreb'], 'code': 'HR', 'continent': 'Europe', 'name': 'Croatia', 'capital': 'Zagreb'},\n {'timezones': ['Asia/Dili'], 'code': 'TL', 'continent': 'Asia', 'name': 'East Timor', 'capital': 'Dili'},\n {'timezones': ['America/El_Salvador'], 'code': 'SV', 'continent': 'North America', 'name': 'El Salvador', 'capital': 'San Salvador'},\n {'timezones': ['Africa/Malabo'], 'code': 'GQ', 'continent': 'Africa', 'name': 'Equatorial Guinea', 'capital': 'Malabo'},\n {'timezones': ['America/Grenada'], 'code': 'GD', 'continent': 'North America', 'name': 'Grenada', 'capital': \"St. 
George's\"},\n {'timezones': ['Asia/Almaty', 'Asia/Qyzylorda', 'Asia/Aqtobe', 'Asia/Aqtau', 'Asia/Oral'], 'code': 'KZ', 'continent': 'Asia', 'name': 'Kazakhstan', 'capital': 'Astana'},\n {'timezones': ['Asia/Vientiane'], 'code': 'LA', 'continent': 'Asia', 'name': 'Laos', 'capital': 'Vientiane'},\n {'timezones': ['Pacific/Truk', 'Pacific/Ponape', 'Pacific/Kosrae'], 'code': 'FM', 'continent': 'Oceania', 'name': 'Federated States of Micronesia', 'capital': 'Palikir'},\n {'timezones': ['Europe/Chisinau'], 'code': 'MD', 'continent': 'Europe', 'name': 'Moldova', 'capital': 'Chi\\xc5\\x9fin\\xc4\\x83u'},\n {'timezones': ['Europe/Monaco'], 'code': 'MC', 'continent': 'Europe', 'name': 'Monaco', 'capital': 'Monaco'},\n {'timezones': ['Europe/Podgorica'], 'code': 'ME', 'continent': 'Europe', 'name': 'Montenegro', 'capital': 'Podgorica'},\n {'timezones': ['Africa/Casablanca'], 'code': 'MA', 'continent': 'Africa', 'name': 'Morocco', 'capital': 'Rabat'},\n {'timezones': ['America/St_Kitts'], 'code': 'KN', 'continent': 'North America', 'name': 'Saint Kitts and Nevis', 'capital': 'Basseterre'},\n {'timezones': ['America/St_Lucia'], 'code': 'LC', 'continent': 'North America', 'name': 'Saint Lucia', 'capital': 'Castries'},\n {'timezones': ['America/St_Vincent'], 'code': 'VC', 'continent': 'North America', 'name': 'Saint Vincent and the Grenadines', 'capital': 'Kingstown'},\n {'timezones': ['Pacific/Apia'], 'code': 'WS', 'continent': 'Oceania', 'name': 'Samoa', 'capital': 'Apia'},\n {'timezones': ['Europe/Belgrade'], 'code': 'RS', 'continent': 'Europe', 'name': 'Serbia', 'capital': 'Belgrade'},\n {'timezones': ['Africa/Johannesburg'], 'code': 'ZA', 'continent': 'Africa', 'name': 'South Africa', 'capital': 'Pretoria'},\n {'timezones': ['Europe/Madrid', 'Africa/Ceuta', 'Atlantic/Canary'], 'code': 'ES', 'continent': 'Europe', 'name': 'Spain', 'capital': 'Madrid'},\n {'timezones': ['Asia/Colombo'], 'code': 'LK', 'continent': 'Asia', 'name': 'Sri Lanka', 'capital': 'Sri Jayewardenepura Kotte'},\n {'timezones': ['Africa/Mbabane'], 'code': 'SZ', 'continent': 'Africa', 'name': 'Swaziland', 'capital': 'Mbabane'},\n {'timezones': ['Europe/Zurich'], 'code': 'CH', 'continent': 'Europe', 'name': 'Switzerland', 'capital': 'Bern'},\n {'timezones': ['Asia/Dubai'], 'code': 'AE', 'continent': 'Asia', 'name': 'United Arab Emirates', 'capital': 'Abu Dhabi'},\n {'timezones': ['Europe/London'], 'code': 'GB', 'continent': 'Europe', 'name': 'United Kingdom', 'capital': 'London'},\n ]\n\n regex = re.compile(timedelta_pattern)\n\n @classmethod\n def unix_time(cls):\n \"\"\"\n Get a timestamp between January 1, 1970 and now\n :example 1061306726\n \"\"\"\n return random.randint(0, int(time()))\n\n @classmethod\n def date_time(cls):\n \"\"\"\n Get a datetime object for a date between January 1, 1970 and now\n :example DateTime('2005-08-16 20:39:21')\n :return datetime\n \"\"\"\n return datetime.fromtimestamp(cls.unix_time())\n\n @classmethod\n def date_time_ad(cls):\n \"\"\"\n Get a datetime object for a date between January 1, 001 and now\n :example DateTime('1265-03-22 21:15:52')\n :return datetime\n \"\"\"\n ts = random.randint(-62135600400, int(time()))\n # NOTE: using datetime.fromtimestamp(ts) directly will raise\n # a \"ValueError: timestamp out of range for platform time_t\"\n # on some platforms due to system C functions;\n # see http://stackoverflow.com/a/10588133/2315612\n return datetime.fromtimestamp(0) + timedelta(seconds=ts)\n\n @classmethod\n def iso8601(cls):\n \"\"\"\n :example '2003-10-21T16:05:52+0000'\n \"\"\"\n 
return cls.date_time().isoformat()\n\n @classmethod\n def date(cls, pattern='%Y-%m-%d'):\n \"\"\"\n Get a date string between January 1, 1970 and now\n :param pattern format\n :example '2008-11-27'\n \"\"\"\n return cls.date_time().strftime(pattern)\n\n @classmethod\n def time(cls, pattern='%H:%M:%S'):\n \"\"\"\n Get a time string (24h format by default)\n :param pattern format\n :example '15:02:34'\n \"\"\"\n return cls.date_time().time().strftime(pattern)\n\n @classmethod\n def _parse_date_time(cls, text):\n if isinstance(text, (datetime, date, real_datetime, real_date)):\n return datetime_to_timestamp(text)\n now = datetime.now()\n if isinstance(text, timedelta):\n return datetime_to_timestamp(now - text)\n if is_string(text):\n if text == 'now':\n return datetime_to_timestamp(datetime.now())\n parts = cls.regex.match(text)\n if not parts:\n return\n parts = parts.groupdict()\n time_params = {}\n for (name, param) in parts.items():\n if param:\n time_params[name] = int(param)\n\n if 'years' in time_params:\n if 'days' not in time_params: time_params['days'] = 0\n time_params['days'] += 365.24 * time_params.pop('years')\n\n return datetime_to_timestamp(now + timedelta(**time_params))\n if isinstance(text, int):\n return datetime_to_timestamp(now + timedelta(text))\n raise ValueError(\"Invalid format for date '{0}'\".format(text))\n\n @classmethod\n def date_time_between(cls, start_date='-30y', end_date='now'):\n \"\"\"\n Get a DateTime object based on a random date between two given dates.\n Accepts date strings that can be recognized by strtotime().\n\n :param start_date Defaults to 30 years ago\n :param end_date Defaults to \"now\"\n :example DateTime('1999-02-02 11:42:52')\n :return DateTime\n \"\"\"\n start_date = cls._parse_date_time(start_date)\n end_date = cls._parse_date_time(end_date)\n timestamp = random.randint(start_date, end_date)\n return datetime.fromtimestamp(timestamp)\n\n @classmethod\n def date_time_this_century(cls):\n \"\"\"\n :example DateTime('1964-04-04 11:02:02')\n \"\"\"\n return cls.date_time_between('-100y')\n\n @classmethod\n def date_time_this_decade(cls):\n \"\"\"\n :example DateTime('2004-04-04 11:02:02')\n \"\"\"\n return cls.date_time_between('-10y')\n\n @classmethod\n def date_time_this_year(cls):\n \"\"\"\n :example DateTime('2012-04-04 11:02:02')\n \"\"\"\n return cls.date_time_between('-1y')\n\n @classmethod\n def date_time_this_month(cls):\n \"\"\"\n :example DateTime('2012-04-04 11:02:02')\n \"\"\"\n return cls.date_time_between('-30d')\n\n @classmethod\n def am_pm(cls):\n return cls.date('%p')\n\n @classmethod\n def day_of_month(cls):\n return cls.date('%d')\n\n @classmethod\n def day_of_week(cls):\n return cls.date('%A')\n\n @classmethod\n def month(cls):\n return cls.date('%m')\n\n @classmethod\n def month_name(cls):\n return cls.date('%B')\n\n @classmethod\n def year(cls):\n return cls.date('%Y')\n\n @classmethod\n def century(cls):\n \"\"\"\n :example 'XVII'\n \"\"\"\n return cls.random_element(cls.centuries)\n\n @classmethod\n def timezone(cls):\n return random.choice(cls.random_element(cls.countries)['timezones'])\n",
"path": "faker/providers/date_time.py"
}
] | diff --git a/faker/providers/date_time.py b/faker/providers/date_time.py
index 7d122d40cf..b91b33464b 100644
--- a/faker/providers/date_time.py
+++ b/faker/providers/date_time.py
@@ -382,7 +382,4 @@ def century(cls):
@classmethod
def timezone(cls):
- return cls.random_element(cls.countries)['timezones'].pop(0)
-
-
-
+ return random.choice(cls.random_element(cls.countries)['timezones'])
|
mitmproxy__mitmproxy-6493 | uppercase breaks block_list
#### Problem Description
Using these values for `block_list`:
~~~
/~u AccountsSignInUi/444
/~u accountssigninui/444
~~~
neither one blocks the expected URL:
~~~
https://accounts.google.com/v3/signin/_/AccountsSignInUi/data/batchexecute
~~~
This works:
~~~
/~u .ccounts.ign.n.i/444
~~~
Why does an uppercase character break the search?
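A minimal plain-`re` sketch of the case-sensitivity behaviour in question; this is not mitmproxy's actual option pipeline, just the underlying regex default. It uses the URL and the lowercased pattern from the examples above, and the diff further down resolves the report by compiling `~u` patterns with `re.IGNORECASE`:
~~~
import re

url = "https://accounts.google.com/v3/signin/_/AccountsSignInUi/data/batchexecute"

# Regex matching is case-sensitive by default, so the lowercased pattern
# misses the mixed-case path segment.
print(bool(re.search("accountssigninui", url)))                 # False
# With re.IGNORECASE the same pattern matches regardless of case.
print(bool(re.search("accountssigninui", url, re.IGNORECASE)))  # True
~~~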
#### System Information
Tried with both:
~~~
> mitmproxy --version
Mitmproxy: 8.0.0 binary
Python: 3.10.2
OpenSSL: OpenSSL 1.1.1n 15 Mar 2022
Platform: Windows-10-10.0.18363-SP0
> mitmproxy --version
Mitmproxy: 10.0.0 binary
Python: 3.11.4
OpenSSL: OpenSSL 3.1.2 1 Aug 2023
Platform: Windows-10-10.0.18363-SP0
~~~
| [
{
"content": "\"\"\"\n The following operators are understood:\n\n ~q Request\n ~s Response\n\n Headers:\n\n Patterns are matched against \"name: value\" strings. Field names are\n all-lowercase.\n\n ~a Asset content-type in response. Asset content types are:\n text/javascript\n application/x-javascript\n application/javascript\n text/css\n image/*\n font/*\n application/font-*\n ~h rex Header line in either request or response\n ~hq rex Header in request\n ~hs rex Header in response\n\n ~b rex Expression in the body of either request or response\n ~bq rex Expression in the body of request\n ~bs rex Expression in the body of response\n ~t rex Shortcut for content-type header.\n\n ~d rex Request domain\n ~m rex Method\n ~u rex URL\n ~c CODE Response code.\n rex Equivalent to ~u rex\n\"\"\"\nimport functools\nimport re\nimport sys\nfrom collections.abc import Sequence\nfrom typing import ClassVar\nfrom typing import Protocol\n\nimport pyparsing as pp\n\nfrom mitmproxy import dns\nfrom mitmproxy import flow\nfrom mitmproxy import http\nfrom mitmproxy import tcp\nfrom mitmproxy import udp\n\n\ndef only(*types):\n def decorator(fn):\n @functools.wraps(fn)\n def filter_types(self, flow):\n if isinstance(flow, types):\n return fn(self, flow)\n return False\n\n return filter_types\n\n return decorator\n\n\nclass _Token:\n def dump(self, indent=0, fp=sys.stdout):\n print(\n \"{spacing}{name}{expr}\".format(\n spacing=\"\\t\" * indent,\n name=self.__class__.__name__,\n expr=getattr(self, \"expr\", \"\"),\n ),\n file=fp,\n )\n\n\nclass _Action(_Token):\n code: ClassVar[str]\n help: ClassVar[str]\n\n @classmethod\n def make(klass, s, loc, toks):\n return klass(*toks[1:])\n\n\nclass FErr(_Action):\n code = \"e\"\n help = \"Match error\"\n\n def __call__(self, f):\n return True if f.error else False\n\n\nclass FMarked(_Action):\n code = \"marked\"\n help = \"Match marked flows\"\n\n def __call__(self, f):\n return bool(f.marked)\n\n\nclass FHTTP(_Action):\n code = \"http\"\n help = \"Match HTTP flows\"\n\n @only(http.HTTPFlow)\n def __call__(self, f):\n return True\n\n\nclass FWebSocket(_Action):\n code = \"websocket\"\n help = \"Match WebSocket flows\"\n\n @only(http.HTTPFlow)\n def __call__(self, f: http.HTTPFlow):\n return f.websocket is not None\n\n\nclass FTCP(_Action):\n code = \"tcp\"\n help = \"Match TCP flows\"\n\n @only(tcp.TCPFlow)\n def __call__(self, f):\n return True\n\n\nclass FUDP(_Action):\n code = \"udp\"\n help = \"Match UDP flows\"\n\n @only(udp.UDPFlow)\n def __call__(self, f):\n return True\n\n\nclass FDNS(_Action):\n code = \"dns\"\n help = \"Match DNS flows\"\n\n @only(dns.DNSFlow)\n def __call__(self, f):\n return True\n\n\nclass FReq(_Action):\n code = \"q\"\n help = \"Match request with no response\"\n\n @only(http.HTTPFlow, dns.DNSFlow)\n def __call__(self, f):\n if not f.response:\n return True\n\n\nclass FResp(_Action):\n code = \"s\"\n help = \"Match response\"\n\n @only(http.HTTPFlow, dns.DNSFlow)\n def __call__(self, f):\n return bool(f.response)\n\n\nclass FAll(_Action):\n code = \"all\"\n help = \"Match all flows\"\n\n def __call__(self, f: flow.Flow):\n return True\n\n\nclass _Rex(_Action):\n flags = 0\n is_binary = True\n\n def __init__(self, expr):\n self.expr = expr\n if self.is_binary:\n expr = expr.encode()\n try:\n self.re = re.compile(expr, self.flags)\n except Exception:\n raise ValueError(\"Cannot compile expression.\")\n\n\ndef _check_content_type(rex, message):\n return any(\n name.lower() == b\"content-type\" and rex.search(value)\n for name, value in 
message.headers.fields\n )\n\n\nclass FAsset(_Action):\n code = \"a\"\n help = \"Match asset in response: CSS, JavaScript, images, fonts.\"\n ASSET_TYPES = [\n re.compile(x)\n for x in [\n b\"text/javascript\",\n b\"application/x-javascript\",\n b\"application/javascript\",\n b\"text/css\",\n b\"image/.*\",\n b\"font/.*\",\n b\"application/font.*\",\n ]\n ]\n\n @only(http.HTTPFlow)\n def __call__(self, f):\n if f.response:\n for i in self.ASSET_TYPES:\n if _check_content_type(i, f.response):\n return True\n return False\n\n\nclass FContentType(_Rex):\n code = \"t\"\n help = \"Content-type header\"\n\n @only(http.HTTPFlow)\n def __call__(self, f):\n if _check_content_type(self.re, f.request):\n return True\n elif f.response and _check_content_type(self.re, f.response):\n return True\n return False\n\n\nclass FContentTypeRequest(_Rex):\n code = \"tq\"\n help = \"Request Content-Type header\"\n\n @only(http.HTTPFlow)\n def __call__(self, f):\n return _check_content_type(self.re, f.request)\n\n\nclass FContentTypeResponse(_Rex):\n code = \"ts\"\n help = \"Response Content-Type header\"\n\n @only(http.HTTPFlow)\n def __call__(self, f):\n if f.response:\n return _check_content_type(self.re, f.response)\n return False\n\n\nclass FHead(_Rex):\n code = \"h\"\n help = \"Header\"\n flags = re.MULTILINE\n\n @only(http.HTTPFlow)\n def __call__(self, f):\n if f.request and self.re.search(bytes(f.request.headers)):\n return True\n if f.response and self.re.search(bytes(f.response.headers)):\n return True\n return False\n\n\nclass FHeadRequest(_Rex):\n code = \"hq\"\n help = \"Request header\"\n flags = re.MULTILINE\n\n @only(http.HTTPFlow)\n def __call__(self, f):\n if f.request and self.re.search(bytes(f.request.headers)):\n return True\n\n\nclass FHeadResponse(_Rex):\n code = \"hs\"\n help = \"Response header\"\n flags = re.MULTILINE\n\n @only(http.HTTPFlow)\n def __call__(self, f):\n if f.response and self.re.search(bytes(f.response.headers)):\n return True\n\n\nclass FBod(_Rex):\n code = \"b\"\n help = \"Body\"\n flags = re.DOTALL\n\n @only(http.HTTPFlow, tcp.TCPFlow, udp.UDPFlow, dns.DNSFlow)\n def __call__(self, f):\n if isinstance(f, http.HTTPFlow):\n if (\n f.request\n and (content := f.request.get_content(strict=False)) is not None\n ):\n if self.re.search(content):\n return True\n if (\n f.response\n and (content := f.response.get_content(strict=False)) is not None\n ):\n if self.re.search(content):\n return True\n if f.websocket:\n for wmsg in f.websocket.messages:\n if wmsg.content is not None and self.re.search(wmsg.content):\n return True\n elif isinstance(f, (tcp.TCPFlow, udp.UDPFlow)):\n for msg in f.messages:\n if msg.content is not None and self.re.search(msg.content):\n return True\n elif isinstance(f, dns.DNSFlow):\n if f.request and self.re.search(f.request.content):\n return True\n if f.response and self.re.search(f.response.content):\n return True\n return False\n\n\nclass FBodRequest(_Rex):\n code = \"bq\"\n help = \"Request body\"\n flags = re.DOTALL\n\n @only(http.HTTPFlow, tcp.TCPFlow, udp.UDPFlow, dns.DNSFlow)\n def __call__(self, f):\n if isinstance(f, http.HTTPFlow):\n if (\n f.request\n and (content := f.request.get_content(strict=False)) is not None\n ):\n if self.re.search(content):\n return True\n if f.websocket:\n for wmsg in f.websocket.messages:\n if wmsg.from_client and self.re.search(wmsg.content):\n return True\n elif isinstance(f, (tcp.TCPFlow, udp.UDPFlow)):\n for msg in f.messages:\n if msg.from_client and self.re.search(msg.content):\n return True\n elif 
isinstance(f, dns.DNSFlow):\n if f.request and self.re.search(f.request.content):\n return True\n\n\nclass FBodResponse(_Rex):\n code = \"bs\"\n help = \"Response body\"\n flags = re.DOTALL\n\n @only(http.HTTPFlow, tcp.TCPFlow, udp.UDPFlow, dns.DNSFlow)\n def __call__(self, f):\n if isinstance(f, http.HTTPFlow):\n if (\n f.response\n and (content := f.response.get_content(strict=False)) is not None\n ):\n if self.re.search(content):\n return True\n if f.websocket:\n for wmsg in f.websocket.messages:\n if not wmsg.from_client and self.re.search(wmsg.content):\n return True\n elif isinstance(f, (tcp.TCPFlow, udp.UDPFlow)):\n for msg in f.messages:\n if not msg.from_client and self.re.search(msg.content):\n return True\n elif isinstance(f, dns.DNSFlow):\n if f.response and self.re.search(f.response.content):\n return True\n\n\nclass FMethod(_Rex):\n code = \"m\"\n help = \"Method\"\n flags = re.IGNORECASE\n\n @only(http.HTTPFlow)\n def __call__(self, f):\n return bool(self.re.search(f.request.data.method))\n\n\nclass FDomain(_Rex):\n code = \"d\"\n help = \"Domain\"\n flags = re.IGNORECASE\n is_binary = False\n\n @only(http.HTTPFlow)\n def __call__(self, f):\n return bool(\n self.re.search(f.request.host) or self.re.search(f.request.pretty_host)\n )\n\n\nclass FUrl(_Rex):\n code = \"u\"\n help = \"URL\"\n is_binary = False\n\n # FUrl is special, because it can be \"naked\".\n\n @classmethod\n def make(klass, s, loc, toks):\n if len(toks) > 1:\n toks = toks[1:]\n return klass(*toks)\n\n @only(http.HTTPFlow, dns.DNSFlow)\n def __call__(self, f):\n if not f or not f.request:\n return False\n if isinstance(f, http.HTTPFlow):\n return self.re.search(f.request.pretty_url)\n elif isinstance(f, dns.DNSFlow):\n return f.request.questions and self.re.search(f.request.questions[0].name)\n\n\nclass FSrc(_Rex):\n code = \"src\"\n help = \"Match source address\"\n is_binary = False\n\n def __call__(self, f):\n if not f.client_conn or not f.client_conn.peername:\n return False\n r = f\"{f.client_conn.peername[0]}:{f.client_conn.peername[1]}\"\n return f.client_conn.peername and self.re.search(r)\n\n\nclass FDst(_Rex):\n code = \"dst\"\n help = \"Match destination address\"\n is_binary = False\n\n def __call__(self, f):\n if not f.server_conn or not f.server_conn.address:\n return False\n r = f\"{f.server_conn.address[0]}:{f.server_conn.address[1]}\"\n return f.server_conn.address and self.re.search(r)\n\n\nclass FReplay(_Action):\n code = \"replay\"\n help = \"Match replayed flows\"\n\n def __call__(self, f):\n return f.is_replay is not None\n\n\nclass FReplayClient(_Action):\n code = \"replayq\"\n help = \"Match replayed client request\"\n\n def __call__(self, f):\n return f.is_replay == \"request\"\n\n\nclass FReplayServer(_Action):\n code = \"replays\"\n help = \"Match replayed server response\"\n\n def __call__(self, f):\n return f.is_replay == \"response\"\n\n\nclass FMeta(_Rex):\n code = \"meta\"\n help = \"Flow metadata\"\n flags = re.MULTILINE\n is_binary = False\n\n def __call__(self, f):\n m = \"\\n\".join([f\"{key}: {value}\" for key, value in f.metadata.items()])\n return self.re.search(m)\n\n\nclass FMarker(_Rex):\n code = \"marker\"\n help = \"Match marked flows with specified marker\"\n is_binary = False\n\n def __call__(self, f):\n return self.re.search(f.marked)\n\n\nclass FComment(_Rex):\n code = \"comment\"\n help = \"Flow comment\"\n flags = re.MULTILINE\n is_binary = False\n\n def __call__(self, f):\n return self.re.search(f.comment)\n\n\nclass _Int(_Action):\n def __init__(self, 
num):\n self.num = int(num)\n\n\nclass FCode(_Int):\n code = \"c\"\n help = \"HTTP response code\"\n\n @only(http.HTTPFlow)\n def __call__(self, f):\n if f.response and f.response.status_code == self.num:\n return True\n\n\nclass FAnd(_Token):\n def __init__(self, lst):\n self.lst = lst\n\n def dump(self, indent=0, fp=sys.stdout):\n super().dump(indent, fp)\n for i in self.lst:\n i.dump(indent + 1, fp)\n\n def __call__(self, f):\n return all(i(f) for i in self.lst)\n\n\nclass FOr(_Token):\n def __init__(self, lst):\n self.lst = lst\n\n def dump(self, indent=0, fp=sys.stdout):\n super().dump(indent, fp)\n for i in self.lst:\n i.dump(indent + 1, fp)\n\n def __call__(self, f):\n return any(i(f) for i in self.lst)\n\n\nclass FNot(_Token):\n def __init__(self, itm):\n self.itm = itm[0]\n\n def dump(self, indent=0, fp=sys.stdout):\n super().dump(indent, fp)\n self.itm.dump(indent + 1, fp)\n\n def __call__(self, f):\n return not self.itm(f)\n\n\nfilter_unary: Sequence[type[_Action]] = [\n FAsset,\n FErr,\n FHTTP,\n FMarked,\n FReplay,\n FReplayClient,\n FReplayServer,\n FReq,\n FResp,\n FTCP,\n FUDP,\n FDNS,\n FWebSocket,\n FAll,\n]\nfilter_rex: Sequence[type[_Rex]] = [\n FBod,\n FBodRequest,\n FBodResponse,\n FContentType,\n FContentTypeRequest,\n FContentTypeResponse,\n FDomain,\n FDst,\n FHead,\n FHeadRequest,\n FHeadResponse,\n FMethod,\n FSrc,\n FUrl,\n FMeta,\n FMarker,\n FComment,\n]\nfilter_int = [FCode]\n\n\ndef _make():\n # Order is important - multi-char expressions need to come before narrow\n # ones.\n parts = []\n for cls in filter_unary:\n f = pp.Literal(f\"~{cls.code}\") + pp.WordEnd()\n f.setParseAction(cls.make)\n parts.append(f)\n\n # This is a bit of a hack to simulate Word(pyparsing_unicode.printables),\n # which has a horrible performance with len(pyparsing.pyparsing_unicode.printables) == 1114060\n unicode_words = pp.CharsNotIn(\"()~'\\\"\" + pp.ParserElement.DEFAULT_WHITE_CHARS)\n unicode_words.skipWhitespace = True\n regex = (\n unicode_words\n | pp.QuotedString('\"', escChar=\"\\\\\")\n | pp.QuotedString(\"'\", escChar=\"\\\\\")\n )\n for cls in filter_rex:\n f = pp.Literal(f\"~{cls.code}\") + pp.WordEnd() + regex.copy()\n f.setParseAction(cls.make)\n parts.append(f)\n\n for cls in filter_int:\n f = pp.Literal(f\"~{cls.code}\") + pp.WordEnd() + pp.Word(pp.nums)\n f.setParseAction(cls.make)\n parts.append(f)\n\n # A naked rex is a URL rex:\n f = regex.copy()\n f.setParseAction(FUrl.make)\n parts.append(f)\n\n atom = pp.MatchFirst(parts)\n expr = pp.infixNotation(\n atom,\n [\n (pp.Literal(\"!\").suppress(), 1, pp.opAssoc.RIGHT, lambda x: FNot(*x)),\n (pp.Literal(\"&\").suppress(), 2, pp.opAssoc.LEFT, lambda x: FAnd(*x)),\n (pp.Literal(\"|\").suppress(), 2, pp.opAssoc.LEFT, lambda x: FOr(*x)),\n ],\n )\n expr = pp.OneOrMore(expr)\n return expr.setParseAction(lambda x: FAnd(x) if len(x) != 1 else x)\n\n\nbnf = _make()\n\n\nclass TFilter(Protocol):\n pattern: str\n\n def __call__(self, f: flow.Flow) -> bool:\n ... 
# pragma: no cover\n\n\ndef parse(s: str) -> TFilter:\n \"\"\"\n Parse a filter expression and return the compiled filter function.\n If the filter syntax is invalid, `ValueError` is raised.\n \"\"\"\n if not s:\n raise ValueError(\"Empty filter expression\")\n try:\n flt = bnf.parseString(s, parseAll=True)[0]\n flt.pattern = s\n return flt\n except (pp.ParseException, ValueError) as e:\n raise ValueError(f\"Invalid filter expression: {s!r}\") from e\n\n\ndef match(flt: str | TFilter, flow: flow.Flow) -> bool:\n \"\"\"\n Matches a flow against a compiled filter expression.\n Returns True if matched, False if not.\n\n If flt is a string, it will be compiled as a filter expression.\n If the expression is invalid, ValueError is raised.\n \"\"\"\n if isinstance(flt, str):\n flt = parse(flt)\n if flt:\n return flt(flow)\n return True\n\n\nmatch_all: TFilter = parse(\"~all\")\n\"\"\"A filter function that matches all flows\"\"\"\n\n\nhelp = []\nfor a in filter_unary:\n help.append((f\"~{a.code}\", a.help))\nfor b in filter_rex:\n help.append((f\"~{b.code} regex\", b.help))\nfor c in filter_int:\n help.append((f\"~{c.code} int\", c.help))\nhelp.sort()\nhelp.extend(\n [\n (\"!\", \"unary not\"),\n (\"&\", \"and\"),\n (\"|\", \"or\"),\n (\"(...)\", \"grouping\"),\n ]\n)\n",
"path": "mitmproxy/flowfilter.py"
}
] | [
{
"content": "\"\"\"\n The following operators are understood:\n\n ~q Request\n ~s Response\n\n Headers:\n\n Patterns are matched against \"name: value\" strings. Field names are\n all-lowercase.\n\n ~a Asset content-type in response. Asset content types are:\n text/javascript\n application/x-javascript\n application/javascript\n text/css\n image/*\n font/*\n application/font-*\n ~h rex Header line in either request or response\n ~hq rex Header in request\n ~hs rex Header in response\n\n ~b rex Expression in the body of either request or response\n ~bq rex Expression in the body of request\n ~bs rex Expression in the body of response\n ~t rex Shortcut for content-type header.\n\n ~d rex Request domain\n ~m rex Method\n ~u rex URL\n ~c CODE Response code.\n rex Equivalent to ~u rex\n\"\"\"\nimport functools\nimport re\nimport sys\nfrom collections.abc import Sequence\nfrom typing import ClassVar\nfrom typing import Protocol\n\nimport pyparsing as pp\n\nfrom mitmproxy import dns\nfrom mitmproxy import flow\nfrom mitmproxy import http\nfrom mitmproxy import tcp\nfrom mitmproxy import udp\n\n\ndef only(*types):\n def decorator(fn):\n @functools.wraps(fn)\n def filter_types(self, flow):\n if isinstance(flow, types):\n return fn(self, flow)\n return False\n\n return filter_types\n\n return decorator\n\n\nclass _Token:\n def dump(self, indent=0, fp=sys.stdout):\n print(\n \"{spacing}{name}{expr}\".format(\n spacing=\"\\t\" * indent,\n name=self.__class__.__name__,\n expr=getattr(self, \"expr\", \"\"),\n ),\n file=fp,\n )\n\n\nclass _Action(_Token):\n code: ClassVar[str]\n help: ClassVar[str]\n\n @classmethod\n def make(klass, s, loc, toks):\n return klass(*toks[1:])\n\n\nclass FErr(_Action):\n code = \"e\"\n help = \"Match error\"\n\n def __call__(self, f):\n return True if f.error else False\n\n\nclass FMarked(_Action):\n code = \"marked\"\n help = \"Match marked flows\"\n\n def __call__(self, f):\n return bool(f.marked)\n\n\nclass FHTTP(_Action):\n code = \"http\"\n help = \"Match HTTP flows\"\n\n @only(http.HTTPFlow)\n def __call__(self, f):\n return True\n\n\nclass FWebSocket(_Action):\n code = \"websocket\"\n help = \"Match WebSocket flows\"\n\n @only(http.HTTPFlow)\n def __call__(self, f: http.HTTPFlow):\n return f.websocket is not None\n\n\nclass FTCP(_Action):\n code = \"tcp\"\n help = \"Match TCP flows\"\n\n @only(tcp.TCPFlow)\n def __call__(self, f):\n return True\n\n\nclass FUDP(_Action):\n code = \"udp\"\n help = \"Match UDP flows\"\n\n @only(udp.UDPFlow)\n def __call__(self, f):\n return True\n\n\nclass FDNS(_Action):\n code = \"dns\"\n help = \"Match DNS flows\"\n\n @only(dns.DNSFlow)\n def __call__(self, f):\n return True\n\n\nclass FReq(_Action):\n code = \"q\"\n help = \"Match request with no response\"\n\n @only(http.HTTPFlow, dns.DNSFlow)\n def __call__(self, f):\n if not f.response:\n return True\n\n\nclass FResp(_Action):\n code = \"s\"\n help = \"Match response\"\n\n @only(http.HTTPFlow, dns.DNSFlow)\n def __call__(self, f):\n return bool(f.response)\n\n\nclass FAll(_Action):\n code = \"all\"\n help = \"Match all flows\"\n\n def __call__(self, f: flow.Flow):\n return True\n\n\nclass _Rex(_Action):\n flags = 0\n is_binary = True\n\n def __init__(self, expr):\n self.expr = expr\n if self.is_binary:\n expr = expr.encode()\n try:\n self.re = re.compile(expr, self.flags)\n except Exception:\n raise ValueError(\"Cannot compile expression.\")\n\n\ndef _check_content_type(rex, message):\n return any(\n name.lower() == b\"content-type\" and rex.search(value)\n for name, value in 
message.headers.fields\n )\n\n\nclass FAsset(_Action):\n code = \"a\"\n help = \"Match asset in response: CSS, JavaScript, images, fonts.\"\n ASSET_TYPES = [\n re.compile(x)\n for x in [\n b\"text/javascript\",\n b\"application/x-javascript\",\n b\"application/javascript\",\n b\"text/css\",\n b\"image/.*\",\n b\"font/.*\",\n b\"application/font.*\",\n ]\n ]\n\n @only(http.HTTPFlow)\n def __call__(self, f):\n if f.response:\n for i in self.ASSET_TYPES:\n if _check_content_type(i, f.response):\n return True\n return False\n\n\nclass FContentType(_Rex):\n code = \"t\"\n help = \"Content-type header\"\n\n @only(http.HTTPFlow)\n def __call__(self, f):\n if _check_content_type(self.re, f.request):\n return True\n elif f.response and _check_content_type(self.re, f.response):\n return True\n return False\n\n\nclass FContentTypeRequest(_Rex):\n code = \"tq\"\n help = \"Request Content-Type header\"\n\n @only(http.HTTPFlow)\n def __call__(self, f):\n return _check_content_type(self.re, f.request)\n\n\nclass FContentTypeResponse(_Rex):\n code = \"ts\"\n help = \"Response Content-Type header\"\n\n @only(http.HTTPFlow)\n def __call__(self, f):\n if f.response:\n return _check_content_type(self.re, f.response)\n return False\n\n\nclass FHead(_Rex):\n code = \"h\"\n help = \"Header\"\n flags = re.MULTILINE\n\n @only(http.HTTPFlow)\n def __call__(self, f):\n if f.request and self.re.search(bytes(f.request.headers)):\n return True\n if f.response and self.re.search(bytes(f.response.headers)):\n return True\n return False\n\n\nclass FHeadRequest(_Rex):\n code = \"hq\"\n help = \"Request header\"\n flags = re.MULTILINE\n\n @only(http.HTTPFlow)\n def __call__(self, f):\n if f.request and self.re.search(bytes(f.request.headers)):\n return True\n\n\nclass FHeadResponse(_Rex):\n code = \"hs\"\n help = \"Response header\"\n flags = re.MULTILINE\n\n @only(http.HTTPFlow)\n def __call__(self, f):\n if f.response and self.re.search(bytes(f.response.headers)):\n return True\n\n\nclass FBod(_Rex):\n code = \"b\"\n help = \"Body\"\n flags = re.DOTALL\n\n @only(http.HTTPFlow, tcp.TCPFlow, udp.UDPFlow, dns.DNSFlow)\n def __call__(self, f):\n if isinstance(f, http.HTTPFlow):\n if (\n f.request\n and (content := f.request.get_content(strict=False)) is not None\n ):\n if self.re.search(content):\n return True\n if (\n f.response\n and (content := f.response.get_content(strict=False)) is not None\n ):\n if self.re.search(content):\n return True\n if f.websocket:\n for wmsg in f.websocket.messages:\n if wmsg.content is not None and self.re.search(wmsg.content):\n return True\n elif isinstance(f, (tcp.TCPFlow, udp.UDPFlow)):\n for msg in f.messages:\n if msg.content is not None and self.re.search(msg.content):\n return True\n elif isinstance(f, dns.DNSFlow):\n if f.request and self.re.search(f.request.content):\n return True\n if f.response and self.re.search(f.response.content):\n return True\n return False\n\n\nclass FBodRequest(_Rex):\n code = \"bq\"\n help = \"Request body\"\n flags = re.DOTALL\n\n @only(http.HTTPFlow, tcp.TCPFlow, udp.UDPFlow, dns.DNSFlow)\n def __call__(self, f):\n if isinstance(f, http.HTTPFlow):\n if (\n f.request\n and (content := f.request.get_content(strict=False)) is not None\n ):\n if self.re.search(content):\n return True\n if f.websocket:\n for wmsg in f.websocket.messages:\n if wmsg.from_client and self.re.search(wmsg.content):\n return True\n elif isinstance(f, (tcp.TCPFlow, udp.UDPFlow)):\n for msg in f.messages:\n if msg.from_client and self.re.search(msg.content):\n return True\n elif 
isinstance(f, dns.DNSFlow):\n if f.request and self.re.search(f.request.content):\n return True\n\n\nclass FBodResponse(_Rex):\n code = \"bs\"\n help = \"Response body\"\n flags = re.DOTALL\n\n @only(http.HTTPFlow, tcp.TCPFlow, udp.UDPFlow, dns.DNSFlow)\n def __call__(self, f):\n if isinstance(f, http.HTTPFlow):\n if (\n f.response\n and (content := f.response.get_content(strict=False)) is not None\n ):\n if self.re.search(content):\n return True\n if f.websocket:\n for wmsg in f.websocket.messages:\n if not wmsg.from_client and self.re.search(wmsg.content):\n return True\n elif isinstance(f, (tcp.TCPFlow, udp.UDPFlow)):\n for msg in f.messages:\n if not msg.from_client and self.re.search(msg.content):\n return True\n elif isinstance(f, dns.DNSFlow):\n if f.response and self.re.search(f.response.content):\n return True\n\n\nclass FMethod(_Rex):\n code = \"m\"\n help = \"Method\"\n flags = re.IGNORECASE\n\n @only(http.HTTPFlow)\n def __call__(self, f):\n return bool(self.re.search(f.request.data.method))\n\n\nclass FDomain(_Rex):\n code = \"d\"\n help = \"Domain\"\n flags = re.IGNORECASE\n is_binary = False\n\n @only(http.HTTPFlow)\n def __call__(self, f):\n return bool(\n self.re.search(f.request.host) or self.re.search(f.request.pretty_host)\n )\n\n\nclass FUrl(_Rex):\n code = \"u\"\n help = \"URL\"\n is_binary = False\n flags = re.IGNORECASE\n\n # FUrl is special, because it can be \"naked\".\n\n @classmethod\n def make(klass, s, loc, toks):\n if len(toks) > 1:\n toks = toks[1:]\n return klass(*toks)\n\n @only(http.HTTPFlow, dns.DNSFlow)\n def __call__(self, f):\n if not f or not f.request:\n return False\n if isinstance(f, http.HTTPFlow):\n return self.re.search(f.request.pretty_url)\n elif isinstance(f, dns.DNSFlow):\n return f.request.questions and self.re.search(f.request.questions[0].name)\n\n\nclass FSrc(_Rex):\n code = \"src\"\n help = \"Match source address\"\n is_binary = False\n\n def __call__(self, f):\n if not f.client_conn or not f.client_conn.peername:\n return False\n r = f\"{f.client_conn.peername[0]}:{f.client_conn.peername[1]}\"\n return f.client_conn.peername and self.re.search(r)\n\n\nclass FDst(_Rex):\n code = \"dst\"\n help = \"Match destination address\"\n is_binary = False\n\n def __call__(self, f):\n if not f.server_conn or not f.server_conn.address:\n return False\n r = f\"{f.server_conn.address[0]}:{f.server_conn.address[1]}\"\n return f.server_conn.address and self.re.search(r)\n\n\nclass FReplay(_Action):\n code = \"replay\"\n help = \"Match replayed flows\"\n\n def __call__(self, f):\n return f.is_replay is not None\n\n\nclass FReplayClient(_Action):\n code = \"replayq\"\n help = \"Match replayed client request\"\n\n def __call__(self, f):\n return f.is_replay == \"request\"\n\n\nclass FReplayServer(_Action):\n code = \"replays\"\n help = \"Match replayed server response\"\n\n def __call__(self, f):\n return f.is_replay == \"response\"\n\n\nclass FMeta(_Rex):\n code = \"meta\"\n help = \"Flow metadata\"\n flags = re.MULTILINE\n is_binary = False\n\n def __call__(self, f):\n m = \"\\n\".join([f\"{key}: {value}\" for key, value in f.metadata.items()])\n return self.re.search(m)\n\n\nclass FMarker(_Rex):\n code = \"marker\"\n help = \"Match marked flows with specified marker\"\n is_binary = False\n\n def __call__(self, f):\n return self.re.search(f.marked)\n\n\nclass FComment(_Rex):\n code = \"comment\"\n help = \"Flow comment\"\n flags = re.MULTILINE\n is_binary = False\n\n def __call__(self, f):\n return self.re.search(f.comment)\n\n\nclass _Int(_Action):\n 
def __init__(self, num):\n self.num = int(num)\n\n\nclass FCode(_Int):\n code = \"c\"\n help = \"HTTP response code\"\n\n @only(http.HTTPFlow)\n def __call__(self, f):\n if f.response and f.response.status_code == self.num:\n return True\n\n\nclass FAnd(_Token):\n def __init__(self, lst):\n self.lst = lst\n\n def dump(self, indent=0, fp=sys.stdout):\n super().dump(indent, fp)\n for i in self.lst:\n i.dump(indent + 1, fp)\n\n def __call__(self, f):\n return all(i(f) for i in self.lst)\n\n\nclass FOr(_Token):\n def __init__(self, lst):\n self.lst = lst\n\n def dump(self, indent=0, fp=sys.stdout):\n super().dump(indent, fp)\n for i in self.lst:\n i.dump(indent + 1, fp)\n\n def __call__(self, f):\n return any(i(f) for i in self.lst)\n\n\nclass FNot(_Token):\n def __init__(self, itm):\n self.itm = itm[0]\n\n def dump(self, indent=0, fp=sys.stdout):\n super().dump(indent, fp)\n self.itm.dump(indent + 1, fp)\n\n def __call__(self, f):\n return not self.itm(f)\n\n\nfilter_unary: Sequence[type[_Action]] = [\n FAsset,\n FErr,\n FHTTP,\n FMarked,\n FReplay,\n FReplayClient,\n FReplayServer,\n FReq,\n FResp,\n FTCP,\n FUDP,\n FDNS,\n FWebSocket,\n FAll,\n]\nfilter_rex: Sequence[type[_Rex]] = [\n FBod,\n FBodRequest,\n FBodResponse,\n FContentType,\n FContentTypeRequest,\n FContentTypeResponse,\n FDomain,\n FDst,\n FHead,\n FHeadRequest,\n FHeadResponse,\n FMethod,\n FSrc,\n FUrl,\n FMeta,\n FMarker,\n FComment,\n]\nfilter_int = [FCode]\n\n\ndef _make():\n # Order is important - multi-char expressions need to come before narrow\n # ones.\n parts = []\n for cls in filter_unary:\n f = pp.Literal(f\"~{cls.code}\") + pp.WordEnd()\n f.setParseAction(cls.make)\n parts.append(f)\n\n # This is a bit of a hack to simulate Word(pyparsing_unicode.printables),\n # which has a horrible performance with len(pyparsing.pyparsing_unicode.printables) == 1114060\n unicode_words = pp.CharsNotIn(\"()~'\\\"\" + pp.ParserElement.DEFAULT_WHITE_CHARS)\n unicode_words.skipWhitespace = True\n regex = (\n unicode_words\n | pp.QuotedString('\"', escChar=\"\\\\\")\n | pp.QuotedString(\"'\", escChar=\"\\\\\")\n )\n for cls in filter_rex:\n f = pp.Literal(f\"~{cls.code}\") + pp.WordEnd() + regex.copy()\n f.setParseAction(cls.make)\n parts.append(f)\n\n for cls in filter_int:\n f = pp.Literal(f\"~{cls.code}\") + pp.WordEnd() + pp.Word(pp.nums)\n f.setParseAction(cls.make)\n parts.append(f)\n\n # A naked rex is a URL rex:\n f = regex.copy()\n f.setParseAction(FUrl.make)\n parts.append(f)\n\n atom = pp.MatchFirst(parts)\n expr = pp.infixNotation(\n atom,\n [\n (pp.Literal(\"!\").suppress(), 1, pp.opAssoc.RIGHT, lambda x: FNot(*x)),\n (pp.Literal(\"&\").suppress(), 2, pp.opAssoc.LEFT, lambda x: FAnd(*x)),\n (pp.Literal(\"|\").suppress(), 2, pp.opAssoc.LEFT, lambda x: FOr(*x)),\n ],\n )\n expr = pp.OneOrMore(expr)\n return expr.setParseAction(lambda x: FAnd(x) if len(x) != 1 else x)\n\n\nbnf = _make()\n\n\nclass TFilter(Protocol):\n pattern: str\n\n def __call__(self, f: flow.Flow) -> bool:\n ... 
# pragma: no cover\n\n\ndef parse(s: str) -> TFilter:\n \"\"\"\n Parse a filter expression and return the compiled filter function.\n If the filter syntax is invalid, `ValueError` is raised.\n \"\"\"\n if not s:\n raise ValueError(\"Empty filter expression\")\n try:\n flt = bnf.parseString(s, parseAll=True)[0]\n flt.pattern = s\n return flt\n except (pp.ParseException, ValueError) as e:\n raise ValueError(f\"Invalid filter expression: {s!r}\") from e\n\n\ndef match(flt: str | TFilter, flow: flow.Flow) -> bool:\n \"\"\"\n Matches a flow against a compiled filter expression.\n Returns True if matched, False if not.\n\n If flt is a string, it will be compiled as a filter expression.\n If the expression is invalid, ValueError is raised.\n \"\"\"\n if isinstance(flt, str):\n flt = parse(flt)\n if flt:\n return flt(flow)\n return True\n\n\nmatch_all: TFilter = parse(\"~all\")\n\"\"\"A filter function that matches all flows\"\"\"\n\n\nhelp = []\nfor a in filter_unary:\n help.append((f\"~{a.code}\", a.help))\nfor b in filter_rex:\n help.append((f\"~{b.code} regex\", b.help))\nfor c in filter_int:\n help.append((f\"~{c.code} int\", c.help))\nhelp.sort()\nhelp.extend(\n [\n (\"!\", \"unary not\"),\n (\"&\", \"and\"),\n (\"|\", \"or\"),\n (\"(...)\", \"grouping\"),\n ]\n)\n",
"path": "mitmproxy/flowfilter.py"
}
] | diff --git a/CHANGELOG.md b/CHANGELOG.md
index bdac343e75..a80ac80cac 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -18,6 +18,8 @@
([#6543](https://github.com/mitmproxy/mitmproxy/pull/6543), @mhils)
* DNS resolution is now exempted from `--ignore-hosts` in WireGuard Mode.
([#6513](https://github.com/mitmproxy/mitmproxy/pull/6513), @dsphper)
+* Fix case sensitivity of URL added to blocklist
+ ([#6493](https://github.com/mitmproxy/mitmproxy/pull/6493), @emanuele-em)
* Fix a bug where logging was stopped prematurely during shutdown.
([#6541](https://github.com/mitmproxy/mitmproxy/pull/6541), @mhils)
* For plaintext traffic, `--ignore-hosts` now also takes HTTP/1 host headers into account.
diff --git a/mitmproxy/flowfilter.py b/mitmproxy/flowfilter.py
index 52db22be03..840583f3d1 100644
--- a/mitmproxy/flowfilter.py
+++ b/mitmproxy/flowfilter.py
@@ -402,6 +402,7 @@ class FUrl(_Rex):
code = "u"
help = "URL"
is_binary = False
+ flags = re.IGNORECASE
# FUrl is special, because it can be "naked".
diff --git a/test/mitmproxy/addons/test_blocklist.py b/test/mitmproxy/addons/test_blocklist.py
index 9187443b28..b7c7e536d3 100644
--- a/test/mitmproxy/addons/test_blocklist.py
+++ b/test/mitmproxy/addons/test_blocklist.py
@@ -22,20 +22,21 @@ def test_parse_spec_err(filter, err):
class TestBlockList:
@pytest.mark.parametrize(
- "filter,status_code",
+ "filter,request_url,status_code",
[
- (":~u example.org:404", 404),
- (":~u example.com:404", None),
- ("/!jpg/418", None),
- ("/!png/418", 418),
+ (":~u example.org:404", b"https://example.org/images/test.jpg", 404),
+ (":~u example.com:404", b"https://example.org/images/test.jpg", None),
+ (":~u test:404", b"https://example.org/images/TEST.jpg", 404),
+ ("/!jpg/418", b"https://example.org/images/test.jpg", None),
+ ("/!png/418", b"https://example.org/images/test.jpg", 418),
],
)
- def test_block(self, filter, status_code):
+ def test_block(self, filter, request_url, status_code):
bl = blocklist.BlockList()
with taddons.context(bl) as tctx:
tctx.configure(bl, block_list=[filter])
f = tflow.tflow()
- f.request.url = b"https://example.org/images/test.jpg"
+ f.request.url = request_url
bl.request(f)
if status_code is not None:
assert f.response.status_code == status_code
|
google__flax-3540 | Error when using nn.scan with negative output_axes
### System information
- OS Platform and Distribution (e.g., Linux Ubuntu 16.04): N/A
- Flax, jax, jaxlib versions (obtain with `pip show flax jax jaxlib`): ```flax==0.6.11, jax==0.4.9, jaxlib==0.4.9```
- Python version: ```3.8```
- GPU/TPU model and memory: N/A
- CUDA version (if applicable): N/A
### Problem you have encountered:
When using ```flax.linen.scan``` with a negative ```output_axes```, there is an unexpected ```AssertionError```. If I have understood the source code correctly, it is due to a typo [here](https://github.com/google/flax/blob/main/flax/core/axes_scan.py#L103) (namely, a minus sign instead of a plus sign).
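As a quick sanity check of the sign error described above (plain Python arithmetic with illustrative values; `ndim` mirrors the rank of the stacked output in the reproduction script below and `ax` its `out_axes=-1`):
```
ndim = 3  # rank of the stacked scan output, e.g. (length, batch, features)
ax = -1   # the requested out_axes

buggy_pax = ndim - ax  # 3 - (-1) = 4, so the later `assert pax < x.ndim` fails
fixed_pax = ndim + ax  # 3 + (-1) = 2, the intended trailing axis
assert buggy_pax == 4 and fixed_pax == 2
```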
### What you expected to happen:
Apply scan as usual, stacking the outputs along the specified axis.
### Logs, error messages, etc:
```
(projectabcde) lucaslingle@Lucass-MacBook-Pro projectabcde % python3 scripts/scan_issue.py
Traceback (most recent call last):
File "scripts/scan_issue.py", line 39, in <module>
main()
File "scripts/scan_issue.py", line 32, in main
params = cls().init(
File "/Users/lucaslingle/opt/miniconda3/envs/projectabcde/lib/python3.8/site-packages/jax/_src/traceback_util.py", line 166, in reraise_with_filtered_traceback
return fun(*args, **kwargs)
File "/Users/lucaslingle/opt/miniconda3/envs/projectabcde/lib/python3.8/site-packages/flax/linen/module.py", line 1689, in init
_, v_out = self.init_with_output(
File "/Users/lucaslingle/opt/miniconda3/envs/projectabcde/lib/python3.8/site-packages/jax/_src/traceback_util.py", line 166, in reraise_with_filtered_traceback
return fun(*args, **kwargs)
File "/Users/lucaslingle/opt/miniconda3/envs/projectabcde/lib/python3.8/site-packages/flax/linen/module.py", line 1594, in init_with_output
return init_with_output(
File "/Users/lucaslingle/opt/miniconda3/envs/projectabcde/lib/python3.8/site-packages/flax/core/scope.py", line 968, in wrapper
return apply(fn, mutable=mutable, flags=init_flags)({}, *args, rngs=rngs,
File "/Users/lucaslingle/opt/miniconda3/envs/projectabcde/lib/python3.8/site-packages/flax/core/scope.py", line 936, in wrapper
y = fn(root, *args, **kwargs)
File "/Users/lucaslingle/opt/miniconda3/envs/projectabcde/lib/python3.8/site-packages/flax/linen/module.py", line 2170, in scope_fn
return fn(module.clone(parent=scope, _deep_clone=True), *args, **kwargs)
File "/Users/lucaslingle/opt/miniconda3/envs/projectabcde/lib/python3.8/site-packages/flax/linen/module.py", line 432, in wrapped_module_method
return self._call_wrapped_method(fun, args, kwargs)
File "/Users/lucaslingle/opt/miniconda3/envs/projectabcde/lib/python3.8/site-packages/flax/linen/module.py", line 868, in _call_wrapped_method
y = fun(self, *args, **kwargs)
File "scripts/scan_issue.py", line 18, in __call__
_, outputs = nn.scan(
File "/Users/lucaslingle/opt/miniconda3/envs/projectabcde/lib/python3.8/site-packages/flax/linen/transforms.py", line 323, in wrapped_fn
ret = trafo_fn(module_scopes, *args, **kwargs)
File "/Users/lucaslingle/opt/miniconda3/envs/projectabcde/lib/python3.8/site-packages/flax/core/lift.py", line 219, in wrapper
y, out_variable_groups_xs_t = fn(
File "/Users/lucaslingle/opt/miniconda3/envs/projectabcde/lib/python3.8/site-packages/flax/core/lift.py", line 806, in inner
broadcast_vars, (carry_vars, c), (ys, scan_vars) = scanned(
File "/Users/lucaslingle/opt/miniconda3/envs/projectabcde/lib/python3.8/site-packages/flax/core/axes_scan.py", line 151, in scan_fn
ys = jax.tree_util.tree_map(transpose_from_front, out_axes, ys)
File "/Users/lucaslingle/opt/miniconda3/envs/projectabcde/lib/python3.8/site-packages/jax/_src/tree_util.py", line 210, in tree_map
return treedef.unflatten(f(*xs) for xs in zip(*all_leaves))
File "/Users/lucaslingle/opt/miniconda3/envs/projectabcde/lib/python3.8/site-packages/jax/_src/tree_util.py", line 210, in <genexpr>
return treedef.unflatten(f(*xs) for xs in zip(*all_leaves))
File "/Users/lucaslingle/opt/miniconda3/envs/projectabcde/lib/python3.8/site-packages/flax/core/axes_scan.py", line 106, in transpose_from_front
return jax.tree_util.tree_map(trans, xs)
File "/Users/lucaslingle/opt/miniconda3/envs/projectabcde/lib/python3.8/site-packages/jax/_src/tree_util.py", line 210, in tree_map
return treedef.unflatten(f(*xs) for xs in zip(*all_leaves))
File "/Users/lucaslingle/opt/miniconda3/envs/projectabcde/lib/python3.8/site-packages/jax/_src/tree_util.py", line 210, in <genexpr>
return treedef.unflatten(f(*xs) for xs in zip(*all_leaves))
File "/Users/lucaslingle/opt/miniconda3/envs/projectabcde/lib/python3.8/site-packages/flax/core/axes_scan.py", line 103, in trans
assert pax < x.ndim
jax._src.traceback_util.UnfilteredStackTrace: AssertionError
The stack trace below excludes JAX-internal frames.
The preceding is the original exception that occurred, unmodified.
--------------------
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "scripts/scan_issue.py", line 39, in <module>
main()
File "scripts/scan_issue.py", line 32, in main
params = cls().init(
File "scripts/scan_issue.py", line 18, in __call__
_, outputs = nn.scan(
File "/Users/lucaslingle/opt/miniconda3/envs/projectabcde/lib/python3.8/site-packages/flax/core/axes_scan.py", line 151, in scan_fn
ys = jax.tree_util.tree_map(transpose_from_front, out_axes, ys)
File "/Users/lucaslingle/opt/miniconda3/envs/projectabcde/lib/python3.8/site-packages/flax/core/axes_scan.py", line 106, in transpose_from_front
return jax.tree_util.tree_map(trans, xs)
File "/Users/lucaslingle/opt/miniconda3/envs/projectabcde/lib/python3.8/site-packages/flax/core/axes_scan.py", line 103, in trans
assert pax < x.ndim
AssertionError
```
### Steps to reproduce:
```
# issue appears to be at https://github.com/google/flax/blob/main/flax/core/axes_scan.py#L101
import flax.linen as nn
import jax.random


class Foo(nn.Module):
    unused_config: int

    @nn.compact
    def __call__(self, state, input_dict):
        return state, nn.Dense(100)(input_dict["x"])


class Bar(nn.Module):
    @nn.compact
    def __call__(self, x):
        _, outputs = nn.scan(
            Foo,
            variable_broadcast="params",
            split_rngs=dict(
                params=False,
            ),
            in_axes=0,
            out_axes=-1,
        )(unused_config=123)(dict(unused_state_item=None), dict(x=x))
        return outputs


def main():
    cls = Bar
    params = cls().init(
        {"params": jax.random.PRNGKey(0)},
        jax.random.normal(jax.random.PRNGKey(1), shape=[8, 128, 16])
    )["params"]


if __name__ == "__main__":
    main()
```
Thank you for your attention to this matter!
| [
{
"content": "# Copyright 2023 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Wrapper around jax.lax.scan with in_axes/out_axes API.\"\"\"\nimport functools\nfrom typing import Any, Callable, Optional\n\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nfrom jax import core, lax\nfrom jax.extend import linear_util as lu\nfrom jax.interpreters import partial_eval as pe\n\nScanAxis = Optional[int]\n\n\nclass _Broadcast:\n pass\n\n\nbroadcast = _Broadcast()\n\n\ndef scan(\n fn: Callable[..., Any],\n in_axes: Any,\n out_axes: Any,\n length: Optional[int] = None,\n reverse: bool = False,\n unroll: int = 1,\n):\n \"\"\"A wrapper around `jax.lax.scan` with in_axes/out_axes api.\n\n Example::\n def body_fn(b, c, x):\n return b + 2, c + 1, 2 * x\n\n loop = scan(body_fn, in_axes=0, out_axes=0)\n broadcast_in = 1\n carry = 2\n xs = jnp.arange(3)\n broadcast_out, carry, ys = loop(broadcast_in, carry, xs)\n print(broadcast_out) # prints: 3\n print(carry) # prints: 5\n print(ys) # prints: [0, 2, 4]\n\n\n Args:\n fn: the body function of the scan loop of the form\n `(broadcast_in, carry, *args) -> (broadcast_out, carry, scan_out)`.\n the broadcast argument allows for loop independent inputs/outputs to\n be computed inside `fn`. `fn` will be called once to compute\n `broadcast_out`. The actual loop will receive `broadcast_out` as the new\n `broadcast_in`. This is useful for initializing values inside the loop.\n in_axes: specifies the axis along which arguments are scanned.\n Use `broadcast` to use the same value across iterations.\n out_axes: specifies the axis along which outputs are concatenated.\n Use `broadcast` if a return value should not be concatenated and\n is independent of the loop body.\n length: number of iterations. 
Only needs to be specified if there\n is no scan axis from which it can be derived.\n reverse: scan in reverse order from end to start.\n unroll: how many scan iterations to unroll within a single\n iteration of a loop (default: 1).\n Returns:\n the function that performs the scan of the form:\n (broadcast_in, carry_in, *args) -> (broadcast_out, carry_out, scan_out).\n \"\"\"\n\n def transpose_to_front(ax, xs):\n if ax is broadcast:\n return ()\n if ax == 0:\n return xs\n\n def trans(x):\n perm = tuple(range(x.ndim))\n perm = (ax,) + tuple(np.delete(perm, ax))\n return jnp.transpose(x, perm)\n\n return jax.tree_util.tree_map(trans, xs)\n\n def transpose_from_front(ax, xs):\n if ax is broadcast:\n return ()\n if ax == 0:\n return xs\n\n def trans(x):\n if ax < 0:\n pax = x.ndim - ax\n else:\n pax = ax\n assert pax < x.ndim\n perm = tuple(range(1, pax + 1)) + (0,) + tuple(range(pax + 1, x.ndim))\n return jnp.transpose(x, perm)\n\n return jax.tree_util.tree_map(trans, xs)\n\n def scan_fn(broadcast_in, init, *args):\n xs = jax.tree_util.tree_map(transpose_to_front, in_axes, args)\n\n def body_fn(c, xs, init_mode=False):\n # inject constants\n xs = jax.tree_util.tree_map(\n lambda ax, arg, x: (arg if ax is broadcast else x), in_axes, args, xs\n )\n broadcast_out, c, ys = fn(broadcast_in, c, *xs)\n\n if init_mode:\n ys = jax.tree_util.tree_map(\n lambda ax, y: (y if ax is broadcast else ()), out_axes, ys\n )\n return broadcast_out, ys\n else:\n ys = jax.tree_util.tree_map(\n lambda ax, y: (() if ax is broadcast else y), out_axes, ys\n )\n return c, ys\n\n broadcast_body = functools.partial(body_fn, init_mode=True)\n\n carry_avals = jax.tree_util.tree_map(\n lambda x: core.ShapedArray(jnp.shape(x), jnp.result_type(x)), init\n )\n scan_avals = jax.tree_util.tree_map(\n lambda x: core.ShapedArray(jnp.shape(x)[1:], jnp.result_type(x)), xs\n )\n input_avals = (carry_avals, scan_avals)\n\n in_avals, in_tree = jax.tree_util.tree_flatten(input_avals)\n f_flat, out_tree = jax.api_util.flatten_fun_nokwargs(\n lu.wrap_init(broadcast_body), in_tree\n )\n in_pvals = list(map(pe.PartialVal.unknown, in_avals))\n _, out_pvals, _ = pe.trace_to_jaxpr_nounits(f_flat, in_pvals)\n\n out_flat = []\n for pv, const in out_pvals:\n if pv is not None:\n raise ValueError(\n 'broadcasted variable has a data dependency on the scan body.'\n )\n out_flat.append(const)\n broadcast_in, constants_out = jax.tree_util.tree_unflatten(\n out_tree(), out_flat\n )\n\n c, ys = lax.scan(\n body_fn, init, xs, length=length, reverse=reverse, unroll=unroll\n )\n ys = jax.tree_util.tree_map(transpose_from_front, out_axes, ys)\n ys = jax.tree_util.tree_map(\n lambda ax, const, y: (const if ax is broadcast else y),\n out_axes,\n constants_out,\n ys,\n )\n return broadcast_in, c, ys\n\n return scan_fn\n",
"path": "flax/core/axes_scan.py"
}
] | [
{
"content": "# Copyright 2023 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Wrapper around jax.lax.scan with in_axes/out_axes API.\"\"\"\nimport functools\nfrom typing import Any, Callable, Optional\n\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nfrom jax import core, lax\nfrom jax.extend import linear_util as lu\nfrom jax.interpreters import partial_eval as pe\n\nScanAxis = Optional[int]\n\n\nclass _Broadcast:\n pass\n\n\nbroadcast = _Broadcast()\n\n\ndef scan(\n fn: Callable[..., Any],\n in_axes: Any,\n out_axes: Any,\n length: Optional[int] = None,\n reverse: bool = False,\n unroll: int = 1,\n):\n \"\"\"A wrapper around `jax.lax.scan` with in_axes/out_axes api.\n\n Example::\n def body_fn(b, c, x):\n return b + 2, c + 1, 2 * x\n\n loop = scan(body_fn, in_axes=0, out_axes=0)\n broadcast_in = 1\n carry = 2\n xs = jnp.arange(3)\n broadcast_out, carry, ys = loop(broadcast_in, carry, xs)\n print(broadcast_out) # prints: 3\n print(carry) # prints: 5\n print(ys) # prints: [0, 2, 4]\n\n\n Args:\n fn: the body function of the scan loop of the form\n `(broadcast_in, carry, *args) -> (broadcast_out, carry, scan_out)`.\n the broadcast argument allows for loop independent inputs/outputs to\n be computed inside `fn`. `fn` will be called once to compute\n `broadcast_out`. The actual loop will receive `broadcast_out` as the new\n `broadcast_in`. This is useful for initializing values inside the loop.\n in_axes: specifies the axis along which arguments are scanned.\n Use `broadcast` to use the same value across iterations.\n out_axes: specifies the axis along which outputs are concatenated.\n Use `broadcast` if a return value should not be concatenated and\n is independent of the loop body.\n length: number of iterations. 
Only needs to be specified if there\n is no scan axis from which it can be derived.\n reverse: scan in reverse order from end to start.\n unroll: how many scan iterations to unroll within a single\n iteration of a loop (default: 1).\n Returns:\n the function that performs the scan of the form:\n (broadcast_in, carry_in, *args) -> (broadcast_out, carry_out, scan_out).\n \"\"\"\n\n def transpose_to_front(ax, xs):\n if ax is broadcast:\n return ()\n if ax == 0:\n return xs\n\n def trans(x):\n perm = tuple(range(x.ndim))\n perm = (ax,) + tuple(np.delete(perm, ax))\n return jnp.transpose(x, perm)\n\n return jax.tree_util.tree_map(trans, xs)\n\n def transpose_from_front(ax, xs):\n if ax is broadcast:\n return ()\n if ax == 0:\n return xs\n\n def trans(x):\n if ax < 0:\n pax = x.ndim + ax\n else:\n pax = ax\n assert pax < x.ndim\n perm = tuple(range(1, pax + 1)) + (0,) + tuple(range(pax + 1, x.ndim))\n return jnp.transpose(x, perm)\n\n return jax.tree_util.tree_map(trans, xs)\n\n def scan_fn(broadcast_in, init, *args):\n xs = jax.tree_util.tree_map(transpose_to_front, in_axes, args)\n\n def body_fn(c, xs, init_mode=False):\n # inject constants\n xs = jax.tree_util.tree_map(\n lambda ax, arg, x: (arg if ax is broadcast else x), in_axes, args, xs\n )\n broadcast_out, c, ys = fn(broadcast_in, c, *xs)\n\n if init_mode:\n ys = jax.tree_util.tree_map(\n lambda ax, y: (y if ax is broadcast else ()), out_axes, ys\n )\n return broadcast_out, ys\n else:\n ys = jax.tree_util.tree_map(\n lambda ax, y: (() if ax is broadcast else y), out_axes, ys\n )\n return c, ys\n\n broadcast_body = functools.partial(body_fn, init_mode=True)\n\n carry_avals = jax.tree_util.tree_map(\n lambda x: core.ShapedArray(jnp.shape(x), jnp.result_type(x)), init\n )\n scan_avals = jax.tree_util.tree_map(\n lambda x: core.ShapedArray(jnp.shape(x)[1:], jnp.result_type(x)), xs\n )\n input_avals = (carry_avals, scan_avals)\n\n in_avals, in_tree = jax.tree_util.tree_flatten(input_avals)\n f_flat, out_tree = jax.api_util.flatten_fun_nokwargs(\n lu.wrap_init(broadcast_body), in_tree\n )\n in_pvals = list(map(pe.PartialVal.unknown, in_avals))\n _, out_pvals, _ = pe.trace_to_jaxpr_nounits(f_flat, in_pvals)\n\n out_flat = []\n for pv, const in out_pvals:\n if pv is not None:\n raise ValueError(\n 'broadcasted variable has a data dependency on the scan body.'\n )\n out_flat.append(const)\n broadcast_in, constants_out = jax.tree_util.tree_unflatten(\n out_tree(), out_flat\n )\n\n c, ys = lax.scan(\n body_fn, init, xs, length=length, reverse=reverse, unroll=unroll\n )\n ys = jax.tree_util.tree_map(transpose_from_front, out_axes, ys)\n ys = jax.tree_util.tree_map(\n lambda ax, const, y: (const if ax is broadcast else y),\n out_axes,\n constants_out,\n ys,\n )\n return broadcast_in, c, ys\n\n return scan_fn\n",
"path": "flax/core/axes_scan.py"
}
] | diff --git a/flax/core/axes_scan.py b/flax/core/axes_scan.py
index 8c51ca3df..2ffd347db 100644
--- a/flax/core/axes_scan.py
+++ b/flax/core/axes_scan.py
@@ -100,7 +100,7 @@ def transpose_from_front(ax, xs):
def trans(x):
if ax < 0:
- pax = x.ndim - ax
+ pax = x.ndim + ax
else:
pax = ax
assert pax < x.ndim
|
Kinto__kinto-1302 | Cannot import name `Utc`
While trying to debug #1299 I encountered the following error:
```
$ make serve
...
~/.virtualenvs/test/bin/kinto migrate --ini config/kinto.ini
Traceback (most recent call last):
File "~/.virtualenvs/test/bin/kinto", line 11, in <module>
load_entry_point('kinto', 'console_scripts', 'kinto')()
File "~/.virtualenvs/test/lib/python3.5/site-packages/pkg_resources/__init__.py", line 560, in load_entry_point
return get_distribution(dist).load_entry_point(group, name)
File "~/.virtualenvs/test/lib/python3.5/site-packages/pkg_resources/__init__.py", line 2648, in load_entry_point
return ep.load()
File "~/.virtualenvs/test/lib/python3.5/site-packages/pkg_resources/__init__.py", line 2302, in load
return self.resolve()
File "~/.virtualenvs/test/lib/python3.5/site-packages/pkg_resources/__init__.py", line 2308, in resolve
module = __import__(self.module_name, fromlist=['__name__'], level=0)
File "~/mozilla/kinto/kinto/__init__.py", line 4, in <module>
import kinto.core
File "~/mozilla/kinto/kinto/core/__init__.py", line 10, in <module>
from kinto.core import errors
File "~/mozilla/kinto/kinto/core/errors.py", line 1, in <module>
import colander
File "~/.virtualenvs/test/lib/python3.5/site-packages/colander/__init__.py", line 22, in <module>
from . import iso8601
File "~/.virtualenvs/test/lib/python3.5/site-packages/colander/iso8601.py", line 3, in <module>
from iso8601.iso8601 import (parse_date, ParseError, Utc, FixedOffset, UTC, ZERO, ISO8601_REGEX)
ImportError: cannot import name 'Utc'
Makefile:87: recipe for target 'migrate' failed
make: *** [migrate] Error 1
```
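For context, a quick check one can run in the affected virtualenv — the working assumption (consistent with the pinned fix below, `iso8601==0.1.11`) is that newer `iso8601` releases dropped the `Utc` class that colander's vendored `iso8601.py` still imports:
```python
# The module path comes straight from the traceback above; `hasattr` is just a
# sanity check, not part of any fix.
import iso8601.iso8601 as iso_mod

print(hasattr(iso_mod, "Utc"))  # False on the releases that trigger the ImportError
```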
| [
{
"content": "import codecs\nimport os\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read_file(filename):\n \"\"\"Open a related file and return its content.\"\"\"\n with codecs.open(os.path.join(here, filename), encoding='utf-8') as f:\n content = f.read()\n return content\n\n\nREADME = read_file('README.rst')\nCHANGELOG = read_file('CHANGELOG.rst')\nCONTRIBUTORS = read_file('CONTRIBUTORS.rst')\n\nREQUIREMENTS = [\n 'bcrypt',\n 'colander >= 1.3.2',\n 'cornice >= 2.4',\n 'cornice_swagger >= 0.5.1',\n 'jsonschema',\n 'jsonpatch',\n 'logging-color-formatter >= 1.0.1', # Message interpolations.\n 'python-dateutil',\n 'pyramid > 1.8, < 1.9b1',\n 'pyramid_multiauth >= 0.8', # User on policy selected event.\n 'transaction',\n # pyramid_tm changed the location of their tween in 2.x and one of\n # our tests fails on 2.0.\n 'pyramid_tm >= 2.1',\n 'requests',\n 'waitress',\n 'ujson >= 1.35'\n]\n\nPOSTGRESQL_REQUIRES = [\n 'SQLAlchemy',\n 'psycopg2 > 2.5',\n 'zope.sqlalchemy',\n]\n\nREDIS_REQUIRES = [\n 'kinto_redis'\n]\n\nSETUP_REQUIRES = [\n 'pytest-runner'\n]\n\nTEST_REQUIREMENTS = [\n 'bravado_core',\n 'pytest',\n 'WebTest'\n]\n\nDEPENDENCY_LINKS = [\n]\n\nMONITORING_REQUIRES = [\n 'raven',\n 'statsd',\n 'newrelic',\n 'werkzeug',\n]\n\nENTRY_POINTS = {\n 'paste.app_factory': [\n 'main = kinto:main',\n ],\n 'console_scripts': [\n 'kinto = kinto.__main__:main'\n ],\n}\n\n\nsetup(name='kinto',\n version='7.3.2.dev0',\n description='Kinto Web Service - Store, Sync, Share, and Self-Host.',\n long_description=\"{}\\n\\n{}\\n\\n{}\".format(README, CHANGELOG, CONTRIBUTORS),\n license='Apache License (2.0)',\n classifiers=[\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n \"License :: OSI Approved :: Apache Software License\"\n ],\n keywords=\"web sync json storage services\",\n author='Mozilla Services',\n author_email='[email protected]',\n url='https://github.com/Kinto/kinto',\n packages=find_packages(),\n package_data={'': ['*.rst', '*.py', '*.yaml']},\n include_package_data=True,\n zip_safe=False,\n setup_requires=SETUP_REQUIRES,\n tests_require=TEST_REQUIREMENTS,\n install_requires=REQUIREMENTS,\n extras_require={\n 'redis': REDIS_REQUIRES,\n 'postgresql': POSTGRESQL_REQUIRES,\n 'monitoring': MONITORING_REQUIRES,\n },\n test_suite=\"tests\",\n dependency_links=DEPENDENCY_LINKS,\n entry_points=ENTRY_POINTS)\n",
"path": "setup.py"
}
] | [
{
"content": "import codecs\nimport os\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read_file(filename):\n \"\"\"Open a related file and return its content.\"\"\"\n with codecs.open(os.path.join(here, filename), encoding='utf-8') as f:\n content = f.read()\n return content\n\n\nREADME = read_file('README.rst')\nCHANGELOG = read_file('CHANGELOG.rst')\nCONTRIBUTORS = read_file('CONTRIBUTORS.rst')\n\nREQUIREMENTS = [\n 'bcrypt',\n 'iso8601==0.1.11', # Refs #1301\n 'colander >= 1.3.2',\n 'cornice >= 2.4',\n 'cornice_swagger >= 0.5.1',\n 'jsonschema',\n 'jsonpatch',\n 'logging-color-formatter >= 1.0.1', # Message interpolations.\n 'python-dateutil',\n 'pyramid > 1.8, < 1.9b1',\n 'pyramid_multiauth >= 0.8', # User on policy selected event.\n 'transaction',\n # pyramid_tm changed the location of their tween in 2.x and one of\n # our tests fails on 2.0.\n 'pyramid_tm >= 2.1',\n 'requests',\n 'waitress',\n 'ujson >= 1.35'\n]\n\nPOSTGRESQL_REQUIRES = [\n 'SQLAlchemy',\n 'psycopg2 > 2.5',\n 'zope.sqlalchemy',\n]\n\nREDIS_REQUIRES = [\n 'kinto_redis'\n]\n\nSETUP_REQUIRES = [\n 'pytest-runner'\n]\n\nTEST_REQUIREMENTS = [\n 'bravado_core',\n 'pytest',\n 'WebTest'\n]\n\nDEPENDENCY_LINKS = [\n]\n\nMONITORING_REQUIRES = [\n 'raven',\n 'statsd',\n 'newrelic',\n 'werkzeug',\n]\n\nENTRY_POINTS = {\n 'paste.app_factory': [\n 'main = kinto:main',\n ],\n 'console_scripts': [\n 'kinto = kinto.__main__:main'\n ],\n}\n\n\nsetup(name='kinto',\n version='7.3.2.dev0',\n description='Kinto Web Service - Store, Sync, Share, and Self-Host.',\n long_description=\"{}\\n\\n{}\\n\\n{}\".format(README, CHANGELOG, CONTRIBUTORS),\n license='Apache License (2.0)',\n classifiers=[\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n \"License :: OSI Approved :: Apache Software License\"\n ],\n keywords=\"web sync json storage services\",\n author='Mozilla Services',\n author_email='[email protected]',\n url='https://github.com/Kinto/kinto',\n packages=find_packages(),\n package_data={'': ['*.rst', '*.py', '*.yaml']},\n include_package_data=True,\n zip_safe=False,\n setup_requires=SETUP_REQUIRES,\n tests_require=TEST_REQUIREMENTS,\n install_requires=REQUIREMENTS,\n extras_require={\n 'redis': REDIS_REQUIRES,\n 'postgresql': POSTGRESQL_REQUIRES,\n 'monitoring': MONITORING_REQUIRES,\n },\n test_suite=\"tests\",\n dependency_links=DEPENDENCY_LINKS,\n entry_points=ENTRY_POINTS)\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index 36edbd494..1ffb4863d 100644
--- a/setup.py
+++ b/setup.py
@@ -18,6 +18,7 @@ def read_file(filename):
REQUIREMENTS = [
'bcrypt',
+ 'iso8601==0.1.11', # Refs #1301
'colander >= 1.3.2',
'cornice >= 2.4',
'cornice_swagger >= 0.5.1',
|
pretalx__pretalx-263 | Mail config doesn't work
The fallback/default config is not used, even when an event's mail config is untouched.
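For reference, a minimal sketch (assuming the pre-fix `[mail]` defaults from `src/pretalx/settings.py`, included below in this record) of what an untouched install falls back to — the accompanying change flips the shipped `ssl` default to `'False'` so the `localhost:25` fallback can actually work:
```python
import configparser

config = configparser.RawConfigParser()
# Pre-fix fallback values for the [mail] section, copied from settings.py.
config.read_dict({'mail': {'from': 'admin@localhost', 'host': 'localhost',
                           'port': '25', 'user': '', 'password': '',
                           'tls': 'False', 'ssl': 'True'}})

# With nothing overridden, Django ends up configured for SMTP-over-SSL on
# localhost:25, which a plain local MTA does not speak.
print(config.getboolean('mail', 'ssl'))  # True before the fix, False after
```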
| [
{
"content": "import configparser\nimport os\nimport sys\nfrom contextlib import suppress\nfrom urllib.parse import urlparse\n\nfrom django.contrib.messages import constants as messages # NOQA\nfrom django.utils.crypto import get_random_string\nfrom django.utils.translation import ugettext_lazy as _ # NOQA\n\n\ndef reduce_dict(data):\n return {\n section_name: {\n key: value\n for key, value in section_content.items()\n if value is not None\n }\n for section_name, section_content in env_config.items()\n }\n\n\nconfig = configparser.RawConfigParser()\nconfig.read_dict({\n 'filesystem': {\n 'base': os.path.dirname(os.path.dirname(__file__)),\n }, # defaults depend on the data dir and need to be set once the data dir is fixed\n 'site': {\n 'debug': 'runserver' in sys.argv,\n 'url': 'http://localhost',\n 'cookie_domain': '',\n }, # the https setting is determined by url if not explicitly set\n 'database': {\n 'backend': 'sqlite3',\n # 'name': '',\n 'user': '',\n 'password': '',\n 'host': '',\n 'port': '',\n },\n 'mail': {\n 'from': 'admin@localhost',\n 'host': 'localhost',\n 'port': '25',\n 'user': '',\n 'password': '',\n 'tls': 'False',\n 'ssl': 'True',\n },\n 'cache': {\n },\n 'celery': {\n 'broker': '',\n 'backend': '',\n },\n})\n\nlegacy_config = {\n 'filesystem': {\n 'data': config.get('django', 'data_dir', fallback=None),\n 'static': config.get('django', 'static', fallback=None),\n },\n 'site': {\n 'debug': config.get('django', 'debug', fallback=None),\n 'secret': config.get('django', 'secret', fallback=None),\n },\n}\n\n\nif 'PRETALX_CONFIG_FILE' in os.environ:\n config_files = config.read_file(open(os.environ.get('PRETALX_CONFIG_FILE'), encoding='utf-8'))\nelse:\n config_files = config.read([\n '/etc/pretalx/pretalx.cfg',\n os.path.expanduser('~/.pretalx.cfg'),\n 'pretalx.cfg',\n ], encoding='utf-8')\n\nenv_config = {\n 'filesystem': {\n 'data': os.getenv('PRETALX_DATA_DIR'),\n },\n 'site': {\n 'debug': os.getenv('PRETALX_DEBUG'),\n 'url': os.getenv('PRETALX_SITE_URL'),\n 'https': os.getenv('PRETALX_HTTPS'),\n 'cookie_domain': os.getenv('PRETALX_COOKIE_DOMAIN'),\n },\n 'mail': {\n 'from': os.getenv('PRETALX_MAIL_FROM'),\n 'host': os.getenv('PRETALX_MAIL_HOST'),\n 'port': os.getenv('PRETALX_MAIL_PORT'),\n 'user': os.getenv('PRETALX_MAIL_USER'),\n 'password': os.getenv('PRETALX_MAIL_PASSWORD'),\n 'tls': os.getenv('PRETALX_MAIL_TLS'),\n 'ssl': os.getenv('PRETALX_MAIL_SSL'),\n },\n 'database': {\n 'backend': os.getenv('PRETALX_DB_TYPE'),\n 'name': os.getenv('PRETALX_DB_NAME'),\n 'user': os.getenv('PRETALX_DB_USER'),\n 'password': os.getenv('PRETALX_DB_PASS'),\n 'host': os.getenv('PRETALX_DB_HOST'),\n 'port': os.getenv('PRETALX_DB_PORT'),\n },\n 'celery': {\n 'broker': os.getenv('PRETALX_CELERY_BROKER'),\n 'backend': os.getenv('PRETALX_CELERY_BACKEND'),\n },\n}\n\nconfig.read_dict(reduce_dict(legacy_config))\nconfig.read_dict(reduce_dict(env_config))\n\n# File system and directory settings\nBASE_DIR = config.get('filesystem', 'base')\nDATA_DIR = config.get('filesystem', 'data', fallback=os.path.join(BASE_DIR, 'data'))\nLOG_DIR = config.get('filesystem', 'logs', fallback=os.path.join(DATA_DIR, 'logs'))\nMEDIA_ROOT = config.get('filesystem', 'media', fallback=os.path.join(DATA_DIR, 'media'))\nSTATIC_ROOT = config.get('filesystem', 'static', fallback=os.path.join(BASE_DIR, 'static.dist'))\n\nfor directory in (BASE_DIR, DATA_DIR, LOG_DIR, MEDIA_ROOT):\n if not os.path.exists(directory):\n os.mkdir(directory)\n\nif config.has_option('site', 'secret'):\n SECRET_KEY = config.get('site', 
'secret')\nelse:\n SECRET_FILE = os.path.join(DATA_DIR, '.secret')\n if os.path.exists(SECRET_FILE):\n with open(SECRET_FILE, 'r') as f:\n SECRET_KEY = f.read().strip()\n else:\n chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'\n SECRET_KEY = get_random_string(50, chars)\n with open(SECRET_FILE, 'w') as f:\n os.chmod(SECRET_FILE, 0o600)\n os.chown(SECRET_FILE, os.getuid(), os.getgid())\n f.write(SECRET_KEY)\n\n# General setup settings\nDEBUG = config.getboolean('site', 'debug')\n\nif DEBUG:\n EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\nelse:\n MAIL_FROM = SERVER_EMAIL = DEFAULT_FROM_EMAIL = config.get('mail', 'from')\n EMAIL_HOST = config.get('mail', 'host')\n EMAIL_PORT = config.get('mail', 'port')\n EMAIL_HOST_USER = config.get('mail', 'user')\n EMAIL_HOST_PASSWORD = config.get('mail', 'password')\n EMAIL_USE_TLS = config.getboolean('mail', 'tls')\n EMAIL_USE_SSL = config.getboolean('mail', 'ssl')\n\n\n# Database configuration\ndb_backend = config.get('database', 'backend')\ndb_name = config.get('database', 'name', fallback=os.path.join(DATA_DIR, 'db.sqlite3'))\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.' + db_backend,\n 'NAME': db_name,\n 'USER': config.get('database', 'user'),\n 'PASSWORD': config.get('database', 'password'),\n 'HOST': config.get('database', 'host'),\n 'PORT': config.get('database', 'port'),\n 'CONN_MAX_AGE': 0 if db_backend == 'sqlite3' else 120,\n }\n}\n\n# URL configuration\nSITE_URL = config.get('site', 'url', fallback='http://localhost')\nSITE_NETLOC = urlparse(SITE_URL).netloc\nALLOWED_HOSTS = ['*']\n\nif config.get('site', 'cookie_domain'):\n SESSION_COOKIE_DOMAIN = CSRF_COOKIE_DOMAIN = config.get('site', 'cookie_domain')\n\nSESSION_COOKIE_SECURE = config.getboolean('site', 'https', fallback=SITE_URL.startswith('https:'))\n\nROOT_URLCONF = 'pretalx.urls'\nSTATIC_URL = '/static/'\nMEDIA_URL = '/media/'\n\n# Cache configuration\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.dummy.DummyCache',\n }\n}\nREAL_CACHE_USED = False\nSESSION_ENGINE = None\n\nHAS_MEMCACHED = bool(os.getenv('PRETALX_MEMCACHE', ''))\nif HAS_MEMCACHED:\n REAL_CACHE_USED = True\n CACHES['default'] = {\n 'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache',\n 'LOCATION': os.getenv('PRETALX_MEMCACHE')\n }\n\nHAS_REDIS = bool(os.getenv('PRETALX_REDIS', ''))\nif HAS_REDIS:\n CACHES['redis'] = {\n \"BACKEND\": \"django_redis.cache.RedisCache\",\n \"LOCATION\": os.getenv('PRETALX_REDIS'),\n \"OPTIONS\": {\n \"CLIENT_CLASS\": \"django_redis.client.DefaultClient\",\n }\n }\n CACHES['redis_sessions'] = {\n \"BACKEND\": \"django_redis.cache.RedisCache\",\n \"LOCATION\": os.getenv('PRETALX_REDIS'),\n \"TIMEOUT\": 3600 * 24 * 30,\n \"OPTIONS\": {\n \"CLIENT_CLASS\": \"django_redis.client.DefaultClient\",\n }\n }\n if not HAS_MEMCACHED:\n CACHES['default'] = CACHES['redis']\n REAL_CACHE_USED = True\n\n if os.getenv('PRETALX_REDIS_SESSIONS', 'False') == 'True':\n SESSION_ENGINE = \"django.contrib.sessions.backends.cache\"\n SESSION_CACHE_ALIAS = \"redis_sessions\"\n\nif not SESSION_ENGINE:\n if REAL_CACHE_USED:\n SESSION_ENGINE = \"django.contrib.sessions.backends.cached_db\"\n else:\n SESSION_ENGINE = \"django.contrib.sessions.backends.db\"\n\nHAS_CELERY = bool(config.get('celery', 'broker'))\nif HAS_CELERY:\n CELERY_BROKER_URL = config.get('celery', 'broker')\n CELERY_RESULT_BACKEND = config.get('celery', 'backend')\nelse:\n CELERY_TASK_ALWAYS_EAGER = True\n\n# Internal settings\nLANGUAGES = [\n ('en', _('English')),\n 
('de', _('German')),\n]\nLANGUAGES_NATURAL_NAMES = [\n ('en', 'English'),\n ('de', 'Deutsch'),\n]\nLANGUAGE_CODE = 'en'\n\nLOCALE_PATHS = (\n os.path.join(os.path.dirname(__file__), 'locale'),\n)\n\nFORMAT_MODULE_PATH = [\n 'pretalx.common.formats',\n]\n\nSESSION_COOKIE_NAME = 'pretalx_session'\nCSRF_COOKIE_NAME = 'pretalx_csrftoken'\nSESSION_COOKIE_HTTPONLY = True\n\nDJANGO_APPS = [\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n]\nEXTERNAL_APPS = [\n 'compressor',\n 'bootstrap4',\n 'djangoformsetjs',\n 'jquery',\n 'rules',\n 'zxcvbn_password',\n]\nLOCAL_APPS = [\n 'pretalx.common.CommonConfig',\n 'pretalx.event',\n 'pretalx.mail.MailConfig',\n 'pretalx.person',\n 'pretalx.schedule',\n 'pretalx.submission.SubmissionConfig',\n 'pretalx.agenda.AgendaConfig',\n 'pretalx.cfp.CfPConfig',\n 'pretalx.orga.OrgaConfig',\n]\nINSTALLED_APPS = DJANGO_APPS + EXTERNAL_APPS + LOCAL_APPS\n\nwith suppress(ImportError):\n import django_extensions # noqa\n INSTALLED_APPS.append('django_extensions')\n\nAUTHENTICATION_BACKENDS = (\n 'rules.permissions.ObjectPermissionBackend',\n 'django.contrib.auth.backends.ModelBackend',\n)\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware', # Security first\n 'django.middleware.common.CommonMiddleware', # Set some sensible defaults, now, before responses are modified\n 'pretalx.common.middleware.MultiDomainMiddleware', # Verifying the proper domain next\n 'whitenoise.middleware.WhiteNoiseMiddleware', # Next up: static files\n 'pretalx.common.middleware.SessionMiddleware', # Add session handling\n 'pretalx.common.middleware.CsrfViewMiddleware', # Protect against CSRF attacks before forms/data are processed\n 'django.contrib.auth.middleware.AuthenticationMiddleware', # Uses sessions\n 'django.contrib.messages.middleware.MessageMiddleware', # Uses sessions\n 'django.middleware.clickjacking.XFrameOptionsMiddleware', # Protects against clickjacking\n 'pretalx.common.middleware.EventPermissionMiddleware', # Sets locales, mostly\n 'csp.middleware.CSPMiddleware', # Modifies/sets CSP headers\n]\n\nwith suppress(ImportError):\n import debug_toolbar # noqa\n if DEBUG:\n INSTALLED_APPS.append('debug_toolbar.apps.DebugToolbarConfig')\n MIDDLEWARE.append('debug_toolbar.middleware.DebugToolbarMiddleware')\n\n\n# Security settings\nX_FRAME_OPTIONS = 'DENY'\nSECURE_BROWSER_XSS_FILTER = True\nSECURE_CONTENT_TYPE_NOSNIFF = True\nCSP_DEFAULT_SRC = (\"'self'\", \"'unsafe-eval'\")\nCSP_STYLE_SRC = (\"'self'\", \"'unsafe-inline'\")\nCSP_IMG_SRC = (\"'self'\", \"data:\")\n\nWSGI_APPLICATION = 'pretalx.wsgi.application'\n\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nAUTH_USER_MODEL = 'person.User'\nLOGIN_URL = '/login' # global login does not yet exist\n\ntemplate_loaders = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n)\nif not DEBUG:\n template_loaders = (\n ('django.template.loaders.cached.Loader', template_loaders),\n )\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(DATA_DIR, 'templates'),\n os.path.join(BASE_DIR, 'templates'),\n ],\n 'OPTIONS': {\n 'context_processors': [\n 'django.contrib.auth.context_processors.auth',\n 'django.template.context_processors.debug',\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.media',\n \"django.template.context_processors.request\",\n 
'django.template.context_processors.static',\n 'django.template.context_processors.tz',\n 'django.contrib.messages.context_processors.messages',\n 'pretalx.common.context_processors.add_events',\n 'pretalx.common.context_processors.locale_context',\n 'pretalx.common.context_processors.messages',\n 'pretalx.common.context_processors.system_information',\n ],\n 'loaders': template_loaders\n },\n },\n]\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'compressor.finders.CompressorFinder',\n)\n\nSTATICFILES_DIRS = [\n os.path.join(BASE_DIR, 'pretalx', 'static')\n] if os.path.exists(os.path.join(BASE_DIR, 'pretalx', 'static')) else []\n\nSTATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'\n\nCOMPRESS_ENABLED = COMPRESS_OFFLINE = not DEBUG\n\nCOMPRESS_PRECOMPILERS = (\n ('text/x-scss', 'django_libsass.SassCompiler'),\n)\n\nCOMPRESS_CSS_FILTERS = (\n # CssAbsoluteFilter is incredibly slow, especially when dealing with our _flags.scss\n # However, we don't need it if we consequently use the static() function in Sass\n # 'compressor.filters.css_default.CssAbsoluteFilter',\n 'compressor.filters.cssmin.CSSCompressorFilter',\n)\n\nDEBUG_TOOLBAR_PATCH_SETTINGS = False\n\nDEBUG_TOOLBAR_CONFIG = {\n 'JQUERY_URL': '',\n}\n\nINTERNAL_IPS = ('127.0.0.1', '::1')\n\nMESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'\n\n# Logging settings\nloglevel = 'DEBUG' if DEBUG else 'INFO'\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'default': {\n 'format': '%(levelname)s %(asctime)s %(name)s %(module)s %(message)s'\n },\n },\n 'handlers': {\n 'console': {\n 'level': loglevel,\n 'class': 'logging.StreamHandler',\n 'formatter': 'default'\n },\n 'file': {\n 'level': loglevel,\n 'class': 'logging.FileHandler',\n 'filename': os.path.join(LOG_DIR, 'pretalx.log'),\n 'formatter': 'default'\n },\n },\n 'loggers': {\n '': {\n 'handlers': ['file', 'console'],\n 'level': loglevel,\n 'propagate': True,\n },\n 'django.request': {\n 'handlers': ['file', 'console'],\n 'level': loglevel,\n 'propagate': True,\n },\n 'django.security': {\n 'handlers': ['file', 'console'],\n 'level': loglevel,\n 'propagate': True,\n },\n 'django.db.backends': {\n 'handlers': ['file', 'console'],\n 'level': 'INFO', # Do not output all the queries\n 'propagate': True,\n }\n },\n}\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n {\n 'NAME': 'zxcvbn_password.ZXCVBNValidator',\n 'OPTIONS': {\n 'min_score': 3,\n 'user_attributes': ('nick', 'email', 'name'),\n },\n },\n]\n\nMESSAGE_TAGS = {\n messages.INFO: 'info',\n messages.ERROR: 'danger',\n messages.WARNING: 'warning',\n messages.SUCCESS: 'success',\n}\n\nBOOTSTRAP4 = {\n 'field_renderers': {\n 'default': 'bootstrap4.renderers.FieldRenderer',\n 'inline': 'bootstrap4.renderers.InlineFieldRenderer',\n 'event': 'pretalx.common.forms.renderers.EventFieldRenderer',\n },\n}\n\n\ndef log_initial():\n from pretalx.common.console import start_box, end_box, print_line\n mode = 'development' if DEBUG else 'production'\n lines = [\n (f'This is pretalx calling, running in {mode} mode.', True),\n ('', 
False),\n (f'Settings:', True),\n (f'Read from: {config_files}', False),\n (f'Database: {db_name} ({db_backend})', False),\n (f'Logging: {LOG_DIR}', False),\n ('', False),\n ]\n\n size = max(len(line[0]) for line in lines) + 4\n start_box(size)\n for line in lines:\n print_line(line[0], box=True, bold=line[1], size=size)\n end_box(size)\n\n\nlog_initial()\n",
"path": "src/pretalx/settings.py"
}
] | [
{
"content": "import configparser\nimport os\nimport sys\nfrom contextlib import suppress\nfrom urllib.parse import urlparse\n\nfrom django.contrib.messages import constants as messages # NOQA\nfrom django.utils.crypto import get_random_string\nfrom django.utils.translation import ugettext_lazy as _ # NOQA\n\n\ndef reduce_dict(data):\n return {\n section_name: {\n key: value\n for key, value in section_content.items()\n if value is not None\n }\n for section_name, section_content in env_config.items()\n }\n\n\nconfig = configparser.RawConfigParser()\nconfig.read_dict({\n 'filesystem': {\n 'base': os.path.dirname(os.path.dirname(__file__)),\n }, # defaults depend on the data dir and need to be set once the data dir is fixed\n 'site': {\n 'debug': 'runserver' in sys.argv,\n 'url': 'http://localhost',\n 'cookie_domain': '',\n }, # the https setting is determined by url if not explicitly set\n 'database': {\n 'backend': 'sqlite3',\n # 'name': '',\n 'user': '',\n 'password': '',\n 'host': '',\n 'port': '',\n },\n 'mail': {\n 'from': 'admin@localhost',\n 'host': 'localhost',\n 'port': '25',\n 'user': '',\n 'password': '',\n 'tls': 'False',\n 'ssl': 'False',\n },\n 'cache': {\n },\n 'celery': {\n 'broker': '',\n 'backend': '',\n },\n})\n\nlegacy_config = {\n 'filesystem': {\n 'data': config.get('django', 'data_dir', fallback=None),\n 'static': config.get('django', 'static', fallback=None),\n },\n 'site': {\n 'debug': config.get('django', 'debug', fallback=None),\n 'secret': config.get('django', 'secret', fallback=None),\n },\n}\n\n\nif 'PRETALX_CONFIG_FILE' in os.environ:\n config_files = config.read_file(open(os.environ.get('PRETALX_CONFIG_FILE'), encoding='utf-8'))\nelse:\n config_files = config.read([\n '/etc/pretalx/pretalx.cfg',\n os.path.expanduser('~/.pretalx.cfg'),\n 'pretalx.cfg',\n ], encoding='utf-8')\n\nenv_config = {\n 'filesystem': {\n 'data': os.getenv('PRETALX_DATA_DIR'),\n },\n 'site': {\n 'debug': os.getenv('PRETALX_DEBUG'),\n 'url': os.getenv('PRETALX_SITE_URL'),\n 'https': os.getenv('PRETALX_HTTPS'),\n 'cookie_domain': os.getenv('PRETALX_COOKIE_DOMAIN'),\n },\n 'mail': {\n 'from': os.getenv('PRETALX_MAIL_FROM'),\n 'host': os.getenv('PRETALX_MAIL_HOST'),\n 'port': os.getenv('PRETALX_MAIL_PORT'),\n 'user': os.getenv('PRETALX_MAIL_USER'),\n 'password': os.getenv('PRETALX_MAIL_PASSWORD'),\n 'tls': os.getenv('PRETALX_MAIL_TLS'),\n 'ssl': os.getenv('PRETALX_MAIL_SSL'),\n },\n 'database': {\n 'backend': os.getenv('PRETALX_DB_TYPE'),\n 'name': os.getenv('PRETALX_DB_NAME'),\n 'user': os.getenv('PRETALX_DB_USER'),\n 'password': os.getenv('PRETALX_DB_PASS'),\n 'host': os.getenv('PRETALX_DB_HOST'),\n 'port': os.getenv('PRETALX_DB_PORT'),\n },\n 'celery': {\n 'broker': os.getenv('PRETALX_CELERY_BROKER'),\n 'backend': os.getenv('PRETALX_CELERY_BACKEND'),\n },\n}\n\nconfig.read_dict(reduce_dict(legacy_config))\nconfig.read_dict(reduce_dict(env_config))\n\n# File system and directory settings\nBASE_DIR = config.get('filesystem', 'base')\nDATA_DIR = config.get('filesystem', 'data', fallback=os.path.join(BASE_DIR, 'data'))\nLOG_DIR = config.get('filesystem', 'logs', fallback=os.path.join(DATA_DIR, 'logs'))\nMEDIA_ROOT = config.get('filesystem', 'media', fallback=os.path.join(DATA_DIR, 'media'))\nSTATIC_ROOT = config.get('filesystem', 'static', fallback=os.path.join(BASE_DIR, 'static.dist'))\n\nfor directory in (BASE_DIR, DATA_DIR, LOG_DIR, MEDIA_ROOT):\n if not os.path.exists(directory):\n os.mkdir(directory)\n\nif config.has_option('site', 'secret'):\n SECRET_KEY = config.get('site', 
'secret')\nelse:\n SECRET_FILE = os.path.join(DATA_DIR, '.secret')\n if os.path.exists(SECRET_FILE):\n with open(SECRET_FILE, 'r') as f:\n SECRET_KEY = f.read().strip()\n else:\n chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'\n SECRET_KEY = get_random_string(50, chars)\n with open(SECRET_FILE, 'w') as f:\n os.chmod(SECRET_FILE, 0o600)\n os.chown(SECRET_FILE, os.getuid(), os.getgid())\n f.write(SECRET_KEY)\n\n# General setup settings\nDEBUG = config.getboolean('site', 'debug')\n\nif DEBUG:\n EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\nelse:\n MAIL_FROM = SERVER_EMAIL = DEFAULT_FROM_EMAIL = config.get('mail', 'from')\n EMAIL_HOST = config.get('mail', 'host')\n EMAIL_PORT = config.get('mail', 'port')\n EMAIL_HOST_USER = config.get('mail', 'user')\n EMAIL_HOST_PASSWORD = config.get('mail', 'password')\n EMAIL_USE_TLS = config.getboolean('mail', 'tls')\n EMAIL_USE_SSL = config.getboolean('mail', 'ssl')\n\n\n# Database configuration\ndb_backend = config.get('database', 'backend')\ndb_name = config.get('database', 'name', fallback=os.path.join(DATA_DIR, 'db.sqlite3'))\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.' + db_backend,\n 'NAME': db_name,\n 'USER': config.get('database', 'user'),\n 'PASSWORD': config.get('database', 'password'),\n 'HOST': config.get('database', 'host'),\n 'PORT': config.get('database', 'port'),\n 'CONN_MAX_AGE': 0 if db_backend == 'sqlite3' else 120,\n }\n}\n\n# URL configuration\nSITE_URL = config.get('site', 'url', fallback='http://localhost')\nSITE_NETLOC = urlparse(SITE_URL).netloc\nALLOWED_HOSTS = ['*']\n\nif config.get('site', 'cookie_domain'):\n SESSION_COOKIE_DOMAIN = CSRF_COOKIE_DOMAIN = config.get('site', 'cookie_domain')\n\nSESSION_COOKIE_SECURE = config.getboolean('site', 'https', fallback=SITE_URL.startswith('https:'))\n\nROOT_URLCONF = 'pretalx.urls'\nSTATIC_URL = '/static/'\nMEDIA_URL = '/media/'\n\n# Cache configuration\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.dummy.DummyCache',\n }\n}\nREAL_CACHE_USED = False\nSESSION_ENGINE = None\n\nHAS_MEMCACHED = bool(os.getenv('PRETALX_MEMCACHE', ''))\nif HAS_MEMCACHED:\n REAL_CACHE_USED = True\n CACHES['default'] = {\n 'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache',\n 'LOCATION': os.getenv('PRETALX_MEMCACHE')\n }\n\nHAS_REDIS = bool(os.getenv('PRETALX_REDIS', ''))\nif HAS_REDIS:\n CACHES['redis'] = {\n \"BACKEND\": \"django_redis.cache.RedisCache\",\n \"LOCATION\": os.getenv('PRETALX_REDIS'),\n \"OPTIONS\": {\n \"CLIENT_CLASS\": \"django_redis.client.DefaultClient\",\n }\n }\n CACHES['redis_sessions'] = {\n \"BACKEND\": \"django_redis.cache.RedisCache\",\n \"LOCATION\": os.getenv('PRETALX_REDIS'),\n \"TIMEOUT\": 3600 * 24 * 30,\n \"OPTIONS\": {\n \"CLIENT_CLASS\": \"django_redis.client.DefaultClient\",\n }\n }\n if not HAS_MEMCACHED:\n CACHES['default'] = CACHES['redis']\n REAL_CACHE_USED = True\n\n if os.getenv('PRETALX_REDIS_SESSIONS', 'False') == 'True':\n SESSION_ENGINE = \"django.contrib.sessions.backends.cache\"\n SESSION_CACHE_ALIAS = \"redis_sessions\"\n\nif not SESSION_ENGINE:\n if REAL_CACHE_USED:\n SESSION_ENGINE = \"django.contrib.sessions.backends.cached_db\"\n else:\n SESSION_ENGINE = \"django.contrib.sessions.backends.db\"\n\nHAS_CELERY = bool(config.get('celery', 'broker'))\nif HAS_CELERY:\n CELERY_BROKER_URL = config.get('celery', 'broker')\n CELERY_RESULT_BACKEND = config.get('celery', 'backend')\nelse:\n CELERY_TASK_ALWAYS_EAGER = True\n\n# Internal settings\nLANGUAGES = [\n ('en', _('English')),\n 
('de', _('German')),\n]\nLANGUAGES_NATURAL_NAMES = [\n ('en', 'English'),\n ('de', 'Deutsch'),\n]\nLANGUAGE_CODE = 'en'\n\nLOCALE_PATHS = (\n os.path.join(os.path.dirname(__file__), 'locale'),\n)\n\nFORMAT_MODULE_PATH = [\n 'pretalx.common.formats',\n]\n\nSESSION_COOKIE_NAME = 'pretalx_session'\nCSRF_COOKIE_NAME = 'pretalx_csrftoken'\nSESSION_COOKIE_HTTPONLY = True\n\nDJANGO_APPS = [\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n]\nEXTERNAL_APPS = [\n 'compressor',\n 'bootstrap4',\n 'djangoformsetjs',\n 'jquery',\n 'rules',\n]\nLOCAL_APPS = [\n 'pretalx.common.CommonConfig',\n 'pretalx.event',\n 'pretalx.mail.MailConfig',\n 'pretalx.person',\n 'pretalx.schedule',\n 'pretalx.submission.SubmissionConfig',\n 'pretalx.agenda.AgendaConfig',\n 'pretalx.cfp.CfPConfig',\n 'pretalx.orga.OrgaConfig',\n]\nINSTALLED_APPS = DJANGO_APPS + EXTERNAL_APPS + LOCAL_APPS\n\nwith suppress(ImportError):\n import django_extensions # noqa\n INSTALLED_APPS.append('django_extensions')\n\nAUTHENTICATION_BACKENDS = (\n 'rules.permissions.ObjectPermissionBackend',\n 'django.contrib.auth.backends.ModelBackend',\n)\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware', # Security first\n 'django.middleware.common.CommonMiddleware', # Set some sensible defaults, now, before responses are modified\n 'pretalx.common.middleware.MultiDomainMiddleware', # Verifying the proper domain next\n 'whitenoise.middleware.WhiteNoiseMiddleware', # Next up: static files\n 'pretalx.common.middleware.SessionMiddleware', # Add session handling\n 'pretalx.common.middleware.CsrfViewMiddleware', # Protect against CSRF attacks before forms/data are processed\n 'django.contrib.auth.middleware.AuthenticationMiddleware', # Uses sessions\n 'django.contrib.messages.middleware.MessageMiddleware', # Uses sessions\n 'django.middleware.clickjacking.XFrameOptionsMiddleware', # Protects against clickjacking\n 'pretalx.common.middleware.EventPermissionMiddleware', # Sets locales, mostly\n 'csp.middleware.CSPMiddleware', # Modifies/sets CSP headers\n]\n\nwith suppress(ImportError):\n import debug_toolbar # noqa\n if DEBUG:\n INSTALLED_APPS.append('debug_toolbar.apps.DebugToolbarConfig')\n MIDDLEWARE.append('debug_toolbar.middleware.DebugToolbarMiddleware')\n\n\n# Security settings\nX_FRAME_OPTIONS = 'DENY'\nSECURE_BROWSER_XSS_FILTER = True\nSECURE_CONTENT_TYPE_NOSNIFF = True\nCSP_DEFAULT_SRC = (\"'self'\", \"'unsafe-eval'\")\nCSP_STYLE_SRC = (\"'self'\", \"'unsafe-inline'\")\nCSP_IMG_SRC = (\"'self'\", \"data:\")\n\nWSGI_APPLICATION = 'pretalx.wsgi.application'\n\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nAUTH_USER_MODEL = 'person.User'\nLOGIN_URL = '/login' # global login does not yet exist\n\ntemplate_loaders = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n)\nif not DEBUG:\n template_loaders = (\n ('django.template.loaders.cached.Loader', template_loaders),\n )\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(DATA_DIR, 'templates'),\n os.path.join(BASE_DIR, 'templates'),\n ],\n 'OPTIONS': {\n 'context_processors': [\n 'django.contrib.auth.context_processors.auth',\n 'django.template.context_processors.debug',\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.media',\n \"django.template.context_processors.request\",\n 'django.template.context_processors.static',\n 
'django.template.context_processors.tz',\n 'django.contrib.messages.context_processors.messages',\n 'pretalx.common.context_processors.add_events',\n 'pretalx.common.context_processors.locale_context',\n 'pretalx.common.context_processors.messages',\n 'pretalx.common.context_processors.system_information',\n ],\n 'loaders': template_loaders\n },\n },\n]\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'compressor.finders.CompressorFinder',\n)\n\nSTATICFILES_DIRS = [\n os.path.join(BASE_DIR, 'pretalx', 'static')\n] if os.path.exists(os.path.join(BASE_DIR, 'pretalx', 'static')) else []\n\nSTATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'\n\nCOMPRESS_ENABLED = COMPRESS_OFFLINE = not DEBUG\n\nCOMPRESS_PRECOMPILERS = (\n ('text/x-scss', 'django_libsass.SassCompiler'),\n)\n\nCOMPRESS_CSS_FILTERS = (\n # CssAbsoluteFilter is incredibly slow, especially when dealing with our _flags.scss\n # However, we don't need it if we consequently use the static() function in Sass\n # 'compressor.filters.css_default.CssAbsoluteFilter',\n 'compressor.filters.cssmin.CSSCompressorFilter',\n)\n\nDEBUG_TOOLBAR_PATCH_SETTINGS = False\n\nDEBUG_TOOLBAR_CONFIG = {\n 'JQUERY_URL': '',\n}\n\nINTERNAL_IPS = ('127.0.0.1', '::1')\n\nMESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'\n\n# Logging settings\nloglevel = 'DEBUG' if DEBUG else 'INFO'\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'default': {\n 'format': '%(levelname)s %(asctime)s %(name)s %(module)s %(message)s'\n },\n },\n 'handlers': {\n 'console': {\n 'level': loglevel,\n 'class': 'logging.StreamHandler',\n 'formatter': 'default'\n },\n 'file': {\n 'level': loglevel,\n 'class': 'logging.FileHandler',\n 'filename': os.path.join(LOG_DIR, 'pretalx.log'),\n 'formatter': 'default'\n },\n },\n 'loggers': {\n '': {\n 'handlers': ['file', 'console'],\n 'level': loglevel,\n 'propagate': True,\n },\n 'django.request': {\n 'handlers': ['file', 'console'],\n 'level': loglevel,\n 'propagate': True,\n },\n 'django.security': {\n 'handlers': ['file', 'console'],\n 'level': loglevel,\n 'propagate': True,\n },\n 'django.db.backends': {\n 'handlers': ['file', 'console'],\n 'level': 'INFO', # Do not output all the queries\n 'propagate': True,\n }\n },\n}\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\nMESSAGE_TAGS = {\n messages.INFO: 'info',\n messages.ERROR: 'danger',\n messages.WARNING: 'warning',\n messages.SUCCESS: 'success',\n}\n\nBOOTSTRAP4 = {\n 'field_renderers': {\n 'default': 'bootstrap4.renderers.FieldRenderer',\n 'inline': 'bootstrap4.renderers.InlineFieldRenderer',\n 'event': 'pretalx.common.forms.renderers.EventFieldRenderer',\n },\n}\n\n\ndef log_initial():\n from pretalx.common.console import start_box, end_box, print_line\n mode = 'development' if DEBUG else 'production'\n lines = [\n (f'This is pretalx calling, running in {mode} mode.', True),\n ('', False),\n (f'Settings:', True),\n (f'Read from: {config_files}', False),\n (f'Database: {db_name} ({db_backend})', False),\n (f'Logging: {LOG_DIR}', False),\n ('', False),\n ]\n\n size = 
max(len(line[0]) for line in lines) + 4\n start_box(size)\n for line in lines:\n print_line(line[0], box=True, bold=line[1], size=size)\n end_box(size)\n\n\nlog_initial()\n",
"path": "src/pretalx/settings.py"
}
] | diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index e61b4867ee..207e5d71df 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -9,6 +9,12 @@ vx.x.x
*Released on 2017-xx-xx*
+Breaking Changes
+~~~~~~~~~~~~~~~~
+
+- The default value for email SSL usage is now ``False``, permitting the default
+ configuration of ``localhost:25`` to work on more machines out of the box.
+
Features
~~~~~~~~
diff --git a/doc/install/configure.rst b/doc/install/configure.rst
index a84df48063..0f1ab879cf 100644
--- a/doc/install/configure.rst
+++ b/doc/install/configure.rst
@@ -182,7 +182,7 @@ The mail section
- Should SSL be used when sending mail? Only one of TLS and SSL may be used.
- **Environment variable:** ``PRETALX_MAIL_SSL``
-- **Default:** ``True``
+- **Default:** ``False``
The celery section
------------------
diff --git a/src/pretalx/settings.py b/src/pretalx/settings.py
index df99d2ada9..d3c204eef9 100644
--- a/src/pretalx/settings.py
+++ b/src/pretalx/settings.py
@@ -45,7 +45,7 @@ def reduce_dict(data):
'user': '',
'password': '',
'tls': 'False',
- 'ssl': 'True',
+ 'ssl': 'False',
},
'cache': {
},
|
microsoft__ptvsd-843 | Using sys.exit() with no arguments causes TypeError inside ptvsd
## Environment data
- PTVSD version: 4.1.3
- OS and version: Windows 10
- Python version (& distribution if applicable, e.g. Anaconda): 3.6
- Using VS Code or Visual Studio: VS
## Actual behavior
```
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "c:\program files (x86)\microsoft visual studio\2017\community\common7\ide\extensions\microsoft\python\core\ptvsd_launcher.py", line 119, in <module>
vspd.debug(filename, port_num, debug_id, debug_options, run_as)
File "c:\program files (x86)\microsoft visual studio\2017\community\common7\ide\extensions\microsoft\python\core\Packages\ptvsd\debugger.py", line 37, in debug
run(address, filename, *args, **kwargs)
File "c:\program files (x86)\microsoft visual studio\2017\community\common7\ide\extensions\microsoft\python\core\Packages\ptvsd\_local.py", line 48, in run_file
run(argv, addr, **kwargs)
File "c:\program files (x86)\microsoft visual studio\2017\community\common7\ide\extensions\microsoft\python\core\Packages\ptvsd\_local.py", line 101, in _run
daemon.exitcode = int(ex.code)
TypeError: int() argument must be a string, a bytes-like object or a number, not 'NoneType'
```
## Expected behavior
No error
## Steps to reproduce:
1. Debug this code:
```
import sys

sys.exit()
```
https://docs.python.org/3/library/sys.html#sys.exit
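As the linked docs note, calling `sys.exit()` with no argument leaves `SystemExit.code` set to `None`, which is exactly what `int(ex.code)` in the traceback above chokes on. A standalone illustration (the guard at the end is one possible fix, not necessarily the one ptvsd ships):
```python
import sys

try:
    sys.exit()              # no argument -> SystemExit.code is None
except SystemExit as ex:
    print(repr(ex.code))    # None
    # int(None) raises the TypeError quoted above; a None-check avoids it.
    exitcode = int(ex.code) if ex.code is not None else 0
    print(exitcode)         # 0
```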
| [
{
"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nimport sys\nimport time\n\nimport pydevd\nfrom _pydevd_bundle.pydevd_comm import get_global_debugger\n\nfrom ptvsd.pydevd_hooks import install\nfrom ptvsd.runner import run as no_debug_runner\nfrom ptvsd.socket import Address\nfrom ptvsd._util import new_hidden_thread\n\n\nPYDEVD_DEFAULTS = {\n '--qt-support=auto',\n}\n\n\ndef _set_pydevd_defaults(pydevd_args):\n args_to_append = []\n for arg in PYDEVD_DEFAULTS:\n if arg not in pydevd_args:\n args_to_append.append(arg)\n return pydevd_args + args_to_append\n\n\n########################\n# high-level functions\n\ndef debug_main(address, name, kind, *extra, **kwargs):\n if not kwargs.pop('wait', False) and address.isserver:\n def unblock_debugger():\n debugger = get_global_debugger()\n while debugger is None:\n time.sleep(0.1)\n debugger = get_global_debugger()\n debugger.ready_to_run = True\n new_hidden_thread('ptvsd.unblock_debugger', unblock_debugger).start()\n if kind == 'module':\n run_module(address, name, *extra, **kwargs)\n else:\n run_file(address, name, *extra, **kwargs)\n\n\ndef run_main(address, name, kind, *extra, **kwargs):\n addr = Address.from_raw(address)\n sys.argv[:] = _run_main_argv(name, extra)\n runner = kwargs.pop('_runner', no_debug_runner)\n runner(addr, name, kind == 'module', *extra, **kwargs)\n\n\n########################\n# low-level functions\n\ndef run_module(address, modname, *extra, **kwargs):\n \"\"\"Run pydevd for the given module.\"\"\"\n addr = Address.from_raw(address)\n if not addr.isserver:\n kwargs['singlesession'] = True\n run = kwargs.pop('_run', _run)\n prog = kwargs.pop('_prog', sys.argv[0])\n filename = modname + ':'\n argv = _run_argv(addr, filename, extra, _prog=prog)\n argv.insert(argv.index('--file'), '--module')\n run(argv, addr, **kwargs)\n\n\ndef run_file(address, filename, *extra, **kwargs):\n \"\"\"Run pydevd for the given Python file.\"\"\"\n addr = Address.from_raw(address)\n if not addr.isserver:\n kwargs['singlesession'] = True\n run = kwargs.pop('_run', _run)\n prog = kwargs.pop('_prog', sys.argv[0])\n argv = _run_argv(addr, filename, extra, _prog=prog)\n run(argv, addr, **kwargs)\n\n\ndef _run_argv(address, filename, extra, _prog=sys.argv[0]):\n \"\"\"Convert the given values to an argv that pydevd.main() supports.\"\"\"\n if '--' in extra:\n pydevd = list(extra[:extra.index('--')])\n extra = list(extra[len(pydevd) + 1:])\n else:\n pydevd = []\n extra = list(extra)\n\n pydevd = _set_pydevd_defaults(pydevd)\n host, port = address\n argv = [\n _prog,\n '--port', str(port),\n ]\n if not address.isserver:\n argv.extend([\n '--client', host or 'localhost',\n ])\n return argv + pydevd + [\n '--file', filename,\n ] + extra\n\n\ndef _run_main_argv(filename, extra):\n if '--' in extra:\n pydevd = list(extra[:extra.index('--')])\n extra = list(extra[len(pydevd) + 1:])\n else:\n extra = list(extra)\n return [filename] + extra\n\n\ndef _run(argv, addr, _pydevd=pydevd, _install=install, **kwargs):\n \"\"\"Start pydevd with the given commandline args.\"\"\"\n #print(' '.join(argv))\n\n # Pydevd assumes that the \"__main__\" module is the \"pydevd\" module\n # and does some tricky stuff under that assumption. For example,\n # when the debugger starts up it calls save_main_module()\n # (in pydevd_bundle/pydevd_utils.py). 
That function explicitly sets\n # sys.modules[\"pydevd\"] to sys.modules[\"__main__\"] and then sets\n # the __main__ module to a new one. This makes some sense since\n # it gives the debugged script a fresh __main__ module.\n #\n # This complicates things for us since we are running a different\n # file (i.e. this one) as the __main__ module. Consequently,\n # sys.modules[\"pydevd\"] gets set to ptvsd/__main__.py. Subsequent\n # imports of the \"pydevd\" module then return the wrong module. We\n # work around this by avoiding lazy imports of the \"pydevd\" module.\n # We also replace the __main__ module with the \"pydevd\" module here.\n if sys.modules['__main__'].__file__ != _pydevd.__file__:\n sys.modules['__main___orig'] = sys.modules['__main__']\n sys.modules['__main__'] = _pydevd\n\n daemon = _install(_pydevd, addr, **kwargs)\n sys.argv[:] = argv\n try:\n _pydevd.main()\n except SystemExit as ex:\n daemon.exitcode = int(ex.code)\n raise\n",
"path": "ptvsd/_local.py"
}
] | [
{
"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nimport sys\nimport time\n\nimport pydevd\nfrom _pydevd_bundle.pydevd_comm import get_global_debugger\n\nfrom ptvsd.pydevd_hooks import install\nfrom ptvsd.runner import run as no_debug_runner\nfrom ptvsd.socket import Address\nfrom ptvsd._util import new_hidden_thread\n\n\nPYDEVD_DEFAULTS = {\n '--qt-support=auto',\n}\n\n\ndef _set_pydevd_defaults(pydevd_args):\n args_to_append = []\n for arg in PYDEVD_DEFAULTS:\n if arg not in pydevd_args:\n args_to_append.append(arg)\n return pydevd_args + args_to_append\n\n\n########################\n# high-level functions\n\ndef debug_main(address, name, kind, *extra, **kwargs):\n if not kwargs.pop('wait', False) and address.isserver:\n def unblock_debugger():\n debugger = get_global_debugger()\n while debugger is None:\n time.sleep(0.1)\n debugger = get_global_debugger()\n debugger.ready_to_run = True\n new_hidden_thread('ptvsd.unblock_debugger', unblock_debugger).start()\n if kind == 'module':\n run_module(address, name, *extra, **kwargs)\n else:\n run_file(address, name, *extra, **kwargs)\n\n\ndef run_main(address, name, kind, *extra, **kwargs):\n addr = Address.from_raw(address)\n sys.argv[:] = _run_main_argv(name, extra)\n runner = kwargs.pop('_runner', no_debug_runner)\n runner(addr, name, kind == 'module', *extra, **kwargs)\n\n\n########################\n# low-level functions\n\ndef run_module(address, modname, *extra, **kwargs):\n \"\"\"Run pydevd for the given module.\"\"\"\n addr = Address.from_raw(address)\n if not addr.isserver:\n kwargs['singlesession'] = True\n run = kwargs.pop('_run', _run)\n prog = kwargs.pop('_prog', sys.argv[0])\n filename = modname + ':'\n argv = _run_argv(addr, filename, extra, _prog=prog)\n argv.insert(argv.index('--file'), '--module')\n run(argv, addr, **kwargs)\n\n\ndef run_file(address, filename, *extra, **kwargs):\n \"\"\"Run pydevd for the given Python file.\"\"\"\n addr = Address.from_raw(address)\n if not addr.isserver:\n kwargs['singlesession'] = True\n run = kwargs.pop('_run', _run)\n prog = kwargs.pop('_prog', sys.argv[0])\n argv = _run_argv(addr, filename, extra, _prog=prog)\n run(argv, addr, **kwargs)\n\n\ndef _run_argv(address, filename, extra, _prog=sys.argv[0]):\n \"\"\"Convert the given values to an argv that pydevd.main() supports.\"\"\"\n if '--' in extra:\n pydevd = list(extra[:extra.index('--')])\n extra = list(extra[len(pydevd) + 1:])\n else:\n pydevd = []\n extra = list(extra)\n\n pydevd = _set_pydevd_defaults(pydevd)\n host, port = address\n argv = [\n _prog,\n '--port', str(port),\n ]\n if not address.isserver:\n argv.extend([\n '--client', host or 'localhost',\n ])\n return argv + pydevd + [\n '--file', filename,\n ] + extra\n\n\ndef _run_main_argv(filename, extra):\n if '--' in extra:\n pydevd = list(extra[:extra.index('--')])\n extra = list(extra[len(pydevd) + 1:])\n else:\n extra = list(extra)\n return [filename] + extra\n\n\ndef _run(argv, addr, _pydevd=pydevd, _install=install, **kwargs):\n \"\"\"Start pydevd with the given commandline args.\"\"\"\n #print(' '.join(argv))\n\n # Pydevd assumes that the \"__main__\" module is the \"pydevd\" module\n # and does some tricky stuff under that assumption. For example,\n # when the debugger starts up it calls save_main_module()\n # (in pydevd_bundle/pydevd_utils.py). 
That function explicitly sets\n # sys.modules[\"pydevd\"] to sys.modules[\"__main__\"] and then sets\n # the __main__ module to a new one. This makes some sense since\n # it gives the debugged script a fresh __main__ module.\n #\n # This complicates things for us since we are running a different\n # file (i.e. this one) as the __main__ module. Consequently,\n # sys.modules[\"pydevd\"] gets set to ptvsd/__main__.py. Subsequent\n # imports of the \"pydevd\" module then return the wrong module. We\n # work around this by avoiding lazy imports of the \"pydevd\" module.\n # We also replace the __main__ module with the \"pydevd\" module here.\n if sys.modules['__main__'].__file__ != _pydevd.__file__:\n sys.modules['__main___orig'] = sys.modules['__main__']\n sys.modules['__main__'] = _pydevd\n\n daemon = _install(_pydevd, addr, **kwargs)\n sys.argv[:] = argv\n try:\n _pydevd.main()\n except SystemExit as ex:\n daemon.exitcode = 0 if ex.code is None else int(ex.code)\n raise\n",
"path": "ptvsd/_local.py"
}
] | diff --git a/ptvsd/_local.py b/ptvsd/_local.py
index 7872cb1c8..09f671eae 100644
--- a/ptvsd/_local.py
+++ b/ptvsd/_local.py
@@ -139,5 +139,5 @@ def _run(argv, addr, _pydevd=pydevd, _install=install, **kwargs):
try:
_pydevd.main()
except SystemExit as ex:
- daemon.exitcode = int(ex.code)
+ daemon.exitcode = 0 if ex.code is None else int(ex.code)
raise
|
aio-libs__aiohttp-3107 | StreamResponse instances are all equal
## Long story short
Since #2494, `StreamResponse` inherits `collections.MutableMapping.__eq__`, which makes essentially all instances compare equal to one another.
The implementation in `Mapping` looks like `return dict(self.items()) == dict(other.items())`.
This is especially problematic for `WebSocketResponse`: when following https://aiohttp.readthedocs.io/en/stable/faq.html#how-do-i-programmatically-close-a-websocket-server-side, if you store the responses in a `list` instead of a `set`, `list.remove()` relies on `==` and may therefore remove the wrong websocket.
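To make the mechanism concrete, here is a minimal, self-contained sketch (not aiohttp code; `FakeResponse` is a made-up name) showing how a `MutableMapping` subclass backed by a per-instance state dict picks up value-based equality from the ABC:

```python
# Minimal sketch (not aiohttp code): a MutableMapping backed by a state
# dict inherits Mapping.__eq__, so two "empty" instances compare equal.
from collections.abc import MutableMapping


class FakeResponse(MutableMapping):
    def __init__(self):
        self._state = {}

    def __getitem__(self, key):
        return self._state[key]

    def __setitem__(self, key, value):
        self._state[key] = value

    def __delitem__(self, key):
        del self._state[key]

    def __len__(self):
        return len(self._state)

    def __iter__(self):
        return iter(self._state)


r1, r2 = FakeResponse(), FakeResponse()
print(r1 == r2)   # True  -- inherited Mapping.__eq__ compares contents
print(r1 is r2)   # False
```

The fix in the diff below restores identity-based equality (`self is other`), which is consistent with the identity-based `__hash__` the class already defines.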
## Expected behaviour
```python
>>> from aiohttp.web_ws import WebSocketResponse
>>> r1 = WebSocketResponse()
>>> r2 = WebSocketResponse()
>>> r1 == r2
False
>>> id(r1) == id(r2)
False
>>> r1 is r2
False
>>> hash(r1) == hash(r2)
False
```
As a rule, `a == b` should imply `hash(a) == hash(b)`, but that invariant is now broken.
## Actual behaviour
Since v3.0:
```python
>>> r1 == r2
True
>>> id(r1) == id(r2)
False
>>> r1 is r2
False
>>> hash(r1) == hash(r2)
False
```
## Steps to reproduce
Described above
## Your environment
* `aiohttp >= 3.0`
| [
{
"content": "import collections\nimport datetime\nimport enum\nimport json\nimport math\nimport time\nimport warnings\nimport zlib\nfrom email.utils import parsedate\nfrom http.cookies import SimpleCookie\n\nfrom multidict import CIMultiDict, CIMultiDictProxy\n\nfrom . import hdrs, payload\nfrom .helpers import HeadersMixin, rfc822_formatted_time, sentinel\nfrom .http import RESPONSES, SERVER_SOFTWARE, HttpVersion10, HttpVersion11\n\n\n__all__ = ('ContentCoding', 'StreamResponse', 'Response', 'json_response')\n\n\nclass ContentCoding(enum.Enum):\n # The content codings that we have support for.\n #\n # Additional registered codings are listed at:\n # https://www.iana.org/assignments/http-parameters/http-parameters.xhtml#content-coding\n deflate = 'deflate'\n gzip = 'gzip'\n identity = 'identity'\n\n\n############################################################\n# HTTP Response classes\n############################################################\n\n\nclass StreamResponse(collections.MutableMapping, HeadersMixin):\n\n _length_check = True\n\n def __init__(self, *, status=200, reason=None, headers=None):\n self._body = None\n self._keep_alive = None\n self._chunked = False\n self._compression = False\n self._compression_force = None\n self._cookies = SimpleCookie()\n\n self._req = None\n self._payload_writer = None\n self._eof_sent = False\n self._body_length = 0\n self._state = {}\n\n if headers is not None:\n self._headers = CIMultiDict(headers)\n else:\n self._headers = CIMultiDict()\n\n self.set_status(status, reason)\n\n @property\n def prepared(self):\n return self._payload_writer is not None\n\n @property\n def task(self):\n return getattr(self._req, 'task', None)\n\n @property\n def status(self):\n return self._status\n\n @property\n def chunked(self):\n return self._chunked\n\n @property\n def compression(self):\n return self._compression\n\n @property\n def reason(self):\n return self._reason\n\n def set_status(self, status, reason=None, _RESPONSES=RESPONSES):\n assert not self.prepared, \\\n 'Cannot change the response status code after ' \\\n 'the headers have been sent'\n self._status = int(status)\n if reason is None:\n try:\n reason = _RESPONSES[self._status][0]\n except Exception:\n reason = ''\n self._reason = reason\n\n @property\n def keep_alive(self):\n return self._keep_alive\n\n def force_close(self):\n self._keep_alive = False\n\n @property\n def body_length(self):\n return self._body_length\n\n @property\n def output_length(self):\n warnings.warn('output_length is deprecated', DeprecationWarning)\n return self._payload_writer.buffer_size\n\n def enable_chunked_encoding(self, chunk_size=None):\n \"\"\"Enables automatic chunked transfer encoding.\"\"\"\n self._chunked = True\n\n if hdrs.CONTENT_LENGTH in self._headers:\n raise RuntimeError(\"You can't enable chunked encoding when \"\n \"a content length is set\")\n if chunk_size is not None:\n warnings.warn('Chunk size is deprecated #1615', DeprecationWarning)\n\n def enable_compression(self, force=None):\n \"\"\"Enables response compression encoding.\"\"\"\n # Backwards compatibility for when force was a bool <0.17.\n if type(force) == bool:\n force = ContentCoding.deflate if force else ContentCoding.identity\n elif force is not None:\n assert isinstance(force, ContentCoding), (\"force should one of \"\n \"None, bool or \"\n \"ContentEncoding\")\n\n self._compression = True\n self._compression_force = force\n\n @property\n def headers(self):\n return self._headers\n\n @property\n def cookies(self):\n return 
self._cookies\n\n def set_cookie(self, name, value, *, expires=None,\n domain=None, max_age=None, path='/',\n secure=None, httponly=None, version=None):\n \"\"\"Set or update response cookie.\n\n Sets new cookie or updates existent with new value.\n Also updates only those params which are not None.\n \"\"\"\n\n old = self._cookies.get(name)\n if old is not None and old.coded_value == '':\n # deleted cookie\n self._cookies.pop(name, None)\n\n self._cookies[name] = value\n c = self._cookies[name]\n\n if expires is not None:\n c['expires'] = expires\n elif c.get('expires') == 'Thu, 01 Jan 1970 00:00:00 GMT':\n del c['expires']\n\n if domain is not None:\n c['domain'] = domain\n\n if max_age is not None:\n c['max-age'] = max_age\n elif 'max-age' in c:\n del c['max-age']\n\n c['path'] = path\n\n if secure is not None:\n c['secure'] = secure\n if httponly is not None:\n c['httponly'] = httponly\n if version is not None:\n c['version'] = version\n\n def del_cookie(self, name, *, domain=None, path='/'):\n \"\"\"Delete cookie.\n\n Creates new empty expired cookie.\n \"\"\"\n # TODO: do we need domain/path here?\n self._cookies.pop(name, None)\n self.set_cookie(name, '', max_age=0,\n expires=\"Thu, 01 Jan 1970 00:00:00 GMT\",\n domain=domain, path=path)\n\n @property\n def content_length(self):\n # Just a placeholder for adding setter\n return super().content_length\n\n @content_length.setter\n def content_length(self, value):\n if value is not None:\n value = int(value)\n if self._chunked:\n raise RuntimeError(\"You can't set content length when \"\n \"chunked encoding is enable\")\n self._headers[hdrs.CONTENT_LENGTH] = str(value)\n else:\n self._headers.pop(hdrs.CONTENT_LENGTH, None)\n\n @property\n def content_type(self):\n # Just a placeholder for adding setter\n return super().content_type\n\n @content_type.setter\n def content_type(self, value):\n self.content_type # read header values if needed\n self._content_type = str(value)\n self._generate_content_type_header()\n\n @property\n def charset(self):\n # Just a placeholder for adding setter\n return super().charset\n\n @charset.setter\n def charset(self, value):\n ctype = self.content_type # read header values if needed\n if ctype == 'application/octet-stream':\n raise RuntimeError(\"Setting charset for application/octet-stream \"\n \"doesn't make sense, setup content_type first\")\n if value is None:\n self._content_dict.pop('charset', None)\n else:\n self._content_dict['charset'] = str(value).lower()\n self._generate_content_type_header()\n\n @property\n def last_modified(self):\n \"\"\"The value of Last-Modified HTTP header, or None.\n\n This header is represented as a `datetime` object.\n \"\"\"\n httpdate = self.headers.get(hdrs.LAST_MODIFIED)\n if httpdate is not None:\n timetuple = parsedate(httpdate)\n if timetuple is not None:\n return datetime.datetime(*timetuple[:6],\n tzinfo=datetime.timezone.utc)\n return None\n\n @last_modified.setter\n def last_modified(self, value):\n if value is None:\n self.headers.pop(hdrs.LAST_MODIFIED, None)\n elif isinstance(value, (int, float)):\n self.headers[hdrs.LAST_MODIFIED] = time.strftime(\n \"%a, %d %b %Y %H:%M:%S GMT\", time.gmtime(math.ceil(value)))\n elif isinstance(value, datetime.datetime):\n self.headers[hdrs.LAST_MODIFIED] = time.strftime(\n \"%a, %d %b %Y %H:%M:%S GMT\", value.utctimetuple())\n elif isinstance(value, str):\n self.headers[hdrs.LAST_MODIFIED] = value\n\n def _generate_content_type_header(self, CONTENT_TYPE=hdrs.CONTENT_TYPE):\n params = '; '.join(\"%s=%s\" % i for i in 
self._content_dict.items())\n if params:\n ctype = self._content_type + '; ' + params\n else:\n ctype = self._content_type\n self.headers[CONTENT_TYPE] = ctype\n\n def _do_start_compression(self, coding):\n if coding != ContentCoding.identity:\n self.headers[hdrs.CONTENT_ENCODING] = coding.value\n self._payload_writer.enable_compression(coding.value)\n # Compressed payload may have different content length,\n # remove the header\n self._headers.popall(hdrs.CONTENT_LENGTH, None)\n\n def _start_compression(self, request):\n if self._compression_force:\n self._do_start_compression(self._compression_force)\n else:\n accept_encoding = request.headers.get(\n hdrs.ACCEPT_ENCODING, '').lower()\n for coding in ContentCoding:\n if coding.value in accept_encoding:\n self._do_start_compression(coding)\n return\n\n async def prepare(self, request):\n if self._eof_sent:\n return\n if self._payload_writer is not None:\n return self._payload_writer\n\n await request._prepare_hook(self)\n return await self._start(request)\n\n async def _start(self, request,\n HttpVersion10=HttpVersion10,\n HttpVersion11=HttpVersion11,\n CONNECTION=hdrs.CONNECTION,\n DATE=hdrs.DATE,\n SERVER=hdrs.SERVER,\n CONTENT_TYPE=hdrs.CONTENT_TYPE,\n CONTENT_LENGTH=hdrs.CONTENT_LENGTH,\n SET_COOKIE=hdrs.SET_COOKIE,\n SERVER_SOFTWARE=SERVER_SOFTWARE,\n TRANSFER_ENCODING=hdrs.TRANSFER_ENCODING):\n self._req = request\n\n keep_alive = self._keep_alive\n if keep_alive is None:\n keep_alive = request.keep_alive\n self._keep_alive = keep_alive\n\n version = request.version\n writer = self._payload_writer = request._payload_writer\n\n headers = self._headers\n for cookie in self._cookies.values():\n value = cookie.output(header='')[1:]\n headers.add(SET_COOKIE, value)\n\n if self._compression:\n self._start_compression(request)\n\n if self._chunked:\n if version != HttpVersion11:\n raise RuntimeError(\n \"Using chunked encoding is forbidden \"\n \"for HTTP/{0.major}.{0.minor}\".format(request.version))\n writer.enable_chunking()\n headers[TRANSFER_ENCODING] = 'chunked'\n if CONTENT_LENGTH in headers:\n del headers[CONTENT_LENGTH]\n elif self._length_check:\n writer.length = self.content_length\n if writer.length is None:\n if version >= HttpVersion11:\n writer.enable_chunking()\n headers[TRANSFER_ENCODING] = 'chunked'\n if CONTENT_LENGTH in headers:\n del headers[CONTENT_LENGTH]\n else:\n keep_alive = False\n\n headers.setdefault(CONTENT_TYPE, 'application/octet-stream')\n headers.setdefault(DATE, rfc822_formatted_time())\n headers.setdefault(SERVER, SERVER_SOFTWARE)\n\n # connection header\n if CONNECTION not in headers:\n if keep_alive:\n if version == HttpVersion10:\n headers[CONNECTION] = 'keep-alive'\n else:\n if version == HttpVersion11:\n headers[CONNECTION] = 'close'\n\n # status line\n status_line = 'HTTP/{}.{} {} {}'.format(\n version[0], version[1], self._status, self._reason)\n await writer.write_headers(status_line, headers)\n\n return writer\n\n async def write(self, data):\n assert isinstance(data, (bytes, bytearray, memoryview)), \\\n \"data argument must be byte-ish (%r)\" % type(data)\n\n if self._eof_sent:\n raise RuntimeError(\"Cannot call write() after write_eof()\")\n if self._payload_writer is None:\n raise RuntimeError(\"Cannot call write() before prepare()\")\n\n await self._payload_writer.write(data)\n\n async def drain(self):\n assert not self._eof_sent, \"EOF has already been sent\"\n assert self._payload_writer is not None, \\\n \"Response has not been started\"\n warnings.warn(\"drain method is deprecated, use 
await resp.write()\",\n DeprecationWarning,\n stacklevel=2)\n await self._payload_writer.drain()\n\n async def write_eof(self, data=b''):\n assert isinstance(data, (bytes, bytearray, memoryview)), \\\n \"data argument must be byte-ish (%r)\" % type(data)\n\n if self._eof_sent:\n return\n\n assert self._payload_writer is not None, \\\n \"Response has not been started\"\n\n await self._payload_writer.write_eof(data)\n self._eof_sent = True\n self._req = None\n self._body_length = self._payload_writer.output_size\n self._payload_writer = None\n\n def __repr__(self):\n if self._eof_sent:\n info = \"eof\"\n elif self.prepared:\n info = \"{} {} \".format(self._req.method, self._req.path)\n else:\n info = \"not prepared\"\n return \"<{} {} {}>\".format(self.__class__.__name__,\n self.reason, info)\n\n def __getitem__(self, key):\n return self._state[key]\n\n def __setitem__(self, key, value):\n self._state[key] = value\n\n def __delitem__(self, key):\n del self._state[key]\n\n def __len__(self):\n return len(self._state)\n\n def __iter__(self):\n return iter(self._state)\n\n def __hash__(self):\n return hash(id(self))\n\n\nclass Response(StreamResponse):\n\n def __init__(self, *, body=None, status=200,\n reason=None, text=None, headers=None, content_type=None,\n charset=None):\n if body is not None and text is not None:\n raise ValueError(\"body and text are not allowed together\")\n\n if headers is None:\n headers = CIMultiDict()\n elif not isinstance(headers, (CIMultiDict, CIMultiDictProxy)):\n headers = CIMultiDict(headers)\n\n if content_type is not None and \"charset\" in content_type:\n raise ValueError(\"charset must not be in content_type \"\n \"argument\")\n\n if text is not None:\n if hdrs.CONTENT_TYPE in headers:\n if content_type or charset:\n raise ValueError(\"passing both Content-Type header and \"\n \"content_type or charset params \"\n \"is forbidden\")\n else:\n # fast path for filling headers\n if not isinstance(text, str):\n raise TypeError(\"text argument must be str (%r)\" %\n type(text))\n if content_type is None:\n content_type = 'text/plain'\n if charset is None:\n charset = 'utf-8'\n headers[hdrs.CONTENT_TYPE] = (\n content_type + '; charset=' + charset)\n body = text.encode(charset)\n text = None\n else:\n if hdrs.CONTENT_TYPE in headers:\n if content_type is not None or charset is not None:\n raise ValueError(\"passing both Content-Type header and \"\n \"content_type or charset params \"\n \"is forbidden\")\n else:\n if content_type is not None:\n if charset is not None:\n content_type += '; charset=' + charset\n headers[hdrs.CONTENT_TYPE] = content_type\n\n super().__init__(status=status, reason=reason, headers=headers)\n\n if text is not None:\n self.text = text\n else:\n self.body = body\n\n self._compressed_body = None\n\n @property\n def body(self):\n return self._body\n\n @body.setter\n def body(self, body,\n CONTENT_TYPE=hdrs.CONTENT_TYPE,\n CONTENT_LENGTH=hdrs.CONTENT_LENGTH):\n if body is None:\n self._body = None\n self._body_payload = False\n elif isinstance(body, (bytes, bytearray)):\n self._body = body\n self._body_payload = False\n else:\n try:\n self._body = body = payload.PAYLOAD_REGISTRY.get(body)\n except payload.LookupError:\n raise ValueError('Unsupported body type %r' % type(body))\n\n self._body_payload = True\n\n headers = self._headers\n\n # set content-length header if needed\n if not self._chunked and CONTENT_LENGTH not in headers:\n size = body.size\n if size is not None:\n headers[CONTENT_LENGTH] = str(size)\n\n # set content-type\n if 
CONTENT_TYPE not in headers:\n headers[CONTENT_TYPE] = body.content_type\n\n # copy payload headers\n if body.headers:\n for (key, value) in body.headers.items():\n if key not in headers:\n headers[key] = value\n\n self._compressed_body = None\n\n @property\n def text(self):\n if self._body is None:\n return None\n return self._body.decode(self.charset or 'utf-8')\n\n @text.setter\n def text(self, text):\n assert text is None or isinstance(text, str), \\\n \"text argument must be str (%r)\" % type(text)\n\n if self.content_type == 'application/octet-stream':\n self.content_type = 'text/plain'\n if self.charset is None:\n self.charset = 'utf-8'\n\n self._body = text.encode(self.charset)\n self._body_payload = False\n self._compressed_body = None\n\n @property\n def content_length(self):\n if self._chunked:\n return None\n\n if hdrs.CONTENT_LENGTH in self.headers:\n return super().content_length\n\n if self._compressed_body is not None:\n # Return length of the compressed body\n return len(self._compressed_body)\n elif self._body_payload:\n # A payload without content length, or a compressed payload\n return None\n elif self._body is not None:\n return len(self._body)\n else:\n return 0\n\n @content_length.setter\n def content_length(self, value):\n raise RuntimeError(\"Content length is set automatically\")\n\n async def write_eof(self):\n if self._eof_sent:\n return\n if self._compressed_body is not None:\n body = self._compressed_body\n else:\n body = self._body\n if body is not None:\n if (self._req._method == hdrs.METH_HEAD or\n self._status in [204, 304]):\n await super().write_eof()\n elif self._body_payload:\n await body.write(self._payload_writer)\n await super().write_eof()\n else:\n await super().write_eof(body)\n else:\n await super().write_eof()\n\n async def _start(self, request):\n if not self._chunked and hdrs.CONTENT_LENGTH not in self._headers:\n if not self._body_payload:\n if self._body is not None:\n self._headers[hdrs.CONTENT_LENGTH] = str(len(self._body))\n else:\n self._headers[hdrs.CONTENT_LENGTH] = '0'\n\n return await super()._start(request)\n\n def _do_start_compression(self, coding):\n if self._body_payload or self._chunked:\n return super()._do_start_compression(coding)\n if coding != ContentCoding.identity:\n # Instead of using _payload_writer.enable_compression,\n # compress the whole body\n zlib_mode = (16 + zlib.MAX_WBITS\n if coding.value == 'gzip' else -zlib.MAX_WBITS)\n compressobj = zlib.compressobj(wbits=zlib_mode)\n self._compressed_body = compressobj.compress(self._body) +\\\n compressobj.flush()\n self._headers[hdrs.CONTENT_ENCODING] = coding.value\n self._headers[hdrs.CONTENT_LENGTH] = \\\n str(len(self._compressed_body))\n\n\ndef json_response(data=sentinel, *, text=None, body=None, status=200,\n reason=None, headers=None, content_type='application/json',\n dumps=json.dumps):\n if data is not sentinel:\n if text or body:\n raise ValueError(\n \"only one of data, text, or body should be specified\"\n )\n else:\n text = dumps(data)\n return Response(text=text, body=body, status=status, reason=reason,\n headers=headers, content_type=content_type)\n",
"path": "aiohttp/web_response.py"
}
] | [
{
"content": "import collections\nimport datetime\nimport enum\nimport json\nimport math\nimport time\nimport warnings\nimport zlib\nfrom email.utils import parsedate\nfrom http.cookies import SimpleCookie\n\nfrom multidict import CIMultiDict, CIMultiDictProxy\n\nfrom . import hdrs, payload\nfrom .helpers import HeadersMixin, rfc822_formatted_time, sentinel\nfrom .http import RESPONSES, SERVER_SOFTWARE, HttpVersion10, HttpVersion11\n\n\n__all__ = ('ContentCoding', 'StreamResponse', 'Response', 'json_response')\n\n\nclass ContentCoding(enum.Enum):\n # The content codings that we have support for.\n #\n # Additional registered codings are listed at:\n # https://www.iana.org/assignments/http-parameters/http-parameters.xhtml#content-coding\n deflate = 'deflate'\n gzip = 'gzip'\n identity = 'identity'\n\n\n############################################################\n# HTTP Response classes\n############################################################\n\n\nclass StreamResponse(collections.MutableMapping, HeadersMixin):\n\n _length_check = True\n\n def __init__(self, *, status=200, reason=None, headers=None):\n self._body = None\n self._keep_alive = None\n self._chunked = False\n self._compression = False\n self._compression_force = None\n self._cookies = SimpleCookie()\n\n self._req = None\n self._payload_writer = None\n self._eof_sent = False\n self._body_length = 0\n self._state = {}\n\n if headers is not None:\n self._headers = CIMultiDict(headers)\n else:\n self._headers = CIMultiDict()\n\n self.set_status(status, reason)\n\n @property\n def prepared(self):\n return self._payload_writer is not None\n\n @property\n def task(self):\n return getattr(self._req, 'task', None)\n\n @property\n def status(self):\n return self._status\n\n @property\n def chunked(self):\n return self._chunked\n\n @property\n def compression(self):\n return self._compression\n\n @property\n def reason(self):\n return self._reason\n\n def set_status(self, status, reason=None, _RESPONSES=RESPONSES):\n assert not self.prepared, \\\n 'Cannot change the response status code after ' \\\n 'the headers have been sent'\n self._status = int(status)\n if reason is None:\n try:\n reason = _RESPONSES[self._status][0]\n except Exception:\n reason = ''\n self._reason = reason\n\n @property\n def keep_alive(self):\n return self._keep_alive\n\n def force_close(self):\n self._keep_alive = False\n\n @property\n def body_length(self):\n return self._body_length\n\n @property\n def output_length(self):\n warnings.warn('output_length is deprecated', DeprecationWarning)\n return self._payload_writer.buffer_size\n\n def enable_chunked_encoding(self, chunk_size=None):\n \"\"\"Enables automatic chunked transfer encoding.\"\"\"\n self._chunked = True\n\n if hdrs.CONTENT_LENGTH in self._headers:\n raise RuntimeError(\"You can't enable chunked encoding when \"\n \"a content length is set\")\n if chunk_size is not None:\n warnings.warn('Chunk size is deprecated #1615', DeprecationWarning)\n\n def enable_compression(self, force=None):\n \"\"\"Enables response compression encoding.\"\"\"\n # Backwards compatibility for when force was a bool <0.17.\n if type(force) == bool:\n force = ContentCoding.deflate if force else ContentCoding.identity\n elif force is not None:\n assert isinstance(force, ContentCoding), (\"force should one of \"\n \"None, bool or \"\n \"ContentEncoding\")\n\n self._compression = True\n self._compression_force = force\n\n @property\n def headers(self):\n return self._headers\n\n @property\n def cookies(self):\n return 
self._cookies\n\n def set_cookie(self, name, value, *, expires=None,\n domain=None, max_age=None, path='/',\n secure=None, httponly=None, version=None):\n \"\"\"Set or update response cookie.\n\n Sets new cookie or updates existent with new value.\n Also updates only those params which are not None.\n \"\"\"\n\n old = self._cookies.get(name)\n if old is not None and old.coded_value == '':\n # deleted cookie\n self._cookies.pop(name, None)\n\n self._cookies[name] = value\n c = self._cookies[name]\n\n if expires is not None:\n c['expires'] = expires\n elif c.get('expires') == 'Thu, 01 Jan 1970 00:00:00 GMT':\n del c['expires']\n\n if domain is not None:\n c['domain'] = domain\n\n if max_age is not None:\n c['max-age'] = max_age\n elif 'max-age' in c:\n del c['max-age']\n\n c['path'] = path\n\n if secure is not None:\n c['secure'] = secure\n if httponly is not None:\n c['httponly'] = httponly\n if version is not None:\n c['version'] = version\n\n def del_cookie(self, name, *, domain=None, path='/'):\n \"\"\"Delete cookie.\n\n Creates new empty expired cookie.\n \"\"\"\n # TODO: do we need domain/path here?\n self._cookies.pop(name, None)\n self.set_cookie(name, '', max_age=0,\n expires=\"Thu, 01 Jan 1970 00:00:00 GMT\",\n domain=domain, path=path)\n\n @property\n def content_length(self):\n # Just a placeholder for adding setter\n return super().content_length\n\n @content_length.setter\n def content_length(self, value):\n if value is not None:\n value = int(value)\n if self._chunked:\n raise RuntimeError(\"You can't set content length when \"\n \"chunked encoding is enable\")\n self._headers[hdrs.CONTENT_LENGTH] = str(value)\n else:\n self._headers.pop(hdrs.CONTENT_LENGTH, None)\n\n @property\n def content_type(self):\n # Just a placeholder for adding setter\n return super().content_type\n\n @content_type.setter\n def content_type(self, value):\n self.content_type # read header values if needed\n self._content_type = str(value)\n self._generate_content_type_header()\n\n @property\n def charset(self):\n # Just a placeholder for adding setter\n return super().charset\n\n @charset.setter\n def charset(self, value):\n ctype = self.content_type # read header values if needed\n if ctype == 'application/octet-stream':\n raise RuntimeError(\"Setting charset for application/octet-stream \"\n \"doesn't make sense, setup content_type first\")\n if value is None:\n self._content_dict.pop('charset', None)\n else:\n self._content_dict['charset'] = str(value).lower()\n self._generate_content_type_header()\n\n @property\n def last_modified(self):\n \"\"\"The value of Last-Modified HTTP header, or None.\n\n This header is represented as a `datetime` object.\n \"\"\"\n httpdate = self.headers.get(hdrs.LAST_MODIFIED)\n if httpdate is not None:\n timetuple = parsedate(httpdate)\n if timetuple is not None:\n return datetime.datetime(*timetuple[:6],\n tzinfo=datetime.timezone.utc)\n return None\n\n @last_modified.setter\n def last_modified(self, value):\n if value is None:\n self.headers.pop(hdrs.LAST_MODIFIED, None)\n elif isinstance(value, (int, float)):\n self.headers[hdrs.LAST_MODIFIED] = time.strftime(\n \"%a, %d %b %Y %H:%M:%S GMT\", time.gmtime(math.ceil(value)))\n elif isinstance(value, datetime.datetime):\n self.headers[hdrs.LAST_MODIFIED] = time.strftime(\n \"%a, %d %b %Y %H:%M:%S GMT\", value.utctimetuple())\n elif isinstance(value, str):\n self.headers[hdrs.LAST_MODIFIED] = value\n\n def _generate_content_type_header(self, CONTENT_TYPE=hdrs.CONTENT_TYPE):\n params = '; '.join(\"%s=%s\" % i for i in 
self._content_dict.items())\n if params:\n ctype = self._content_type + '; ' + params\n else:\n ctype = self._content_type\n self.headers[CONTENT_TYPE] = ctype\n\n def _do_start_compression(self, coding):\n if coding != ContentCoding.identity:\n self.headers[hdrs.CONTENT_ENCODING] = coding.value\n self._payload_writer.enable_compression(coding.value)\n # Compressed payload may have different content length,\n # remove the header\n self._headers.popall(hdrs.CONTENT_LENGTH, None)\n\n def _start_compression(self, request):\n if self._compression_force:\n self._do_start_compression(self._compression_force)\n else:\n accept_encoding = request.headers.get(\n hdrs.ACCEPT_ENCODING, '').lower()\n for coding in ContentCoding:\n if coding.value in accept_encoding:\n self._do_start_compression(coding)\n return\n\n async def prepare(self, request):\n if self._eof_sent:\n return\n if self._payload_writer is not None:\n return self._payload_writer\n\n await request._prepare_hook(self)\n return await self._start(request)\n\n async def _start(self, request,\n HttpVersion10=HttpVersion10,\n HttpVersion11=HttpVersion11,\n CONNECTION=hdrs.CONNECTION,\n DATE=hdrs.DATE,\n SERVER=hdrs.SERVER,\n CONTENT_TYPE=hdrs.CONTENT_TYPE,\n CONTENT_LENGTH=hdrs.CONTENT_LENGTH,\n SET_COOKIE=hdrs.SET_COOKIE,\n SERVER_SOFTWARE=SERVER_SOFTWARE,\n TRANSFER_ENCODING=hdrs.TRANSFER_ENCODING):\n self._req = request\n\n keep_alive = self._keep_alive\n if keep_alive is None:\n keep_alive = request.keep_alive\n self._keep_alive = keep_alive\n\n version = request.version\n writer = self._payload_writer = request._payload_writer\n\n headers = self._headers\n for cookie in self._cookies.values():\n value = cookie.output(header='')[1:]\n headers.add(SET_COOKIE, value)\n\n if self._compression:\n self._start_compression(request)\n\n if self._chunked:\n if version != HttpVersion11:\n raise RuntimeError(\n \"Using chunked encoding is forbidden \"\n \"for HTTP/{0.major}.{0.minor}\".format(request.version))\n writer.enable_chunking()\n headers[TRANSFER_ENCODING] = 'chunked'\n if CONTENT_LENGTH in headers:\n del headers[CONTENT_LENGTH]\n elif self._length_check:\n writer.length = self.content_length\n if writer.length is None:\n if version >= HttpVersion11:\n writer.enable_chunking()\n headers[TRANSFER_ENCODING] = 'chunked'\n if CONTENT_LENGTH in headers:\n del headers[CONTENT_LENGTH]\n else:\n keep_alive = False\n\n headers.setdefault(CONTENT_TYPE, 'application/octet-stream')\n headers.setdefault(DATE, rfc822_formatted_time())\n headers.setdefault(SERVER, SERVER_SOFTWARE)\n\n # connection header\n if CONNECTION not in headers:\n if keep_alive:\n if version == HttpVersion10:\n headers[CONNECTION] = 'keep-alive'\n else:\n if version == HttpVersion11:\n headers[CONNECTION] = 'close'\n\n # status line\n status_line = 'HTTP/{}.{} {} {}'.format(\n version[0], version[1], self._status, self._reason)\n await writer.write_headers(status_line, headers)\n\n return writer\n\n async def write(self, data):\n assert isinstance(data, (bytes, bytearray, memoryview)), \\\n \"data argument must be byte-ish (%r)\" % type(data)\n\n if self._eof_sent:\n raise RuntimeError(\"Cannot call write() after write_eof()\")\n if self._payload_writer is None:\n raise RuntimeError(\"Cannot call write() before prepare()\")\n\n await self._payload_writer.write(data)\n\n async def drain(self):\n assert not self._eof_sent, \"EOF has already been sent\"\n assert self._payload_writer is not None, \\\n \"Response has not been started\"\n warnings.warn(\"drain method is deprecated, use 
await resp.write()\",\n DeprecationWarning,\n stacklevel=2)\n await self._payload_writer.drain()\n\n async def write_eof(self, data=b''):\n assert isinstance(data, (bytes, bytearray, memoryview)), \\\n \"data argument must be byte-ish (%r)\" % type(data)\n\n if self._eof_sent:\n return\n\n assert self._payload_writer is not None, \\\n \"Response has not been started\"\n\n await self._payload_writer.write_eof(data)\n self._eof_sent = True\n self._req = None\n self._body_length = self._payload_writer.output_size\n self._payload_writer = None\n\n def __repr__(self):\n if self._eof_sent:\n info = \"eof\"\n elif self.prepared:\n info = \"{} {} \".format(self._req.method, self._req.path)\n else:\n info = \"not prepared\"\n return \"<{} {} {}>\".format(self.__class__.__name__,\n self.reason, info)\n\n def __getitem__(self, key):\n return self._state[key]\n\n def __setitem__(self, key, value):\n self._state[key] = value\n\n def __delitem__(self, key):\n del self._state[key]\n\n def __len__(self):\n return len(self._state)\n\n def __iter__(self):\n return iter(self._state)\n\n def __hash__(self):\n return hash(id(self))\n\n def __eq__(self, other):\n return self is other\n\n\nclass Response(StreamResponse):\n\n def __init__(self, *, body=None, status=200,\n reason=None, text=None, headers=None, content_type=None,\n charset=None):\n if body is not None and text is not None:\n raise ValueError(\"body and text are not allowed together\")\n\n if headers is None:\n headers = CIMultiDict()\n elif not isinstance(headers, (CIMultiDict, CIMultiDictProxy)):\n headers = CIMultiDict(headers)\n\n if content_type is not None and \"charset\" in content_type:\n raise ValueError(\"charset must not be in content_type \"\n \"argument\")\n\n if text is not None:\n if hdrs.CONTENT_TYPE in headers:\n if content_type or charset:\n raise ValueError(\"passing both Content-Type header and \"\n \"content_type or charset params \"\n \"is forbidden\")\n else:\n # fast path for filling headers\n if not isinstance(text, str):\n raise TypeError(\"text argument must be str (%r)\" %\n type(text))\n if content_type is None:\n content_type = 'text/plain'\n if charset is None:\n charset = 'utf-8'\n headers[hdrs.CONTENT_TYPE] = (\n content_type + '; charset=' + charset)\n body = text.encode(charset)\n text = None\n else:\n if hdrs.CONTENT_TYPE in headers:\n if content_type is not None or charset is not None:\n raise ValueError(\"passing both Content-Type header and \"\n \"content_type or charset params \"\n \"is forbidden\")\n else:\n if content_type is not None:\n if charset is not None:\n content_type += '; charset=' + charset\n headers[hdrs.CONTENT_TYPE] = content_type\n\n super().__init__(status=status, reason=reason, headers=headers)\n\n if text is not None:\n self.text = text\n else:\n self.body = body\n\n self._compressed_body = None\n\n @property\n def body(self):\n return self._body\n\n @body.setter\n def body(self, body,\n CONTENT_TYPE=hdrs.CONTENT_TYPE,\n CONTENT_LENGTH=hdrs.CONTENT_LENGTH):\n if body is None:\n self._body = None\n self._body_payload = False\n elif isinstance(body, (bytes, bytearray)):\n self._body = body\n self._body_payload = False\n else:\n try:\n self._body = body = payload.PAYLOAD_REGISTRY.get(body)\n except payload.LookupError:\n raise ValueError('Unsupported body type %r' % type(body))\n\n self._body_payload = True\n\n headers = self._headers\n\n # set content-length header if needed\n if not self._chunked and CONTENT_LENGTH not in headers:\n size = body.size\n if size is not None:\n 
headers[CONTENT_LENGTH] = str(size)\n\n # set content-type\n if CONTENT_TYPE not in headers:\n headers[CONTENT_TYPE] = body.content_type\n\n # copy payload headers\n if body.headers:\n for (key, value) in body.headers.items():\n if key not in headers:\n headers[key] = value\n\n self._compressed_body = None\n\n @property\n def text(self):\n if self._body is None:\n return None\n return self._body.decode(self.charset or 'utf-8')\n\n @text.setter\n def text(self, text):\n assert text is None or isinstance(text, str), \\\n \"text argument must be str (%r)\" % type(text)\n\n if self.content_type == 'application/octet-stream':\n self.content_type = 'text/plain'\n if self.charset is None:\n self.charset = 'utf-8'\n\n self._body = text.encode(self.charset)\n self._body_payload = False\n self._compressed_body = None\n\n @property\n def content_length(self):\n if self._chunked:\n return None\n\n if hdrs.CONTENT_LENGTH in self.headers:\n return super().content_length\n\n if self._compressed_body is not None:\n # Return length of the compressed body\n return len(self._compressed_body)\n elif self._body_payload:\n # A payload without content length, or a compressed payload\n return None\n elif self._body is not None:\n return len(self._body)\n else:\n return 0\n\n @content_length.setter\n def content_length(self, value):\n raise RuntimeError(\"Content length is set automatically\")\n\n async def write_eof(self):\n if self._eof_sent:\n return\n if self._compressed_body is not None:\n body = self._compressed_body\n else:\n body = self._body\n if body is not None:\n if (self._req._method == hdrs.METH_HEAD or\n self._status in [204, 304]):\n await super().write_eof()\n elif self._body_payload:\n await body.write(self._payload_writer)\n await super().write_eof()\n else:\n await super().write_eof(body)\n else:\n await super().write_eof()\n\n async def _start(self, request):\n if not self._chunked and hdrs.CONTENT_LENGTH not in self._headers:\n if not self._body_payload:\n if self._body is not None:\n self._headers[hdrs.CONTENT_LENGTH] = str(len(self._body))\n else:\n self._headers[hdrs.CONTENT_LENGTH] = '0'\n\n return await super()._start(request)\n\n def _do_start_compression(self, coding):\n if self._body_payload or self._chunked:\n return super()._do_start_compression(coding)\n if coding != ContentCoding.identity:\n # Instead of using _payload_writer.enable_compression,\n # compress the whole body\n zlib_mode = (16 + zlib.MAX_WBITS\n if coding.value == 'gzip' else -zlib.MAX_WBITS)\n compressobj = zlib.compressobj(wbits=zlib_mode)\n self._compressed_body = compressobj.compress(self._body) +\\\n compressobj.flush()\n self._headers[hdrs.CONTENT_ENCODING] = coding.value\n self._headers[hdrs.CONTENT_LENGTH] = \\\n str(len(self._compressed_body))\n\n\ndef json_response(data=sentinel, *, text=None, body=None, status=200,\n reason=None, headers=None, content_type='application/json',\n dumps=json.dumps):\n if data is not sentinel:\n if text or body:\n raise ValueError(\n \"only one of data, text, or body should be specified\"\n )\n else:\n text = dumps(data)\n return Response(text=text, body=body, status=status, reason=reason,\n headers=headers, content_type=content_type)\n",
"path": "aiohttp/web_response.py"
}
] | diff --git a/CHANGES/3100.bugfix b/CHANGES/3100.bugfix
new file mode 100644
index 00000000000..9d9d3301f69
--- /dev/null
+++ b/CHANGES/3100.bugfix
@@ -0,0 +1 @@
+Fix `StreamResponse` equality, now that they are `MutableMapping` objects.
diff --git a/aiohttp/web_response.py b/aiohttp/web_response.py
index f3c2a5311a8..89835c63342 100644
--- a/aiohttp/web_response.py
+++ b/aiohttp/web_response.py
@@ -432,6 +432,9 @@ def __iter__(self):
def __hash__(self):
return hash(id(self))
+ def __eq__(self, other):
+ return self is other
+
class Response(StreamResponse):
diff --git a/tests/test_web_response.py b/tests/test_web_response.py
index 12a7f25deb2..da414e8c974 100644
--- a/tests/test_web_response.py
+++ b/tests/test_web_response.py
@@ -82,6 +82,14 @@ def test_stream_response_hashable():
hash(StreamResponse())
+def test_stream_response_eq():
+ resp1 = StreamResponse()
+ resp2 = StreamResponse()
+
+ assert resp1 == resp1
+ assert not resp1 == resp2
+
+
def test_stream_response_is_mutable_mapping():
resp = StreamResponse()
assert isinstance(resp, collections.MutableMapping)
|
networkx__networkx-2535 | missing commits
@hagberg, @dschult: I just noticed that there is no ``doc/release/api_1.11.rst``, but there is one here:
https://github.com/networkx/networkx/tree/v1.11
in ``doc/source/reference/api_1.11.rst``. It appears this file was never committed on the master branch.
The v1.11 branch is "59 commits ahead, 1066 commits behind master." So it looks like there may be a number of commits missing from master. For example, this is also missing:
https://github.com/networkx/networkx/commit/5665c71f3a9aec0325078de2de43537aee03386d
As this shows:
```
$ git lg networkx/drawing/tests/test_agraph.py
* d8ada85 - Make graph attributes work both to/from with agraph (#2507) (11 days ago) [Dan Schult]
* 7bfb768 - Improve drawing test scripts (typos, newlines, methods) (1 year, 5 months ago) [Michael-E-Rose]
* f5031dd - Adjust imports in drawing layouts with graphviz (1 year, 6 months ago) [Dan Schult]
* 9922ec7 - doc, formatting, and whitespace cleanup (5 years ago) [Aric Hagberg]
* 47565b1 - Handle name in translation between pygraphviz (AGraph) and networkx. Fixes #734 (5 years ago) [Aric Hagberg]
* 3665bc1 - Update tests (6 years ago) [Aric Hagberg]
* d41d15f - More imports cleanup and exceptions fixed. (6 years ago) [Loïc Séguin-C.]
* baceff1 - Added tests for multigraph conversion to/from agraph. Changed from_agraph() so that the tests pass. (8 years ago) [dschult]
* ca6df32 - Convert drawing tests to functional tests and use SkipTest if optional packages are not available. (8 years ago) [aric]
```
I suspect this was unintentional and that I should go through the missing commits, cherry-picking the appropriate ones or creating a new commit where cherry-picking doesn't apply cleanly. I just wanted to check whether that is correct before going through the effort. I will make a PR so you can review the commits I pick up before they are merged to master.
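For reference, a rough sketch of one way to carry out that workflow with plain git (the working branch name and `<commit-sha>` placeholder are hypothetical, not something agreed in this issue):

```
# List commits reachable from v1.11 but not from master (candidates to port).
git log --oneline master..v1.11

# git cherry marks with '+' the commits whose patch is absent from master.
git cherry -v master v1.11

# Port a selected commit onto a working branch based on master.
git checkout -b port-v1.11-commits master
git cherry-pick <commit-sha>   # fall back to a manual commit if it conflicts
```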
| [
{
"content": "\"\"\"Functions to convert NetworkX graphs to and from numpy/scipy matrices.\n\nThe preferred way of converting data to a NetworkX graph is through the\ngraph constuctor. The constructor calls the to_networkx_graph() function\nwhich attempts to guess the input type and convert it automatically.\n\nExamples\n--------\nCreate a 10 node random graph from a numpy matrix\n\n>>> import numpy\n>>> a = numpy.reshape(numpy.random.random_integers(0,1,size=100),(10,10))\n>>> D = nx.DiGraph(a)\n\nor equivalently\n\n>>> D = nx.to_networkx_graph(a,create_using=nx.DiGraph())\n\nSee Also\n--------\nnx_agraph, nx_pydot\n\"\"\"\n# Copyright (C) 2006-2014 by\n# Aric Hagberg <[email protected]>\n# Dan Schult <[email protected]>\n# Pieter Swart <[email protected]>\n# All rights reserved.\n# BSD license.\nimport warnings\nimport itertools\nimport networkx as nx\nfrom networkx.convert import _prep_create_using\nfrom networkx.utils import not_implemented_for\n__author__ = \"\"\"\\n\"\"\".join(['Aric Hagberg <[email protected]>',\n 'Pieter Swart ([email protected])',\n 'Dan Schult([email protected])'])\n__all__ = ['from_numpy_matrix', 'to_numpy_matrix',\n 'from_pandas_dataframe', 'to_pandas_dataframe',\n 'to_numpy_recarray',\n 'from_scipy_sparse_matrix', 'to_scipy_sparse_matrix',\n 'from_numpy_array', 'to_numpy_array']\n\n\ndef to_pandas_dataframe(G, nodelist=None, dtype=None, order=None,\n multigraph_weight=sum, weight='weight', nonedge=0.0):\n \"\"\"Return the graph adjacency matrix as a Pandas DataFrame.\n\n Parameters\n ----------\n G : graph\n The NetworkX graph used to construct the Pandas DataFrame.\n\n nodelist : list, optional\n The rows and columns are ordered according to the nodes in `nodelist`.\n If `nodelist` is None, then the ordering is produced by G.nodes().\n\n multigraph_weight : {sum, min, max}, optional\n An operator that determines how weights in multigraphs are handled.\n The default is to sum the weights of the multiple edges.\n\n weight : string or None, optional\n The edge attribute that holds the numerical value used for\n the edge weight. If an edge does not have that attribute, then the\n value 1 is used instead.\n\n nonedge : float, optional\n The matrix values corresponding to nonedges are typically set to zero.\n However, this could be undesirable if there are matrix values\n corresponding to actual edges that also have the value zero. If so,\n one might prefer nonedges to have some other value, such as nan.\n\n Returns\n -------\n df : Pandas DataFrame\n Graph adjacency matrix\n\n Notes\n -----\n The DataFrame entries are assigned to the weight edge attribute. When\n an edge does not have a weight attribute, the value of the entry is set to\n the number 1. For multiple (parallel) edges, the values of the entries\n are determined by the 'multigraph_weight' parameter. The default is to\n sum the weight attributes for each of the parallel edges.\n\n When `nodelist` does not contain every node in `G`, the matrix is built\n from the subgraph of `G` that is induced by the nodes in `nodelist`.\n\n The convention used for self-loop edges in graphs is to assign the\n diagonal matrix entry value to the weight attribute of the edge\n (or the number 1 if the edge has no weight attribute). 
If the\n alternate convention of doubling the edge weight is desired the\n resulting Pandas DataFrame can be modified as follows:\n\n >>> import pandas as pd\n >>> import numpy as np\n >>> G = nx.Graph([(1,1)])\n >>> df = nx.to_pandas_dataframe(G, dtype=int)\n >>> df\n 1\n 1 1\n >>> df.values[np.diag_indices_from(df)] *= 2\n >>> df\n 1\n 1 2\n\n Examples\n --------\n >>> G = nx.MultiDiGraph()\n >>> G.add_edge(0,1,weight=2)\n 0\n >>> G.add_edge(1,0)\n 0\n >>> G.add_edge(2,2,weight=3)\n 0\n >>> G.add_edge(2,2)\n 1\n >>> nx.to_pandas_dataframe(G, nodelist=[0,1,2], dtype=int)\n 0 1 2\n 0 0 2 0\n 1 1 0 0\n 2 0 0 4\n \"\"\"\n import pandas as pd\n M = to_numpy_matrix(G, nodelist=nodelist, dtype=dtype, order=order,\n multigraph_weight=multigraph_weight, weight=weight,\n nonedge=nonedge)\n if nodelist is None:\n nodelist = list(G)\n return pd.DataFrame(data=M, index=nodelist, columns=nodelist)\n\n\ndef from_pandas_dataframe(df, source='source', target='target', edge_attr=None,\n create_using=None):\n \"\"\"Return a graph from Pandas DataFrame containing an edge list.\n\n The Pandas DataFrame should contain at least two columns of node names and\n zero or more columns of node attributes. Each row will be processed as one\n edge instance.\n\n Note: This function iterates over DataFrame.values, which is not\n guaranteed to retain the data type across columns in the row. This is only\n a problem if your row is entirely numeric and a mix of ints and floats. In\n that case, all values will be returned as floats. See the\n DataFrame.iterrows documentation for an example.\n\n Parameters\n ----------\n df : Pandas DataFrame\n An edge list representation of a graph\n\n source : str or int\n A valid column name (string or iteger) for the source nodes (for the\n directed case).\n\n target : str or int\n A valid column name (string or iteger) for the target nodes (for the\n directed case).\n\n edge_attr : str or int, iterable, True\n A valid column name (str or integer) or list of column names that will\n be used to retrieve items from the row and add them to the graph as edge\n attributes. If `True`, all of the remaining columns will be added.\n\n create_using : NetworkX graph\n Use specified graph for result. The default is Graph()\n\n See Also\n --------\n to_pandas_dataframe\n\n Examples\n --------\n Simple integer weights on edges:\n\n >>> import pandas as pd\n >>> import numpy as np\n >>> r = np.random.RandomState(seed=5)\n >>> ints = r.random_integers(1, 10, size=(3,2))\n >>> a = ['A', 'B', 'C']\n >>> b = ['D', 'A', 'E']\n >>> df = pd.DataFrame(ints, columns=['weight', 'cost'])\n >>> df[0] = a\n >>> df['b'] = b\n >>> df\n weight cost 0 b\n 0 4 7 A D\n 1 7 1 B A\n 2 10 9 C E\n >>> G=nx.from_pandas_dataframe(df, 0, 'b', ['weight', 'cost'])\n >>> G['E']['C']['weight']\n 10\n >>> G['E']['C']['cost']\n 9\n >>> edges = pd.DataFrame({'source': [0, 1, 2],\n ... 'target': [2, 2, 3],\n ... 'weight': [3, 4, 5],\n ... 
'color': ['red', 'blue', 'blue']})\n >>> G = nx.from_pandas_dataframe(edges, edge_attr=True)\n >>> G[0][2]['color']\n 'red'\n \"\"\"\n\n g = _prep_create_using(create_using)\n\n # Index of source and target\n src_i = df.columns.get_loc(source)\n tar_i = df.columns.get_loc(target)\n if edge_attr:\n # If all additional columns requested, build up a list of tuples\n # [(name, index),...]\n if edge_attr is True:\n # Create a list of all columns indices, ignore nodes\n edge_i = []\n for i, col in enumerate(df.columns):\n if col is not source and col is not target:\n edge_i.append((col, i))\n # If a list or tuple of name is requested\n elif isinstance(edge_attr, (list, tuple)):\n edge_i = [(i, df.columns.get_loc(i)) for i in edge_attr]\n # If a string or int is passed\n else:\n edge_i = [(edge_attr, df.columns.get_loc(edge_attr)), ]\n\n # Iteration on values returns the rows as Numpy arrays\n for row in df.values:\n s, t = row[src_i], row[tar_i]\n if g.is_multigraph():\n g.add_edge(s, t)\n key = max(g[s][t]) # default keys just count, so max is most recent\n g[s][t][key].update((i, row[j]) for i, j in edge_i)\n else:\n g.add_edge(s, t)\n g[s][t].update((i, row[j]) for i, j in edge_i)\n\n # If no column names are given, then just return the edges.\n else:\n for row in df.values:\n g.add_edge(row[src_i], row[tar_i])\n\n return g\n\n\ndef to_numpy_matrix(G, nodelist=None, dtype=None, order=None,\n multigraph_weight=sum, weight='weight', nonedge=0.0):\n \"\"\"Return the graph adjacency matrix as a NumPy matrix.\n\n Parameters\n ----------\n G : graph\n The NetworkX graph used to construct the NumPy matrix.\n\n nodelist : list, optional\n The rows and columns are ordered according to the nodes in `nodelist`.\n If `nodelist` is None, then the ordering is produced by G.nodes().\n\n dtype : NumPy data type, optional\n A valid single NumPy data type used to initialize the array.\n This must be a simple type such as int or numpy.float64 and\n not a compound data type (see to_numpy_recarray)\n If None, then the NumPy default is used.\n\n order : {'C', 'F'}, optional\n Whether to store multidimensional data in C- or Fortran-contiguous\n (row- or column-wise) order in memory. If None, then the NumPy default\n is used.\n\n multigraph_weight : {sum, min, max}, optional\n An operator that determines how weights in multigraphs are handled.\n The default is to sum the weights of the multiple edges.\n\n weight : string or None optional (default = 'weight')\n The edge attribute that holds the numerical value used for\n the edge weight. If an edge does not have that attribute, then the\n value 1 is used instead.\n\n nonedge : float (default = 0.0)\n The matrix values corresponding to nonedges are typically set to zero.\n However, this could be undesirable if there are matrix values\n corresponding to actual edges that also have the value zero. If so,\n one might prefer nonedges to have some other value, such as nan.\n\n Returns\n -------\n M : NumPy matrix\n Graph adjacency matrix\n\n See Also\n --------\n to_numpy_recarray, from_numpy_matrix\n\n Notes\n -----\n The matrix entries are assigned to the weight edge attribute. When\n an edge does not have a weight attribute, the value of the entry is set to\n the number 1. For multiple (parallel) edges, the values of the entries\n are determined by the `multigraph_weight` parameter. 
The default is to\n sum the weight attributes for each of the parallel edges.\n\n When `nodelist` does not contain every node in `G`, the matrix is built\n from the subgraph of `G` that is induced by the nodes in `nodelist`.\n\n The convention used for self-loop edges in graphs is to assign the\n diagonal matrix entry value to the weight attribute of the edge\n (or the number 1 if the edge has no weight attribute). If the\n alternate convention of doubling the edge weight is desired the\n resulting Numpy matrix can be modified as follows:\n\n >>> import numpy as np\n >>> G = nx.Graph([(1, 1)])\n >>> A = nx.to_numpy_matrix(G)\n >>> A\n matrix([[ 1.]])\n >>> A.A[np.diag_indices_from(A)] *= 2\n >>> A\n matrix([[ 2.]])\n\n Examples\n --------\n >>> G = nx.MultiDiGraph()\n >>> G.add_edge(0,1,weight=2)\n 0\n >>> G.add_edge(1,0)\n 0\n >>> G.add_edge(2,2,weight=3)\n 0\n >>> G.add_edge(2,2)\n 1\n >>> nx.to_numpy_matrix(G, nodelist=[0,1,2])\n matrix([[ 0., 2., 0.],\n [ 1., 0., 0.],\n [ 0., 0., 4.]])\n \"\"\"\n import numpy as np\n\n A = to_numpy_array(G, nodelist=nodelist, dtype=dtype, order=order,\n multigraph_weight=multigraph_weight, weight=weight,\n nonedge=nonedge)\n M = np.asmatrix(A, dtype=dtype)\n return M\n\n\ndef from_numpy_matrix(A, parallel_edges=False, create_using=None):\n \"\"\"Return a graph from numpy matrix.\n\n The numpy matrix is interpreted as an adjacency matrix for the graph.\n\n Parameters\n ----------\n A : numpy matrix\n An adjacency matrix representation of a graph\n\n parallel_edges : Boolean\n If this is True, `create_using` is a multigraph, and `A` is an\n integer matrix, then entry *(i, j)* in the matrix is interpreted as the\n number of parallel edges joining vertices *i* and *j* in the graph. If it\n is False, then the entries in the adjacency matrix are interpreted as\n the weight of a single edge joining the vertices.\n\n create_using : NetworkX graph\n Use specified graph for result. 
The default is Graph()\n\n Notes\n -----\n If `create_using` is an instance of :class:`networkx.MultiGraph` or\n :class:`networkx.MultiDiGraph`, `parallel_edges` is True, and the\n entries of `A` are of type :class:`int`, then this function returns a\n multigraph (of the same type as `create_using`) with parallel edges.\n\n If `create_using` is an undirected multigraph, then only the edges\n indicated by the upper triangle of the matrix `A` will be added to the\n graph.\n\n If the numpy matrix has a single data type for each matrix entry it\n will be converted to an appropriate Python data type.\n\n If the numpy matrix has a user-specified compound data type the names\n of the data fields will be used as attribute keys in the resulting\n NetworkX graph.\n\n See Also\n --------\n to_numpy_matrix, to_numpy_recarray\n\n Examples\n --------\n Simple integer weights on edges:\n\n >>> import numpy\n >>> A=numpy.matrix([[1, 1], [2, 1]])\n >>> G=nx.from_numpy_matrix(A)\n\n If `create_using` is a multigraph and the matrix has only integer entries,\n the entries will be interpreted as weighted edges joining the vertices\n (without creating parallel edges):\n\n >>> import numpy\n >>> A = numpy.matrix([[1, 1], [1, 2]])\n >>> G = nx.from_numpy_matrix(A, create_using = nx.MultiGraph())\n >>> G[1][1]\n AtlasView({0: {'weight': 2}})\n\n If `create_using` is a multigraph and the matrix has only integer entries\n but `parallel_edges` is True, then the entries will be interpreted as\n the number of parallel edges joining those two vertices:\n\n >>> import numpy\n >>> A = numpy.matrix([[1, 1], [1, 2]])\n >>> temp = nx.MultiGraph()\n >>> G = nx.from_numpy_matrix(A, parallel_edges = True, create_using = temp)\n >>> G[1][1]\n AtlasView({0: {'weight': 1}, 1: {'weight': 1}})\n\n User defined compound data type on edges:\n\n >>> import numpy\n >>> dt = [('weight', float), ('cost', int)]\n >>> A = numpy.matrix([[(1.0, 2)]], dtype = dt)\n >>> G = nx.from_numpy_matrix(A)\n >>> list(G.edges())\n [(0, 0)]\n >>> G[0][0]['cost']\n 2\n >>> G[0][0]['weight']\n 1.0\n\n \"\"\"\n # This should never fail if you have created a numpy matrix with numpy...\n import numpy as np\n kind_to_python_type = {'f': float,\n 'i': int,\n 'u': int,\n 'b': bool,\n 'c': complex,\n 'S': str,\n 'V': 'void'}\n try: # Python 3.x\n blurb = chr(1245) # just to trigger the exception\n kind_to_python_type['U'] = str\n except ValueError: # Python 2.6+\n kind_to_python_type['U'] = unicode\n G = _prep_create_using(create_using)\n n, m = A.shape\n if n != m:\n raise nx.NetworkXError(\"Adjacency matrix is not square.\",\n \"nx,ny=%s\" % (A.shape,))\n dt = A.dtype\n try:\n python_type = kind_to_python_type[dt.kind]\n except:\n raise TypeError(\"Unknown numpy data type: %s\" % dt)\n\n # Make sure we get even the isolated nodes of the graph.\n G.add_nodes_from(range(n))\n # Get a list of all the entries in the matrix with nonzero entries. 
These\n # coordinates will become the edges in the graph.\n edges = zip(*(np.asarray(A).nonzero()))\n # handle numpy constructed data type\n if python_type is 'void':\n # Sort the fields by their offset, then by dtype, then by name.\n fields = sorted((offset, dtype, name) for name, (dtype, offset) in\n A.dtype.fields.items())\n triples = ((u, v, {name: kind_to_python_type[dtype.kind](val)\n for (_, dtype, name), val in zip(fields, A[u, v])})\n for u, v in edges)\n # If the entries in the adjacency matrix are integers, the graph is a\n # multigraph, and parallel_edges is True, then create parallel edges, each\n # with weight 1, for each entry in the adjacency matrix. Otherwise, create\n # one edge for each positive entry in the adjacency matrix and set the\n # weight of that edge to be the entry in the matrix.\n elif python_type is int and G.is_multigraph() and parallel_edges:\n chain = itertools.chain.from_iterable\n # The following line is equivalent to:\n #\n # for (u, v) in edges:\n # for d in range(A[u, v]):\n # G.add_edge(u, v, weight=1)\n #\n triples = chain(((u, v, dict(weight=1)) for d in range(A[u, v]))\n for (u, v) in edges)\n else: # basic data type\n triples = ((u, v, dict(weight=python_type(A[u, v])))\n for u, v in edges)\n # If we are creating an undirected multigraph, only add the edges from the\n # upper triangle of the matrix. Otherwise, add all the edges. This relies\n # on the fact that the vertices created in the\n # `_generated_weighted_edges()` function are actually the row/column\n # indices for the matrix `A`.\n #\n # Without this check, we run into a problem where each edge is added twice\n # when `G.add_edges_from()` is invoked below.\n if G.is_multigraph() and not G.is_directed():\n triples = ((u, v, d) for u, v, d in triples if u <= v)\n G.add_edges_from(triples)\n return G\n\n\n@not_implemented_for('multigraph')\ndef to_numpy_recarray(G, nodelist=None, dtype=None, order=None):\n \"\"\"Return the graph adjacency matrix as a NumPy recarray.\n\n Parameters\n ----------\n G : graph\n The NetworkX graph used to construct the NumPy matrix.\n\n nodelist : list, optional\n The rows and columns are ordered according to the nodes in `nodelist`.\n If `nodelist` is None, then the ordering is produced by G.nodes().\n\n dtype : NumPy data-type, optional\n A valid NumPy named dtype used to initialize the NumPy recarray.\n The data type names are assumed to be keys in the graph edge attribute\n dictionary.\n\n order : {'C', 'F'}, optional\n Whether to store multidimensional data in C- or Fortran-contiguous\n (row- or column-wise) order in memory. If None, then the NumPy default\n is used.\n\n Returns\n -------\n M : NumPy recarray\n The graph with specified edge data as a Numpy recarray\n\n Notes\n -----\n When `nodelist` does not contain every node in `G`, the matrix is built\n from the subgraph of `G` that is induced by the nodes in `nodelist`.\n\n Examples\n --------\n >>> G = nx.Graph()\n >>> G.add_edge(1,2,weight=7.0,cost=5)\n >>> A=nx.to_numpy_recarray(G,dtype=[('weight',float),('cost',int)])\n >>> print(A.weight)\n [[ 0. 7.]\n [ 7. 
0.]]\n >>> print(A.cost)\n [[0 5]\n [5 0]]\n \"\"\"\n if dtype is None:\n dtype = [('weight', float)]\n import numpy as np\n if nodelist is None:\n nodelist = list(G)\n nodeset = set(nodelist)\n if len(nodelist) != len(nodeset):\n msg = \"Ambiguous ordering: `nodelist` contained duplicates.\"\n raise nx.NetworkXError(msg)\n nlen = len(nodelist)\n undirected = not G.is_directed()\n index = dict(zip(nodelist, range(nlen)))\n M = np.zeros((nlen, nlen), dtype=dtype, order=order)\n\n names = M.dtype.names\n for u, v, attrs in G.edges(data=True):\n if (u in nodeset) and (v in nodeset):\n i, j = index[u], index[v]\n values = tuple([attrs[n] for n in names])\n M[i, j] = values\n if undirected:\n M[j, i] = M[i, j]\n\n return M.view(np.recarray)\n\n\ndef to_scipy_sparse_matrix(G, nodelist=None, dtype=None,\n weight='weight', format='csr'):\n \"\"\"Return the graph adjacency matrix as a SciPy sparse matrix.\n\n Parameters\n ----------\n G : graph\n The NetworkX graph used to construct the NumPy matrix.\n\n nodelist : list, optional\n The rows and columns are ordered according to the nodes in `nodelist`.\n If `nodelist` is None, then the ordering is produced by G.nodes().\n\n dtype : NumPy data-type, optional\n A valid NumPy dtype used to initialize the array. If None, then the\n NumPy default is used.\n\n weight : string or None optional (default='weight')\n The edge attribute that holds the numerical value used for\n the edge weight. If None then all edge weights are 1.\n\n format : str in {'bsr', 'csr', 'csc', 'coo', 'lil', 'dia', 'dok'}\n The type of the matrix to be returned (default 'csr'). For\n some algorithms different implementations of sparse matrices\n can perform better. See [1]_ for details.\n\n Returns\n -------\n M : SciPy sparse matrix\n Graph adjacency matrix.\n\n Notes\n -----\n The matrix entries are populated using the edge attribute held in\n parameter weight. When an edge does not have that attribute, the\n value of the entry is 1.\n\n For multiple edges the matrix values are the sums of the edge weights.\n\n When `nodelist` does not contain every node in `G`, the matrix is built\n from the subgraph of `G` that is induced by the nodes in `nodelist`.\n\n Uses coo_matrix format. To convert to other formats specify the\n format= keyword.\n\n The convention used for self-loop edges in graphs is to assign the\n diagonal matrix entry value to the weight attribute of the edge\n (or the number 1 if the edge has no weight attribute). If the\n alternate convention of doubling the edge weight is desired the\n resulting Scipy sparse matrix can be modified as follows:\n\n >>> import scipy as sp\n >>> G = nx.Graph([(1,1)])\n >>> A = nx.to_scipy_sparse_matrix(G)\n >>> print(A.todense())\n [[1]]\n >>> A.setdiag(A.diagonal()*2)\n >>> print(A.todense())\n [[2]]\n\n Examples\n --------\n >>> G = nx.MultiDiGraph()\n >>> G.add_edge(0,1,weight=2)\n 0\n >>> G.add_edge(1,0)\n 0\n >>> G.add_edge(2,2,weight=3)\n 0\n >>> G.add_edge(2,2)\n 1\n >>> S = nx.to_scipy_sparse_matrix(G, nodelist=[0,1,2])\n >>> print(S.todense())\n [[0 2 0]\n [1 0 0]\n [0 0 4]]\n\n References\n ----------\n .. [1] Scipy Dev. 
References, \"Sparse Matrices\",\n http://docs.scipy.org/doc/scipy/reference/sparse.html\n \"\"\"\n from scipy import sparse\n if nodelist is None:\n nodelist = list(G)\n nlen = len(nodelist)\n if nlen == 0:\n raise nx.NetworkXError(\"Graph has no nodes or edges\")\n\n if len(nodelist) != len(set(nodelist)):\n msg = \"Ambiguous ordering: `nodelist` contained duplicates.\"\n raise nx.NetworkXError(msg)\n\n index = dict(zip(nodelist, range(nlen)))\n coefficients = zip(*((index[u], index[v], d.get(weight, 1))\n for u, v, d in G.edges(nodelist, data=True)\n if u in index and v in index))\n try:\n row, col, data = coefficients\n except ValueError:\n # there is no edge in the subgraph\n row, col, data = [], [], []\n\n if G.is_directed():\n M = sparse.coo_matrix((data, (row, col)),\n shape=(nlen, nlen), dtype=dtype)\n else:\n # symmetrize matrix\n d = data + data\n r = row + col\n c = col + row\n # selfloop entries get double counted when symmetrizing\n # so we subtract the data on the diagonal\n selfloops = list(G.selfloop_edges(data=True))\n if selfloops:\n diag_index, diag_data = zip(*((index[u], -d.get(weight, 1))\n for u, v, d in selfloops\n if u in index and v in index))\n d += diag_data\n r += diag_index\n c += diag_index\n M = sparse.coo_matrix((d, (r, c)), shape=(nlen, nlen), dtype=dtype)\n try:\n return M.asformat(format)\n except AttributeError:\n raise nx.NetworkXError(\"Unknown sparse matrix format: %s\" % format)\n\n\ndef _csr_gen_triples(A):\n \"\"\"Converts a SciPy sparse matrix in **Compressed Sparse Row** format to\n an iterable of weighted edge triples.\n\n \"\"\"\n nrows = A.shape[0]\n data, indices, indptr = A.data, A.indices, A.indptr\n for i in range(nrows):\n for j in range(indptr[i], indptr[i + 1]):\n yield i, indices[j], data[j]\n\n\ndef _csc_gen_triples(A):\n \"\"\"Converts a SciPy sparse matrix in **Compressed Sparse Column** format to\n an iterable of weighted edge triples.\n\n \"\"\"\n ncols = A.shape[1]\n data, indices, indptr = A.data, A.indices, A.indptr\n for i in range(ncols):\n for j in range(indptr[i], indptr[i + 1]):\n yield indices[j], i, data[j]\n\n\ndef _coo_gen_triples(A):\n \"\"\"Converts a SciPy sparse matrix in **Coordinate** format to an iterable\n of weighted edge triples.\n\n \"\"\"\n row, col, data = A.row, A.col, A.data\n return zip(row, col, data)\n\n\ndef _dok_gen_triples(A):\n \"\"\"Converts a SciPy sparse matrix in **Dictionary of Keys** format to an\n iterable of weighted edge triples.\n\n \"\"\"\n for (r, c), v in A.items():\n yield r, c, v\n\n\ndef _generate_weighted_edges(A):\n \"\"\"Returns an iterable over (u, v, w) triples, where u and v are adjacent\n vertices and w is the weight of the edge joining u and v.\n\n `A` is a SciPy sparse matrix (in any format).\n\n \"\"\"\n if A.format == 'csr':\n return _csr_gen_triples(A)\n if A.format == 'csc':\n return _csc_gen_triples(A)\n if A.format == 'dok':\n return _dok_gen_triples(A)\n # If A is in any other format (including COO), convert it to COO format.\n return _coo_gen_triples(A.tocoo())\n\n\ndef from_scipy_sparse_matrix(A, parallel_edges=False, create_using=None,\n edge_attribute='weight'):\n \"\"\"Creates a new graph from an adjacency matrix given as a SciPy sparse\n matrix.\n\n Parameters\n ----------\n A: scipy sparse matrix\n An adjacency matrix representation of a graph\n\n parallel_edges : Boolean\n If this is True, `create_using` is a multigraph, and `A` is an\n integer matrix, then entry *(i, j)* in the matrix is interpreted as the\n number of parallel edges joining vertices *i* 
and *j* in the graph. If it\n is False, then the entries in the adjacency matrix are interpreted as\n the weight of a single edge joining the vertices.\n\n create_using: NetworkX graph\n Use specified graph for result. The default is Graph()\n\n edge_attribute: string\n Name of edge attribute to store matrix numeric value. The data will\n have the same type as the matrix entry (int, float, (real,imag)).\n\n Notes\n -----\n\n If `create_using` is an instance of :class:`networkx.MultiGraph` or\n :class:`networkx.MultiDiGraph`, `parallel_edges` is True, and the\n entries of `A` are of type :class:`int`, then this function returns a\n multigraph (of the same type as `create_using`) with parallel edges.\n In this case, `edge_attribute` will be ignored.\n\n If `create_using` is an undirected multigraph, then only the edges\n indicated by the upper triangle of the matrix `A` will be added to the\n graph.\n\n Examples\n --------\n >>> import scipy.sparse\n >>> A = scipy.sparse.eye(2,2,1)\n >>> G = nx.from_scipy_sparse_matrix(A)\n\n If `create_using` is a multigraph and the matrix has only integer entries,\n the entries will be interpreted as weighted edges joining the vertices\n (without creating parallel edges):\n\n >>> import scipy\n >>> A = scipy.sparse.csr_matrix([[1, 1], [1, 2]])\n >>> G = nx.from_scipy_sparse_matrix(A, create_using=nx.MultiGraph())\n >>> G[1][1]\n AtlasView({0: {'weight': 2}})\n\n If `create_using` is a multigraph and the matrix has only integer entries\n but `parallel_edges` is True, then the entries will be interpreted as\n the number of parallel edges joining those two vertices:\n\n >>> import scipy\n >>> A = scipy.sparse.csr_matrix([[1, 1], [1, 2]])\n >>> G = nx.from_scipy_sparse_matrix(A, parallel_edges=True,\n ... create_using=nx.MultiGraph())\n >>> G[1][1]\n AtlasView({0: {'weight': 1}, 1: {'weight': 1}})\n\n \"\"\"\n G = _prep_create_using(create_using)\n n, m = A.shape\n if n != m:\n raise nx.NetworkXError(\n \"Adjacency matrix is not square. nx,ny=%s\" % (A.shape,))\n # Make sure we get even the isolated nodes of the graph.\n G.add_nodes_from(range(n))\n # Create an iterable over (u, v, w) triples and for each triple, add an\n # edge from u to v with weight w.\n triples = _generate_weighted_edges(A)\n # If the entries in the adjacency matrix are integers, the graph is a\n # multigraph, and parallel_edges is True, then create parallel edges, each\n # with weight 1, for each entry in the adjacency matrix. Otherwise, create\n # one edge for each positive entry in the adjacency matrix and set the\n # weight of that edge to be the entry in the matrix.\n if A.dtype.kind in ('i', 'u') and G.is_multigraph() and parallel_edges:\n chain = itertools.chain.from_iterable\n # The following line is equivalent to:\n #\n # for (u, v) in edges:\n # for d in range(A[u, v]):\n # G.add_edge(u, v, weight=1)\n #\n triples = chain(((u, v, 1) for d in range(w)) for (u, v, w) in triples)\n # If we are creating an undirected multigraph, only add the edges from the\n # upper triangle of the matrix. Otherwise, add all the edges. 
This relies\n # on the fact that the vertices created in the\n # `_generated_weighted_edges()` function are actually the row/column\n # indices for the matrix `A`.\n #\n # Without this check, we run into a problem where each edge is added twice\n # when `G.add_weighted_edges_from()` is invoked below.\n if G.is_multigraph() and not G.is_directed():\n triples = ((u, v, d) for u, v, d in triples if u <= v)\n G.add_weighted_edges_from(triples, weight=edge_attribute)\n return G\n\n\ndef to_numpy_array(G, nodelist=None, dtype=None, order=None,\n multigraph_weight=sum, weight='weight', nonedge=0.0):\n \"\"\"Return the graph adjacency matrix as a NumPy array.\n\n Parameters\n ----------\n G : graph\n The NetworkX graph used to construct the NumPy array.\n\n nodelist : list, optional\n The rows and columns are ordered according to the nodes in `nodelist`.\n If `nodelist` is None, then the ordering is produced by G.nodes().\n\n dtype : NumPy data type, optional\n A valid single NumPy data type used to initialize the array.\n This must be a simple type such as int or numpy.float64 and\n not a compound data type (see to_numpy_recarray)\n If None, then the NumPy default is used.\n\n order : {'C', 'F'}, optional\n Whether to store multidimensional data in C- or Fortran-contiguous\n (row- or column-wise) order in memory. If None, then the NumPy default\n is used.\n\n multigraph_weight : {sum, min, max}, optional\n An operator that determines how weights in multigraphs are handled.\n The default is to sum the weights of the multiple edges.\n\n weight : string or None optional (default = 'weight')\n The edge attribute that holds the numerical value used for\n the edge weight. If an edge does not have that attribute, then the\n value 1 is used instead.\n\n nonedge : float (default = 0.0)\n The array values corresponding to nonedges are typically set to zero.\n However, this could be undesirable if there are array values\n corresponding to actual edges that also have the value zero. If so,\n one might prefer nonedges to have some other value, such as nan.\n\n Returns\n -------\n A : NumPy ndarray\n Graph adjacency matrix\n\n See Also\n --------\n from_numpy_array\n\n Notes\n -----\n Entries in the adjacency matrix are assigned to the weight edge attribute.\n When an edge does not have a weight attribute, the value of the entry is\n set to the number 1. For multiple (parallel) edges, the values of the\n entries are determined by the `multigraph_weight` parameter. The default is\n to sum the weight attributes for each of the parallel edges.\n\n When `nodelist` does not contain every node in `G`, the adjacency matrix is\n built from the subgraph of `G` that is induced by the nodes in `nodelist`.\n\n The convention used for self-loop edges in graphs is to assign the\n diagonal array entry value to the weight attribute of the edge\n (or the number 1 if the edge has no weight attribute). 
If the\n alternate convention of doubling the edge weight is desired the\n resulting NumPy array can be modified as follows:\n\n >>> import numpy as np\n >>> G = nx.Graph([(1, 1)])\n >>> A = nx.to_numpy_array(G)\n >>> A\n array([[ 1.]])\n >>> A[np.diag_indices_from(A)] *= 2\n >>> A\n array([[ 2.]])\n\n Examples\n --------\n >>> G = nx.MultiDiGraph()\n >>> G.add_edge(0,1,weight=2)\n 0\n >>> G.add_edge(1,0)\n 0\n >>> G.add_edge(2,2,weight=3)\n 0\n >>> G.add_edge(2,2)\n 1\n >>> nx.to_numpy_array(G, nodelist=[0,1,2])\n array([[ 0., 2., 0.],\n [ 1., 0., 0.],\n [ 0., 0., 4.]])\n \"\"\"\n import numpy as np\n if nodelist is None:\n nodelist = list(G)\n nodeset = set(nodelist)\n if len(nodelist) != len(nodeset):\n msg = \"Ambiguous ordering: `nodelist` contained duplicates.\"\n raise nx.NetworkXError(msg)\n\n nlen = len(nodelist)\n undirected = not G.is_directed()\n index = dict(zip(nodelist, range(nlen)))\n\n # Initially, we start with an array of nans. Then we populate the array\n # using data from the graph. Afterwards, any leftover nans will be\n # converted to the value of `nonedge`. Note, we use nans initially,\n # instead of zero, for two reasons:\n #\n # 1) It can be important to distinguish a real edge with the value 0\n # from a nonedge with the value 0.\n #\n # 2) When working with multi(di)graphs, we must combine the values of all\n # edges between any two nodes in some manner. This often takes the\n # form of a sum, min, or max. Using the value 0 for a nonedge would\n # have undesirable effects with min and max, but using nanmin and\n # nanmax with initially nan values is not problematic at all.\n #\n # That said, there are still some drawbacks to this approach. Namely, if\n # a real edge is nan, then that value is a) not distinguishable from\n # nonedges and b) is ignored by the default combinator (nansum, nanmin,\n # nanmax) functions used for multi(di)graphs. If this becomes an issue,\n # an alternative approach is to use masked arrays. Initially, every\n # element is masked and set to some `initial` value. As we populate the\n # graph, elements are unmasked (automatically) when we combine the initial\n # value with the values given by real edges. At the end, we convert all\n # masked values to `nonedge`. Using masked arrays fully addresses reason 1,\n # but for reason 2, we would still have the issue with min and max if the\n # initial values were 0.0. Note: an initial value of +inf is appropriate\n # for min, while an initial value of -inf is appropriate for max. When\n # working with sum, an initial value of zero is appropriate. Ideally then,\n # we'd want to allow users to specify both a value for nonedges and also\n # an initial value. 
For multi(di)graphs, the choice of the initial value\n # will, in general, depend on the combinator function---sensible defaults\n # can be provided.\n\n if G.is_multigraph():\n # Handle MultiGraphs and MultiDiGraphs\n A = np.full((nlen, nlen), np.nan, order=order)\n # use numpy nan-aware operations\n operator = {sum: np.nansum, min: np.nanmin, max: np.nanmax}\n try:\n op = operator[multigraph_weight]\n except:\n raise ValueError('multigraph_weight must be sum, min, or max')\n\n for u, v, attrs in G.edges(data=True):\n if (u in nodeset) and (v in nodeset):\n i, j = index[u], index[v]\n e_weight = attrs.get(weight, 1)\n A[i, j] = op([e_weight, A[i, j]])\n if undirected:\n A[j, i] = A[i, j]\n else:\n # Graph or DiGraph, this is much faster than above\n A = np.full((nlen, nlen), np.nan, order=order)\n for u, nbrdict in G.adjacency():\n for v, d in nbrdict.items():\n try:\n A[index[u], index[v]] = d.get(weight, 1)\n except KeyError:\n # This occurs when there are fewer desired nodes than\n # there are nodes in the graph: len(nodelist) < len(G)\n pass\n\n A[np.isnan(A)] = nonedge\n A = np.asarray(A, dtype=dtype)\n return A\n\n\ndef from_numpy_array(A, parallel_edges=False, create_using=None):\n \"\"\"Return a graph from NumPy array.\n\n The NumPy array is interpreted as an adjacency matrix for the graph.\n\n Parameters\n ----------\n A : NumPy ndarray\n An adjacency matrix representation of a graph\n\n parallel_edges : Boolean\n If this is True, `create_using` is a multigraph, and `A` is an\n integer array, then entry *(i, j)* in the adjacency matrix is\n interpreted as the number of parallel edges joining vertices *i*\n and *j* in the graph. If it is False, then the entries in the\n adjacency matrix are interpreted as the weight of a single edge\n joining the vertices.\n\n create_using : NetworkX graph\n Use specified graph for result. 
The default is Graph()\n\n Notes\n -----\n If `create_using` is an instance of :class:`networkx.MultiGraph` or\n :class:`networkx.MultiDiGraph`, `parallel_edges` is True, and the\n entries of `A` are of type :class:`int`, then this function returns a\n multigraph (of the same type as `create_using`) with parallel edges.\n\n If `create_using` is an undirected multigraph, then only the edges\n indicated by the upper triangle of the array `A` will be added to the\n graph.\n\n If the NumPy array has a single data type for each array entry it\n will be converted to an appropriate Python data type.\n\n If the NumPy array has a user-specified compound data type the names\n of the data fields will be used as attribute keys in the resulting\n NetworkX graph.\n\n See Also\n --------\n to_numpy_array\n\n Examples\n --------\n Simple integer weights on edges:\n\n >>> import numpy as np\n >>> A = np.array([[1, 1], [2, 1]])\n >>> G = nx.from_numpy_array(A)\n >>> G.edges(data=True)\n EdgeDataView([(0, 0, {'weight': 1}), (0, 1, {'weight': 2}), (1, 1, {'weight': 1})])\n\n If `create_using` is a multigraph and the array has only integer entries,\n the entries will be interpreted as weighted edges joining the vertices\n (without creating parallel edges):\n\n >>> import numpy as np\n >>> A = np.array([[1, 1], [1, 2]])\n >>> G = nx.from_numpy_array(A, create_using=nx.MultiGraph())\n >>> G[1][1]\n AtlasView({0: {'weight': 2}})\n\n If `create_using` is a multigraph and the array has only integer entries\n but `parallel_edges` is True, then the entries will be interpreted as\n the number of parallel edges joining those two vertices:\n\n >>> import numpy as np\n >>> A = np.array([[1, 1], [1, 2]])\n >>> temp = nx.MultiGraph()\n >>> G = nx.from_numpy_array(A, parallel_edges=True, create_using=temp)\n >>> G[1][1]\n AtlasView({0: {'weight': 1}, 1: {'weight': 1}})\n\n User defined compound data type on edges:\n\n >>> import numpy\n >>> dt = [('weight', float), ('cost', int)]\n >>> A = np.array([[(1.0, 2)]], dtype=dt)\n >>> G = nx.from_numpy_array(A)\n >>> G.edges()\n EdgeView([(0, 0)])\n >>> G[0][0]['cost']\n 2\n >>> G[0][0]['weight']\n 1.0\n\n \"\"\"\n return from_numpy_matrix(A, parallel_edges=parallel_edges,\n create_using=create_using)\n\n\n# fixture for nose tests\ndef setup_module(module):\n from nose import SkipTest\n try:\n import numpy\n except:\n raise SkipTest(\"NumPy not available\")\n try:\n import scipy\n except:\n raise SkipTest(\"SciPy not available\")\n",
"path": "networkx/convert_matrix.py"
}
] | [
{
"content": "\"\"\"Functions to convert NetworkX graphs to and from numpy/scipy matrices.\n\nThe preferred way of converting data to a NetworkX graph is through the\ngraph constuctor. The constructor calls the to_networkx_graph() function\nwhich attempts to guess the input type and convert it automatically.\n\nExamples\n--------\nCreate a 10 node random graph from a numpy matrix\n\n>>> import numpy\n>>> a = numpy.reshape(numpy.random.random_integers(0,1,size=100),(10,10))\n>>> D = nx.DiGraph(a)\n\nor equivalently\n\n>>> D = nx.to_networkx_graph(a,create_using=nx.DiGraph())\n\nSee Also\n--------\nnx_agraph, nx_pydot\n\"\"\"\n# Copyright (C) 2006-2014 by\n# Aric Hagberg <[email protected]>\n# Dan Schult <[email protected]>\n# Pieter Swart <[email protected]>\n# All rights reserved.\n# BSD license.\nimport warnings\nimport itertools\nimport networkx as nx\nfrom networkx.convert import _prep_create_using\nfrom networkx.utils import not_implemented_for\n__author__ = \"\"\"\\n\"\"\".join(['Aric Hagberg <[email protected]>',\n 'Pieter Swart ([email protected])',\n 'Dan Schult([email protected])'])\n__all__ = ['from_numpy_matrix', 'to_numpy_matrix',\n 'from_pandas_dataframe', 'to_pandas_dataframe',\n 'to_numpy_recarray',\n 'from_scipy_sparse_matrix', 'to_scipy_sparse_matrix',\n 'from_numpy_array', 'to_numpy_array']\n\n\ndef to_pandas_dataframe(G, nodelist=None, dtype=None, order=None,\n multigraph_weight=sum, weight='weight', nonedge=0.0):\n \"\"\"Return the graph adjacency matrix as a Pandas DataFrame.\n\n Parameters\n ----------\n G : graph\n The NetworkX graph used to construct the Pandas DataFrame.\n\n nodelist : list, optional\n The rows and columns are ordered according to the nodes in `nodelist`.\n If `nodelist` is None, then the ordering is produced by G.nodes().\n\n multigraph_weight : {sum, min, max}, optional\n An operator that determines how weights in multigraphs are handled.\n The default is to sum the weights of the multiple edges.\n\n weight : string or None, optional\n The edge attribute that holds the numerical value used for\n the edge weight. If an edge does not have that attribute, then the\n value 1 is used instead.\n\n nonedge : float, optional\n The matrix values corresponding to nonedges are typically set to zero.\n However, this could be undesirable if there are matrix values\n corresponding to actual edges that also have the value zero. If so,\n one might prefer nonedges to have some other value, such as nan.\n\n Returns\n -------\n df : Pandas DataFrame\n Graph adjacency matrix\n\n Notes\n -----\n The DataFrame entries are assigned to the weight edge attribute. When\n an edge does not have a weight attribute, the value of the entry is set to\n the number 1. For multiple (parallel) edges, the values of the entries\n are determined by the 'multigraph_weight' parameter. The default is to\n sum the weight attributes for each of the parallel edges.\n\n When `nodelist` does not contain every node in `G`, the matrix is built\n from the subgraph of `G` that is induced by the nodes in `nodelist`.\n\n The convention used for self-loop edges in graphs is to assign the\n diagonal matrix entry value to the weight attribute of the edge\n (or the number 1 if the edge has no weight attribute). 
If the\n alternate convention of doubling the edge weight is desired the\n resulting Pandas DataFrame can be modified as follows:\n\n >>> import pandas as pd\n >>> import numpy as np\n >>> G = nx.Graph([(1,1)])\n >>> df = nx.to_pandas_dataframe(G, dtype=int)\n >>> df\n 1\n 1 1\n >>> df.values[np.diag_indices_from(df)] *= 2\n >>> df\n 1\n 1 2\n\n Examples\n --------\n >>> G = nx.MultiDiGraph()\n >>> G.add_edge(0,1,weight=2)\n 0\n >>> G.add_edge(1,0)\n 0\n >>> G.add_edge(2,2,weight=3)\n 0\n >>> G.add_edge(2,2)\n 1\n >>> nx.to_pandas_dataframe(G, nodelist=[0,1,2], dtype=int)\n 0 1 2\n 0 0 2 0\n 1 1 0 0\n 2 0 0 4\n \"\"\"\n import pandas as pd\n M = to_numpy_matrix(G, nodelist=nodelist, dtype=dtype, order=order,\n multigraph_weight=multigraph_weight, weight=weight,\n nonedge=nonedge)\n if nodelist is None:\n nodelist = list(G)\n return pd.DataFrame(data=M, index=nodelist, columns=nodelist)\n\n\ndef from_pandas_dataframe(df, source='source', target='target', edge_attr=None,\n create_using=None):\n \"\"\"Return a graph from Pandas DataFrame containing an edge list.\n\n The Pandas DataFrame should contain at least two columns of node names and\n zero or more columns of node attributes. Each row will be processed as one\n edge instance.\n\n Note: This function iterates over DataFrame.values, which is not\n guaranteed to retain the data type across columns in the row. This is only\n a problem if your row is entirely numeric and a mix of ints and floats. In\n that case, all values will be returned as floats. See the\n DataFrame.iterrows documentation for an example.\n\n Parameters\n ----------\n df : Pandas DataFrame\n An edge list representation of a graph\n\n source : str or int\n A valid column name (string or iteger) for the source nodes (for the\n directed case).\n\n target : str or int\n A valid column name (string or iteger) for the target nodes (for the\n directed case).\n\n edge_attr : str or int, iterable, True\n A valid column name (str or integer) or list of column names that will\n be used to retrieve items from the row and add them to the graph as edge\n attributes. If `True`, all of the remaining columns will be added.\n\n create_using : NetworkX graph\n Use specified graph for result. The default is Graph()\n\n See Also\n --------\n to_pandas_dataframe\n\n Examples\n --------\n Simple integer weights on edges:\n\n >>> import pandas as pd\n >>> import numpy as np\n >>> r = np.random.RandomState(seed=5)\n >>> ints = r.random_integers(1, 10, size=(3,2))\n >>> a = ['A', 'B', 'C']\n >>> b = ['D', 'A', 'E']\n >>> df = pd.DataFrame(ints, columns=['weight', 'cost'])\n >>> df[0] = a\n >>> df['b'] = b\n >>> df\n weight cost 0 b\n 0 4 7 A D\n 1 7 1 B A\n 2 10 9 C E\n >>> G=nx.from_pandas_dataframe(df, 0, 'b', ['weight', 'cost'])\n >>> G['E']['C']['weight']\n 10\n >>> G['E']['C']['cost']\n 9\n >>> edges = pd.DataFrame({'source': [0, 1, 2],\n ... 'target': [2, 2, 3],\n ... 'weight': [3, 4, 5],\n ... 
'color': ['red', 'blue', 'blue']})\n >>> G = nx.from_pandas_dataframe(edges, edge_attr=True)\n >>> G[0][2]['color']\n 'red'\n \"\"\"\n\n g = _prep_create_using(create_using)\n\n # Index of source and target\n src_i = df.columns.get_loc(source)\n tar_i = df.columns.get_loc(target)\n if edge_attr:\n # If all additional columns requested, build up a list of tuples\n # [(name, index),...]\n if edge_attr is True:\n # Create a list of all columns indices, ignore nodes\n edge_i = []\n for i, col in enumerate(df.columns):\n if col is not source and col is not target:\n edge_i.append((col, i))\n # If a list or tuple of name is requested\n elif isinstance(edge_attr, (list, tuple)):\n edge_i = [(i, df.columns.get_loc(i)) for i in edge_attr]\n # If a string or int is passed\n else:\n edge_i = [(edge_attr, df.columns.get_loc(edge_attr)), ]\n\n # Iteration on values returns the rows as Numpy arrays\n for row in df.values:\n s, t = row[src_i], row[tar_i]\n if g.is_multigraph():\n g.add_edge(s, t)\n key = max(g[s][t]) # default keys just count, so max is most recent\n g[s][t][key].update((i, row[j]) for i, j in edge_i)\n else:\n g.add_edge(s, t)\n g[s][t].update((i, row[j]) for i, j in edge_i)\n\n # If no column names are given, then just return the edges.\n else:\n for row in df.values:\n g.add_edge(row[src_i], row[tar_i])\n\n return g\n\n\ndef to_numpy_matrix(G, nodelist=None, dtype=None, order=None,\n multigraph_weight=sum, weight='weight', nonedge=0.0):\n \"\"\"Return the graph adjacency matrix as a NumPy matrix.\n\n Parameters\n ----------\n G : graph\n The NetworkX graph used to construct the NumPy matrix.\n\n nodelist : list, optional\n The rows and columns are ordered according to the nodes in `nodelist`.\n If `nodelist` is None, then the ordering is produced by G.nodes().\n\n dtype : NumPy data type, optional\n A valid single NumPy data type used to initialize the array.\n This must be a simple type such as int or numpy.float64 and\n not a compound data type (see to_numpy_recarray)\n If None, then the NumPy default is used.\n\n order : {'C', 'F'}, optional\n Whether to store multidimensional data in C- or Fortran-contiguous\n (row- or column-wise) order in memory. If None, then the NumPy default\n is used.\n\n multigraph_weight : {sum, min, max}, optional\n An operator that determines how weights in multigraphs are handled.\n The default is to sum the weights of the multiple edges.\n\n weight : string or None optional (default = 'weight')\n The edge attribute that holds the numerical value used for\n the edge weight. If an edge does not have that attribute, then the\n value 1 is used instead.\n\n nonedge : float (default = 0.0)\n The matrix values corresponding to nonedges are typically set to zero.\n However, this could be undesirable if there are matrix values\n corresponding to actual edges that also have the value zero. If so,\n one might prefer nonedges to have some other value, such as nan.\n\n Returns\n -------\n M : NumPy matrix\n Graph adjacency matrix\n\n See Also\n --------\n to_numpy_recarray, from_numpy_matrix\n\n Notes\n -----\n The matrix entries are assigned to the weight edge attribute. When\n an edge does not have a weight attribute, the value of the entry is set to\n the number 1. For multiple (parallel) edges, the values of the entries\n are determined by the `multigraph_weight` parameter. 
The default is to\n sum the weight attributes for each of the parallel edges.\n\n When `nodelist` does not contain every node in `G`, the matrix is built\n from the subgraph of `G` that is induced by the nodes in `nodelist`.\n\n The convention used for self-loop edges in graphs is to assign the\n diagonal matrix entry value to the weight attribute of the edge\n (or the number 1 if the edge has no weight attribute). If the\n alternate convention of doubling the edge weight is desired the\n resulting Numpy matrix can be modified as follows:\n\n >>> import numpy as np\n >>> G = nx.Graph([(1, 1)])\n >>> A = nx.to_numpy_matrix(G)\n >>> A\n matrix([[ 1.]])\n >>> A.A[np.diag_indices_from(A)] *= 2\n >>> A\n matrix([[ 2.]])\n\n Examples\n --------\n >>> G = nx.MultiDiGraph()\n >>> G.add_edge(0,1,weight=2)\n 0\n >>> G.add_edge(1,0)\n 0\n >>> G.add_edge(2,2,weight=3)\n 0\n >>> G.add_edge(2,2)\n 1\n >>> nx.to_numpy_matrix(G, nodelist=[0,1,2])\n matrix([[ 0., 2., 0.],\n [ 1., 0., 0.],\n [ 0., 0., 4.]])\n \"\"\"\n import numpy as np\n\n A = to_numpy_array(G, nodelist=nodelist, dtype=dtype, order=order,\n multigraph_weight=multigraph_weight, weight=weight,\n nonedge=nonedge)\n M = np.asmatrix(A, dtype=dtype)\n return M\n\n\ndef from_numpy_matrix(A, parallel_edges=False, create_using=None):\n \"\"\"Return a graph from numpy matrix.\n\n The numpy matrix is interpreted as an adjacency matrix for the graph.\n\n Parameters\n ----------\n A : numpy matrix\n An adjacency matrix representation of a graph\n\n parallel_edges : Boolean\n If this is True, `create_using` is a multigraph, and `A` is an\n integer matrix, then entry *(i, j)* in the matrix is interpreted as the\n number of parallel edges joining vertices *i* and *j* in the graph. If it\n is False, then the entries in the adjacency matrix are interpreted as\n the weight of a single edge joining the vertices.\n\n create_using : NetworkX graph\n Use specified graph for result. 
The default is Graph()\n\n Notes\n -----\n If `create_using` is an instance of :class:`networkx.MultiGraph` or\n :class:`networkx.MultiDiGraph`, `parallel_edges` is True, and the\n entries of `A` are of type :class:`int`, then this function returns a\n multigraph (of the same type as `create_using`) with parallel edges.\n\n If `create_using` is an undirected multigraph, then only the edges\n indicated by the upper triangle of the matrix `A` will be added to the\n graph.\n\n If the numpy matrix has a single data type for each matrix entry it\n will be converted to an appropriate Python data type.\n\n If the numpy matrix has a user-specified compound data type the names\n of the data fields will be used as attribute keys in the resulting\n NetworkX graph.\n\n See Also\n --------\n to_numpy_matrix, to_numpy_recarray\n\n Examples\n --------\n Simple integer weights on edges:\n\n >>> import numpy\n >>> A=numpy.matrix([[1, 1], [2, 1]])\n >>> G=nx.from_numpy_matrix(A)\n\n If `create_using` is a multigraph and the matrix has only integer entries,\n the entries will be interpreted as weighted edges joining the vertices\n (without creating parallel edges):\n\n >>> import numpy\n >>> A = numpy.matrix([[1, 1], [1, 2]])\n >>> G = nx.from_numpy_matrix(A, create_using = nx.MultiGraph())\n >>> G[1][1]\n AtlasView({0: {'weight': 2}})\n\n If `create_using` is a multigraph and the matrix has only integer entries\n but `parallel_edges` is True, then the entries will be interpreted as\n the number of parallel edges joining those two vertices:\n\n >>> import numpy\n >>> A = numpy.matrix([[1, 1], [1, 2]])\n >>> temp = nx.MultiGraph()\n >>> G = nx.from_numpy_matrix(A, parallel_edges = True, create_using = temp)\n >>> G[1][1]\n AtlasView({0: {'weight': 1}, 1: {'weight': 1}})\n\n User defined compound data type on edges:\n\n >>> import numpy\n >>> dt = [('weight', float), ('cost', int)]\n >>> A = numpy.matrix([[(1.0, 2)]], dtype = dt)\n >>> G = nx.from_numpy_matrix(A)\n >>> list(G.edges())\n [(0, 0)]\n >>> G[0][0]['cost']\n 2\n >>> G[0][0]['weight']\n 1.0\n\n \"\"\"\n # This should never fail if you have created a numpy matrix with numpy...\n import numpy as np\n kind_to_python_type = {'f': float,\n 'i': int,\n 'u': int,\n 'b': bool,\n 'c': complex,\n 'S': str,\n 'V': 'void'}\n try: # Python 3.x\n blurb = chr(1245) # just to trigger the exception\n kind_to_python_type['U'] = str\n except ValueError: # Python 2.6+\n kind_to_python_type['U'] = unicode\n G = _prep_create_using(create_using)\n n, m = A.shape\n if n != m:\n raise nx.NetworkXError(\"Adjacency matrix is not square.\",\n \"nx,ny=%s\" % (A.shape,))\n dt = A.dtype\n try:\n python_type = kind_to_python_type[dt.kind]\n except:\n raise TypeError(\"Unknown numpy data type: %s\" % dt)\n\n # Make sure we get even the isolated nodes of the graph.\n G.add_nodes_from(range(n))\n # Get a list of all the entries in the matrix with nonzero entries. 
These\n # coordinates will become the edges in the graph.\n edges = zip(*(np.asarray(A).nonzero()))\n # handle numpy constructed data type\n if python_type is 'void':\n # Sort the fields by their offset, then by dtype, then by name.\n fields = sorted((offset, dtype, name) for name, (dtype, offset) in\n A.dtype.fields.items())\n triples = ((u, v, {name: kind_to_python_type[dtype.kind](val)\n for (_, dtype, name), val in zip(fields, A[u, v])})\n for u, v in edges)\n # If the entries in the adjacency matrix are integers, the graph is a\n # multigraph, and parallel_edges is True, then create parallel edges, each\n # with weight 1, for each entry in the adjacency matrix. Otherwise, create\n # one edge for each positive entry in the adjacency matrix and set the\n # weight of that edge to be the entry in the matrix.\n elif python_type is int and G.is_multigraph() and parallel_edges:\n chain = itertools.chain.from_iterable\n # The following line is equivalent to:\n #\n # for (u, v) in edges:\n # for d in range(A[u, v]):\n # G.add_edge(u, v, weight=1)\n #\n triples = chain(((u, v, dict(weight=1)) for d in range(A[u, v]))\n for (u, v) in edges)\n else: # basic data type\n triples = ((u, v, dict(weight=python_type(A[u, v])))\n for u, v in edges)\n # If we are creating an undirected multigraph, only add the edges from the\n # upper triangle of the matrix. Otherwise, add all the edges. This relies\n # on the fact that the vertices created in the\n # `_generated_weighted_edges()` function are actually the row/column\n # indices for the matrix `A`.\n #\n # Without this check, we run into a problem where each edge is added twice\n # when `G.add_edges_from()` is invoked below.\n if G.is_multigraph() and not G.is_directed():\n triples = ((u, v, d) for u, v, d in triples if u <= v)\n G.add_edges_from(triples)\n return G\n\n\n@not_implemented_for('multigraph')\ndef to_numpy_recarray(G, nodelist=None, dtype=None, order=None):\n \"\"\"Return the graph adjacency matrix as a NumPy recarray.\n\n Parameters\n ----------\n G : graph\n The NetworkX graph used to construct the NumPy matrix.\n\n nodelist : list, optional\n The rows and columns are ordered according to the nodes in `nodelist`.\n If `nodelist` is None, then the ordering is produced by G.nodes().\n\n dtype : NumPy data-type, optional\n A valid NumPy named dtype used to initialize the NumPy recarray.\n The data type names are assumed to be keys in the graph edge attribute\n dictionary.\n\n order : {'C', 'F'}, optional\n Whether to store multidimensional data in C- or Fortran-contiguous\n (row- or column-wise) order in memory. If None, then the NumPy default\n is used.\n\n Returns\n -------\n M : NumPy recarray\n The graph with specified edge data as a Numpy recarray\n\n Notes\n -----\n When `nodelist` does not contain every node in `G`, the matrix is built\n from the subgraph of `G` that is induced by the nodes in `nodelist`.\n\n Examples\n --------\n >>> G = nx.Graph()\n >>> G.add_edge(1,2,weight=7.0,cost=5)\n >>> A=nx.to_numpy_recarray(G,dtype=[('weight',float),('cost',int)])\n >>> print(A.weight)\n [[ 0. 7.]\n [ 7. 
0.]]\n >>> print(A.cost)\n [[0 5]\n [5 0]]\n \"\"\"\n if dtype is None:\n dtype = [('weight', float)]\n import numpy as np\n if nodelist is None:\n nodelist = list(G)\n nodeset = set(nodelist)\n if len(nodelist) != len(nodeset):\n msg = \"Ambiguous ordering: `nodelist` contained duplicates.\"\n raise nx.NetworkXError(msg)\n nlen = len(nodelist)\n undirected = not G.is_directed()\n index = dict(zip(nodelist, range(nlen)))\n M = np.zeros((nlen, nlen), dtype=dtype, order=order)\n\n names = M.dtype.names\n for u, v, attrs in G.edges(data=True):\n if (u in nodeset) and (v in nodeset):\n i, j = index[u], index[v]\n values = tuple([attrs[n] for n in names])\n M[i, j] = values\n if undirected:\n M[j, i] = M[i, j]\n\n return M.view(np.recarray)\n\n\ndef to_scipy_sparse_matrix(G, nodelist=None, dtype=None,\n weight='weight', format='csr'):\n \"\"\"Return the graph adjacency matrix as a SciPy sparse matrix.\n\n Parameters\n ----------\n G : graph\n The NetworkX graph used to construct the NumPy matrix.\n\n nodelist : list, optional\n The rows and columns are ordered according to the nodes in `nodelist`.\n If `nodelist` is None, then the ordering is produced by G.nodes().\n\n dtype : NumPy data-type, optional\n A valid NumPy dtype used to initialize the array. If None, then the\n NumPy default is used.\n\n weight : string or None optional (default='weight')\n The edge attribute that holds the numerical value used for\n the edge weight. If None then all edge weights are 1.\n\n format : str in {'bsr', 'csr', 'csc', 'coo', 'lil', 'dia', 'dok'}\n The type of the matrix to be returned (default 'csr'). For\n some algorithms different implementations of sparse matrices\n can perform better. See [1]_ for details.\n\n Returns\n -------\n M : SciPy sparse matrix\n Graph adjacency matrix.\n\n Notes\n -----\n The matrix entries are populated using the edge attribute held in\n parameter weight. When an edge does not have that attribute, the\n value of the entry is 1.\n\n For multiple edges the matrix values are the sums of the edge weights.\n\n When `nodelist` does not contain every node in `G`, the matrix is built\n from the subgraph of `G` that is induced by the nodes in `nodelist`.\n\n Uses coo_matrix format. To convert to other formats specify the\n format= keyword.\n\n The convention used for self-loop edges in graphs is to assign the\n diagonal matrix entry value to the weight attribute of the edge\n (or the number 1 if the edge has no weight attribute). If the\n alternate convention of doubling the edge weight is desired the\n resulting Scipy sparse matrix can be modified as follows:\n\n >>> import scipy as sp\n >>> G = nx.Graph([(1,1)])\n >>> A = nx.to_scipy_sparse_matrix(G)\n >>> print(A.todense())\n [[1]]\n >>> A.setdiag(A.diagonal()*2)\n >>> print(A.todense())\n [[2]]\n\n Examples\n --------\n >>> G = nx.MultiDiGraph()\n >>> G.add_edge(0,1,weight=2)\n 0\n >>> G.add_edge(1,0)\n 0\n >>> G.add_edge(2,2,weight=3)\n 0\n >>> G.add_edge(2,2)\n 1\n >>> S = nx.to_scipy_sparse_matrix(G, nodelist=[0,1,2])\n >>> print(S.todense())\n [[0 2 0]\n [1 0 0]\n [0 0 4]]\n\n References\n ----------\n .. [1] Scipy Dev. 
References, \"Sparse Matrices\",\n http://docs.scipy.org/doc/scipy/reference/sparse.html\n \"\"\"\n from scipy import sparse\n if nodelist is None:\n nodelist = list(G)\n nlen = len(nodelist)\n if nlen == 0:\n raise nx.NetworkXError(\"Graph has no nodes or edges\")\n\n if len(nodelist) != len(set(nodelist)):\n msg = \"Ambiguous ordering: `nodelist` contained duplicates.\"\n raise nx.NetworkXError(msg)\n\n index = dict(zip(nodelist, range(nlen)))\n coefficients = zip(*((index[u], index[v], d.get(weight, 1))\n for u, v, d in G.edges(nodelist, data=True)\n if u in index and v in index))\n try:\n row, col, data = coefficients\n except ValueError:\n # there is no edge in the subgraph\n row, col, data = [], [], []\n\n if G.is_directed():\n M = sparse.coo_matrix((data, (row, col)),\n shape=(nlen, nlen), dtype=dtype)\n else:\n # symmetrize matrix\n d = data + data\n r = row + col\n c = col + row\n # selfloop entries get double counted when symmetrizing\n # so we subtract the data on the diagonal\n selfloops = list(G.selfloop_edges(data=True))\n if selfloops:\n diag_index, diag_data = zip(*((index[u], -d.get(weight, 1))\n for u, v, d in selfloops\n if u in index and v in index))\n d += diag_data\n r += diag_index\n c += diag_index\n M = sparse.coo_matrix((d, (r, c)), shape=(nlen, nlen), dtype=dtype)\n try:\n return M.asformat(format)\n except AttributeError:\n raise nx.NetworkXError(\"Unknown sparse matrix format: %s\" % format)\n\n\ndef _csr_gen_triples(A):\n \"\"\"Converts a SciPy sparse matrix in **Compressed Sparse Row** format to\n an iterable of weighted edge triples.\n\n \"\"\"\n nrows = A.shape[0]\n data, indices, indptr = A.data, A.indices, A.indptr\n for i in range(nrows):\n for j in range(indptr[i], indptr[i + 1]):\n yield i, indices[j], data[j]\n\n\ndef _csc_gen_triples(A):\n \"\"\"Converts a SciPy sparse matrix in **Compressed Sparse Column** format to\n an iterable of weighted edge triples.\n\n \"\"\"\n ncols = A.shape[1]\n data, indices, indptr = A.data, A.indices, A.indptr\n for i in range(ncols):\n for j in range(indptr[i], indptr[i + 1]):\n yield indices[j], i, data[j]\n\n\ndef _coo_gen_triples(A):\n \"\"\"Converts a SciPy sparse matrix in **Coordinate** format to an iterable\n of weighted edge triples.\n\n \"\"\"\n row, col, data = A.row, A.col, A.data\n return zip(row, col, data)\n\n\ndef _dok_gen_triples(A):\n \"\"\"Converts a SciPy sparse matrix in **Dictionary of Keys** format to an\n iterable of weighted edge triples.\n\n \"\"\"\n for (r, c), v in A.items():\n yield r, c, v\n\n\ndef _generate_weighted_edges(A):\n \"\"\"Returns an iterable over (u, v, w) triples, where u and v are adjacent\n vertices and w is the weight of the edge joining u and v.\n\n `A` is a SciPy sparse matrix (in any format).\n\n \"\"\"\n if A.format == 'csr':\n return _csr_gen_triples(A)\n if A.format == 'csc':\n return _csc_gen_triples(A)\n if A.format == 'dok':\n return _dok_gen_triples(A)\n # If A is in any other format (including COO), convert it to COO format.\n return _coo_gen_triples(A.tocoo())\n\n\ndef from_scipy_sparse_matrix(A, parallel_edges=False, create_using=None,\n edge_attribute='weight'):\n \"\"\"Creates a new graph from an adjacency matrix given as a SciPy sparse\n matrix.\n\n Parameters\n ----------\n A: scipy sparse matrix\n An adjacency matrix representation of a graph\n\n parallel_edges : Boolean\n If this is True, `create_using` is a multigraph, and `A` is an\n integer matrix, then entry *(i, j)* in the matrix is interpreted as the\n number of parallel edges joining vertices *i* 
and *j* in the graph. If it\n is False, then the entries in the adjacency matrix are interpreted as\n the weight of a single edge joining the vertices.\n\n create_using: NetworkX graph\n Use specified graph for result. The default is Graph()\n\n edge_attribute: string\n Name of edge attribute to store matrix numeric value. The data will\n have the same type as the matrix entry (int, float, (real,imag)).\n\n Notes\n -----\n\n If `create_using` is an instance of :class:`networkx.MultiGraph` or\n :class:`networkx.MultiDiGraph`, `parallel_edges` is True, and the\n entries of `A` are of type :class:`int`, then this function returns a\n multigraph (of the same type as `create_using`) with parallel edges.\n In this case, `edge_attribute` will be ignored.\n\n If `create_using` is an undirected multigraph, then only the edges\n indicated by the upper triangle of the matrix `A` will be added to the\n graph.\n\n Examples\n --------\n >>> import scipy.sparse\n >>> A = scipy.sparse.eye(2,2,1)\n >>> G = nx.from_scipy_sparse_matrix(A)\n\n If `create_using` is a multigraph and the matrix has only integer entries,\n the entries will be interpreted as weighted edges joining the vertices\n (without creating parallel edges):\n\n >>> import scipy\n >>> A = scipy.sparse.csr_matrix([[1, 1], [1, 2]])\n >>> G = nx.from_scipy_sparse_matrix(A, create_using=nx.MultiGraph())\n >>> G[1][1]\n AtlasView({0: {'weight': 2}})\n\n If `create_using` is a multigraph and the matrix has only integer entries\n but `parallel_edges` is True, then the entries will be interpreted as\n the number of parallel edges joining those two vertices:\n\n >>> import scipy\n >>> A = scipy.sparse.csr_matrix([[1, 1], [1, 2]])\n >>> G = nx.from_scipy_sparse_matrix(A, parallel_edges=True,\n ... create_using=nx.MultiGraph())\n >>> G[1][1]\n AtlasView({0: {'weight': 1}, 1: {'weight': 1}})\n\n \"\"\"\n G = _prep_create_using(create_using)\n n, m = A.shape\n if n != m:\n raise nx.NetworkXError(\n \"Adjacency matrix is not square. nx,ny=%s\" % (A.shape,))\n # Make sure we get even the isolated nodes of the graph.\n G.add_nodes_from(range(n))\n # Create an iterable over (u, v, w) triples and for each triple, add an\n # edge from u to v with weight w.\n triples = _generate_weighted_edges(A)\n # If the entries in the adjacency matrix are integers, the graph is a\n # multigraph, and parallel_edges is True, then create parallel edges, each\n # with weight 1, for each entry in the adjacency matrix. Otherwise, create\n # one edge for each positive entry in the adjacency matrix and set the\n # weight of that edge to be the entry in the matrix.\n if A.dtype.kind in ('i', 'u') and G.is_multigraph() and parallel_edges:\n chain = itertools.chain.from_iterable\n # The following line is equivalent to:\n #\n # for (u, v) in edges:\n # for d in range(A[u, v]):\n # G.add_edge(u, v, weight=1)\n #\n triples = chain(((u, v, 1) for d in range(w)) for (u, v, w) in triples)\n # If we are creating an undirected multigraph, only add the edges from the\n # upper triangle of the matrix. Otherwise, add all the edges. 
This relies\n # on the fact that the vertices created in the\n # `_generated_weighted_edges()` function are actually the row/column\n # indices for the matrix `A`.\n #\n # Without this check, we run into a problem where each edge is added twice\n # when `G.add_weighted_edges_from()` is invoked below.\n if G.is_multigraph() and not G.is_directed():\n triples = ((u, v, d) for u, v, d in triples if u <= v)\n G.add_weighted_edges_from(triples, weight=edge_attribute)\n return G\n\n\ndef to_numpy_array(G, nodelist=None, dtype=None, order=None,\n multigraph_weight=sum, weight='weight', nonedge=0.0):\n \"\"\"Return the graph adjacency matrix as a NumPy array.\n\n Parameters\n ----------\n G : graph\n The NetworkX graph used to construct the NumPy array.\n\n nodelist : list, optional\n The rows and columns are ordered according to the nodes in `nodelist`.\n If `nodelist` is None, then the ordering is produced by G.nodes().\n\n dtype : NumPy data type, optional\n A valid single NumPy data type used to initialize the array.\n This must be a simple type such as int or numpy.float64 and\n not a compound data type (see to_numpy_recarray)\n If None, then the NumPy default is used.\n\n order : {'C', 'F'}, optional\n Whether to store multidimensional data in C- or Fortran-contiguous\n (row- or column-wise) order in memory. If None, then the NumPy default\n is used.\n\n multigraph_weight : {sum, min, max}, optional\n An operator that determines how weights in multigraphs are handled.\n The default is to sum the weights of the multiple edges.\n\n weight : string or None optional (default = 'weight')\n The edge attribute that holds the numerical value used for\n the edge weight. If an edge does not have that attribute, then the\n value 1 is used instead.\n\n nonedge : float (default = 0.0)\n The array values corresponding to nonedges are typically set to zero.\n However, this could be undesirable if there are array values\n corresponding to actual edges that also have the value zero. If so,\n one might prefer nonedges to have some other value, such as nan.\n\n Returns\n -------\n A : NumPy ndarray\n Graph adjacency matrix\n\n See Also\n --------\n from_numpy_array\n\n Notes\n -----\n Entries in the adjacency matrix are assigned to the weight edge attribute.\n When an edge does not have a weight attribute, the value of the entry is\n set to the number 1. For multiple (parallel) edges, the values of the\n entries are determined by the `multigraph_weight` parameter. The default is\n to sum the weight attributes for each of the parallel edges.\n\n When `nodelist` does not contain every node in `G`, the adjacency matrix is\n built from the subgraph of `G` that is induced by the nodes in `nodelist`.\n\n The convention used for self-loop edges in graphs is to assign the\n diagonal array entry value to the weight attribute of the edge\n (or the number 1 if the edge has no weight attribute). 
If the\n alternate convention of doubling the edge weight is desired the\n resulting NumPy array can be modified as follows:\n\n >>> import numpy as np\n >>> G = nx.Graph([(1, 1)])\n >>> A = nx.to_numpy_array(G)\n >>> A\n array([[ 1.]])\n >>> A[np.diag_indices_from(A)] *= 2\n >>> A\n array([[ 2.]])\n\n Examples\n --------\n >>> G = nx.MultiDiGraph()\n >>> G.add_edge(0,1,weight=2)\n 0\n >>> G.add_edge(1,0)\n 0\n >>> G.add_edge(2,2,weight=3)\n 0\n >>> G.add_edge(2,2)\n 1\n >>> nx.to_numpy_array(G, nodelist=[0,1,2])\n array([[ 0., 2., 0.],\n [ 1., 0., 0.],\n [ 0., 0., 4.]])\n \"\"\"\n import numpy as np\n if nodelist is None:\n nodelist = list(G)\n nodeset = set(nodelist)\n if len(nodelist) != len(nodeset):\n msg = \"Ambiguous ordering: `nodelist` contained duplicates.\"\n raise nx.NetworkXError(msg)\n\n nlen = len(nodelist)\n undirected = not G.is_directed()\n index = dict(zip(nodelist, range(nlen)))\n\n # Initially, we start with an array of nans. Then we populate the array\n # using data from the graph. Afterwards, any leftover nans will be\n # converted to the value of `nonedge`. Note, we use nans initially,\n # instead of zero, for two reasons:\n #\n # 1) It can be important to distinguish a real edge with the value 0\n # from a nonedge with the value 0.\n #\n # 2) When working with multi(di)graphs, we must combine the values of all\n # edges between any two nodes in some manner. This often takes the\n # form of a sum, min, or max. Using the value 0 for a nonedge would\n # have undesirable effects with min and max, but using nanmin and\n # nanmax with initially nan values is not problematic at all.\n #\n # That said, there are still some drawbacks to this approach. Namely, if\n # a real edge is nan, then that value is a) not distinguishable from\n # nonedges and b) is ignored by the default combinator (nansum, nanmin,\n # nanmax) functions used for multi(di)graphs. If this becomes an issue,\n # an alternative approach is to use masked arrays. Initially, every\n # element is masked and set to some `initial` value. As we populate the\n # graph, elements are unmasked (automatically) when we combine the initial\n # value with the values given by real edges. At the end, we convert all\n # masked values to `nonedge`. Using masked arrays fully addresses reason 1,\n # but for reason 2, we would still have the issue with min and max if the\n # initial values were 0.0. Note: an initial value of +inf is appropriate\n # for min, while an initial value of -inf is appropriate for max. When\n # working with sum, an initial value of zero is appropriate. Ideally then,\n # we'd want to allow users to specify both a value for nonedges and also\n # an initial value. 
For multi(di)graphs, the choice of the initial value\n # will, in general, depend on the combinator function---sensible defaults\n # can be provided.\n\n if G.is_multigraph():\n # Handle MultiGraphs and MultiDiGraphs\n A = np.full((nlen, nlen), np.nan, order=order)\n # use numpy nan-aware operations\n operator = {sum: np.nansum, min: np.nanmin, max: np.nanmax}\n try:\n op = operator[multigraph_weight]\n except:\n raise ValueError('multigraph_weight must be sum, min, or max')\n\n for u, v, attrs in G.edges(data=True):\n if (u in nodeset) and (v in nodeset):\n i, j = index[u], index[v]\n e_weight = attrs.get(weight, 1)\n A[i, j] = op([e_weight, A[i, j]])\n if undirected:\n A[j, i] = A[i, j]\n else:\n # Graph or DiGraph, this is much faster than above\n A = np.full((nlen, nlen), np.nan, order=order)\n for u, nbrdict in G.adjacency():\n for v, d in nbrdict.items():\n try:\n A[index[u], index[v]] = d.get(weight, 1)\n except KeyError:\n # This occurs when there are fewer desired nodes than\n # there are nodes in the graph: len(nodelist) < len(G)\n pass\n\n A[np.isnan(A)] = nonedge\n A = np.asarray(A, dtype=dtype)\n return A\n\n\ndef from_numpy_array(A, parallel_edges=False, create_using=None):\n \"\"\"Return a graph from NumPy array.\n\n The NumPy array is interpreted as an adjacency matrix for the graph.\n\n Parameters\n ----------\n A : NumPy ndarray\n An adjacency matrix representation of a graph\n\n parallel_edges : Boolean\n If this is True, `create_using` is a multigraph, and `A` is an\n integer array, then entry *(i, j)* in the adjacency matrix is\n interpreted as the number of parallel edges joining vertices *i*\n and *j* in the graph. If it is False, then the entries in the\n adjacency matrix are interpreted as the weight of a single edge\n joining the vertices.\n\n create_using : NetworkX graph\n Use specified graph for result. 
The default is Graph()\n\n Notes\n -----\n If `create_using` is an instance of :class:`networkx.MultiGraph` or\n :class:`networkx.MultiDiGraph`, `parallel_edges` is True, and the\n entries of `A` are of type :class:`int`, then this function returns a\n multigraph (of the same type as `create_using`) with parallel edges.\n\n If `create_using` is an undirected multigraph, then only the edges\n indicated by the upper triangle of the array `A` will be added to the\n graph.\n\n If the NumPy array has a single data type for each array entry it\n will be converted to an appropriate Python data type.\n\n If the NumPy array has a user-specified compound data type the names\n of the data fields will be used as attribute keys in the resulting\n NetworkX graph.\n\n See Also\n --------\n to_numpy_array\n\n Examples\n --------\n Simple integer weights on edges:\n\n >>> import numpy as np\n >>> A = np.array([[1, 1], [2, 1]])\n >>> G = nx.from_numpy_array(A)\n >>> G.edges(data=True)\n EdgeDataView([(0, 0, {'weight': 1}), (0, 1, {'weight': 2}), (1, 1, {'weight': 1})])\n\n If `create_using` is a multigraph and the array has only integer entries,\n the entries will be interpreted as weighted edges joining the vertices\n (without creating parallel edges):\n\n >>> import numpy as np\n >>> A = np.array([[1, 1], [1, 2]])\n >>> G = nx.from_numpy_array(A, create_using=nx.MultiGraph())\n >>> G[1][1]\n AtlasView({0: {'weight': 2}})\n\n If `create_using` is a multigraph and the array has only integer entries\n but `parallel_edges` is True, then the entries will be interpreted as\n the number of parallel edges joining those two vertices:\n\n >>> import numpy as np\n >>> A = np.array([[1, 1], [1, 2]])\n >>> temp = nx.MultiGraph()\n >>> G = nx.from_numpy_array(A, parallel_edges=True, create_using=temp)\n >>> G[1][1]\n AtlasView({0: {'weight': 1}, 1: {'weight': 1}})\n\n User defined compound data type on edges:\n\n >>> import numpy\n >>> dt = [('weight', float), ('cost', int)]\n >>> A = np.array([[(1.0, 2)]], dtype=dt)\n >>> G = nx.from_numpy_array(A)\n >>> G.edges()\n EdgeView([(0, 0)])\n >>> G[0][0]['cost']\n 2\n >>> G[0][0]['weight']\n 1.0\n\n \"\"\"\n return from_numpy_matrix(A, parallel_edges=parallel_edges,\n create_using=create_using)\n\n\n# fixture for nose tests\ndef setup_module(module):\n from nose import SkipTest\n try:\n import numpy\n except:\n raise SkipTest(\"NumPy not available\")\n try:\n import scipy\n except:\n raise SkipTest(\"SciPy not available\")\n try:\n import pandas\n except:\n raise SkipTest(\"Pandas not available\")\n",
"path": "networkx/convert_matrix.py"
}
] | diff --git a/doc/news.rst b/doc/news.rst
index 3f9df003576..a02f60858ca 100644
--- a/doc/news.rst
+++ b/doc/news.rst
@@ -8,22 +8,37 @@ NetworkX 2.0
------------
Release date: TBD
-
See :doc:`release/migration_guide_from_1.x_to_2.0`.
API changes
~~~~~~~~~~~
See :doc:`release/release_2.0`.
+NetworkX 1.11
+-------------
+Release date: 30 January 2016
+
+Support for Python 3.5 added, drop support for Python 3.2.
+
+Highlights
+~~~~~~~~~~
+
+Pydot features now use pydotplus.
+Fixes installation on some machines and test with appveyor.
+Restores default center and scale of layout routines.
+Fixes various docs including no symbolic links in examples.
+Docs can now build using autosummary on readthedocs.org.
NetworkX 1.10
--------------
+
Release date: 2 August 2015
Support for Python 2.6 is dropped in this release.
Highlights
~~~~~~~~~~
+
- Connected components now return generators
- new functions including
@@ -61,8 +76,6 @@ Release date: 13 September 2014
Bugfix release for minor installation and documentation issues.
-https://github.com/networkx/networkx/milestones/networkx-1.9.1
-
NetworkX 1.9
------------
Release date: 21 June 2014
diff --git a/doc/release/api_1.11.rst b/doc/release/api_1.11.rst
new file mode 100644
index 00000000000..6a0520a1a1d
--- /dev/null
+++ b/doc/release/api_1.11.rst
@@ -0,0 +1,40 @@
+**********************************
+Version 1.11 notes and API changes
+**********************************
+
+This page includes more detailed release information and API changes from
+NetworkX 1.10 to NetworkX 1.11.
+
+Please send comments and questions to the networkx-discuss mailing list:
+<http://groups.google.com/group/networkx-discuss>.
+
+API changes
+-----------
+* [`#1930 <https://github.com/networkx/networkx/pull/1930>`_]
+ No longer import nx_agraph and nx_pydot into the top-level namespace.
+ They can be accessed within networkx as e.g. ``nx.nx_agraph.write_dot``
+ or imported as ``from networkx.drawing.nx_agraph import write_dot``.
+
+* [`#1750 <https://github.com/networkx/networkx/pull/1750>`_]
+ Arguments center and scale are now available for all layout functions.
+  The default values revert to the v1.9 values (center is the origin
+  for circular layouts and domain is [0, scale) for others).
+
+* [`#1924 <https://github.com/networkx/networkx/pull/1924>`_]
+ Replace pydot with pydotplus for drawing with the pydot interface.
+
+* [`#1888 <https://github.com/networkx/networkx/pull/1888>`_]
+ Replace support for Python3.2 with support for Python 3.5.
+
+Miscellaneous changes
+---------------------
+
+* [`#1763 <https://github.com/networkx/networkx/pull/1763>`_]
+ Set up appveyor to automatically test installation on Windows machines.
+  Remove symbolic links in examples to help such installation.
+
+Change many doc_string typos to allow sphinx
+to build the docs without errors or warnings.
+
+Enable the docs to be automatically built on
+readthedocs.org by changing requirements.txt
diff --git a/doc/release/index.rst b/doc/release/index.rst
index 6320b0d13bb..1b17cd0f6a6 100644
--- a/doc/release/index.rst
+++ b/doc/release/index.rst
@@ -6,6 +6,7 @@ API changes
:maxdepth: 2
release_2.0
+ api_1.11
api_1.10
api_1.9
api_1.8
diff --git a/doc/tutorial.rst b/doc/tutorial.rst
index a59d53aa2e1..e80689e1684 100644
--- a/doc/tutorial.rst
+++ b/doc/tutorial.rst
@@ -456,8 +456,9 @@ PyGraphviz or pydot, are available on your system, you can also use
``nx_agraph.graphviz_layout(G)`` or ``nx_pydot.graphviz_layout(G)`` to get the
node positions, or write the graph in dot format for further processing.
+>>> from networkx.drawing.nx_pydot import write_dot
>>> pos = nx.nx_agraph.graphviz_layout(G)
>>> nx.draw(G, pos=pos)
->>> nx.write_dot(G,'file.dot')
+>>> nx.write_dot(G, 'file.dot')
See :doc:`/reference/drawing` for additional details.
diff --git a/networkx/convert_matrix.py b/networkx/convert_matrix.py
index 2b1a1882bb9..8938e1447f9 100644
--- a/networkx/convert_matrix.py
+++ b/networkx/convert_matrix.py
@@ -1126,3 +1126,7 @@ def setup_module(module):
import scipy
except:
raise SkipTest("SciPy not available")
+ try:
+ import pandas
+ except:
+ raise SkipTest("Pandas not available")
diff --git a/networkx/drawing/tests/test_agraph.py b/networkx/drawing/tests/test_agraph.py
index f24cf9622ea..780a5e1b1e4 100644
--- a/networkx/drawing/tests/test_agraph.py
+++ b/networkx/drawing/tests/test_agraph.py
@@ -3,6 +3,8 @@
import tempfile
from nose import SkipTest
from nose.tools import assert_true, assert_equal
+from networkx.testing import assert_edges_equal, assert_nodes_equal
+
import networkx as nx
@@ -23,8 +25,8 @@ def build_graph(self, G):
return G
def assert_equal(self, G1, G2):
- assert_equal(sorted(G1.nodes()), sorted(G2.nodes()))
- assert_equal(sorted(G1.edges()), sorted(G2.edges()))
+ assert_nodes_equal(G1.nodes(), G2.nodes())
+ assert_edges_equal(G1.edges(), G2.edges())
assert_equal(G1.graph['metal'], G2.graph['metal'])
def agraph_checks(self, G):
|
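As a quick illustration of the `nan`-seeding strategy described in the `convert_matrix.py` excerpt above (this example is an added sketch, not part of the original row): starting from `nan` lets the nan-aware reducers combine parallel edges without an implicit zero winning the comparison.

```python
import networkx as nx

# Two parallel edges between the same pair of nodes.
G = nx.MultiGraph()
G.add_edge(0, 1, weight=5)
G.add_edge(0, 1, weight=2)

# Collapse parallel edges with min; empty cells fall back to nonedge (0.0)
# only after the nan-aware reduction, so the 0 never competes with 2 and 5.
print(nx.to_numpy_array(G, multigraph_weight=min))
# [[0. 2.]
#  [2. 0.]]
```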
Pylons__pyramid-3272 | Bump Sphinx to >=1.7.2
Would anyone be opposed to bumping Sphinx to >=1.7.2, != 1.7.3 in `setup.py`? I really want our PDFs to have `emphasize-lines` support, at long last, and bring in support for Unicode characters in PDFs via xelatex.
Refs:
* #667
* #2572
* https://github.com/rtfd/readthedocs.org/issues/4015
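Concretely, the proposal is a one-line change to `docs_extras` in `setup.py`. A sketch of the requested pin (the merged diff further down settles on `Sphinx >= 1.7.4` instead):

```python
docs_extras = [
    'Sphinx >= 1.7.2, != 1.7.3',  # was: 'Sphinx >= 1.3.5, != 1.7.3'
    'docutils',
    'repoze.sphinx.autointerface',
    'pylons_sphinx_latesturl',
    'pylons-sphinx-themes',
    'sphinxcontrib-autoprogram',
    ]
```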
| [
{
"content": "##############################################################################\n#\n# Copyright (c) 2008-2013 Agendaless Consulting and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the BSD-like license at\n# http://www.repoze.org/LICENSE.txt. A copy of the license should accompany\n# this distribution. THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL\n# EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND\n# FITNESS FOR A PARTICULAR PURPOSE\n#\n##############################################################################\n\nimport os\n\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\ntry:\n with open(os.path.join(here, 'README.rst')) as f:\n README = f.read()\n with open(os.path.join(here, 'CHANGES.txt')) as f:\n CHANGES = f.read()\nexcept IOError:\n README = CHANGES = ''\n\ninstall_requires = [\n 'setuptools',\n 'WebOb >= 1.7.0', # Response.has_body\n 'repoze.lru >= 0.4', # py3 compat\n 'zope.interface >= 3.8.0', # has zope.interface.registry\n 'zope.deprecation >= 3.5.0', # py3 compat\n 'venusian >= 1.0a3', # ``ignore``\n 'translationstring >= 0.4', # py3 compat\n 'PasteDeploy >= 1.5.0', # py3 compat\n 'plaster',\n 'plaster_pastedeploy',\n 'hupper',\n ]\n\ntests_require = [\n 'WebTest >= 1.3.1', # py3 compat\n 'zope.component >= 4.0', # py3 compat\n ]\n\n\ndocs_extras = [\n 'Sphinx >= 1.3.5, != 1.7.3',\n 'docutils',\n 'repoze.sphinx.autointerface',\n 'pylons_sphinx_latesturl',\n 'pylons-sphinx-themes',\n 'sphinxcontrib-autoprogram',\n ]\n\ntesting_extras = tests_require + [\n 'nose',\n 'coverage',\n 'virtualenv', # for scaffolding tests\n ]\n\nsetup(name='pyramid',\n version='1.9.2',\n description='The Pyramid Web Framework, a Pylons project',\n long_description=README + '\\n\\n' + CHANGES,\n classifiers=[\n \"Development Status :: 6 - Mature\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Framework :: Pyramid\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI\",\n \"License :: Repoze Public License\",\n ],\n keywords='web wsgi pylons pyramid',\n author=\"Chris McDonough, Agendaless Consulting\",\n author_email=\"[email protected]\",\n url=\"https://trypyramid.com\",\n license=\"BSD-derived (http://www.repoze.org/LICENSE.txt)\",\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*',\n install_requires=install_requires,\n extras_require={\n 'testing': testing_extras,\n 'docs': docs_extras,\n },\n tests_require=tests_require,\n test_suite=\"pyramid.tests\",\n entry_points=\"\"\"\\\n [pyramid.scaffold]\n starter=pyramid.scaffolds:StarterProjectTemplate\n zodb=pyramid.scaffolds:ZODBProjectTemplate\n alchemy=pyramid.scaffolds:AlchemyProjectTemplate\n [pyramid.pshell_runner]\n python=pyramid.scripts.pshell:python_shell_runner\n [console_scripts]\n pcreate = pyramid.scripts.pcreate:main\n pserve = pyramid.scripts.pserve:main\n pshell = pyramid.scripts.pshell:main\n proutes = 
pyramid.scripts.proutes:main\n pviews = pyramid.scripts.pviews:main\n ptweens = pyramid.scripts.ptweens:main\n prequest = pyramid.scripts.prequest:main\n pdistreport = pyramid.scripts.pdistreport:main\n [paste.server_runner]\n wsgiref = pyramid.scripts.pserve:wsgiref_server_runner\n cherrypy = pyramid.scripts.pserve:cherrypy_server_runner\n \"\"\"\n )\n",
"path": "setup.py"
}
] | [
{
"content": "##############################################################################\n#\n# Copyright (c) 2008-2013 Agendaless Consulting and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the BSD-like license at\n# http://www.repoze.org/LICENSE.txt. A copy of the license should accompany\n# this distribution. THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL\n# EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND\n# FITNESS FOR A PARTICULAR PURPOSE\n#\n##############################################################################\n\nimport os\n\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\ntry:\n with open(os.path.join(here, 'README.rst')) as f:\n README = f.read()\n with open(os.path.join(here, 'CHANGES.txt')) as f:\n CHANGES = f.read()\nexcept IOError:\n README = CHANGES = ''\n\ninstall_requires = [\n 'setuptools',\n 'WebOb >= 1.7.0', # Response.has_body\n 'repoze.lru >= 0.4', # py3 compat\n 'zope.interface >= 3.8.0', # has zope.interface.registry\n 'zope.deprecation >= 3.5.0', # py3 compat\n 'venusian >= 1.0a3', # ``ignore``\n 'translationstring >= 0.4', # py3 compat\n 'PasteDeploy >= 1.5.0', # py3 compat\n 'plaster',\n 'plaster_pastedeploy',\n 'hupper',\n ]\n\ntests_require = [\n 'WebTest >= 1.3.1', # py3 compat\n 'zope.component >= 4.0', # py3 compat\n ]\n\n\ndocs_extras = [\n 'Sphinx >= 1.7.4',\n 'docutils',\n 'repoze.sphinx.autointerface',\n 'pylons_sphinx_latesturl',\n 'pylons-sphinx-themes',\n 'sphinxcontrib-autoprogram',\n ]\n\ntesting_extras = tests_require + [\n 'nose',\n 'coverage',\n 'virtualenv', # for scaffolding tests\n ]\n\nsetup(name='pyramid',\n version='1.9.2',\n description='The Pyramid Web Framework, a Pylons project',\n long_description=README + '\\n\\n' + CHANGES,\n classifiers=[\n \"Development Status :: 6 - Mature\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Framework :: Pyramid\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI\",\n \"License :: Repoze Public License\",\n ],\n keywords='web wsgi pylons pyramid',\n author=\"Chris McDonough, Agendaless Consulting\",\n author_email=\"[email protected]\",\n url=\"https://trypyramid.com\",\n license=\"BSD-derived (http://www.repoze.org/LICENSE.txt)\",\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*',\n install_requires=install_requires,\n extras_require={\n 'testing': testing_extras,\n 'docs': docs_extras,\n },\n tests_require=tests_require,\n test_suite=\"pyramid.tests\",\n entry_points=\"\"\"\\\n [pyramid.scaffold]\n starter=pyramid.scaffolds:StarterProjectTemplate\n zodb=pyramid.scaffolds:ZODBProjectTemplate\n alchemy=pyramid.scaffolds:AlchemyProjectTemplate\n [pyramid.pshell_runner]\n python=pyramid.scripts.pshell:python_shell_runner\n [console_scripts]\n pcreate = pyramid.scripts.pcreate:main\n pserve = pyramid.scripts.pserve:main\n pshell = pyramid.scripts.pshell:main\n proutes = pyramid.scripts.proutes:main\n 
pviews = pyramid.scripts.pviews:main\n ptweens = pyramid.scripts.ptweens:main\n prequest = pyramid.scripts.prequest:main\n pdistreport = pyramid.scripts.pdistreport:main\n [paste.server_runner]\n wsgiref = pyramid.scripts.pserve:wsgiref_server_runner\n cherrypy = pyramid.scripts.pserve:cherrypy_server_runner\n \"\"\"\n )\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index c06c5142a1..6a3c8d26b5 100644
--- a/setup.py
+++ b/setup.py
@@ -46,7 +46,7 @@
docs_extras = [
- 'Sphinx >= 1.3.5, != 1.7.3',
+ 'Sphinx >= 1.7.4',
'docutils',
'repoze.sphinx.autointerface',
'pylons_sphinx_latesturl',
|
InternLM__lmdeploy-205 | Serving gradio reports an error

Error: no module named lmdeploy.serve.gradio
The lmdeploy 0.0.2 Python package is already installed in the environment.
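A quick, generic way to confirm what the installed wheel actually provides (standard library only; not an lmdeploy-specific tool):

```python
# Print the installed lmdeploy version and check whether the gradio serving
# module was packaged with it. find_spec() returns None when the submodule is
# missing (and raises ModuleNotFoundError if the parent package is absent).
import importlib.util
from lmdeploy.version import __version__

print(__version__)  # e.g. 0.0.2
print(importlib.util.find_spec("lmdeploy.serve.gradio"))
```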
| [
{
"content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Tuple\n\n__version__ = '0.0.2'\nshort_version = __version__\n\n\ndef parse_version_info(version_str: str) -> Tuple:\n \"\"\"Parse version from a string.\n\n Args:\n version_str (str): A string represents a version info.\n\n Returns:\n tuple: A sequence of integer and string represents version.\n \"\"\"\n _version_info = []\n for x in version_str.split('.'):\n if x.isdigit():\n _version_info.append(int(x))\n elif x.find('rc') != -1:\n patch_version = x.split('rc')\n _version_info.append(int(patch_version[0]))\n _version_info.append(f'rc{patch_version[1]}')\n return tuple(_version_info)\n\n\nversion_info = parse_version_info(__version__)\n\n__all__ = ['__version__', 'version_info', 'parse_version_info']\n",
"path": "lmdeploy/version.py"
}
] | [
{
"content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Tuple\n\n__version__ = '0.0.3'\nshort_version = __version__\n\n\ndef parse_version_info(version_str: str) -> Tuple:\n \"\"\"Parse version from a string.\n\n Args:\n version_str (str): A string represents a version info.\n\n Returns:\n tuple: A sequence of integer and string represents version.\n \"\"\"\n _version_info = []\n for x in version_str.split('.'):\n if x.isdigit():\n _version_info.append(int(x))\n elif x.find('rc') != -1:\n patch_version = x.split('rc')\n _version_info.append(int(patch_version[0]))\n _version_info.append(f'rc{patch_version[1]}')\n return tuple(_version_info)\n\n\nversion_info = parse_version_info(__version__)\n\n__all__ = ['__version__', 'version_info', 'parse_version_info']\n",
"path": "lmdeploy/version.py"
}
] | diff --git a/lmdeploy/version.py b/lmdeploy/version.py
index 8960a13972..81a7f9de1a 100644
--- a/lmdeploy/version.py
+++ b/lmdeploy/version.py
@@ -1,7 +1,7 @@
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Tuple
-__version__ = '0.0.2'
+__version__ = '0.0.3'
short_version = __version__
|
mozmeao__basket-836 | Expand set of fields for PII scrubbing
Based on what I'm seeing, we should add `primary_email` to the list of fields we scrub before sending to Sentry.
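In `basket/settings.py` this amounts to one more entry in the masking list consumed by `before_send` (the diff below applies the same addition to the settings and the matching test):

```python
SENSITIVE_FIELDS_TO_MASK_ENTIRELY = [
    # ... existing entries such as "email", "first_name", "last_name" ...
    "payee_id",
    "primary_email",  # newly scrubbed before events reach Sentry
    "remote_addr",
    "remoteaddresschain",
    # ...
]
```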
| [
{
"content": "import os\nimport platform\nimport socket\nimport struct\nimport sys\nfrom datetime import timedelta\nfrom pathlib import Path\n\nimport dj_database_url\nimport django_cache_url\nimport sentry_sdk\nfrom decouple import Csv, UndefinedValueError, config\nfrom sentry_processor import DesensitizationProcessor\nfrom sentry_sdk.integrations.celery import CeleryIntegration\nfrom sentry_sdk.integrations.django import DjangoIntegration\nfrom sentry_sdk.integrations.logging import ignore_logger\n\n# Application version.\nVERSION = (0, 1)\n\n# ROOT path of the project. A pathlib.Path object.\nROOT_PATH = Path(__file__).resolve().parents[1]\nROOT = str(ROOT_PATH)\n\n\ndef path(*args):\n return str(ROOT_PATH.joinpath(*args))\n\n\nDEBUG = config(\"DEBUG\", default=False, cast=bool)\n\nADMINS = (\n # ('Your Name', '[email protected]'),\n)\n\nMANAGERS = ADMINS\n# avoids a warning from django\nTEST_RUNNER = \"django.test.runner.DiscoverRunner\"\n\n# DB read-only, API can still read-write to Salesforce\nREAD_ONLY_MODE = config(\"READ_ONLY_MODE\", False, cast=bool)\n# Disables the API and changes redirects\nADMIN_ONLY_MODE = config(\"ADMIN_ONLY_MODE\", False, cast=bool)\nBASKET_RW_URL = config(\n \"BASKET_RW_URL\",\n default=\"https://prod-oregon-b.basket.moz.works\",\n)\n\nREDIS_URL = config(\"REDIS_URL\", None)\nif REDIS_URL:\n REDIS_URL = REDIS_URL.rstrip(\"/0\")\n # use redis for celery and cache\n os.environ[\"CELERY_BROKER_URL\"] = REDIS_URL + \"/\" + config(\"REDIS_CELERY_DB\", \"0\")\n os.environ[\"CACHE_URL\"] = REDIS_URL + \"/\" + config(\"REDIS_CACHE_DB\", \"1\")\n\n# Production uses MySQL, but Sqlite should be sufficient for local development.\n# Our CI server tests against MySQL.\nDATABASES = {\n \"default\": config(\n \"DATABASE_URL\",\n default=\"sqlite:///basket.db\",\n cast=dj_database_url.parse,\n ),\n}\nif DATABASES[\"default\"][\"ENGINE\"] == \"django.db.backends.mysql\":\n DATABASES[\"default\"][\"OPTIONS\"] = {\n \"init_command\": \"SET sql_mode='STRICT_TRANS_TABLES'\",\n }\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\nCACHES = {\n \"default\": config(\"CACHE_URL\", default=\"locmem://\", cast=django_cache_url.parse),\n \"bad_message_ids\": {\n \"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\",\n \"TIMEOUT\": 12 * 60 * 60, # 12 hours\n },\n \"email_block_list\": {\n \"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\",\n \"TIMEOUT\": 60 * 60, # 1 hour\n },\n \"sfdc_sessions\": {\n \"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\",\n \"TIMEOUT\": 60 * 60, # 1 hour\n },\n \"product_details\": {\"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\"},\n}\n\nif CACHES[\"default\"][\"BACKEND\"].startswith(\"django_redis\"):\n options = CACHES[\"default\"].setdefault(\"OPTIONS\", {})\n options[\"PARSER_CLASS\"] = \"redis.connection.HiredisParser\"\n\ndefault_email_backend = (\n \"django.core.mail.backends.console.EmailBackend\"\n if DEBUG\n else \"django.core.mail.backends.smtp.EmailBackend\"\n)\nEMAIL_BACKEND = config(\"EMAIL_BACKEND\", default=default_email_backend)\nEMAIL_HOST = config(\"EMAIL_HOST\", default=\"localhost\")\nEMAIL_PORT = config(\"EMAIL_PORT\", default=25, cast=int)\nEMAIL_USE_TLS = config(\"EMAIL_USE_TLS\", default=False, cast=bool)\nEMAIL_SUBJECT_PREFIX = config(\"EMAIL_SUBJECT_PREFIX\", default=\"[basket] \")\nEMAIL_HOST_USER = config(\"EMAIL_HOST_USER\", default=\"\")\nEMAIL_HOST_PASSWORD = config(\"EMAIL_HOST_PASSWORD\", default=\"\")\n\nALLOWED_HOSTS = config(\n \"ALLOWED_HOSTS\",\n 
default=\".allizom.org, .moz.works, basket.mozmar.org, \"\n \"basket.mozilla.com, basket.mozilla.org\",\n cast=Csv(),\n)\nALLOWED_CIDR_NETS = config(\"ALLOWED_CIDR_NETS\", default=\"\", cast=Csv())\nENFORCE_HOSTNAME = config(\"ENFORCE_HOSTNAME\", default=\"\", cast=Csv())\nUSE_X_FORWARDED_HOST = True\n\nSESSION_COOKIE_SECURE = config(\"SESSION_COOKIE_SECURE\", not DEBUG, cast=bool)\nSESSION_ENGINE = config(\n \"SESSION_ENGINE\",\n default=\"django.contrib.sessions.backends.cache\",\n)\nCSRF_COOKIE_SECURE = config(\"CSRF_COOKIE_SECURE\", not DEBUG, cast=bool)\nDISABLE_ADMIN = config(\"DISABLE_ADMIN\", READ_ONLY_MODE, cast=bool)\nSTORE_TASK_FAILURES = config(\"STORE_TASK_FAILURES\", not READ_ONLY_MODE, cast=bool)\n# if DISABLE_ADMIN is True redirect /admin/ to this URL\nADMIN_REDIRECT_URL = config(\n \"ADMIN_REDIRECT_URL\",\n \"https://admin.basket.moz.works/admin/\",\n)\n\nTIME_ZONE = \"UTC\"\nUSE_TZ = True\nSITE_ID = 1\nUSE_I18N = False\n\nSTATIC_ROOT = path(\"static\")\nSTATIC_URL = \"/static/\"\nif not DEBUG:\n STATICFILES_STORAGE = \"whitenoise.storage.CompressedManifestStaticFilesStorage\"\n\ntry:\n # Make this unique, and don't share it with anybody.\n SECRET_KEY = config(\"SECRET_KEY\")\nexcept UndefinedValueError:\n raise UndefinedValueError(\n \"The SECRET_KEY environment variable is required. \"\n \"Move env-dist to .env if you want the defaults.\",\n )\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\"templates\"],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.contrib.auth.context_processors.auth\",\n \"django.template.context_processors.request\",\n \"django.contrib.messages.context_processors.messages\",\n \"basket.news.context_processors.settings\",\n ],\n },\n },\n]\n\nMIDDLEWARE = (\n \"allow_cidr.middleware.AllowCIDRMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n \"basket.news.middleware.EnforceHostnameMiddleware\",\n \"basket.news.middleware.HostnameMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"corsheaders.middleware.CorsMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"basket.news.middleware.GraphiteViewHitCountMiddleware\",\n \"django_statsd.middleware.GraphiteRequestTimingMiddleware\",\n \"django_statsd.middleware.GraphiteMiddleware\",\n \"ratelimit.middleware.RatelimitMiddleware\",\n)\n\nROOT_URLCONF = \"basket.urls\"\n\nINSTALLED_APPS = (\n \"basket.news\",\n \"basket.base\",\n \"corsheaders\",\n \"product_details\",\n \"django_extensions\",\n \"mozilla_django_oidc\",\n \"watchman\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.sites\",\n \"django.contrib.messages\",\n \"django.contrib.admin\",\n \"django.contrib.staticfiles\",\n)\n\n# SecurityMiddleware settings\nSECURE_HSTS_SECONDS = config(\"SECURE_HSTS_SECONDS\", default=\"0\", cast=int)\nSECURE_HSTS_INCLUDE_SUBDOMAINS = False\nSECURE_BROWSER_XSS_FILTER = config(\"SECURE_BROWSER_XSS_FILTER\", default=True, cast=bool)\nSECURE_CONTENT_TYPE_NOSNIFF = config(\n \"SECURE_CONTENT_TYPE_NOSNIFF\",\n default=True,\n cast=bool,\n)\nSECURE_SSL_REDIRECT = config(\"SECURE_SSL_REDIRECT\", default=False, cast=bool)\nSECURE_REDIRECT_EXEMPT = [\n r\"^healthz/$\",\n r\"^readiness/$\",\n]\nif 
config(\"USE_SECURE_PROXY_HEADER\", default=SECURE_SSL_REDIRECT, cast=bool):\n SECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n\n# watchman\nWATCHMAN_DISABLE_APM = True\nWATCHMAN_CHECKS = (\n \"watchman.checks.caches\",\n \"watchman.checks.databases\",\n)\n\n# Salesforce.com\n# legacy names\nEXACTTARGET_USE_SANDBOX = config(\"EXACTTARGET_USE_SANDBOX\", False, cast=bool)\nUSE_SANDBOX_BACKEND = config(\"USE_SANDBOX_BACKEND\", EXACTTARGET_USE_SANDBOX, cast=bool)\nSFDC_USE_SANDBOX = config(\"SFDC_USE_SANDBOX\", USE_SANDBOX_BACKEND, cast=bool)\nSFDC_SETTINGS = {\n \"username\": config(\"SFDC_USERNAME\", None),\n \"password\": config(\"SFDC_PASSWORD\", None),\n \"security_token\": config(\"SFDC_SEC_TOKEN\", None),\n \"domain\": \"test\" if SFDC_USE_SANDBOX else \"login\",\n}\ndefault_sfdc_enabled = bool(SFDC_SETTINGS[\"username\"])\nSFDC_ENABLED = config(\"SFDC_ENABLED\", default_sfdc_enabled, cast=bool)\n# default SFDC sessions timeout after 2 hours of inactivity. so they never timeout on\n# prod. Let's make it every 4 hours by default.\nSFDC_SESSION_TIMEOUT = config(\"SFDC_SESSION_TIMEOUT\", 60 * 60 * 4, cast=int)\nSFDC_REQUEST_TIMEOUT = config(\"SFDC_REQUEST_TIMEOUT\", 30, cast=int)\n\nACOUSTIC_CLIENT_ID = config(\"ACOUSTIC_CLIENT_ID\", None)\nACOUSTIC_CLIENT_SECRET = config(\"ACOUSTIC_CLIENT_SECRET\", None)\nACOUSTIC_REFRESH_TOKEN = config(\"ACOUSTIC_REFRESH_TOKEN\", None)\nACOUSTIC_SERVER_NUMBER = config(\"ACOUSTIC_SERVER_NUMBER\", None)\nACOUSTIC_FXA_TABLE_ID = config(\"ACOUSTIC_FXA_TABLE_ID\", None)\nACOUSTIC_FXA_LOG_ENABLED = config(\"ACOUSTIC_FXA_LOG_ENABLED\", True, cast=bool)\n\nACOUSTIC_TX_CLIENT_ID = config(\"ACOUSTIC_TX_CLIENT_ID\", None)\nACOUSTIC_TX_CLIENT_SECRET = config(\"ACOUSTIC_TX_CLIENT_SECRET\", None)\nACOUSTIC_TX_REFRESH_TOKEN = config(\"ACOUSTIC_TX_REFRESH_TOKEN\", None)\nACOUSTIC_TX_SERVER_NUMBER = config(\"ACOUSTIC_TX_SERVER_NUMBER\", None)\n# Send confirmation messages via Acoustic Transact\nSEND_CONFIRM_MESSAGES = config(\"SEND_CONFIRM_MESSAGES\", False, cast=bool)\n\n# Mozilla CTMS\nCTMS_ENV = config(\"CTMS_ENV\", \"\").lower()\nCTMS_ENABLED = config(\"CTMS_ENABLED\", False, cast=bool)\nif CTMS_ENV == \"stage\":\n default_url = \"https://ctms.stage.mozilla-ess.mozit.cloud\"\nelif CTMS_ENV == \"prod\":\n default_url = \"https://ctms.prod.mozilla-ess.mozit.cloud\"\nelse:\n default_url = \"\"\nCTMS_URL = config(\"CTMS_URL\", default_url)\nCTMS_CLIENT_ID = config(\"CTMS_CLIENT_ID\", None)\nCTMS_CLIENT_SECRET = config(\"CTMS_CLIENT_SECRET\", None)\n\nCORS_ORIGIN_ALLOW_ALL = True\nCORS_URLS_REGEX = r\"^/(news/|subscribe)\"\n\n# view rate limiting\nRATELIMIT_VIEW = \"basket.news.views.ratelimited\"\n\nKOMBU_FERNET_KEY = config(\"KOMBU_FERNET_KEY\", None)\n# for key rotation\nKOMBU_FERNET_KEY_PREVIOUS = config(\"KOMBU_FERNET_KEY_PREVIOUS\", None)\nCELERY_TASK_ALWAYS_EAGER = config(\"CELERY_TASK_ALWAYS_EAGER\", DEBUG, cast=bool)\nCELERY_TASK_SERIALIZER = \"json\"\nCELERY_TASK_ACKS_LATE = config(\"CELERY_TASK_ACKS_LATE\", True, cast=bool)\nCELERY_TASK_REJECT_ON_WORKER_LOST = False\nCELERY_ACCEPT_CONTENT = [\"json\"]\nCELERY_MAX_RETRY_DELAY_MINUTES = 2048\nCELERY_BROKER_TRANSPORT_OPTIONS = {\n \"visibility_timeout\": CELERY_MAX_RETRY_DELAY_MINUTES * 60,\n}\nCELERY_BROKER_URL = config(\"CELERY_BROKER_URL\", None)\nCELERY_REDIS_MAX_CONNECTIONS = config(\"CELERY_REDIS_MAX_CONNECTIONS\", 2, cast=int)\nCELERY_WORKER_DISABLE_RATE_LIMITS = True\nCELERY_TASK_IGNORE_RESULT = True\nCELERY_WORKER_PREFETCH_MULTIPLIER = config(\n \"CELERY_WORKER_PREFETCH_MULTIPLIER\",\n 
1,\n cast=int,\n)\nCELERY_TASK_COMPRESSION = \"gzip\"\nCELERY_TASK_ROUTES = {\n \"basket.news.tasks.snitch\": {\"queue\": \"snitch\"},\n}\n\n# size in kb\nCELERY_WORKER_MAX_MEMORY_PER_CHILD = config(\n \"CELERY_WORKER_MAX_MEMORY_PER_CHILD\",\n 200000,\n cast=int,\n)\n\nSNITCH_ID = config(\"SNITCH_ID\", None)\n\nCELERY_BEAT_SCHEDULE = {}\n\nif SNITCH_ID:\n CELERY_BEAT_SCHEDULE[\"snitch\"] = {\n \"task\": \"basket.news.tasks.snitch\",\n \"schedule\": timedelta(minutes=5),\n }\n\nif not READ_ONLY_MODE:\n CELERY_BEAT_SCHEDULE[\"common-voice\"] = {\n \"task\": \"basket.news.tasks.process_common_voice_batch\",\n \"schedule\": timedelta(hours=1),\n }\n\n\n# via http://stackoverflow.com/a/6556951/107114\ndef get_default_gateway_linux():\n \"\"\"Read the default gateway directly from /proc.\"\"\"\n try:\n with open(\"/proc/net/route\") as fh:\n for line in fh:\n fields = line.strip().split()\n if fields[1] != \"00000000\" or not int(fields[3], 16) & 2:\n continue\n\n return socket.inet_ntoa(struct.pack(\"<L\", int(fields[2], 16)))\n except IOError:\n return \"localhost\"\n\n\nHOSTNAME = platform.node()\nCLUSTER_NAME = config(\"CLUSTER_NAME\", default=None)\nK8S_NAMESPACE = config(\"K8S_NAMESPACE\", default=None)\nK8S_POD_NAME = config(\"K8S_POD_NAME\", default=None)\n\n# Data scrubbing before Sentry\n# https://github.com/laiyongtao/sentry-processor\nSENSITIVE_FIELDS_TO_MASK_ENTIRELY = [\n \"amo_id\",\n \"custom_id\",\n \"email\",\n \"first_name\",\n \"fxa_id\",\n \"id\",\n \"ip_address\",\n \"last_name\",\n \"mobile_number\",\n \"payee_id\",\n \"remote_addr\",\n \"remoteaddresschain\",\n \"token\",\n \"uid\",\n \"user\",\n \"x-forwarded-for\",\n]\n\nSENSITIVE_FIELDS_TO_MASK_PARTIALLY = []\n\n\ndef before_send(event, hint):\n processor = DesensitizationProcessor(\n with_default_keys=True,\n sensitive_keys=SENSITIVE_FIELDS_TO_MASK_ENTIRELY,\n # partial_keys=SENSITIVE_FIELDS_TO_MASK_PARTIALLY,\n # mask_position=POSITION.LEFT, # import from sentry_processor if you need it\n # off_set=3,\n )\n event = processor.process(event, hint)\n return event\n\n\nsentry_sdk.init(\n dsn=config(\"SENTRY_DSN\", None),\n release=config(\"GIT_SHA\", None),\n server_name=\".\".join(x for x in [K8S_NAMESPACE, CLUSTER_NAME, HOSTNAME] if x),\n integrations=[CeleryIntegration(), DjangoIntegration()],\n before_send=before_send,\n)\n\nSTATSD_HOST = config(\"STATSD_HOST\", get_default_gateway_linux())\nSTATSD_PORT = config(\"STATSD_PORT\", 8125, cast=int)\nSTATSD_PREFIX = config(\"STATSD_PREFIX\", K8S_NAMESPACE)\nSTATSD_CLIENT = config(\"STATSD_CLIENT\", \"django_statsd.clients.null\")\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"root\": {\n \"level\": config(\"DJANGO_LOG_LEVEL\", default=\"WARNING\"),\n \"handlers\": [\"console\"],\n },\n \"formatters\": {\n \"verbose\": {\"format\": \"%(levelname)s %(asctime)s %(module)s %(message)s\"},\n },\n \"handlers\": {\n \"console\": {\n \"level\": \"DEBUG\",\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"verbose\",\n },\n \"null\": {\"class\": \"logging.NullHandler\"},\n },\n \"loggers\": {\n \"django.db.backends\": {\n \"level\": \"ERROR\",\n \"handlers\": [\"console\"],\n \"propagate\": False,\n },\n \"suds.client\": {\"level\": \"ERROR\", \"handlers\": [\"console\"], \"propagate\": False},\n },\n}\n\n# DisallowedHost gets a lot of action thanks to scans/bots/scripts,\n# but we need not take any action because it's already HTTP 400-ed.\n# Note that we ignore at the Sentry client 
level\n\nignore_logger(\"django.security.DisallowedHost\")\n\nPROD_DETAILS_CACHE_NAME = \"product_details\"\nPROD_DETAILS_CACHE_TIMEOUT = None\n\nRECOVER_MSG_LANGS = config(\"RECOVER_MSG_LANGS\", \"en\", cast=Csv())\n# language codes that we support and send through to SFDC\n# regardless of their existence in the DB\nEXTRA_SUPPORTED_LANGS = config(\"EXTRA_SUPPORTED_LANGS\", \"\", cast=Csv())\n\nSYNC_KEY = config(\"SYNC_KEY\", None)\nTESTING_EMAIL_DOMAINS = config(\n \"TESTING_EMAIL_DOMAINS\",\n \"restmail.net,restmail.lcip.org,example.com\",\n cast=Csv(),\n)\n\nMAINTENANCE_MODE = config(\"MAINTENANCE_MODE\", False, cast=bool)\nQUEUE_BATCH_SIZE = config(\"QUEUE_BATCH_SIZE\", 500, cast=int)\n# can we read user data in maintenance mode\nMAINTENANCE_READ_ONLY = config(\"MAINTENANCE_READ_ONLY\", False, cast=bool)\n\nTASK_LOCK_TIMEOUT = config(\"TASK_LOCK_TIMEOUT\", 60, cast=int)\nTASK_LOCKING_ENABLE = config(\"TASK_LOCKING_ENABLE\", False, cast=bool)\n\nDONATE_ACCESS_KEY_ID = config(\"DONATE_ACCESS_KEY_ID\", default=\"\")\nDONATE_SECRET_ACCESS_KEY = config(\"DONATE_SECRET_ACCESS_KEY\", default=\"\")\nDONATE_QUEUE_REGION = config(\"DONATE_QUEUE_REGION\", default=\"\")\nDONATE_QUEUE_URL = config(\"DONATE_QUEUE_URL\", default=\"\")\nDONATE_QUEUE_WAIT_TIME = config(\"DONATE_QUEUE_WAIT_TIME\", cast=int, default=10)\n# turn this on to consume the queue but ignore the messages\n# needed so that donate.m.o can run continuous tests w/o filling the SFDC sandbox\nDONATE_QUEUE_IGNORE_MODE = config(\"DONATE_QUEUE_IGNORE_MODE\", cast=bool, default=False)\nDONATE_SEND_RECEIPTS = config(\"DONATE_SEND_RECEIPTS\", cast=bool, default=False)\nDONATE_RECEIPTS_BCC = config(\"DONATE_RECEIPTS_BCC\", \"\", cast=Csv())\nDONATE_OPP_RECORD_TYPE = config(\"DONATE_OPP_RECORD_TYPE\", default=\"\")\nDONATE_CONTACT_RECORD_TYPE = config(\"DONATE_CONTACT_RECORD_TYPE\", default=\"\")\nDONATE_SNITCH_ID = config(\"DONATE_SNITCH_ID\", default=\"\")\nDONATE_NOTIFY_EMAIL = config(\"DONATE_NOTIFY_EMAIL\", default=\"\")\nDONATE_UPDATE_FAIL_DE = config(\"DONATE_UPDATE_FAIL_DE\", default=\"Donation_Diff\")\n\nFXA_EVENTS_QUEUE_ENABLE = config(\"FXA_EVENTS_QUEUE_ENABLE\", cast=bool, default=False)\nFXA_EVENTS_QUEUE_IGNORE_MODE = config(\n \"FXA_EVENTS_QUEUE_IGNORE_MODE\",\n cast=bool,\n default=False,\n)\nFXA_EVENTS_ACCESS_KEY_ID = config(\"FXA_EVENTS_ACCESS_KEY_ID\", default=\"\")\nFXA_EVENTS_SECRET_ACCESS_KEY = config(\"FXA_EVENTS_SECRET_ACCESS_KEY\", default=\"\")\nFXA_EVENTS_QUEUE_REGION = config(\"FXA_EVENTS_QUEUE_REGION\", default=\"\")\nFXA_EVENTS_QUEUE_URL = config(\"FXA_EVENTS_QUEUE_URL\", default=\"\")\nFXA_EVENTS_QUEUE_WAIT_TIME = config(\"FXA_EVENTS_QUEUE_WAIT_TIME\", cast=int, default=10)\nFXA_EVENTS_SNITCH_ID = config(\"FXA_EVENTS_SNITCH_ID\", default=\"\")\nFXA_EVENTS_VERIFIED_SFDC_ENABLE = config(\n \"FXA_EVENTS_VERIFIED_SFDC_ENABLE\",\n cast=bool,\n default=False,\n)\n\n# stable, stage, or production\n# https://github.com/mozilla/PyFxA/blob/master/fxa/constants.py\nFXA_OAUTH_SERVER_ENV = config(\"FXA_OAUTH_SERVER_ENV\", default=\"stable\")\nFXA_CLIENT_ID = config(\"FXA_CLIENT_ID\", default=\"\")\nFXA_CLIENT_SECRET = config(\"FXA_CLIENT_SECRET\", default=\"\")\nFXA_OAUTH_TOKEN_TTL = config(\"FXA_OAUTH_TOKEN_TTL\", default=300, cast=int) # 5 minutes\n\nFXA_EMAIL_PREFS_DOMAIN = config(\"FXA_EMAIL_PREFS_DOMAIN\", default=\"www.mozilla.org\")\nFXA_REGISTER_NEWSLETTER = config(\n \"FXA_REGISTER_NEWSLETTER\",\n default=\"firefox-accounts-journey\",\n)\nFXA_REGISTER_SOURCE_URL = config(\n \"FXA_REGISTER_SOURCE_URL\",\n 
default=\"https://accounts.firefox.com/\",\n)\n# TODO move this to the DB\nFXA_LOGIN_CAMPAIGNS = {\n \"fxa-embedded-form-moz\": \"mozilla-welcome\",\n \"fxa-embedded-form-fx\": \"firefox-welcome\",\n \"membership-idealo\": \"member-idealo\",\n \"membership-comm\": \"member-comm\",\n \"membership-tech\": \"member-tech\",\n \"membership-tk\": \"member-tk\",\n}\n\nCOMMON_VOICE_NEWSLETTER = config(\"COMMON_VOICE_NEWSLETTER\", default=\"common-voice\")\nCOMMON_VOICE_BATCH_UPDATES = config(\n \"COMMON_VOICE_BATCH_UPDATES\",\n default=False,\n cast=bool,\n)\nCOMMON_VOICE_BATCH_PROCESSING = config(\n \"COMMON_VOICE_BATCH_PROCESSING\",\n default=False,\n cast=bool,\n)\nCOMMON_VOICE_BATCH_CHUNK_SIZE = config(\n \"COMMON_VOICE_BATCH_CHUNK_SIZE\",\n default=1000,\n cast=int,\n)\n\nOIDC_ENABLE = config(\"OIDC_ENABLE\", default=False, cast=bool)\nif OIDC_ENABLE:\n AUTHENTICATION_BACKENDS = (\"basket.base.authentication.OIDCModelBackend\",)\n OIDC_OP_AUTHORIZATION_ENDPOINT = config(\"OIDC_OP_AUTHORIZATION_ENDPOINT\")\n OIDC_OP_TOKEN_ENDPOINT = config(\"OIDC_OP_TOKEN_ENDPOINT\")\n OIDC_OP_USER_ENDPOINT = config(\"OIDC_OP_USER_ENDPOINT\")\n\n OIDC_RP_CLIENT_ID = config(\"OIDC_RP_CLIENT_ID\")\n OIDC_RP_CLIENT_SECRET = config(\"OIDC_RP_CLIENT_SECRET\")\n OIDC_CREATE_USER = config(\"OIDC_CREATE_USER\", default=False, cast=bool)\n MIDDLEWARE += (\"basket.news.middleware.OIDCSessionRefreshMiddleware\",)\n LOGIN_REDIRECT_URL = \"/admin/\"\n\nif (\n sys.argv[0].endswith(\"py.test\")\n or sys.argv[0].endswith(\"pytest\")\n or (len(sys.argv) > 1 and sys.argv[1] == \"test\")\n):\n # stuff that's absolutely required for a test run\n CELERY_TASK_ALWAYS_EAGER = True\n SFDC_SETTINGS.pop(\"username\", None)\n SFDC_SETTINGS.pop(\"password\", None)\n TESTING_EMAIL_DOMAINS = []\n",
"path": "basket/settings.py"
}
] | [
{
"content": "import os\nimport platform\nimport socket\nimport struct\nimport sys\nfrom datetime import timedelta\nfrom pathlib import Path\n\nimport dj_database_url\nimport django_cache_url\nimport sentry_sdk\nfrom decouple import Csv, UndefinedValueError, config\nfrom sentry_processor import DesensitizationProcessor\nfrom sentry_sdk.integrations.celery import CeleryIntegration\nfrom sentry_sdk.integrations.django import DjangoIntegration\nfrom sentry_sdk.integrations.logging import ignore_logger\n\n# Application version.\nVERSION = (0, 1)\n\n# ROOT path of the project. A pathlib.Path object.\nROOT_PATH = Path(__file__).resolve().parents[1]\nROOT = str(ROOT_PATH)\n\n\ndef path(*args):\n return str(ROOT_PATH.joinpath(*args))\n\n\nDEBUG = config(\"DEBUG\", default=False, cast=bool)\n\nADMINS = (\n # ('Your Name', '[email protected]'),\n)\n\nMANAGERS = ADMINS\n# avoids a warning from django\nTEST_RUNNER = \"django.test.runner.DiscoverRunner\"\n\n# DB read-only, API can still read-write to Salesforce\nREAD_ONLY_MODE = config(\"READ_ONLY_MODE\", False, cast=bool)\n# Disables the API and changes redirects\nADMIN_ONLY_MODE = config(\"ADMIN_ONLY_MODE\", False, cast=bool)\nBASKET_RW_URL = config(\n \"BASKET_RW_URL\",\n default=\"https://prod-oregon-b.basket.moz.works\",\n)\n\nREDIS_URL = config(\"REDIS_URL\", None)\nif REDIS_URL:\n REDIS_URL = REDIS_URL.rstrip(\"/0\")\n # use redis for celery and cache\n os.environ[\"CELERY_BROKER_URL\"] = REDIS_URL + \"/\" + config(\"REDIS_CELERY_DB\", \"0\")\n os.environ[\"CACHE_URL\"] = REDIS_URL + \"/\" + config(\"REDIS_CACHE_DB\", \"1\")\n\n# Production uses MySQL, but Sqlite should be sufficient for local development.\n# Our CI server tests against MySQL.\nDATABASES = {\n \"default\": config(\n \"DATABASE_URL\",\n default=\"sqlite:///basket.db\",\n cast=dj_database_url.parse,\n ),\n}\nif DATABASES[\"default\"][\"ENGINE\"] == \"django.db.backends.mysql\":\n DATABASES[\"default\"][\"OPTIONS\"] = {\n \"init_command\": \"SET sql_mode='STRICT_TRANS_TABLES'\",\n }\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\nCACHES = {\n \"default\": config(\"CACHE_URL\", default=\"locmem://\", cast=django_cache_url.parse),\n \"bad_message_ids\": {\n \"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\",\n \"TIMEOUT\": 12 * 60 * 60, # 12 hours\n },\n \"email_block_list\": {\n \"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\",\n \"TIMEOUT\": 60 * 60, # 1 hour\n },\n \"sfdc_sessions\": {\n \"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\",\n \"TIMEOUT\": 60 * 60, # 1 hour\n },\n \"product_details\": {\"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\"},\n}\n\nif CACHES[\"default\"][\"BACKEND\"].startswith(\"django_redis\"):\n options = CACHES[\"default\"].setdefault(\"OPTIONS\", {})\n options[\"PARSER_CLASS\"] = \"redis.connection.HiredisParser\"\n\ndefault_email_backend = (\n \"django.core.mail.backends.console.EmailBackend\"\n if DEBUG\n else \"django.core.mail.backends.smtp.EmailBackend\"\n)\nEMAIL_BACKEND = config(\"EMAIL_BACKEND\", default=default_email_backend)\nEMAIL_HOST = config(\"EMAIL_HOST\", default=\"localhost\")\nEMAIL_PORT = config(\"EMAIL_PORT\", default=25, cast=int)\nEMAIL_USE_TLS = config(\"EMAIL_USE_TLS\", default=False, cast=bool)\nEMAIL_SUBJECT_PREFIX = config(\"EMAIL_SUBJECT_PREFIX\", default=\"[basket] \")\nEMAIL_HOST_USER = config(\"EMAIL_HOST_USER\", default=\"\")\nEMAIL_HOST_PASSWORD = config(\"EMAIL_HOST_PASSWORD\", default=\"\")\n\nALLOWED_HOSTS = config(\n \"ALLOWED_HOSTS\",\n 
default=\".allizom.org, .moz.works, basket.mozmar.org, \"\n \"basket.mozilla.com, basket.mozilla.org\",\n cast=Csv(),\n)\nALLOWED_CIDR_NETS = config(\"ALLOWED_CIDR_NETS\", default=\"\", cast=Csv())\nENFORCE_HOSTNAME = config(\"ENFORCE_HOSTNAME\", default=\"\", cast=Csv())\nUSE_X_FORWARDED_HOST = True\n\nSESSION_COOKIE_SECURE = config(\"SESSION_COOKIE_SECURE\", not DEBUG, cast=bool)\nSESSION_ENGINE = config(\n \"SESSION_ENGINE\",\n default=\"django.contrib.sessions.backends.cache\",\n)\nCSRF_COOKIE_SECURE = config(\"CSRF_COOKIE_SECURE\", not DEBUG, cast=bool)\nDISABLE_ADMIN = config(\"DISABLE_ADMIN\", READ_ONLY_MODE, cast=bool)\nSTORE_TASK_FAILURES = config(\"STORE_TASK_FAILURES\", not READ_ONLY_MODE, cast=bool)\n# if DISABLE_ADMIN is True redirect /admin/ to this URL\nADMIN_REDIRECT_URL = config(\n \"ADMIN_REDIRECT_URL\",\n \"https://admin.basket.moz.works/admin/\",\n)\n\nTIME_ZONE = \"UTC\"\nUSE_TZ = True\nSITE_ID = 1\nUSE_I18N = False\n\nSTATIC_ROOT = path(\"static\")\nSTATIC_URL = \"/static/\"\nif not DEBUG:\n STATICFILES_STORAGE = \"whitenoise.storage.CompressedManifestStaticFilesStorage\"\n\ntry:\n # Make this unique, and don't share it with anybody.\n SECRET_KEY = config(\"SECRET_KEY\")\nexcept UndefinedValueError:\n raise UndefinedValueError(\n \"The SECRET_KEY environment variable is required. \"\n \"Move env-dist to .env if you want the defaults.\",\n )\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\"templates\"],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.contrib.auth.context_processors.auth\",\n \"django.template.context_processors.request\",\n \"django.contrib.messages.context_processors.messages\",\n \"basket.news.context_processors.settings\",\n ],\n },\n },\n]\n\nMIDDLEWARE = (\n \"allow_cidr.middleware.AllowCIDRMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n \"basket.news.middleware.EnforceHostnameMiddleware\",\n \"basket.news.middleware.HostnameMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"corsheaders.middleware.CorsMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"basket.news.middleware.GraphiteViewHitCountMiddleware\",\n \"django_statsd.middleware.GraphiteRequestTimingMiddleware\",\n \"django_statsd.middleware.GraphiteMiddleware\",\n \"ratelimit.middleware.RatelimitMiddleware\",\n)\n\nROOT_URLCONF = \"basket.urls\"\n\nINSTALLED_APPS = (\n \"basket.news\",\n \"basket.base\",\n \"corsheaders\",\n \"product_details\",\n \"django_extensions\",\n \"mozilla_django_oidc\",\n \"watchman\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.sites\",\n \"django.contrib.messages\",\n \"django.contrib.admin\",\n \"django.contrib.staticfiles\",\n)\n\n# SecurityMiddleware settings\nSECURE_HSTS_SECONDS = config(\"SECURE_HSTS_SECONDS\", default=\"0\", cast=int)\nSECURE_HSTS_INCLUDE_SUBDOMAINS = False\nSECURE_BROWSER_XSS_FILTER = config(\"SECURE_BROWSER_XSS_FILTER\", default=True, cast=bool)\nSECURE_CONTENT_TYPE_NOSNIFF = config(\n \"SECURE_CONTENT_TYPE_NOSNIFF\",\n default=True,\n cast=bool,\n)\nSECURE_SSL_REDIRECT = config(\"SECURE_SSL_REDIRECT\", default=False, cast=bool)\nSECURE_REDIRECT_EXEMPT = [\n r\"^healthz/$\",\n r\"^readiness/$\",\n]\nif 
config(\"USE_SECURE_PROXY_HEADER\", default=SECURE_SSL_REDIRECT, cast=bool):\n SECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n\n# watchman\nWATCHMAN_DISABLE_APM = True\nWATCHMAN_CHECKS = (\n \"watchman.checks.caches\",\n \"watchman.checks.databases\",\n)\n\n# Salesforce.com\n# legacy names\nEXACTTARGET_USE_SANDBOX = config(\"EXACTTARGET_USE_SANDBOX\", False, cast=bool)\nUSE_SANDBOX_BACKEND = config(\"USE_SANDBOX_BACKEND\", EXACTTARGET_USE_SANDBOX, cast=bool)\nSFDC_USE_SANDBOX = config(\"SFDC_USE_SANDBOX\", USE_SANDBOX_BACKEND, cast=bool)\nSFDC_SETTINGS = {\n \"username\": config(\"SFDC_USERNAME\", None),\n \"password\": config(\"SFDC_PASSWORD\", None),\n \"security_token\": config(\"SFDC_SEC_TOKEN\", None),\n \"domain\": \"test\" if SFDC_USE_SANDBOX else \"login\",\n}\ndefault_sfdc_enabled = bool(SFDC_SETTINGS[\"username\"])\nSFDC_ENABLED = config(\"SFDC_ENABLED\", default_sfdc_enabled, cast=bool)\n# default SFDC sessions timeout after 2 hours of inactivity. so they never timeout on\n# prod. Let's make it every 4 hours by default.\nSFDC_SESSION_TIMEOUT = config(\"SFDC_SESSION_TIMEOUT\", 60 * 60 * 4, cast=int)\nSFDC_REQUEST_TIMEOUT = config(\"SFDC_REQUEST_TIMEOUT\", 30, cast=int)\n\nACOUSTIC_CLIENT_ID = config(\"ACOUSTIC_CLIENT_ID\", None)\nACOUSTIC_CLIENT_SECRET = config(\"ACOUSTIC_CLIENT_SECRET\", None)\nACOUSTIC_REFRESH_TOKEN = config(\"ACOUSTIC_REFRESH_TOKEN\", None)\nACOUSTIC_SERVER_NUMBER = config(\"ACOUSTIC_SERVER_NUMBER\", None)\nACOUSTIC_FXA_TABLE_ID = config(\"ACOUSTIC_FXA_TABLE_ID\", None)\nACOUSTIC_FXA_LOG_ENABLED = config(\"ACOUSTIC_FXA_LOG_ENABLED\", True, cast=bool)\n\nACOUSTIC_TX_CLIENT_ID = config(\"ACOUSTIC_TX_CLIENT_ID\", None)\nACOUSTIC_TX_CLIENT_SECRET = config(\"ACOUSTIC_TX_CLIENT_SECRET\", None)\nACOUSTIC_TX_REFRESH_TOKEN = config(\"ACOUSTIC_TX_REFRESH_TOKEN\", None)\nACOUSTIC_TX_SERVER_NUMBER = config(\"ACOUSTIC_TX_SERVER_NUMBER\", None)\n# Send confirmation messages via Acoustic Transact\nSEND_CONFIRM_MESSAGES = config(\"SEND_CONFIRM_MESSAGES\", False, cast=bool)\n\n# Mozilla CTMS\nCTMS_ENV = config(\"CTMS_ENV\", \"\").lower()\nCTMS_ENABLED = config(\"CTMS_ENABLED\", False, cast=bool)\nif CTMS_ENV == \"stage\":\n default_url = \"https://ctms.stage.mozilla-ess.mozit.cloud\"\nelif CTMS_ENV == \"prod\":\n default_url = \"https://ctms.prod.mozilla-ess.mozit.cloud\"\nelse:\n default_url = \"\"\nCTMS_URL = config(\"CTMS_URL\", default_url)\nCTMS_CLIENT_ID = config(\"CTMS_CLIENT_ID\", None)\nCTMS_CLIENT_SECRET = config(\"CTMS_CLIENT_SECRET\", None)\n\nCORS_ORIGIN_ALLOW_ALL = True\nCORS_URLS_REGEX = r\"^/(news/|subscribe)\"\n\n# view rate limiting\nRATELIMIT_VIEW = \"basket.news.views.ratelimited\"\n\nKOMBU_FERNET_KEY = config(\"KOMBU_FERNET_KEY\", None)\n# for key rotation\nKOMBU_FERNET_KEY_PREVIOUS = config(\"KOMBU_FERNET_KEY_PREVIOUS\", None)\nCELERY_TASK_ALWAYS_EAGER = config(\"CELERY_TASK_ALWAYS_EAGER\", DEBUG, cast=bool)\nCELERY_TASK_SERIALIZER = \"json\"\nCELERY_TASK_ACKS_LATE = config(\"CELERY_TASK_ACKS_LATE\", True, cast=bool)\nCELERY_TASK_REJECT_ON_WORKER_LOST = False\nCELERY_ACCEPT_CONTENT = [\"json\"]\nCELERY_MAX_RETRY_DELAY_MINUTES = 2048\nCELERY_BROKER_TRANSPORT_OPTIONS = {\n \"visibility_timeout\": CELERY_MAX_RETRY_DELAY_MINUTES * 60,\n}\nCELERY_BROKER_URL = config(\"CELERY_BROKER_URL\", None)\nCELERY_REDIS_MAX_CONNECTIONS = config(\"CELERY_REDIS_MAX_CONNECTIONS\", 2, cast=int)\nCELERY_WORKER_DISABLE_RATE_LIMITS = True\nCELERY_TASK_IGNORE_RESULT = True\nCELERY_WORKER_PREFETCH_MULTIPLIER = config(\n \"CELERY_WORKER_PREFETCH_MULTIPLIER\",\n 
1,\n cast=int,\n)\nCELERY_TASK_COMPRESSION = \"gzip\"\nCELERY_TASK_ROUTES = {\n \"basket.news.tasks.snitch\": {\"queue\": \"snitch\"},\n}\n\n# size in kb\nCELERY_WORKER_MAX_MEMORY_PER_CHILD = config(\n \"CELERY_WORKER_MAX_MEMORY_PER_CHILD\",\n 200000,\n cast=int,\n)\n\nSNITCH_ID = config(\"SNITCH_ID\", None)\n\nCELERY_BEAT_SCHEDULE = {}\n\nif SNITCH_ID:\n CELERY_BEAT_SCHEDULE[\"snitch\"] = {\n \"task\": \"basket.news.tasks.snitch\",\n \"schedule\": timedelta(minutes=5),\n }\n\nif not READ_ONLY_MODE:\n CELERY_BEAT_SCHEDULE[\"common-voice\"] = {\n \"task\": \"basket.news.tasks.process_common_voice_batch\",\n \"schedule\": timedelta(hours=1),\n }\n\n\n# via http://stackoverflow.com/a/6556951/107114\ndef get_default_gateway_linux():\n \"\"\"Read the default gateway directly from /proc.\"\"\"\n try:\n with open(\"/proc/net/route\") as fh:\n for line in fh:\n fields = line.strip().split()\n if fields[1] != \"00000000\" or not int(fields[3], 16) & 2:\n continue\n\n return socket.inet_ntoa(struct.pack(\"<L\", int(fields[2], 16)))\n except IOError:\n return \"localhost\"\n\n\nHOSTNAME = platform.node()\nCLUSTER_NAME = config(\"CLUSTER_NAME\", default=None)\nK8S_NAMESPACE = config(\"K8S_NAMESPACE\", default=None)\nK8S_POD_NAME = config(\"K8S_POD_NAME\", default=None)\n\n# Data scrubbing before Sentry\n# https://github.com/laiyongtao/sentry-processor\nSENSITIVE_FIELDS_TO_MASK_ENTIRELY = [\n \"amo_id\",\n \"custom_id\",\n \"email\",\n \"first_name\",\n \"fxa_id\",\n \"id\",\n \"ip_address\",\n \"last_name\",\n \"mobile_number\",\n \"payee_id\",\n \"primary_email\",\n \"remote_addr\",\n \"remoteaddresschain\",\n \"token\",\n \"uid\",\n \"user\",\n \"x-forwarded-for\",\n]\n\nSENSITIVE_FIELDS_TO_MASK_PARTIALLY = []\n\n\ndef before_send(event, hint):\n processor = DesensitizationProcessor(\n with_default_keys=True,\n sensitive_keys=SENSITIVE_FIELDS_TO_MASK_ENTIRELY,\n # partial_keys=SENSITIVE_FIELDS_TO_MASK_PARTIALLY,\n # mask_position=POSITION.LEFT, # import from sentry_processor if you need it\n # off_set=3,\n )\n event = processor.process(event, hint)\n return event\n\n\nsentry_sdk.init(\n dsn=config(\"SENTRY_DSN\", None),\n release=config(\"GIT_SHA\", None),\n server_name=\".\".join(x for x in [K8S_NAMESPACE, CLUSTER_NAME, HOSTNAME] if x),\n integrations=[CeleryIntegration(), DjangoIntegration()],\n before_send=before_send,\n)\n\nSTATSD_HOST = config(\"STATSD_HOST\", get_default_gateway_linux())\nSTATSD_PORT = config(\"STATSD_PORT\", 8125, cast=int)\nSTATSD_PREFIX = config(\"STATSD_PREFIX\", K8S_NAMESPACE)\nSTATSD_CLIENT = config(\"STATSD_CLIENT\", \"django_statsd.clients.null\")\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"root\": {\n \"level\": config(\"DJANGO_LOG_LEVEL\", default=\"WARNING\"),\n \"handlers\": [\"console\"],\n },\n \"formatters\": {\n \"verbose\": {\"format\": \"%(levelname)s %(asctime)s %(module)s %(message)s\"},\n },\n \"handlers\": {\n \"console\": {\n \"level\": \"DEBUG\",\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"verbose\",\n },\n \"null\": {\"class\": \"logging.NullHandler\"},\n },\n \"loggers\": {\n \"django.db.backends\": {\n \"level\": \"ERROR\",\n \"handlers\": [\"console\"],\n \"propagate\": False,\n },\n \"suds.client\": {\"level\": \"ERROR\", \"handlers\": [\"console\"], \"propagate\": False},\n },\n}\n\n# DisallowedHost gets a lot of action thanks to scans/bots/scripts,\n# but we need not take any action because it's already HTTP 400-ed.\n# Note that we ignore at the Sentry client 
level\n\nignore_logger(\"django.security.DisallowedHost\")\n\nPROD_DETAILS_CACHE_NAME = \"product_details\"\nPROD_DETAILS_CACHE_TIMEOUT = None\n\nRECOVER_MSG_LANGS = config(\"RECOVER_MSG_LANGS\", \"en\", cast=Csv())\n# language codes that we support and send through to SFDC\n# regardless of their existence in the DB\nEXTRA_SUPPORTED_LANGS = config(\"EXTRA_SUPPORTED_LANGS\", \"\", cast=Csv())\n\nSYNC_KEY = config(\"SYNC_KEY\", None)\nTESTING_EMAIL_DOMAINS = config(\n \"TESTING_EMAIL_DOMAINS\",\n \"restmail.net,restmail.lcip.org,example.com\",\n cast=Csv(),\n)\n\nMAINTENANCE_MODE = config(\"MAINTENANCE_MODE\", False, cast=bool)\nQUEUE_BATCH_SIZE = config(\"QUEUE_BATCH_SIZE\", 500, cast=int)\n# can we read user data in maintenance mode\nMAINTENANCE_READ_ONLY = config(\"MAINTENANCE_READ_ONLY\", False, cast=bool)\n\nTASK_LOCK_TIMEOUT = config(\"TASK_LOCK_TIMEOUT\", 60, cast=int)\nTASK_LOCKING_ENABLE = config(\"TASK_LOCKING_ENABLE\", False, cast=bool)\n\nDONATE_ACCESS_KEY_ID = config(\"DONATE_ACCESS_KEY_ID\", default=\"\")\nDONATE_SECRET_ACCESS_KEY = config(\"DONATE_SECRET_ACCESS_KEY\", default=\"\")\nDONATE_QUEUE_REGION = config(\"DONATE_QUEUE_REGION\", default=\"\")\nDONATE_QUEUE_URL = config(\"DONATE_QUEUE_URL\", default=\"\")\nDONATE_QUEUE_WAIT_TIME = config(\"DONATE_QUEUE_WAIT_TIME\", cast=int, default=10)\n# turn this on to consume the queue but ignore the messages\n# needed so that donate.m.o can run continuous tests w/o filling the SFDC sandbox\nDONATE_QUEUE_IGNORE_MODE = config(\"DONATE_QUEUE_IGNORE_MODE\", cast=bool, default=False)\nDONATE_SEND_RECEIPTS = config(\"DONATE_SEND_RECEIPTS\", cast=bool, default=False)\nDONATE_RECEIPTS_BCC = config(\"DONATE_RECEIPTS_BCC\", \"\", cast=Csv())\nDONATE_OPP_RECORD_TYPE = config(\"DONATE_OPP_RECORD_TYPE\", default=\"\")\nDONATE_CONTACT_RECORD_TYPE = config(\"DONATE_CONTACT_RECORD_TYPE\", default=\"\")\nDONATE_SNITCH_ID = config(\"DONATE_SNITCH_ID\", default=\"\")\nDONATE_NOTIFY_EMAIL = config(\"DONATE_NOTIFY_EMAIL\", default=\"\")\nDONATE_UPDATE_FAIL_DE = config(\"DONATE_UPDATE_FAIL_DE\", default=\"Donation_Diff\")\n\nFXA_EVENTS_QUEUE_ENABLE = config(\"FXA_EVENTS_QUEUE_ENABLE\", cast=bool, default=False)\nFXA_EVENTS_QUEUE_IGNORE_MODE = config(\n \"FXA_EVENTS_QUEUE_IGNORE_MODE\",\n cast=bool,\n default=False,\n)\nFXA_EVENTS_ACCESS_KEY_ID = config(\"FXA_EVENTS_ACCESS_KEY_ID\", default=\"\")\nFXA_EVENTS_SECRET_ACCESS_KEY = config(\"FXA_EVENTS_SECRET_ACCESS_KEY\", default=\"\")\nFXA_EVENTS_QUEUE_REGION = config(\"FXA_EVENTS_QUEUE_REGION\", default=\"\")\nFXA_EVENTS_QUEUE_URL = config(\"FXA_EVENTS_QUEUE_URL\", default=\"\")\nFXA_EVENTS_QUEUE_WAIT_TIME = config(\"FXA_EVENTS_QUEUE_WAIT_TIME\", cast=int, default=10)\nFXA_EVENTS_SNITCH_ID = config(\"FXA_EVENTS_SNITCH_ID\", default=\"\")\nFXA_EVENTS_VERIFIED_SFDC_ENABLE = config(\n \"FXA_EVENTS_VERIFIED_SFDC_ENABLE\",\n cast=bool,\n default=False,\n)\n\n# stable, stage, or production\n# https://github.com/mozilla/PyFxA/blob/master/fxa/constants.py\nFXA_OAUTH_SERVER_ENV = config(\"FXA_OAUTH_SERVER_ENV\", default=\"stable\")\nFXA_CLIENT_ID = config(\"FXA_CLIENT_ID\", default=\"\")\nFXA_CLIENT_SECRET = config(\"FXA_CLIENT_SECRET\", default=\"\")\nFXA_OAUTH_TOKEN_TTL = config(\"FXA_OAUTH_TOKEN_TTL\", default=300, cast=int) # 5 minutes\n\nFXA_EMAIL_PREFS_DOMAIN = config(\"FXA_EMAIL_PREFS_DOMAIN\", default=\"www.mozilla.org\")\nFXA_REGISTER_NEWSLETTER = config(\n \"FXA_REGISTER_NEWSLETTER\",\n default=\"firefox-accounts-journey\",\n)\nFXA_REGISTER_SOURCE_URL = config(\n \"FXA_REGISTER_SOURCE_URL\",\n 
default=\"https://accounts.firefox.com/\",\n)\n# TODO move this to the DB\nFXA_LOGIN_CAMPAIGNS = {\n \"fxa-embedded-form-moz\": \"mozilla-welcome\",\n \"fxa-embedded-form-fx\": \"firefox-welcome\",\n \"membership-idealo\": \"member-idealo\",\n \"membership-comm\": \"member-comm\",\n \"membership-tech\": \"member-tech\",\n \"membership-tk\": \"member-tk\",\n}\n\nCOMMON_VOICE_NEWSLETTER = config(\"COMMON_VOICE_NEWSLETTER\", default=\"common-voice\")\nCOMMON_VOICE_BATCH_UPDATES = config(\n \"COMMON_VOICE_BATCH_UPDATES\",\n default=False,\n cast=bool,\n)\nCOMMON_VOICE_BATCH_PROCESSING = config(\n \"COMMON_VOICE_BATCH_PROCESSING\",\n default=False,\n cast=bool,\n)\nCOMMON_VOICE_BATCH_CHUNK_SIZE = config(\n \"COMMON_VOICE_BATCH_CHUNK_SIZE\",\n default=1000,\n cast=int,\n)\n\nOIDC_ENABLE = config(\"OIDC_ENABLE\", default=False, cast=bool)\nif OIDC_ENABLE:\n AUTHENTICATION_BACKENDS = (\"basket.base.authentication.OIDCModelBackend\",)\n OIDC_OP_AUTHORIZATION_ENDPOINT = config(\"OIDC_OP_AUTHORIZATION_ENDPOINT\")\n OIDC_OP_TOKEN_ENDPOINT = config(\"OIDC_OP_TOKEN_ENDPOINT\")\n OIDC_OP_USER_ENDPOINT = config(\"OIDC_OP_USER_ENDPOINT\")\n\n OIDC_RP_CLIENT_ID = config(\"OIDC_RP_CLIENT_ID\")\n OIDC_RP_CLIENT_SECRET = config(\"OIDC_RP_CLIENT_SECRET\")\n OIDC_CREATE_USER = config(\"OIDC_CREATE_USER\", default=False, cast=bool)\n MIDDLEWARE += (\"basket.news.middleware.OIDCSessionRefreshMiddleware\",)\n LOGIN_REDIRECT_URL = \"/admin/\"\n\nif (\n sys.argv[0].endswith(\"py.test\")\n or sys.argv[0].endswith(\"pytest\")\n or (len(sys.argv) > 1 and sys.argv[1] == \"test\")\n):\n # stuff that's absolutely required for a test run\n CELERY_TASK_ALWAYS_EAGER = True\n SFDC_SETTINGS.pop(\"username\", None)\n SFDC_SETTINGS.pop(\"password\", None)\n TESTING_EMAIL_DOMAINS = []\n",
"path": "basket/settings.py"
}
] | diff --git a/basket/base/tests/test__utils.py b/basket/base/tests/test__utils.py
index 0989b987d..d6ffe5c71 100644
--- a/basket/base/tests/test__utils.py
+++ b/basket/base/tests/test__utils.py
@@ -43,6 +43,7 @@ def test_pre_sentry_sanitisation__before_send_setup():
"last_name",
"mobile_number",
"payee_id",
+ "primary_email",
"remote_addr",
"remoteaddresschain",
"token",
diff --git a/basket/settings.py b/basket/settings.py
index d37245dfa..a40cb9e92 100644
--- a/basket/settings.py
+++ b/basket/settings.py
@@ -356,6 +356,7 @@ def get_default_gateway_linux():
"last_name",
"mobile_number",
"payee_id",
+ "primary_email",
"remote_addr",
"remoteaddresschain",
"token",
|
web2py__web2py-1498 | unittest response.render() inside scheduler
to prevent #1485 from happening again
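For context, the regression test added in the diff further down boils down to queueing a scheduler task that calls `response.render()` on a throwaway view and asserting on the stored result. A condensed sketch of that shape (a web2py model-file fragment, so `response`, the view file and the Scheduler instance `s` come from the scheduler's model environment rather than from imports; the assertion lines are paraphrased from the diff, not standalone code):
```
# Condensed sketch based on the diff below -- a model-file fragment, not a
# standalone script: the scheduler worker imports models, so `response`,
# the view file and the Scheduler instance `s` are provided by that environment.
def issue_1485():
    # rendering a view from inside a scheduler task is exactly what #1485 broke
    return response.render('issue_1485.html', dict(variable='abc'))

# in the test: queue the task, run one scheduler pass, then check the output
# task = s.queue_task('issue_1485')
# assert s.task_status(task.id, output=True).result == '<span>abc</span>'
```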
| [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n| This file is part of the web2py Web Framework\n| Developed by Massimo Di Pierro <[email protected]>,\n| limodou <[email protected]> and srackham <[email protected]>.\n| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)\n\nWeb2py environment in the shell\n--------------------------------\n\"\"\"\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport code\nimport copy\nimport logging\nimport types\nimport re\nimport optparse\nimport glob\nimport traceback\nimport gluon.fileutils as fileutils\nfrom gluon.settings import global_settings\nfrom gluon.utils import web2py_uuid\nfrom gluon.compileapp import build_environment, read_pyc, run_models_in\nfrom gluon.restricted import RestrictedError\nfrom gluon.globals import Request, Response, Session\nfrom gluon.storage import Storage, List\nfrom gluon.admin import w2p_unpack\nfrom pydal.base import BaseAdapter\nfrom gluon._compat import iteritems, ClassType\n\nlogger = logging.getLogger(\"web2py\")\n\n\ndef enable_autocomplete_and_history(adir, env):\n try:\n import rlcompleter\n import atexit\n import readline\n except ImportError:\n pass\n else:\n readline.parse_and_bind(\"tab: complete\")\n history_file = os.path.join(adir, '.pythonhistory')\n try:\n readline.read_history_file(history_file)\n except IOError:\n open(history_file, 'a').close()\n atexit.register(readline.write_history_file, history_file)\n readline.set_completer(rlcompleter.Completer(env).complete)\n\n\ndef exec_environment(\n pyfile='',\n request=None,\n response=None,\n session=None,\n):\n \"\"\"Environment builder and module loader.\n\n Builds a web2py environment and optionally executes a Python file into\n the environment.\n\n A Storage dictionary containing the resulting environment is returned.\n The working directory must be web2py root -- this is the web2py default.\n\n \"\"\"\n\n if request is None:\n request = Request({})\n if response is None:\n response = Response()\n if session is None:\n session = Session()\n\n if request.folder is None:\n mo = re.match(r'(|.*/)applications/(?P<appname>[^/]+)', pyfile)\n if mo:\n appname = mo.group('appname')\n request.folder = os.path.join('applications', appname)\n else:\n request.folder = ''\n env = build_environment(request, response, session, store_current=False)\n if pyfile:\n pycfile = pyfile + 'c'\n if os.path.isfile(pycfile):\n exec (read_pyc(pycfile), env)\n else:\n execfile(pyfile, env)\n return Storage(env)\n\n\ndef env(\n a,\n import_models=False,\n c=None,\n f=None,\n dir='',\n extra_request={},\n):\n \"\"\"\n Returns web2py execution environment for application (a), controller (c),\n function (f).\n If import_models is True the exec all application models into the\n environment.\n\n extra_request allows you to pass along any extra variables to the request\n object before your models get executed. 
This was mainly done to support\n web2py_utils.test_runner, however you can use it with any wrapper scripts\n that need access to the web2py environment.\n \"\"\"\n\n request = Request({})\n response = Response()\n session = Session()\n request.application = a\n\n # Populate the dummy environment with sensible defaults.\n\n if not dir:\n request.folder = os.path.join('applications', a)\n else:\n request.folder = dir\n request.controller = c or 'default'\n request.function = f or 'index'\n response.view = '%s/%s.html' % (request.controller,\n request.function)\n if global_settings.cmd_options:\n ip = global_settings.cmd_options.ip\n port = global_settings.cmd_options.port\n request.is_shell = global_settings.cmd_options.shell is not None\n request.is_scheduler = global_settings.cmd_options.scheduler is not None\n else:\n ip, port = '127.0.0.1', '8000'\n request.env.http_host = '%s:%s' % (ip, port)\n request.env.remote_addr = '127.0.0.1'\n request.env.web2py_runtime_gae = global_settings.web2py_runtime_gae\n\n for k, v in extra_request.items():\n request[k] = v\n\n path_info = '/%s/%s/%s' % (a, c, f)\n if request.args:\n path_info = '%s/%s' % (path_info, '/'.join(request.args))\n if request.vars:\n vars = ['%s=%s' % (k, v) if v else '%s' % k\n for (k, v) in iteritems(request.vars)]\n path_info = '%s?%s' % (path_info, '&'.join(vars))\n request.env.path_info = path_info\n\n # Monkey patch so credentials checks pass.\n\n def check_credentials(request, other_application='admin'):\n return True\n\n fileutils.check_credentials = check_credentials\n\n environment = build_environment(request, response, session)\n\n if import_models:\n try:\n run_models_in(environment)\n except RestrictedError as e:\n sys.stderr.write(e.traceback + '\\n')\n sys.exit(1)\n\n response._view_environment = copy.copy(environment)\n\n environment['__name__'] = '__main__'\n return environment\n\n\ndef exec_pythonrc():\n pythonrc = os.environ.get('PYTHONSTARTUP')\n if pythonrc and os.path.isfile(pythonrc):\n def execfile_getlocals(file):\n execfile(file)\n return locals()\n try:\n return execfile_getlocals(pythonrc)\n except NameError:\n pass\n return dict()\n\n\ndef run(\n appname,\n plain=False,\n import_models=False,\n startfile=None,\n bpython=False,\n python_code=False,\n cronjob=False):\n \"\"\"\n Start interactive shell or run Python script (startfile) in web2py\n controller environment. appname is formatted like:\n\n - a : web2py application name\n - a/c : exec the controller c into the application environment\n \"\"\"\n\n (a, c, f, args, vars) = parse_path_info(appname, av=True)\n errmsg = 'invalid application name: %s' % appname\n if not a:\n die(errmsg)\n adir = os.path.join('applications', a)\n\n if not os.path.exists(adir):\n if sys.stdin and not sys.stdin.name == '/dev/null':\n confirm = raw_input(\n 'application %s does not exist, create (y/n)?' 
% a)\n else:\n logging.warn('application does not exist and will not be created')\n return\n if confirm.lower() in ['y', 'yes']:\n\n os.mkdir(adir)\n w2p_unpack('welcome.w2p', adir)\n for subfolder in ['models', 'views', 'controllers', 'databases',\n 'modules', 'cron', 'errors', 'sessions',\n 'languages', 'static', 'private', 'uploads']:\n subpath = os.path.join(adir, subfolder)\n if not os.path.exists(subpath):\n os.mkdir(subpath)\n db = os.path.join(adir, 'models/db.py')\n if os.path.exists(db):\n data = fileutils.read_file(db)\n data = data.replace(\n '<your secret key>', 'sha512:' + web2py_uuid())\n fileutils.write_file(db, data)\n\n if c:\n import_models = True\n extra_request = {}\n if args:\n extra_request['args'] = args\n if vars:\n extra_request['vars'] = vars\n _env = env(a, c=c, f=f, import_models=import_models, extra_request=extra_request)\n if c:\n pyfile = os.path.join('applications', a, 'controllers', c + '.py')\n pycfile = os.path.join('applications', a, 'compiled',\n \"controllers_%s_%s.pyc\" % (c, f))\n if ((cronjob and os.path.isfile(pycfile))\n or not os.path.isfile(pyfile)):\n exec(read_pyc(pycfile), _env)\n elif os.path.isfile(pyfile):\n execfile(pyfile, _env)\n else:\n die(errmsg)\n\n if f:\n exec('print %s()' % f, _env)\n return\n\n _env.update(exec_pythonrc())\n if startfile:\n try:\n ccode = None\n if startfile.endswith('.pyc'):\n ccode = read_pyc(startfile)\n exec(ccode, _env)\n else:\n execfile(startfile, _env)\n\n if import_models:\n BaseAdapter.close_all_instances('commit')\n except Exception as e:\n print(traceback.format_exc())\n if import_models:\n BaseAdapter.close_all_instances('rollback')\n elif python_code:\n try:\n exec(python_code, _env)\n if import_models:\n BaseAdapter.close_all_instances('commit')\n except Exception as e:\n print(traceback.format_exc())\n if import_models:\n BaseAdapter.close_all_instances('rollback')\n else:\n if not plain:\n if bpython:\n try:\n import bpython\n bpython.embed(locals_=_env)\n return\n except:\n logger.warning(\n 'import bpython error; trying ipython...')\n else:\n try:\n import IPython\n if IPython.__version__ > '1.0.0':\n IPython.start_ipython(user_ns=_env)\n return\n elif IPython.__version__ == '1.0.0':\n from IPython.terminal.embed import InteractiveShellEmbed\n shell = InteractiveShellEmbed(user_ns=_env)\n shell()\n return\n elif IPython.__version__ >= '0.11':\n from IPython.frontend.terminal.embed import InteractiveShellEmbed\n shell = InteractiveShellEmbed(user_ns=_env)\n shell()\n return\n else:\n # following 2 lines fix a problem with\n # IPython; thanks Michael Toomim\n if '__builtins__' in _env:\n del _env['__builtins__']\n shell = IPython.Shell.IPShell(argv=[], user_ns=_env)\n shell.mainloop()\n return\n except:\n logger.warning(\n 'import IPython error; use default python shell')\n enable_autocomplete_and_history(adir, _env)\n code.interact(local=_env)\n\n\ndef parse_path_info(path_info, av=False):\n \"\"\"\n Parses path info formatted like a/c/f where c and f are optional\n and a leading `/` is accepted.\n Return tuple (a, c, f). If invalid path_info a is set to None.\n If c or f are omitted they are set to None.\n If av=True, parse args and vars\n \"\"\"\n if av:\n vars = None\n if '?' 
in path_info:\n path_info, query = path_info.split('?', 2)\n vars = Storage()\n for var in query.split('&'):\n (var, val) = var.split('=', 2) if '=' in var else (var, None)\n vars[var] = val\n items = List(path_info.split('/'))\n args = List(items[3:]) if len(items) > 3 else None\n return (items(0), items(1), items(2), args, vars)\n\n mo = re.match(r'^/?(?P<a>\\w+)(/(?P<c>\\w+)(/(?P<f>\\w+))?)?$',\n path_info)\n if mo:\n return (mo.group('a'), mo.group('c'), mo.group('f'))\n else:\n return (None, None, None)\n\n\ndef die(msg):\n print(msg, file=sys.stderr)\n sys.exit(1)\n\n\ndef test(testpath, import_models=True, verbose=False):\n \"\"\"\n Run doctests in web2py environment. testpath is formatted like:\n\n - a: tests all controllers in application a\n - a/c: tests controller c in application a\n - a/c/f test function f in controller c, application a\n\n Where a, c and f are application, controller and function names\n respectively. If the testpath is a file name the file is tested.\n If a controller is specified models are executed by default.\n \"\"\"\n\n import doctest\n if os.path.isfile(testpath):\n mo = re.match(r'(|.*/)applications/(?P<a>[^/]+)', testpath)\n if not mo:\n die('test file is not in application directory: %s'\n % testpath)\n a = mo.group('a')\n c = f = None\n files = [testpath]\n else:\n (a, c, f) = parse_path_info(testpath)\n errmsg = 'invalid test path: %s' % testpath\n if not a:\n die(errmsg)\n cdir = os.path.join('applications', a, 'controllers')\n if not os.path.isdir(cdir):\n die(errmsg)\n if c:\n cfile = os.path.join(cdir, c + '.py')\n if not os.path.isfile(cfile):\n die(errmsg)\n files = [cfile]\n else:\n files = glob.glob(os.path.join(cdir, '*.py'))\n for testfile in files:\n globs = env(a, import_models)\n ignores = globs.keys()\n execfile(testfile, globs)\n\n def doctest_object(name, obj):\n \"\"\"doctest obj and enclosed methods and classes.\"\"\"\n\n if type(obj) in (types.FunctionType, type, ClassType, types.MethodType,\n types.UnboundMethodType):\n\n # Reload environment before each test.\n\n globs = env(a, c=c, f=f, import_models=import_models)\n execfile(testfile, globs)\n doctest.run_docstring_examples(\n obj, globs=globs,\n name='%s: %s' % (os.path.basename(testfile),\n name), verbose=verbose)\n if type(obj) in (type, ClassType):\n for attr_name in dir(obj):\n\n # Execute . 
operator so decorators are executed.\n\n o = eval('%s.%s' % (name, attr_name), globs)\n doctest_object(attr_name, o)\n\n for (name, obj) in globs.items():\n if name not in ignores and (f is None or f == name):\n doctest_object(name, obj)\n\n\ndef get_usage():\n usage = \"\"\"\n %prog [options] pythonfile\n\"\"\"\n return usage\n\n\ndef execute_from_command_line(argv=None):\n if argv is None:\n argv = sys.argv\n\n parser = optparse.OptionParser(usage=get_usage())\n\n parser.add_option('-S', '--shell', dest='shell', metavar='APPNAME',\n help='run web2py in interactive shell ' +\n 'or IPython(if installed) with specified appname')\n msg = 'run web2py in interactive shell or bpython (if installed) with'\n msg += ' specified appname (if app does not exist it will be created).'\n msg += '\\n Use combined with --shell'\n parser.add_option(\n '-B',\n '--bpython',\n action='store_true',\n default=False,\n dest='bpython',\n help=msg,\n )\n parser.add_option(\n '-P',\n '--plain',\n action='store_true',\n default=False,\n dest='plain',\n help='only use plain python shell, should be used with --shell option',\n )\n parser.add_option(\n '-M',\n '--import_models',\n action='store_true',\n default=False,\n dest='import_models',\n help='auto import model files, default is False, ' +\n ' should be used with --shell option',\n )\n parser.add_option(\n '-R',\n '--run',\n dest='run',\n metavar='PYTHON_FILE',\n default='',\n help='run PYTHON_FILE in web2py environment, ' +\n 'should be used with --shell option',\n )\n\n (options, args) = parser.parse_args(argv[1:])\n\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(0)\n\n if len(args) > 0:\n startfile = args[0]\n else:\n startfile = ''\n run(options.shell, options.plain, startfile=startfile,\n bpython=options.bpython)\n\n\nif __name__ == '__main__':\n execute_from_command_line()\n",
"path": "gluon/shell.py"
}
] | [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n| This file is part of the web2py Web Framework\n| Developed by Massimo Di Pierro <[email protected]>,\n| limodou <[email protected]> and srackham <[email protected]>.\n| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)\n\nWeb2py environment in the shell\n--------------------------------\n\"\"\"\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport code\nimport copy\nimport logging\nimport types\nimport re\nimport optparse\nimport glob\nimport traceback\nimport gluon.fileutils as fileutils\nfrom gluon.settings import global_settings\nfrom gluon.utils import web2py_uuid\nfrom gluon.compileapp import build_environment, read_pyc, run_models_in\nfrom gluon.restricted import RestrictedError\nfrom gluon.globals import Request, Response, Session\nfrom gluon.storage import Storage, List\nfrom gluon.admin import w2p_unpack\nfrom pydal.base import BaseAdapter\nfrom gluon._compat import iteritems, ClassType\n\nlogger = logging.getLogger(\"web2py\")\n\n\ndef enable_autocomplete_and_history(adir, env):\n try:\n import rlcompleter\n import atexit\n import readline\n except ImportError:\n pass\n else:\n readline.parse_and_bind(\"tab: complete\")\n history_file = os.path.join(adir, '.pythonhistory')\n try:\n readline.read_history_file(history_file)\n except IOError:\n open(history_file, 'a').close()\n atexit.register(readline.write_history_file, history_file)\n readline.set_completer(rlcompleter.Completer(env).complete)\n\n\ndef exec_environment(\n pyfile='',\n request=None,\n response=None,\n session=None,\n):\n \"\"\"Environment builder and module loader.\n\n Builds a web2py environment and optionally executes a Python file into\n the environment.\n\n A Storage dictionary containing the resulting environment is returned.\n The working directory must be web2py root -- this is the web2py default.\n\n \"\"\"\n\n if request is None:\n request = Request({})\n if response is None:\n response = Response()\n if session is None:\n session = Session()\n\n if request.folder is None:\n mo = re.match(r'(|.*/)applications/(?P<appname>[^/]+)', pyfile)\n if mo:\n appname = mo.group('appname')\n request.folder = os.path.join('applications', appname)\n else:\n request.folder = ''\n env = build_environment(request, response, session, store_current=False)\n if pyfile:\n pycfile = pyfile + 'c'\n if os.path.isfile(pycfile):\n exec (read_pyc(pycfile), env)\n else:\n execfile(pyfile, env)\n return Storage(env)\n\n\ndef env(\n a,\n import_models=False,\n c=None,\n f=None,\n dir='',\n extra_request={},\n):\n \"\"\"\n Returns web2py execution environment for application (a), controller (c),\n function (f).\n If import_models is True the exec all application models into the\n environment.\n\n extra_request allows you to pass along any extra variables to the request\n object before your models get executed. 
This was mainly done to support\n web2py_utils.test_runner, however you can use it with any wrapper scripts\n that need access to the web2py environment.\n \"\"\"\n\n request = Request({})\n response = Response()\n session = Session()\n request.application = a\n\n # Populate the dummy environment with sensible defaults.\n\n if not dir:\n request.folder = os.path.join('applications', a)\n else:\n request.folder = dir\n request.controller = c or 'default'\n request.function = f or 'index'\n response.view = '%s/%s.html' % (request.controller,\n request.function)\n if global_settings.cmd_options:\n ip = global_settings.cmd_options.ip\n port = global_settings.cmd_options.port\n request.is_shell = global_settings.cmd_options.shell is not None\n request.is_scheduler = global_settings.cmd_options.scheduler is not None\n else:\n ip, port = '127.0.0.1', '8000'\n request.env.http_host = '%s:%s' % (ip, port)\n request.env.remote_addr = '127.0.0.1'\n request.env.web2py_runtime_gae = global_settings.web2py_runtime_gae\n\n for k, v in extra_request.items():\n request[k] = v\n\n path_info = '/%s/%s/%s' % (a, c, f)\n if request.args:\n path_info = '%s/%s' % (path_info, '/'.join(request.args))\n if request.vars:\n vars = ['%s=%s' % (k, v) if v else '%s' % k\n for (k, v) in iteritems(request.vars)]\n path_info = '%s?%s' % (path_info, '&'.join(vars))\n request.env.path_info = path_info\n\n # Monkey patch so credentials checks pass.\n\n def check_credentials(request, other_application='admin'):\n return True\n\n fileutils.check_credentials = check_credentials\n\n environment = build_environment(request, response, session)\n\n if import_models:\n try:\n run_models_in(environment)\n except RestrictedError as e:\n sys.stderr.write(e.traceback + '\\n')\n sys.exit(1)\n\n response._view_environment = copy.copy(environment)\n\n environment['__name__'] = '__main__'\n return environment\n\n\ndef exec_pythonrc():\n pythonrc = os.environ.get('PYTHONSTARTUP')\n if pythonrc and os.path.isfile(pythonrc):\n def execfile_getlocals(file):\n execfile(file)\n return locals()\n try:\n return execfile_getlocals(pythonrc)\n except NameError:\n pass\n return dict()\n\n\ndef run(\n appname,\n plain=False,\n import_models=False,\n startfile=None,\n bpython=False,\n python_code=False,\n cronjob=False):\n \"\"\"\n Start interactive shell or run Python script (startfile) in web2py\n controller environment. appname is formatted like:\n\n - a : web2py application name\n - a/c : exec the controller c into the application environment\n \"\"\"\n\n (a, c, f, args, vars) = parse_path_info(appname, av=True)\n errmsg = 'invalid application name: %s' % appname\n if not a:\n die(errmsg)\n adir = os.path.join('applications', a)\n\n if not os.path.exists(adir):\n if sys.stdin and not sys.stdin.name == '/dev/null':\n confirm = raw_input(\n 'application %s does not exist, create (y/n)?' 
% a)\n else:\n logging.warn('application does not exist and will not be created')\n return\n if confirm.lower() in ['y', 'yes']:\n\n os.mkdir(adir)\n w2p_unpack('welcome.w2p', adir)\n for subfolder in ['models', 'views', 'controllers', 'databases',\n 'modules', 'cron', 'errors', 'sessions',\n 'languages', 'static', 'private', 'uploads']:\n subpath = os.path.join(adir, subfolder)\n if not os.path.exists(subpath):\n os.mkdir(subpath)\n db = os.path.join(adir, 'models/db.py')\n if os.path.exists(db):\n data = fileutils.read_file(db)\n data = data.replace(\n '<your secret key>', 'sha512:' + web2py_uuid())\n fileutils.write_file(db, data)\n\n if c:\n import_models = True\n extra_request = {}\n if args:\n extra_request['args'] = args\n if vars:\n extra_request['vars'] = vars\n _env = env(a, c=c, f=f, import_models=import_models, extra_request=extra_request)\n if c:\n pyfile = os.path.join('applications', a, 'controllers', c + '.py')\n pycfile = os.path.join('applications', a, 'compiled',\n \"controllers_%s_%s.pyc\" % (c, f))\n if ((cronjob and os.path.isfile(pycfile))\n or not os.path.isfile(pyfile)):\n exec(read_pyc(pycfile), _env)\n elif os.path.isfile(pyfile):\n execfile(pyfile, _env)\n else:\n die(errmsg)\n\n if f:\n exec('print( %s())' % f, _env)\n return\n\n _env.update(exec_pythonrc())\n if startfile:\n try:\n ccode = None\n if startfile.endswith('.pyc'):\n ccode = read_pyc(startfile)\n exec(ccode, _env)\n else:\n execfile(startfile, _env)\n\n if import_models:\n BaseAdapter.close_all_instances('commit')\n except Exception as e:\n print(traceback.format_exc())\n if import_models:\n BaseAdapter.close_all_instances('rollback')\n elif python_code:\n try:\n exec(python_code, _env)\n if import_models:\n BaseAdapter.close_all_instances('commit')\n except Exception as e:\n print(traceback.format_exc())\n if import_models:\n BaseAdapter.close_all_instances('rollback')\n else:\n if not plain:\n if bpython:\n try:\n import bpython\n bpython.embed(locals_=_env)\n return\n except:\n logger.warning(\n 'import bpython error; trying ipython...')\n else:\n try:\n import IPython\n if IPython.__version__ > '1.0.0':\n IPython.start_ipython(user_ns=_env)\n return\n elif IPython.__version__ == '1.0.0':\n from IPython.terminal.embed import InteractiveShellEmbed\n shell = InteractiveShellEmbed(user_ns=_env)\n shell()\n return\n elif IPython.__version__ >= '0.11':\n from IPython.frontend.terminal.embed import InteractiveShellEmbed\n shell = InteractiveShellEmbed(user_ns=_env)\n shell()\n return\n else:\n # following 2 lines fix a problem with\n # IPython; thanks Michael Toomim\n if '__builtins__' in _env:\n del _env['__builtins__']\n shell = IPython.Shell.IPShell(argv=[], user_ns=_env)\n shell.mainloop()\n return\n except:\n logger.warning(\n 'import IPython error; use default python shell')\n enable_autocomplete_and_history(adir, _env)\n code.interact(local=_env)\n\n\ndef parse_path_info(path_info, av=False):\n \"\"\"\n Parses path info formatted like a/c/f where c and f are optional\n and a leading `/` is accepted.\n Return tuple (a, c, f). If invalid path_info a is set to None.\n If c or f are omitted they are set to None.\n If av=True, parse args and vars\n \"\"\"\n if av:\n vars = None\n if '?' 
in path_info:\n path_info, query = path_info.split('?', 2)\n vars = Storage()\n for var in query.split('&'):\n (var, val) = var.split('=', 2) if '=' in var else (var, None)\n vars[var] = val\n items = List(path_info.split('/'))\n args = List(items[3:]) if len(items) > 3 else None\n return (items(0), items(1), items(2), args, vars)\n\n mo = re.match(r'^/?(?P<a>\\w+)(/(?P<c>\\w+)(/(?P<f>\\w+))?)?$',\n path_info)\n if mo:\n return (mo.group('a'), mo.group('c'), mo.group('f'))\n else:\n return (None, None, None)\n\n\ndef die(msg):\n print(msg, file=sys.stderr)\n sys.exit(1)\n\n\ndef test(testpath, import_models=True, verbose=False):\n \"\"\"\n Run doctests in web2py environment. testpath is formatted like:\n\n - a: tests all controllers in application a\n - a/c: tests controller c in application a\n - a/c/f test function f in controller c, application a\n\n Where a, c and f are application, controller and function names\n respectively. If the testpath is a file name the file is tested.\n If a controller is specified models are executed by default.\n \"\"\"\n\n import doctest\n if os.path.isfile(testpath):\n mo = re.match(r'(|.*/)applications/(?P<a>[^/]+)', testpath)\n if not mo:\n die('test file is not in application directory: %s'\n % testpath)\n a = mo.group('a')\n c = f = None\n files = [testpath]\n else:\n (a, c, f) = parse_path_info(testpath)\n errmsg = 'invalid test path: %s' % testpath\n if not a:\n die(errmsg)\n cdir = os.path.join('applications', a, 'controllers')\n if not os.path.isdir(cdir):\n die(errmsg)\n if c:\n cfile = os.path.join(cdir, c + '.py')\n if not os.path.isfile(cfile):\n die(errmsg)\n files = [cfile]\n else:\n files = glob.glob(os.path.join(cdir, '*.py'))\n for testfile in files:\n globs = env(a, import_models)\n ignores = globs.keys()\n execfile(testfile, globs)\n\n def doctest_object(name, obj):\n \"\"\"doctest obj and enclosed methods and classes.\"\"\"\n\n if type(obj) in (types.FunctionType, type, ClassType, types.MethodType,\n types.UnboundMethodType):\n\n # Reload environment before each test.\n\n globs = env(a, c=c, f=f, import_models=import_models)\n execfile(testfile, globs)\n doctest.run_docstring_examples(\n obj, globs=globs,\n name='%s: %s' % (os.path.basename(testfile),\n name), verbose=verbose)\n if type(obj) in (type, ClassType):\n for attr_name in dir(obj):\n\n # Execute . 
operator so decorators are executed.\n\n o = eval('%s.%s' % (name, attr_name), globs)\n doctest_object(attr_name, o)\n\n for (name, obj) in globs.items():\n if name not in ignores and (f is None or f == name):\n doctest_object(name, obj)\n\n\ndef get_usage():\n usage = \"\"\"\n %prog [options] pythonfile\n\"\"\"\n return usage\n\n\ndef execute_from_command_line(argv=None):\n if argv is None:\n argv = sys.argv\n\n parser = optparse.OptionParser(usage=get_usage())\n\n parser.add_option('-S', '--shell', dest='shell', metavar='APPNAME',\n help='run web2py in interactive shell ' +\n 'or IPython(if installed) with specified appname')\n msg = 'run web2py in interactive shell or bpython (if installed) with'\n msg += ' specified appname (if app does not exist it will be created).'\n msg += '\\n Use combined with --shell'\n parser.add_option(\n '-B',\n '--bpython',\n action='store_true',\n default=False,\n dest='bpython',\n help=msg,\n )\n parser.add_option(\n '-P',\n '--plain',\n action='store_true',\n default=False,\n dest='plain',\n help='only use plain python shell, should be used with --shell option',\n )\n parser.add_option(\n '-M',\n '--import_models',\n action='store_true',\n default=False,\n dest='import_models',\n help='auto import model files, default is False, ' +\n ' should be used with --shell option',\n )\n parser.add_option(\n '-R',\n '--run',\n dest='run',\n metavar='PYTHON_FILE',\n default='',\n help='run PYTHON_FILE in web2py environment, ' +\n 'should be used with --shell option',\n )\n\n (options, args) = parser.parse_args(argv[1:])\n\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(0)\n\n if len(args) > 0:\n startfile = args[0]\n else:\n startfile = ''\n run(options.shell, options.plain, startfile=startfile,\n bpython=options.bpython)\n\n\nif __name__ == '__main__':\n execute_from_command_line()\n",
"path": "gluon/shell.py"
}
] | diff --git a/gluon/shell.py b/gluon/shell.py
index 9d93f1a6d..b6c8f27ba 100644
--- a/gluon/shell.py
+++ b/gluon/shell.py
@@ -254,7 +254,7 @@ def run(
die(errmsg)
if f:
- exec('print %s()' % f, _env)
+ exec('print( %s())' % f, _env)
return
_env.update(exec_pythonrc())
diff --git a/gluon/tests/test_scheduler.py b/gluon/tests/test_scheduler.py
index b473fe7b5..ee1270fba 100644
--- a/gluon/tests/test_scheduler.py
+++ b/gluon/tests/test_scheduler.py
@@ -601,7 +601,8 @@ def inner_teardown(self):
fdest = os.path.join(current.request.folder, 'models', 'scheduler.py')
os.unlink(fdest)
additional_files = [
- os.path.join(current.request.folder, 'private', 'demo8.pholder')
+ os.path.join(current.request.folder, 'private', 'demo8.pholder'),
+ os.path.join(current.request.folder, 'views', 'issue_1485_2.html'),
]
for f in additional_files:
try:
@@ -609,6 +610,12 @@ def inner_teardown(self):
except:
pass
+ def writeview(self, content, dest=None):
+ from gluon import current
+ fdest = os.path.join(current.request.folder, 'views', dest)
+ with open(fdest, 'w') as q:
+ q.write(content)
+
def writefunction(self, content, initlines=None):
from gluon import current
fdest = os.path.join(current.request.folder, 'models', 'scheduler.py')
@@ -620,6 +627,9 @@ def writefunction(self, content, initlines=None):
db_dal = os.path.abspath(os.path.join(request.folder, '..', '..', 'dummy2.db'))
sched_dal = DAL('sqlite://%s' % db_dal, folder=os.path.dirname(db_dal))
sched = Scheduler(sched_dal, max_empty_runs=15, migrate=False, heartbeat=1)
+def termination():
+ sched.terminate()
+ sched_dal.commit()
"""
with open(fdest, 'w') as q:
q.write(initlines)
@@ -699,10 +709,11 @@ def testNoReturn_and_Timeout_and_Progress(self):
timeout1 = s.queue_task('demo4', timeout=5)
timeout2 = s.queue_task('demo4')
progress = s.queue_task('demo6', sync_output=2)
+ termination = s.queue_task('termination')
self.db.commit()
self.writefunction(r"""
def demo3():
- time.sleep(15)
+ time.sleep(3)
print(1/0)
return None
@@ -712,7 +723,7 @@ def demo4():
return dict(a=1, b=2)
def demo5():
- time.sleep(15)
+ time.sleep(3)
print("I'm printing something")
rtn = dict(a=1, b=2)
@@ -758,6 +769,7 @@ def testDrift_and_env_and_immediate(self):
immediate = s.queue_task('demo1', ['a', 'b'], dict(c=1, d=2), immediate=True)
env = s.queue_task('demo7')
drift = s.queue_task('demo1', ['a', 'b'], dict(c=1, d=2), period=93, prevent_drift=True)
+ termination = s.queue_task('termination')
self.db.commit()
self.writefunction(r"""
def demo1(*args,**vars):
@@ -844,27 +856,41 @@ def demo8():
]
self.exec_asserts(res, 'FAILED_CONSECUTIVE')
- def testHugeResult(self):
+ def testRegressions(self):
s = Scheduler(self.db)
huge_result = s.queue_task('demo10', retry_failed=1, period=1)
+ issue_1485 = s.queue_task('issue_1485')
+ termination = s.queue_task('termination')
self.db.commit()
self.writefunction(r"""
def demo10():
res = 'a' * 99999
return dict(res=res)
+
+def issue_1485():
+ return response.render('issue_1485.html', dict(variable='abc'))
""")
+ self.writeview(r"""<span>{{=variable}}</span>""", 'issue_1485.html')
ret = self.exec_sched()
# process finished just fine
self.assertEqual(ret, 0)
# huge_result - checks
- task = s.task_status(huge_result.id, output=True)
+ task_huge = s.task_status(huge_result.id, output=True)
res = [
- ("task status completed", task.scheduler_task.status == 'COMPLETED'),
- ("task times_run is 1", task.scheduler_task.times_run == 1),
- ("result is the correct one", task.result == dict(res='a' * 99999))
+ ("task status completed", task_huge.scheduler_task.status == 'COMPLETED'),
+ ("task times_run is 1", task_huge.scheduler_task.times_run == 1),
+ ("result is the correct one", task_huge.result == dict(res='a' * 99999))
]
self.exec_asserts(res, 'HUGE_RESULT')
+ task_issue_1485 = s.task_status(issue_1485.id, output=True)
+ res = [
+ ("task status completed", task_issue_1485.scheduler_task.status == 'COMPLETED'),
+ ("task times_run is 1", task_issue_1485.scheduler_task.times_run == 1),
+ ("result is the correct one", task_issue_1485.result == '<span>abc</span>')
+ ]
+ self.exec_asserts(res, 'issue_1485')
+
if __name__ == '__main__':
unittest.main()
|
searx__searx-2391 | SUGGESTION: Contacting the instance's maintainer(s)
Hello, so I use searx, but I personally think that there should be some way to contact the maintainer(s) of a public instance (email, for example). It is harder to trust this awesome service if there is no way to contact the maintainer(s).
| [
{
"content": "GIT_URL = 'https://github.com/searx/searx'\nGIT_BRANCH = 'master'\nISSUE_URL = 'https://github.com/searx/searx/issues'\nSEARX_URL = 'https://searx.me'\nDOCS_URL = 'https://searx.github.io/searx'\nPUBLIC_INSTANCES = 'https://searx.space'\n",
"path": "searx/brand.py"
}
] | [
{
"content": "GIT_URL = 'https://github.com/searx/searx'\nGIT_BRANCH = 'master'\nISSUE_URL = 'https://github.com/searx/searx/issues'\nSEARX_URL = 'https://searx.me'\nDOCS_URL = 'https://searx.github.io/searx'\nPUBLIC_INSTANCES = 'https://searx.space'\nCONTACT_URL = 'mailto:[email protected]'\n",
"path": "searx/brand.py"
}
] | diff --git a/Makefile b/Makefile
index 7704b337f6..8a4a38e1c4 100644
--- a/Makefile
+++ b/Makefile
@@ -6,6 +6,7 @@ export GIT_URL=https://github.com/searx/searx
export GIT_BRANCH=master
export SEARX_URL=https://searx.me
export DOCS_URL=https://searx.github.io/searx
+export CONTACT_URL=mailto:[email protected]
# END Makefile setup
include utils/makefile.include
@@ -46,6 +47,7 @@ help-min:
@echo ' SEARX_URL = $(SEARX_URL)'
@echo ' GIT_URL = $(GIT_URL)'
@echo ' DOCS_URL = $(DOCS_URL)'
+ @echo ' CONTACT_URL = $(CONTACT_URL)'
@echo ''
@$(MAKE) -e -s make-help
@@ -124,6 +126,7 @@ buildenv:
$(Q)echo "SEARX_URL = '$(SEARX_URL)'" >> searx/brand.py
$(Q)echo "DOCS_URL = '$(DOCS_URL)'" >> searx/brand.py
$(Q)echo "PUBLIC_INSTANCES = 'https://searx.space'" >> searx/brand.py
+ $(Q)echo "CONTACT_URL = '$(CONTACT_URL)'" >> searx/brand.py
$(Q)echo "build utils/brand.env"
$(Q)echo "export GIT_URL='$(GIT_URL)'" > utils/brand.env
$(Q)echo "export GIT_BRANCH='$(GIT_BRANCH)'" >> utils/brand.env
@@ -131,6 +134,7 @@ buildenv:
$(Q)echo "export SEARX_URL='$(SEARX_URL)'" >> utils/brand.env
$(Q)echo "export DOCS_URL='$(DOCS_URL)'" >> utils/brand.env
$(Q)echo "export PUBLIC_INSTANCES='https://searx.space'" >> utils/brand.env
+ $(Q)echo "export CONTACT_URL='$(CONTACT_URL)'" >> utils/brand.env
# node / npm
diff --git a/searx/brand.py b/searx/brand.py
index d71c57db7c..0eaaf0be39 100644
--- a/searx/brand.py
+++ b/searx/brand.py
@@ -4,3 +4,4 @@
SEARX_URL = 'https://searx.me'
DOCS_URL = 'https://searx.github.io/searx'
PUBLIC_INSTANCES = 'https://searx.space'
+CONTACT_URL = 'mailto:[email protected]'
diff --git a/searx/templates/oscar/base.html b/searx/templates/oscar/base.html
index 7b3d33f7a3..c8f390d3a0 100644
--- a/searx/templates/oscar/base.html
+++ b/searx/templates/oscar/base.html
@@ -88,7 +88,8 @@
{{ _('Powered by') }} <a href="{{ brand.DOCS_URL }}">searx</a> - {{ searx_version }} - {{ _('a privacy-respecting, hackable metasearch engine') }}<br/>
<a href="{{ brand.GIT_URL }}">{{ _('Source code') }}</a> |
<a href="{{ brand.ISSUE_URL }}">{{ _('Issue tracker') }}</a> |
- <a href="{{ brand.PUBLIC_INSTANCES }}">{{ _('Public instances') }}</a>
+ <a href="{{ brand.PUBLIC_INSTANCES }}">{{ _('Public instances') }}</a> |
+ <a href="{{ brand.CONTACT_URL }}">{{ _('Contact instance maintainer') }}</a>
</small>
</p>
</div>
diff --git a/searx/templates/simple/base.html b/searx/templates/simple/base.html
index 10fb424bf0..2318f6bfce 100644
--- a/searx/templates/simple/base.html
+++ b/searx/templates/simple/base.html
@@ -54,7 +54,8 @@
{{ _('Powered by') }} <a href="{{ url_for('about') }}">searx</a> - {{ searx_version }} — {{ _('a privacy-respecting, hackable metasearch engine') }}<br/>
<a href="{{ brand.GIT_URL }}">{{ _('Source code') }}</a> |
<a href="{{ brand.ISSUE_URL }}">{{ _('Issue tracker') }}</a> |
- <a href="{{ brand.PUBLIC_INSTANCES }}">{{ _('Public instances') }}</a>
+ <a href="{{ brand.PUBLIC_INSTANCES }}">{{ _('Public instances') }}</a> |
+ <a href="{{ brand.CONTACT_URL }}">{{ _('Contact instance maintainer') }}</a>
</p>
</footer>
<!--[if gte IE 9]>-->
diff --git a/utils/brand.env b/utils/brand.env
index 55244bd724..660160e43d 100644
--- a/utils/brand.env
+++ b/utils/brand.env
@@ -4,3 +4,4 @@ export ISSUE_URL='https://github.com/searx/searx/issues'
export SEARX_URL='https://searx.me'
export DOCS_URL='https://searx.github.io/searx'
export PUBLIC_INSTANCES='https://searx.space'
+export CONTACT_URL='mailto:[email protected]'
|
pymodbus-dev__pymodbus-1395 | pip show pymodbus misses information.
```
(pymodbus) pymodbus % pip show pymodbus
Name: pymodbus
Version: 3.1.x
Summary: A fully featured modbus protocol stack in python
Home-page: https://github.com/pymodbus-dev/pymodbus/
Author: attr: pymodbus.__author__
Author-email:
License: BSD-3-Clause
Location: /Users/jan/repos/pymodbus
Editable project location: /Users/jan/repos/pymodbus
Requires: setuptools
Required-by:
```
Normally it gets the information from setup.cfg, but for some reason it does not work with "pip show".
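One way to see exactly which metadata actually got packaged, independent of `pip show`'s formatting, is to read it back with the standard library. A small diagnostic sketch, assuming Python 3.8+ and that pymodbus is installed in the active environment:
```
# Diagnostic sketch only, using just the standard library (Python 3.8+);
# assumes pymodbus is installed in the current virtualenv.
from importlib.metadata import metadata

md = metadata("pymodbus")
print(md["Author"])        # shows the literal string that was packaged,
print(md["Author-email"])  # e.g. the unresolved "attr: pymodbus.__author__"
```
If the `attr:` directive shows up verbatim here, the value was never resolved at build time, which matches the `pip show` output above.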
| [
{
"content": "\"\"\"Pymodbus: Modbus Protocol Implementation.\n\nReleased under the the BSD license\n\"\"\"\n\nfrom logging import WARNING\n\nimport pymodbus.version as __version\nfrom pymodbus.logging import Log\n\n\n__version__ = __version.version.short()\n__author__ = \"Galen Collins\"\n__maintainer__ = \"dhoomakethu, janiversen\"\n\n\ndef pymodbus_apply_logging_config(level=WARNING):\n \"\"\"Apply basic logging configuration used by default by Pymodbus maintainers.\n\n Please call this function to format logging appropriately when opening issues.\n \"\"\"\n Log.apply_logging_config(level)\n",
"path": "pymodbus/__init__.py"
}
] | [
{
"content": "\"\"\"Pymodbus: Modbus Protocol Implementation.\n\nReleased under the the BSD license\n\"\"\"\n\nfrom logging import WARNING\n\nimport pymodbus.version as __version\nfrom pymodbus.logging import Log\n\n\n__version__ = __version.version.short()\n__author__ = \"Galen Collins, Jan Iversen\"\n__maintainer__ = \"dhoomakethu, janiversen\"\n\n\ndef pymodbus_apply_logging_config(level=WARNING):\n \"\"\"Apply basic logging configuration used by default by Pymodbus maintainers.\n\n Please call this function to format logging appropriately when opening issues.\n \"\"\"\n Log.apply_logging_config(level)\n",
"path": "pymodbus/__init__.py"
}
] | diff --git a/pymodbus/__init__.py b/pymodbus/__init__.py
index 89bc4b9df..2d61c1290 100644
--- a/pymodbus/__init__.py
+++ b/pymodbus/__init__.py
@@ -10,7 +10,7 @@
__version__ = __version.version.short()
-__author__ = "Galen Collins"
+__author__ = "Galen Collins, Jan Iversen"
__maintainer__ = "dhoomakethu, janiversen"
diff --git a/setup.cfg b/setup.cfg
index 90eeeb230..46645d00b 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -2,8 +2,8 @@
[metadata]
name = pymodbus
version = attr: pymodbus.__version__
-author = attr: pymodbus.__author__
-maintainer = attr: pymodbus.__maintainer__
+author = "Galen Collins, Jan Iversen"
+maintainer = "dhoomakethu, janiversen"
license = BSD-3-Clause
platforms = 'Linux', 'Mac OS X', 'Win'
description = A fully featured modbus protocol stack in python
|
piskvorky__gensim-919 | import gensim fails since updating to Xcode 7.3
I just updated my version of Xcode to 7.3. When I ran `pip install --upgrade gensim`, the process completed without any issues. However, when I try `import gensim` within the Python shell, the terminal barfs a bunch of C++ output with a block of compilation errors that begins with:
`Exception: Compilation failed (return status=1): clang: error: unsupported option '-b mi2'. clang: error: unsupported option '-b mi'. clang: error: unknown argument: '-target-feature'. clang: error: unknown argument: '-sse4a'. clang: error: unknown argument: '-target-feature'. clang: error: unknown argument: '-target-feature'. clang: error: unknown argument: '-target-feature'. clang: error: unknown argument: '-tbm'. clang: error: unknown argument: '-target-feature'. clang: error: unknown argument: '-target-feature'....`
I think this has something to do with where gensim is looking for its header files, but I'm somewhat at a loss. Any help debugging would be greatly appreciated.
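A quick way to narrow this down is to capture the full Python traceback at import time, so the submodule that kicks off the C compilation (and hence the clang errors) becomes visible. A minimal diagnostic sketch using only the standard library, assuming gensim is installed:
```
# Diagnostic sketch only: print the complete traceback so the submodule that
# triggers compilation during `import gensim` can be identified.
import traceback

try:
    import gensim  # noqa: F401
except Exception:
    traceback.print_exc()
```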
| [
{
"content": "\"\"\"\nThis package contains implementations of various streaming corpus I/O format.\n\"\"\"\n\n# bring corpus classes directly into package namespace, to save some typing\nfrom .indexedcorpus import IndexedCorpus # must appear before the other classes\n\nfrom .mmcorpus import MmCorpus\nfrom .bleicorpus import BleiCorpus\nfrom .svmlightcorpus import SvmLightCorpus\nfrom .lowcorpus import LowCorpus\nfrom .dictionary import Dictionary\nfrom .hashdictionary import HashDictionary\nfrom .wikicorpus import WikiCorpus\nfrom .textcorpus import TextCorpus\nfrom .ucicorpus import UciCorpus\nfrom .malletcorpus import MalletCorpus\nfrom .sharded_corpus import ShardedCorpus\n",
"path": "gensim/corpora/__init__.py"
}
] | [
{
"content": "\"\"\"\nThis package contains implementations of various streaming corpus I/O format.\n\"\"\"\n\n# bring corpus classes directly into package namespace, to save some typing\nfrom .indexedcorpus import IndexedCorpus # must appear before the other classes\n\nfrom .mmcorpus import MmCorpus\nfrom .bleicorpus import BleiCorpus\nfrom .svmlightcorpus import SvmLightCorpus\nfrom .lowcorpus import LowCorpus\nfrom .dictionary import Dictionary\nfrom .hashdictionary import HashDictionary\nfrom .wikicorpus import WikiCorpus\nfrom .textcorpus import TextCorpus\nfrom .ucicorpus import UciCorpus\nfrom .malletcorpus import MalletCorpus\n",
"path": "gensim/corpora/__init__.py"
}
] | diff --git a/gensim/corpora/__init__.py b/gensim/corpora/__init__.py
index a577cdc59d..a11a0df229 100644
--- a/gensim/corpora/__init__.py
+++ b/gensim/corpora/__init__.py
@@ -15,4 +15,3 @@
from .textcorpus import TextCorpus
from .ucicorpus import UciCorpus
from .malletcorpus import MalletCorpus
-from .sharded_corpus import ShardedCorpus
|
searx__searx-2358 | Bug in external command engine, resulting in engine crash.
**Version of Searx, commit number if you are using on master branch and stipulate if you forked Searx**
```
commit a0ddc27766271428d6f1f906c774cf6f5ccbf3fa (HEAD -> master)
Merge: 8c887382 cdceec1c
Author: Searx Service Account <[email protected]>
Date: Sat Dec 5 17:21:41 2020 -0800
Merge branch 'master' of https://github.com/asciimoo/searx
```
**How did you install Searx?**
Installed using instructions from the official wiki, by hand.
**What happened?**
I went to the Searx page on my server and ran the query `!locate art bell mp3` after enabling the Locate search engine in `/opt/searx/searx/searx/settings.yml`.
**How To Reproduce**
Enable the Locate search engine in settings.yml thusly:
```
- name: locate
engine: command
command: ['locate', '--existing', '--ignore-case', '{{QUERY}}']
shortcut: locate
tokens: []
disabled: False
delimiter:
chars: ' '
keys: ['line']
```
Restart Searx.
Execute a `!locate` search while watching Searx's output, either by running it by hand or using `journalctl -xf` to tail the systemd journal.
**Expected behavior**
Searx runs the `locate` command on the server and returns the results.
**Screenshots & Logs**
Logs (datestamp, hostname, and PID elided to prevent having to scroll back and forth repeatedly):
```
: Traceback (most recent call last):
: File "/opt/searx/searx/searx/search.py", line 281, in search_one_offline_request_safe
: search_results = search_one_offline_request(engine, query, request_params)
: File "/opt/searx/searx/searx/search.py", line 274, in search_one_offline_request
: return engine.search(query, request_params)
: File "/opt/searx/searx/searx/engines/command.py", line 70, in search
: cmd = _get_command_to_run(query)
: File "/opt/searx/searx/searx/engines/command.py", line 83, in _get_command_to_run
: params = shlex_split(query.decode('utf-8'))
: AttributeError: 'str' object has no attribute 'decode'
```
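The traceback boils down to calling `.decode()` on a value that is already a `str` under Python 3. A stand-alone illustration of the failure, plus one tolerant way to normalise the query before `shlex.split` (illustration only, not necessarily the fix searx ships):
```
# Stand-alone illustration, not searx code: on Python 3 the engine receives
# the query as str, so .decode('utf-8') raises AttributeError.
from shlex import split as shlex_split

query = "art bell mp3"                    # what the engine actually gets (str)
try:
    shlex_split(query.decode("utf-8"))    # reproduces the crash above
except AttributeError as exc:
    print(exc)                            # 'str' object has no attribute 'decode'

# tolerant handling: only decode when bytes were actually passed in
if isinstance(query, bytes):
    query = query.decode("utf-8")
print(shlex_split(query))                 # ['art', 'bell', 'mp3']
```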
**Additional context**
Searx is being run as a system service, through systemd, with a searx.sh shell script:
```
#!/usr/bin/env bash
SEARX=/opt/searx/searx
# Change to the Searx installation directory.
cd $SEARX
# Initialize the Python virtual environment.
. env/bin/activate
# Start up Searx.
#python searx/webapp.py
uwsgi --ini searx.ini
```
Searx is being run with uwsgi to improve responsiveness.
Other searches on this instance are not impacted in this manner.
| [
{
"content": "'''\nsearx is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nsearx is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with searx. If not, see < http://www.gnu.org/licenses/ >.\n'''\n\n\nimport re\nfrom os.path import expanduser, isabs, realpath, commonprefix\nfrom shlex import split as shlex_split\nfrom subprocess import Popen, PIPE\nfrom threading import Thread\n\nfrom searx import logger\n\n\noffline = True\npaging = True\ncommand = []\ndelimiter = {}\nparse_regex = {}\nquery_type = ''\nquery_enum = []\nenvironment_variables = {}\nworking_dir = realpath('.')\nresult_separator = '\\n'\nresult_template = 'key-value.html'\ntimeout = 4.0\n\n_command_logger = logger.getChild('command')\n_compiled_parse_regex = {}\n\n\ndef init(engine_settings):\n check_parsing_options(engine_settings)\n\n if 'command' not in engine_settings:\n raise ValueError('engine command : missing configuration key: command')\n\n global command, working_dir, result_template, delimiter, parse_regex, timeout, environment_variables\n\n command = engine_settings['command']\n\n if 'working_dir' in engine_settings:\n working_dir = engine_settings['working_dir']\n if not isabs(engine_settings['working_dir']):\n working_dir = realpath(working_dir)\n\n if 'parse_regex' in engine_settings:\n parse_regex = engine_settings['parse_regex']\n for result_key, regex in parse_regex.items():\n _compiled_parse_regex[result_key] = re.compile(regex, flags=re.MULTILINE)\n if 'delimiter' in engine_settings:\n delimiter = engine_settings['delimiter']\n\n if 'environment_variables' in engine_settings:\n environment_variables = engine_settings['environment_variables']\n\n\ndef search(query, params):\n cmd = _get_command_to_run(query)\n if not cmd:\n return []\n\n results = []\n reader_thread = Thread(target=_get_results_from_process, args=(results, cmd, params['pageno']))\n reader_thread.start()\n reader_thread.join(timeout=timeout)\n\n return results\n\n\ndef _get_command_to_run(query):\n params = shlex_split(query.decode('utf-8'))\n __check_query_params(params)\n\n cmd = []\n for c in command:\n if c == '{{QUERY}}':\n cmd.extend(params)\n else:\n cmd.append(c)\n\n return cmd\n\n\ndef _get_results_from_process(results, cmd, pageno):\n leftover = ''\n count = 0\n start, end = __get_results_limits(pageno)\n with Popen(cmd, stdout=PIPE, stderr=PIPE, env=environment_variables) as process:\n line = process.stdout.readline()\n while line:\n buf = leftover + line.decode('utf-8')\n raw_results = buf.split(result_separator)\n if raw_results[-1]:\n leftover = raw_results[-1]\n raw_results = raw_results[:-1]\n\n for raw_result in raw_results:\n result = __parse_single_result(raw_result)\n if result is None:\n _command_logger.debug('skipped result:', raw_result)\n continue\n\n if start <= count and count <= end:\n result['template'] = result_template\n results.append(result)\n\n count += 1\n if end < count:\n return results\n\n line = process.stdout.readline()\n\n return_code = process.wait(timeout=timeout)\n if return_code != 0:\n raise RuntimeError('non-zero return code when running command', cmd, 
return_code)\n\n\ndef __get_results_limits(pageno):\n start = (pageno - 1) * 10\n end = start + 9\n return start, end\n\n\ndef __check_query_params(params):\n if not query_type:\n return\n\n if query_type == 'path':\n query_path = params[-1]\n query_path = expanduser(query_path)\n if commonprefix([realpath(query_path), working_dir]) != working_dir:\n raise ValueError('requested path is outside of configured working directory')\n elif query_type == 'enum' and len(query_enum) > 0:\n for param in params:\n if param not in query_enum:\n raise ValueError('submitted query params is not allowed', param, 'allowed params:', query_enum)\n\n\ndef check_parsing_options(engine_settings):\n \"\"\" Checks if delimiter based parsing or regex parsing is configured correctly \"\"\"\n\n if 'delimiter' not in engine_settings and 'parse_regex' not in engine_settings:\n raise ValueError('failed to init settings for parsing lines: missing delimiter or parse_regex')\n if 'delimiter' in engine_settings and 'parse_regex' in engine_settings:\n raise ValueError('failed to init settings for parsing lines: too many settings')\n\n if 'delimiter' in engine_settings:\n if 'chars' not in engine_settings['delimiter'] or 'keys' not in engine_settings['delimiter']:\n raise ValueError\n\n\ndef __parse_single_result(raw_result):\n \"\"\" Parses command line output based on configuration \"\"\"\n\n result = {}\n\n if delimiter:\n elements = raw_result.split(delimiter['chars'], maxsplit=len(delimiter['keys']) - 1)\n if len(elements) != len(delimiter['keys']):\n return {}\n for i in range(len(elements)):\n result[delimiter['keys'][i]] = elements[i]\n\n if parse_regex:\n for result_key, regex in _compiled_parse_regex.items():\n found = regex.search(raw_result)\n if not found:\n return {}\n result[result_key] = raw_result[found.start():found.end()]\n\n return result\n",
"path": "searx/engines/command.py"
}
] | [
{
"content": "'''\nsearx is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nsearx is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with searx. If not, see < http://www.gnu.org/licenses/ >.\n'''\n\n\nimport re\nfrom os.path import expanduser, isabs, realpath, commonprefix\nfrom shlex import split as shlex_split\nfrom subprocess import Popen, PIPE\nfrom threading import Thread\n\nfrom searx import logger\n\n\noffline = True\npaging = True\ncommand = []\ndelimiter = {}\nparse_regex = {}\nquery_type = ''\nquery_enum = []\nenvironment_variables = {}\nworking_dir = realpath('.')\nresult_separator = '\\n'\nresult_template = 'key-value.html'\ntimeout = 4.0\n\n_command_logger = logger.getChild('command')\n_compiled_parse_regex = {}\n\n\ndef init(engine_settings):\n check_parsing_options(engine_settings)\n\n if 'command' not in engine_settings:\n raise ValueError('engine command : missing configuration key: command')\n\n global command, working_dir, result_template, delimiter, parse_regex, timeout, environment_variables\n\n command = engine_settings['command']\n\n if 'working_dir' in engine_settings:\n working_dir = engine_settings['working_dir']\n if not isabs(engine_settings['working_dir']):\n working_dir = realpath(working_dir)\n\n if 'parse_regex' in engine_settings:\n parse_regex = engine_settings['parse_regex']\n for result_key, regex in parse_regex.items():\n _compiled_parse_regex[result_key] = re.compile(regex, flags=re.MULTILINE)\n if 'delimiter' in engine_settings:\n delimiter = engine_settings['delimiter']\n\n if 'environment_variables' in engine_settings:\n environment_variables = engine_settings['environment_variables']\n\n\ndef search(query, params):\n cmd = _get_command_to_run(query)\n if not cmd:\n return []\n\n results = []\n reader_thread = Thread(target=_get_results_from_process, args=(results, cmd, params['pageno']))\n reader_thread.start()\n reader_thread.join(timeout=timeout)\n\n return results\n\n\ndef _get_command_to_run(query):\n params = shlex_split(query)\n __check_query_params(params)\n\n cmd = []\n for c in command:\n if c == '{{QUERY}}':\n cmd.extend(params)\n else:\n cmd.append(c)\n\n return cmd\n\n\ndef _get_results_from_process(results, cmd, pageno):\n leftover = ''\n count = 0\n start, end = __get_results_limits(pageno)\n with Popen(cmd, stdout=PIPE, stderr=PIPE, env=environment_variables) as process:\n line = process.stdout.readline()\n while line:\n buf = leftover + line.decode('utf-8')\n raw_results = buf.split(result_separator)\n if raw_results[-1]:\n leftover = raw_results[-1]\n raw_results = raw_results[:-1]\n\n for raw_result in raw_results:\n result = __parse_single_result(raw_result)\n if result is None:\n _command_logger.debug('skipped result:', raw_result)\n continue\n\n if start <= count and count <= end:\n result['template'] = result_template\n results.append(result)\n\n count += 1\n if end < count:\n return results\n\n line = process.stdout.readline()\n\n return_code = process.wait(timeout=timeout)\n if return_code != 0:\n raise RuntimeError('non-zero return code when running command', cmd, 
return_code)\n\n\ndef __get_results_limits(pageno):\n start = (pageno - 1) * 10\n end = start + 9\n return start, end\n\n\ndef __check_query_params(params):\n if not query_type:\n return\n\n if query_type == 'path':\n query_path = params[-1]\n query_path = expanduser(query_path)\n if commonprefix([realpath(query_path), working_dir]) != working_dir:\n raise ValueError('requested path is outside of configured working directory')\n elif query_type == 'enum' and len(query_enum) > 0:\n for param in params:\n if param not in query_enum:\n raise ValueError('submitted query params is not allowed', param, 'allowed params:', query_enum)\n\n\ndef check_parsing_options(engine_settings):\n \"\"\" Checks if delimiter based parsing or regex parsing is configured correctly \"\"\"\n\n if 'delimiter' not in engine_settings and 'parse_regex' not in engine_settings:\n raise ValueError('failed to init settings for parsing lines: missing delimiter or parse_regex')\n if 'delimiter' in engine_settings and 'parse_regex' in engine_settings:\n raise ValueError('failed to init settings for parsing lines: too many settings')\n\n if 'delimiter' in engine_settings:\n if 'chars' not in engine_settings['delimiter'] or 'keys' not in engine_settings['delimiter']:\n raise ValueError\n\n\ndef __parse_single_result(raw_result):\n \"\"\" Parses command line output based on configuration \"\"\"\n\n result = {}\n\n if delimiter:\n elements = raw_result.split(delimiter['chars'], maxsplit=len(delimiter['keys']) - 1)\n if len(elements) != len(delimiter['keys']):\n return {}\n for i in range(len(elements)):\n result[delimiter['keys'][i]] = elements[i]\n\n if parse_regex:\n for result_key, regex in _compiled_parse_regex.items():\n found = regex.search(raw_result)\n if not found:\n return {}\n result[result_key] = raw_result[found.start():found.end()]\n\n return result\n",
"path": "searx/engines/command.py"
}
] | diff --git a/searx/engines/command.py b/searx/engines/command.py
index 08ee5da06a..0268d52eb7 100644
--- a/searx/engines/command.py
+++ b/searx/engines/command.py
@@ -80,7 +80,7 @@ def search(query, params):
def _get_command_to_run(query):
- params = shlex_split(query.decode('utf-8'))
+ params = shlex_split(query)
__check_query_params(params)
cmd = []
|
googleapis__google-cloud-python-1865 | Read the Docs build failing
https://readthedocs.org/projects/gcloud-python/builds/4108022/
https://readthedocs.org/projects/gcloud-python/builds/4108027/
| [
{
"content": "import os\nimport sys\n\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\nwith open(os.path.join(here, 'README.rst')) as f:\n README = f.read()\n\n\nREQUIREMENTS = [\n 'httplib2 >= 0.9.1',\n 'googleapis-common-protos',\n 'oauth2client >= 2.0.1',\n 'protobuf >= 3.0.0b2, != 3.0.0.b2.post1',\n 'six',\n]\n\nGRPC_EXTRAS = [\n 'grpcio >= 0.14.0',\n 'google-gax >= 0.12.0',\n 'gax-google-pubsub-v1',\n]\n\nif sys.version_info[:2] == (2, 7):\n REQUIREMENTS.extend(GRPC_EXTRAS)\n\nsetup(\n name='gcloud',\n version='0.15.0',\n description='API Client library for Google Cloud',\n author='Google Cloud Platform',\n author_email='[email protected]',\n long_description=README,\n scripts=[],\n url='https://github.com/GoogleCloudPlatform/gcloud-python',\n packages=find_packages(),\n license='Apache 2.0',\n platforms='Posix; MacOS X; Windows',\n include_package_data=True,\n zip_safe=False,\n install_requires=REQUIREMENTS,\n extras_require={'grpc': GRPC_EXTRAS},\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Internet',\n ]\n)\n",
"path": "setup.py"
}
] | [
{
"content": "import os\nimport sys\n\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\nwith open(os.path.join(here, 'README.rst')) as f:\n README = f.read()\n\n\nREQUIREMENTS = [\n 'httplib2 >= 0.9.1',\n 'googleapis-common-protos',\n 'oauth2client >= 2.0.1',\n 'protobuf >= 3.0.0b2, != 3.0.0.b2.post1',\n 'six',\n]\n\nGRPC_EXTRAS = [\n 'grpcio >= 0.14.0',\n 'google-gax >= 0.11.0',\n 'gax-google-pubsub-v1',\n]\n\nif sys.version_info[:2] == (2, 7) and 'READTHEDOCS' not in os.environ:\n REQUIREMENTS.extend(GRPC_EXTRAS)\n\nsetup(\n name='gcloud',\n version='0.15.0',\n description='API Client library for Google Cloud',\n author='Google Cloud Platform',\n author_email='[email protected]',\n long_description=README,\n scripts=[],\n url='https://github.com/GoogleCloudPlatform/gcloud-python',\n packages=find_packages(),\n license='Apache 2.0',\n platforms='Posix; MacOS X; Windows',\n include_package_data=True,\n zip_safe=False,\n install_requires=REQUIREMENTS,\n extras_require={'grpc': GRPC_EXTRAS},\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Internet',\n ]\n)\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index 977dd551523d..6757414aa0b0 100644
--- a/setup.py
+++ b/setup.py
@@ -25,7 +25,7 @@
'gax-google-pubsub-v1',
]
-if sys.version_info[:2] == (2, 7):
+if sys.version_info[:2] == (2, 7) and 'READTHEDOCS' not in os.environ:
REQUIREMENTS.extend(GRPC_EXTRAS)
setup(
|
pyro-ppl__pyro-3164 | PyTorch 2.0 compatibility: Explicit PyTorch 1.x check causing issues with packages that depend on PyTorch / pyro (e.g. BoTorch)
### Issue Description
The explicit check for PyTorch 1.x here (https://github.com/pyro-ppl/pyro/blob/dev/pyro/distributions/torch_patch.py#L10) is causing problems when another package has a dependency on PyTorch + Pyro, since PyTorch is now at 2.0. For example, it is causing BoTorch tests to fail here (https://github.com/pytorch/botorch/pull/1551).
Could this check be removed to allow for PyTorch 2.0?
### Environment
Mac OS 11.7.1
Python 3.10
PyTorch 2.0
Pyro 1.8.3
### Code Snippet
https://github.com/pytorch/botorch/actions/runs/3659534850/jobs/6185642011
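For illustration, a minimal sketch of how the hard `assert torch.__version__.startswith("1.")` could be relaxed so that importing under PyTorch 2.x no longer fails. This is only one possible approach under the assumption that a warning is acceptable; it is not necessarily the fix adopted upstream:
```python
# Hypothetical relaxation of the strict version assert (illustration only,
# not the actual Pyro patch).
import warnings

import torch

# "2.0.0+cu117" -> 2, "1.13.1" -> 1
major_version = int(torch.__version__.split("+")[0].split(".")[0])
if major_version < 1:
    warnings.warn(
        "torch_patch expects PyTorch >= 1.0, found %s" % torch.__version__
    )
```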
| [
{
"content": "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport functools\nimport math\nimport weakref\n\nimport torch\n\nassert torch.__version__.startswith(\"1.\")\n\n\ndef patch_dependency(target, root_module=torch):\n parts = target.split(\".\")\n assert parts[0] == root_module.__name__\n module = root_module\n for part in parts[1:-1]:\n module = getattr(module, part)\n name = parts[-1]\n old_fn = getattr(module, name, None)\n old_fn = getattr(old_fn, \"_pyro_unpatched\", old_fn) # ensure patching is idempotent\n\n def decorator(new_fn):\n try:\n functools.update_wrapper(new_fn, old_fn)\n except Exception:\n for attr in functools.WRAPPER_ASSIGNMENTS:\n if hasattr(old_fn, attr):\n setattr(new_fn, attr, getattr(old_fn, attr))\n new_fn._pyro_unpatched = old_fn\n setattr(module, name, new_fn)\n return new_fn\n\n return decorator\n\n\n# TODO: Move upstream to allow for pickle serialization of transforms\n@patch_dependency(\"torch.distributions.transforms.Transform.__getstate__\")\ndef _Transform__getstate__(self):\n attrs = {}\n for k, v in self.__dict__.items():\n if isinstance(v, weakref.ref):\n attrs[k] = None\n else:\n attrs[k] = v\n return attrs\n\n\n# TODO move upstream\n@patch_dependency(\"torch.distributions.transforms.Transform.clear_cache\")\ndef _Transform_clear_cache(self):\n if self._cache_size == 1:\n self._cached_x_y = None, None\n\n\n# TODO move upstream\n@patch_dependency(\"torch.distributions.TransformedDistribution.clear_cache\")\ndef _TransformedDistribution_clear_cache(self):\n for t in self.transforms:\n t.clear_cache()\n\n\n# TODO fix https://github.com/pytorch/pytorch/issues/48054 upstream\n@patch_dependency(\"torch.distributions.HalfCauchy.log_prob\")\ndef _HalfCauchy_logprob(self, value):\n if self._validate_args:\n self._validate_sample(value)\n value = torch.as_tensor(\n value, dtype=self.base_dist.scale.dtype, device=self.base_dist.scale.device\n )\n log_prob = self.base_dist.log_prob(value) + math.log(2)\n log_prob.masked_fill_(value.expand(log_prob.shape) < 0, -float(\"inf\"))\n return log_prob\n\n\n# TODO fix batch_shape have an extra singleton dimension upstream\n@patch_dependency(\"torch.distributions.constraints._PositiveDefinite.check\")\ndef _PositiveDefinite_check(self, value):\n matrix_shape = value.shape[-2:]\n batch_shape = value.shape[:-2]\n flattened_value = value.reshape((-1,) + matrix_shape)\n return torch.stack(\n [torch.linalg.eigvalsh(v)[:1] > 0.0 for v in flattened_value]\n ).view(batch_shape)\n\n\n@patch_dependency(\"torch.distributions.constraints._CorrCholesky.check\")\ndef _CorrCholesky_check(self, value):\n row_norm = torch.linalg.norm(value.detach(), dim=-1)\n unit_row_norm = (row_norm - 1.0).abs().le(1e-4).all(dim=-1)\n return torch.distributions.constraints.lower_cholesky.check(value) & unit_row_norm\n\n\n# This adds a __call__ method to satisfy sphinx.\n@patch_dependency(\"torch.distributions.utils.lazy_property.__call__\")\ndef _lazy_property__call__(self):\n raise NotImplementedError\n\n\n__all__ = []\n",
"path": "pyro/distributions/torch_patch.py"
}
] | [
{
"content": "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport functools\nimport math\nimport weakref\n\nimport torch\n\n\ndef patch_dependency(target, root_module=torch):\n parts = target.split(\".\")\n assert parts[0] == root_module.__name__\n module = root_module\n for part in parts[1:-1]:\n module = getattr(module, part)\n name = parts[-1]\n old_fn = getattr(module, name, None)\n old_fn = getattr(old_fn, \"_pyro_unpatched\", old_fn) # ensure patching is idempotent\n\n def decorator(new_fn):\n try:\n functools.update_wrapper(new_fn, old_fn)\n except Exception:\n for attr in functools.WRAPPER_ASSIGNMENTS:\n if hasattr(old_fn, attr):\n setattr(new_fn, attr, getattr(old_fn, attr))\n new_fn._pyro_unpatched = old_fn\n setattr(module, name, new_fn)\n return new_fn\n\n return decorator\n\n\n# TODO: Move upstream to allow for pickle serialization of transforms\n@patch_dependency(\"torch.distributions.transforms.Transform.__getstate__\")\ndef _Transform__getstate__(self):\n attrs = {}\n for k, v in self.__dict__.items():\n if isinstance(v, weakref.ref):\n attrs[k] = None\n else:\n attrs[k] = v\n return attrs\n\n\n# TODO move upstream\n@patch_dependency(\"torch.distributions.transforms.Transform.clear_cache\")\ndef _Transform_clear_cache(self):\n if self._cache_size == 1:\n self._cached_x_y = None, None\n\n\n# TODO move upstream\n@patch_dependency(\"torch.distributions.TransformedDistribution.clear_cache\")\ndef _TransformedDistribution_clear_cache(self):\n for t in self.transforms:\n t.clear_cache()\n\n\n# TODO fix https://github.com/pytorch/pytorch/issues/48054 upstream\n@patch_dependency(\"torch.distributions.HalfCauchy.log_prob\")\ndef _HalfCauchy_logprob(self, value):\n if self._validate_args:\n self._validate_sample(value)\n value = torch.as_tensor(\n value, dtype=self.base_dist.scale.dtype, device=self.base_dist.scale.device\n )\n log_prob = self.base_dist.log_prob(value) + math.log(2)\n log_prob.masked_fill_(value.expand(log_prob.shape) < 0, -float(\"inf\"))\n return log_prob\n\n\n# TODO fix batch_shape have an extra singleton dimension upstream\n@patch_dependency(\"torch.distributions.constraints._PositiveDefinite.check\")\ndef _PositiveDefinite_check(self, value):\n matrix_shape = value.shape[-2:]\n batch_shape = value.shape[:-2]\n flattened_value = value.reshape((-1,) + matrix_shape)\n return torch.stack(\n [torch.linalg.eigvalsh(v)[:1] > 0.0 for v in flattened_value]\n ).view(batch_shape)\n\n\n@patch_dependency(\"torch.distributions.constraints._CorrCholesky.check\")\ndef _CorrCholesky_check(self, value):\n row_norm = torch.linalg.norm(value.detach(), dim=-1)\n unit_row_norm = (row_norm - 1.0).abs().le(1e-4).all(dim=-1)\n return torch.distributions.constraints.lower_cholesky.check(value) & unit_row_norm\n\n\n# This adds a __call__ method to satisfy sphinx.\n@patch_dependency(\"torch.distributions.utils.lazy_property.__call__\")\ndef _lazy_property__call__(self):\n raise NotImplementedError\n\n\n__all__ = []\n",
"path": "pyro/distributions/torch_patch.py"
}
] | diff --git a/pyro/distributions/torch_patch.py b/pyro/distributions/torch_patch.py
index 55d98f6650..cad559b3cf 100644
--- a/pyro/distributions/torch_patch.py
+++ b/pyro/distributions/torch_patch.py
@@ -7,8 +7,6 @@
import torch
-assert torch.__version__.startswith("1.")
-
def patch_dependency(target, root_module=torch):
parts = target.split(".")
|
facebookresearch__nevergrad-705 | Wrong dates in changelog
All the dates at https://github.com/facebookresearch/nevergrad/blob/master/CHANGELOG.md show 2019, but it seems they should be 2020.
| [
{
"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nimport os\nimport sys\nsys.path.insert(0, os.path.abspath('..'))\n\n\n# -- Project information -----------------------------------------------------\n\nproject = 'nevergrad'\ncopyright = '2019, Facebook AI Research' # pylint: disable=redefined-builtin\nauthor = 'Facebook AI Research'\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['sphinx.ext.autodoc',\n 'sphinx.ext.githubpages',\n 'sphinx.ext.coverage',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.autosummary',\n 'recommonmark',\n ]\n\nsource_suffix = {\n '.rst': 'restructuredtext',\n '.txt': 'markdown',\n '.md': 'markdown',\n}\n\nmaster_doc = 'index'\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = []\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = []\n\n# -- Other --\nlinkcheck_ignore = [r'https://gecco-2020.sigevo.org/*']\n",
"path": "docs/conf.py"
}
] | [
{
"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nimport os\nimport sys\nsys.path.insert(0, os.path.abspath('..'))\n\n\n# -- Project information -----------------------------------------------------\n\nproject = 'nevergrad'\ncopyright = '2019, Facebook AI Research' # pylint: disable=redefined-builtin\nauthor = 'Facebook AI Research'\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['sphinx.ext.autodoc',\n 'sphinx.ext.githubpages',\n 'sphinx.ext.coverage',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.autosummary',\n 'recommonmark',\n ]\n\nsource_suffix = {\n '.rst': 'restructuredtext',\n '.txt': 'markdown',\n '.md': 'markdown',\n}\n\nmaster_doc = 'index'\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = []\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = []\n\n# -- Other --\nlinkcheck_ignore = [r'https://gecco-2020.sigevo.org/*',\n r'https://arxiv.org/abs/*'] # Transient certificate error :(\n",
"path": "docs/conf.py"
}
] | diff --git a/CHANGELOG.md b/CHANGELOG.md
index a99352abc..c178866e0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,7 +11,7 @@
and [#691](https://github.com/facebookresearch/nevergrad/pull/691).
-## 0.4.1 (2019-05-07)
+## 0.4.1 (2020-05-07)
- `Archive` now stores the best corresponding candidate. This requires twice the memory compared to before the change. [#594](https://github.com/facebookresearch/nevergrad/pull/594)
- `Parameter` now holds a `loss: Optional[float]` attribute which is set and used by optimizers after the `tell` method.
@@ -23,7 +23,7 @@
- Started implementing more ML-oriented testbeds [#642](https://github.com/facebookresearch/nevergrad/pull/642)
-## v0.4.0 (2019-03-09)
+## v0.4.0 (2020-03-09)
### Breaking and important changes
@@ -54,7 +54,7 @@
if is automatically set to a sensible default [#536](https://github.com/facebookresearch/nevergrad/pull/536).
-## v0.3.2 (2019-02-05)
+## v0.3.2 (2020-02-05)
### Breaking changes (possibly for next version)
@@ -80,7 +80,7 @@
- `DE` algorithms comply with the new parametrization system and can be set to use parameter's recombination.
- Fixed array as bounds in `Array` parameters
-## v0.3.1 (2019-01-23)
+## v0.3.1 (2020-01-23)
**Note**: this is the first step to propagate the instrumentation/parametrization framework.
Learn more on the [Facebook user group](https://www.facebook.com/notes/nevergrad-users/moving-to-new-parametrization-upcoming-unstability-and-breaking-changes/639090766861215/).
@@ -100,7 +100,7 @@
- `PSO` now uses initialization by sampling the parametrization, instead of sampling all the real space. A new `WidePSO`
optimizer was created, using the previous initial sampling method [#467](https://github.com/facebookresearch/nevergrad/pull/467).
-## v0.3.0 (2019-01-08)
+## v0.3.0 (2020-01-08)
**Note**: this version is stable, but the following versions will include breaking changes which may cause instability. The aim of this changes will be to update the instrumentation system for more flexibility. See PR #323 and [Fb user group](https://www.facebook.com/groups/nevergradusers/) for more information.
diff --git a/docs/conf.py b/docs/conf.py
index 75f0bc26b..6f0fc8257 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -70,4 +70,5 @@
html_static_path = []
# -- Other --
-linkcheck_ignore = [r'https://gecco-2020.sigevo.org/*']
+linkcheck_ignore = [r'https://gecco-2020.sigevo.org/*',
+ r'https://arxiv.org/abs/*'] # Transient certificate error :(
|
Kinto__kinto-981 | JSON Merge Patch deserialization is broken
Merge-patch operations are broken. It looks like a deserialization problem, which may have been introduced with #790. Also, we should definitely include some view tests for this content-type. My bad :/
```
echo '{"data": {"aaa": "bbb"}}' | http patch localhost:8888/v1/buckets/b1 Content-Type:application/merge-patch+json -a aaa:
HTTP/1.1 400 Bad Request
Access-Control-Expose-Headers: Retry-After, Content-Length, Alert, Backoff
Content-Length: 331
Content-Type: application/json; charset=UTF-8
Date: Sun, 11 Dec 2016 23:47:26 GMT
Server: waitress
{
"code": 400,
"details": [
{
"description": "\"{\"data\": {\"aaa\": \"bbb\"}}\n\" is not a mapping type: Does not implement dict-like functionality.",
"location": "body",
"name": ""
}
],
"errno": 107,
"error": "Invalid parameters",
"message": "\"{\"data\": {\"aaa\": \"bbb\"}}\n\" is not a mapping type: Does not implement dict-like functionality."
}
```
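For reference, this is the JSON Merge Patch (RFC 7396) behaviour the endpoint is expected to apply to the stored record. The sketch below is a generic illustration of those semantics in Python, independent of Kinto's actual deserialization code:
```python
# Minimal RFC 7396 merge-patch sketch (illustration only, not Kinto's code).
def merge_patch(target, patch):
    if not isinstance(patch, dict):
        # A non-object patch replaces the target entirely.
        return patch
    if not isinstance(target, dict):
        target = {}
    result = dict(target)
    for key, value in patch.items():
        if value is None:
            # null removes the key from the target.
            result.pop(key, None)
        else:
            result[key] = merge_patch(result.get(key), value)
    return result

# Expected effect of the request above:
# merge_patch({"data": {}}, {"data": {"aaa": "bbb"}}) == {"data": {"aaa": "bbb"}}
```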
| [
{
"content": "import platform\nimport codecs\nimport os\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read_file(filename):\n \"\"\"Open a related file and return its content.\"\"\"\n with codecs.open(os.path.join(here, filename), encoding='utf-8') as f:\n content = f.read()\n return content\n\nREADME = read_file('README.rst')\nCHANGELOG = read_file('CHANGELOG.rst')\nCONTRIBUTORS = read_file('CONTRIBUTORS.rst')\n\ninstalled_with_pypy = platform.python_implementation() == 'PyPy'\n\nREQUIREMENTS = [\n 'colander',\n 'colorama',\n 'cornice >= 2.1',\n 'jsonschema',\n 'jsonpatch',\n 'python-dateutil',\n 'pyramid_multiauth >= 0.8', # User on policy selected event.\n 'transaction',\n 'pyramid_tm',\n 'requests',\n 'six',\n 'structlog >= 16.1.0',\n 'enum34',\n 'waitress',\n]\n\nif installed_with_pypy:\n # We install psycopg2cffi instead of psycopg2 when dealing with pypy\n # Note: JSONB support landed after psycopg2cffi 2.7.0\n POSTGRESQL_REQUIRES = [\n 'SQLAlchemy',\n 'psycopg2cffi>2.7.0',\n 'zope.sqlalchemy',\n ]\nelse:\n # ujson is not pypy compliant, as it uses the CPython C API\n REQUIREMENTS.append('ujson >= 1.35')\n POSTGRESQL_REQUIRES = [\n 'SQLAlchemy',\n 'psycopg2>2.5',\n 'zope.sqlalchemy',\n ]\n\nREDIS_REQUIRES = [\n 'kinto_redis'\n]\n\nSETUP_REQUIRES = [\n 'pytest-runner'\n]\n\nTEST_REQUIREMENTS = [\n 'pytest',\n 'WebTest'\n]\n\nDEPENDENCY_LINKS = [\n]\n\nMONITORING_REQUIRES = [\n 'raven',\n 'statsd',\n 'newrelic',\n 'werkzeug',\n]\n\nENTRY_POINTS = {\n 'paste.app_factory': [\n 'main = kinto:main',\n ],\n 'console_scripts': [\n 'kinto = kinto.__main__:main'\n ],\n}\n\n\nsetup(name='kinto',\n version='5.0.1.dev0',\n description='Kinto Web Service - Store, Sync, Share, and Self-Host.',\n long_description=README + \"\\n\\n\" + CHANGELOG + \"\\n\\n\" + CONTRIBUTORS,\n license='Apache License (2.0)',\n classifiers=[\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n \"License :: OSI Approved :: Apache Software License\"\n ],\n keywords=\"web sync json storage services\",\n author='Mozilla Services',\n author_email='[email protected]',\n url='https://github.com/Kinto/kinto',\n packages=find_packages(),\n package_data={'': ['*.rst', '*.py']},\n include_package_data=True,\n zip_safe=False,\n setup_requires=SETUP_REQUIRES,\n tests_require=TEST_REQUIREMENTS,\n install_requires=REQUIREMENTS,\n extras_require={\n 'redis': REDIS_REQUIRES,\n 'postgresql': POSTGRESQL_REQUIRES,\n 'monitoring': MONITORING_REQUIRES,\n \":python_version=='2.7'\": [\"functools32\", \"futures\"],\n },\n test_suite=\"tests\",\n dependency_links=DEPENDENCY_LINKS,\n entry_points=ENTRY_POINTS)\n",
"path": "setup.py"
}
] | [
{
"content": "import platform\nimport codecs\nimport os\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read_file(filename):\n \"\"\"Open a related file and return its content.\"\"\"\n with codecs.open(os.path.join(here, filename), encoding='utf-8') as f:\n content = f.read()\n return content\n\nREADME = read_file('README.rst')\nCHANGELOG = read_file('CHANGELOG.rst')\nCONTRIBUTORS = read_file('CONTRIBUTORS.rst')\n\ninstalled_with_pypy = platform.python_implementation() == 'PyPy'\n\nREQUIREMENTS = [\n 'colander',\n 'colorama',\n 'cornice >= 2.3',\n 'jsonschema',\n 'jsonpatch',\n 'python-dateutil',\n 'pyramid_multiauth >= 0.8', # User on policy selected event.\n 'transaction',\n 'pyramid_tm',\n 'requests',\n 'six',\n 'structlog >= 16.1.0',\n 'enum34',\n 'waitress',\n]\n\nif installed_with_pypy:\n # We install psycopg2cffi instead of psycopg2 when dealing with pypy\n # Note: JSONB support landed after psycopg2cffi 2.7.0\n POSTGRESQL_REQUIRES = [\n 'SQLAlchemy',\n 'psycopg2cffi>2.7.0',\n 'zope.sqlalchemy',\n ]\nelse:\n # ujson is not pypy compliant, as it uses the CPython C API\n REQUIREMENTS.append('ujson >= 1.35')\n POSTGRESQL_REQUIRES = [\n 'SQLAlchemy',\n 'psycopg2>2.5',\n 'zope.sqlalchemy',\n ]\n\nREDIS_REQUIRES = [\n 'kinto_redis'\n]\n\nSETUP_REQUIRES = [\n 'pytest-runner'\n]\n\nTEST_REQUIREMENTS = [\n 'pytest',\n 'WebTest'\n]\n\nDEPENDENCY_LINKS = [\n]\n\nMONITORING_REQUIRES = [\n 'raven',\n 'statsd',\n 'newrelic',\n 'werkzeug',\n]\n\nENTRY_POINTS = {\n 'paste.app_factory': [\n 'main = kinto:main',\n ],\n 'console_scripts': [\n 'kinto = kinto.__main__:main'\n ],\n}\n\n\nsetup(name='kinto',\n version='5.0.1.dev0',\n description='Kinto Web Service - Store, Sync, Share, and Self-Host.',\n long_description=README + \"\\n\\n\" + CHANGELOG + \"\\n\\n\" + CONTRIBUTORS,\n license='Apache License (2.0)',\n classifiers=[\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n \"License :: OSI Approved :: Apache Software License\"\n ],\n keywords=\"web sync json storage services\",\n author='Mozilla Services',\n author_email='[email protected]',\n url='https://github.com/Kinto/kinto',\n packages=find_packages(),\n package_data={'': ['*.rst', '*.py']},\n include_package_data=True,\n zip_safe=False,\n setup_requires=SETUP_REQUIRES,\n tests_require=TEST_REQUIREMENTS,\n install_requires=REQUIREMENTS,\n extras_require={\n 'redis': REDIS_REQUIRES,\n 'postgresql': POSTGRESQL_REQUIRES,\n 'monitoring': MONITORING_REQUIRES,\n \":python_version=='2.7'\": [\"functools32\", \"futures\"],\n },\n test_suite=\"tests\",\n dependency_links=DEPENDENCY_LINKS,\n entry_points=ENTRY_POINTS)\n",
"path": "setup.py"
}
] | diff --git a/requirements.txt b/requirements.txt
index 0b8b3bff7..aea371fa8 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,7 +1,7 @@
colander==1.3.1
colorama==0.3.7
contextlib2==0.5.4
-cornice==2.1.0
+cornice==2.3.0
enum34==1.1.6
functools32==3.2.3.post2
futures==3.0.5
diff --git a/setup.py b/setup.py
index e23f4d793..3f114a915 100644
--- a/setup.py
+++ b/setup.py
@@ -21,7 +21,7 @@ def read_file(filename):
REQUIREMENTS = [
'colander',
'colorama',
- 'cornice >= 2.1',
+ 'cornice >= 2.3',
'jsonschema',
'jsonpatch',
'python-dateutil',
diff --git a/tests/core/resource/test_record.py b/tests/core/resource/test_record.py
index b57762ec6..a427ab993 100644
--- a/tests/core/resource/test_record.py
+++ b/tests/core/resource/test_record.py
@@ -228,9 +228,67 @@ def test_patch_record_returns_updated_fields(self):
self.assertEquals(self.stored['id'], self.result['id'])
self.assertEquals(self.result['position'], 10)
+ def test_record_timestamp_is_not_updated_if_none_for_missing_field(self):
+ self.resource.request.json = {'data': {'polo': None}}
+ result = self.resource.patch()['data']
+ self.assertEquals(self.result['last_modified'],
+ result['last_modified'])
+
+ def test_record_timestamp_is_not_updated_if_no_field_changed(self):
+ self.resource.request.json = {'data': {'position': 10}}
+ result = self.resource.patch()['data']
+ self.assertEquals(self.result['last_modified'],
+ result['last_modified'])
+
+ def test_collection_timestamp_is_not_updated_if_no_field_changed(self):
+ self.resource.request.json = {'data': {'position': 10}}
+ self.resource.patch()
+ self.resource = self.resource_class(request=self.get_request(),
+ context=self.get_context())
+ self.resource.collection_get()['data']
+ last_modified = int(self.last_response.headers['ETag'][1:-1])
+ self.assertEquals(self.result['last_modified'], last_modified)
+
+ def test_timestamp_is_not_updated_if_no_change_after_preprocessed(self):
+ with mock.patch.object(self.resource, 'process_record') as mocked:
+ mocked.return_value = self.result
+ self.resource.request.json = {'data': {'position': 20}}
+ result = self.resource.patch()['data']
+ self.assertEquals(self.result['last_modified'],
+ result['last_modified'])
+
+ def test_returns_changed_fields_among_provided_if_behaviour_is_diff(self):
+ self.resource.request.json = {'data': {'unread': True, 'position': 15}}
+ self.resource.request.headers['Response-Behavior'] = 'diff'
+ with mock.patch.object(self.resource.model, 'update_record',
+ return_value={'unread': True, 'position': 0}):
+ result = self.resource.patch()['data']
+ self.assertDictEqual(result, {'position': 0})
+
+ def test_returns_changed_fields_if_behaviour_is_light(self):
+ self.resource.request.json = {'data': {'unread': True, 'position': 15}}
+ self.resource.request.headers['Response-Behavior'] = 'light'
+ with mock.patch.object(self.resource.model, 'update_record',
+ return_value={'unread': True, 'position': 0}):
+ result = self.resource.patch()['data']
+ self.assertDictEqual(result, {'unread': True, 'position': 0})
+
+
+class MergePatchTest(BaseTest):
+ def setUp(self):
+ super(MergePatchTest, self).setUp()
+ self.stored = self.model.create_record({})
+ self.resource.record_id = self.stored['id']
+ self.headers = self.resource.request.headers
+ self.headers['Content-Type'] = 'application/merge-patch+json'
+
+ class ArticleSchema(ResourceSchema):
+ unread = colander.SchemaNode(colander.Boolean(), missing=colander.drop)
+ position = colander.SchemaNode(colander.Int(), missing=colander.drop)
+
+ self.resource.schema = ArticleSchema
+
def test_merge_patch_updates_attributes_recursively(self):
- header = self.resource.request.headers
- header['Content-Type'] = 'application/merge-patch+json'
self.resource.request.json = {'data': {'a': {'b': 'bbb',
'c': 'ccc'}}}
self.resource.patch()
@@ -240,8 +298,6 @@ def test_merge_patch_updates_attributes_recursively(self):
self.assertEqual(result['a']['b'], 'aaa')
def test_merge_patch_removes_attribute_if_none(self):
- header = self.resource.request.headers
- header['Content-Type'] = 'application/merge-patch+json'
self.resource.request.json = {'data': {'field': 'aaa'}}
self.resource.patch()
self.resource.request.json = {'data': {'field': None}}
@@ -251,8 +307,6 @@ def test_merge_patch_removes_attribute_if_none(self):
self.assertNotIn('field', result)
def test_merge_patch_removes_attributes_recursively_if_none(self):
- header = self.resource.request.headers
- header['Content-Type'] = 'application/merge-patch+json'
self.resource.request.json = {'data': {'a': {'b': 'aaa'}}}
self.resource.patch()
self.resource.request.json = {'data': {'a': {'b': None}}}
@@ -266,8 +320,6 @@ def test_merge_patch_removes_attributes_recursively_if_none(self):
self.assertNotIn('cc', result['aa']['bb'])
def test_merge_patch_doesnt_remove_attribute_if_false(self):
- header = self.resource.request.headers
- header['Content-Type'] = 'application/merge-patch+json'
self.resource.request.json = {'data': {'field': 0}}
result = self.resource.patch()['data']
self.assertIn('field', result)
@@ -279,8 +331,7 @@ def test_merge_patch_doesnt_remove_attribute_if_false(self):
self.assertIn('field', result)
def test_patch_doesnt_remove_attribute_if_not_merge_header(self):
- header = self.resource.request.headers
- header['Content-Type'] = 'application/json'
+ self.headers['Content-Type'] = 'application/json'
self.resource.request.json = {'data': {'field': 'aaa'}}
self.resource.patch()
self.resource.request.json = {'data': {'field': None}}
@@ -290,63 +341,17 @@ def test_patch_doesnt_remove_attribute_if_not_merge_header(self):
self.assertIn('field', result)
def test_merge_patch_doesnt_remove_previously_inserted_nones(self):
- header = self.resource.request.headers
- header['Content-Type'] = 'application/json'
+ self.headers['Content-Type'] = 'application/json'
self.resource.request.json = {'data': {'field': 'aaa'}}
result = self.resource.patch()['data']
self.resource.request.json = {'data': {'field': None}}
result = self.resource.patch()['data']
self.assertIn('field', result)
- header['Content-Type'] = 'application/merge-patch+json'
+ self.headers['Content-Type'] = 'application/merge-patch+json'
self.resource.request.json = {'data': {'position': 10}}
result = self.resource.patch()['data']
self.assertIn('field', result)
- def test_record_timestamp_is_not_updated_if_none_for_missing_field(self):
- self.resource.request.json = {'data': {'polo': None}}
- result = self.resource.patch()['data']
- self.assertEquals(self.result['last_modified'],
- result['last_modified'])
-
- def test_record_timestamp_is_not_updated_if_no_field_changed(self):
- self.resource.request.json = {'data': {'position': 10}}
- result = self.resource.patch()['data']
- self.assertEquals(self.result['last_modified'],
- result['last_modified'])
-
- def test_collection_timestamp_is_not_updated_if_no_field_changed(self):
- self.resource.request.json = {'data': {'position': 10}}
- self.resource.patch()
- self.resource = self.resource_class(request=self.get_request(),
- context=self.get_context())
- self.resource.collection_get()['data']
- last_modified = int(self.last_response.headers['ETag'][1:-1])
- self.assertEquals(self.result['last_modified'], last_modified)
-
- def test_timestamp_is_not_updated_if_no_change_after_preprocessed(self):
- with mock.patch.object(self.resource, 'process_record') as mocked:
- mocked.return_value = self.result
- self.resource.request.json = {'data': {'position': 20}}
- result = self.resource.patch()['data']
- self.assertEquals(self.result['last_modified'],
- result['last_modified'])
-
- def test_returns_changed_fields_among_provided_if_behaviour_is_diff(self):
- self.resource.request.json = {'data': {'unread': True, 'position': 15}}
- self.resource.request.headers['Response-Behavior'] = 'diff'
- with mock.patch.object(self.resource.model, 'update_record',
- return_value={'unread': True, 'position': 0}):
- result = self.resource.patch()['data']
- self.assertDictEqual(result, {'position': 0})
-
- def test_returns_changed_fields_if_behaviour_is_light(self):
- self.resource.request.json = {'data': {'unread': True, 'position': 15}}
- self.resource.request.headers['Response-Behavior'] = 'light'
- with mock.patch.object(self.resource.model, 'update_record',
- return_value={'unread': True, 'position': 0}):
- result = self.resource.patch()['data']
- self.assertDictEqual(result, {'unread': True, 'position': 0})
-
class JsonPatchTest(BaseTest):
def setUp(self):
diff --git a/tests/test_views_records.py b/tests/test_views_records.py
index 1e1787042..d1559a11c 100644
--- a/tests/test_views_records.py
+++ b/tests/test_views_records.py
@@ -301,6 +301,49 @@ def test_records_can_be_created_after_deletion(self):
headers=headers, status=201)
+class RecordsViewMergeTest(BaseWebTest, unittest.TestCase):
+
+ collection_url = '/buckets/beers/collections/barley/records'
+ _record_url = '/buckets/beers/collections/barley/records/%s'
+
+ def setUp(self):
+ super(RecordsViewMergeTest, self).setUp()
+ self.app.put_json('/buckets/beers', MINIMALIST_BUCKET,
+ headers=self.headers)
+ self.app.put_json('/buckets/beers/collections/barley',
+ MINIMALIST_COLLECTION,
+ headers=self.headers)
+ record = MINIMALIST_RECORD.copy()
+ record['data'] = {}
+ record['data']['grain'] = {'one': 1}
+ resp = self.app.post_json(self.collection_url,
+ record,
+ headers=self.headers)
+ self.record = resp.json['data']
+ self.record_url = self._record_url % self.record['id']
+
+ def test_merge_patch(self):
+ headers = self.headers.copy()
+ headers['Content-Type'] = 'application/merge-patch+json'
+ json = {'data': {'grain': {'two': 2}}}
+ resp = self.app.patch_json(self.record_url,
+ json,
+ headers=headers,
+ status=200)
+ self.assertEquals(resp.json['data']['grain']['one'], 1)
+ self.assertEquals(resp.json['data']['grain']['two'], 2)
+
+ def test_merge_patch_remove_nones(self):
+ headers = self.headers.copy()
+ headers['Content-Type'] = 'application/merge-patch+json'
+ json = {'data': {'grain': {'one': None}}}
+ resp = self.app.patch_json(self.record_url,
+ json,
+ headers=headers,
+ status=200)
+ self.assertNotIn('one', resp.json['data']['grain'])
+
+
class RecordsViewPatchTest(BaseWebTest, unittest.TestCase):
collection_url = '/buckets/beers/collections/barley/records'
|
freedomofpress__securedrop-5236 | qa_loader.py uses a fixed random seed every run
## Description
Always using the same seed makes it impossible to run `qa_loader.py` multiple times against the same database: the supposedly random values are identical on every run, causing unique constraint violations.
## Steps to Reproduce
- Run the dev server with `make dev`
- Start a shell in the container with `docker exec -it securedrop-dev-0 bash`
- In that shell, run `./qa_loader --journalist-count 1 --source-count 1`
- Run the same command a second time.
## Expected Behavior
That you could keep adding random journalists and sources to the database.
## Actual Behavior
You get `sqlalchemy.exc.IntegrityError: (sqlite3.IntegrityError) UNIQUE constraint failed: journalists.username` because [`random.seed` is always called with the same value](https://github.com/freedomofpress/securedrop/blob/ec2220c3c2b9120d029b616d3a07647b175bc6ab/securedrop/qa_loader.py#L22).
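A small standalone illustration (not SecureDrop code) of why a constant seed breaks repeated runs: every invocation regenerates the same "random" usernames, so the second run collides with rows already inserted by the first.
```python
# Illustration only: a fixed seed makes every run produce identical values.
import random
import string


def random_username(length=8):
    return "".join(random.choice(string.ascii_letters) for _ in range(length))


def run_loader():
    random.seed("fixed-seed")  # constant seed, as in qa_loader.py
    return [random_username() for _ in range(3)]


first_run = run_loader()
second_run = run_loader()
assert first_run == second_run  # identical usernames -> UNIQUE constraint violation
```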
| [
{
"content": "#!/opt/venvs/securedrop-app-code/bin/python\n# -*- coding: utf-8 -*-\n\nimport os\nimport random\nimport string\nimport sys\nfrom argparse import ArgumentParser\nfrom datetime import datetime\nfrom itertools import cycle\nfrom os import path\n\nfrom flask import current_app\n\nfrom crypto_util import DICEWARE_SAFE_CHARS\nfrom db import db\nfrom journalist_app import create_app\nfrom models import Journalist, JournalistLoginAttempt, Reply, Source, SourceStar, Submission\nfrom sdconfig import config as sdconfig\n\n\nrandom.seed(\"~(=^–^)\") # mrow?\n\n\ndef random_bool():\n return bool(random.getrandbits(1))\n\n\ndef random_chars(len, nullable, chars=string.ascii_letters):\n if nullable and random_bool():\n return None\n else:\n return \"\".join([random.choice(chars) for _ in range(len)])\n\n\ndef bool_or_none():\n return random.choice([True, False, None])\n\n\ndef random_datetime(nullable):\n if nullable and random_bool():\n return None\n else:\n now = datetime.now()\n return datetime(\n year=random.randint(2013, now.year),\n month=random.randint(1, now.month),\n day=random.randint(1, now.day),\n hour=random.randint(0, 23),\n minute=random.randint(0, 59),\n second=random.randint(0, 59),\n microsecond=random.randint(0, 1000),\n )\n\n\ndef positive_int(s):\n i = int(s)\n if i < 1:\n raise ValueError(\"{} is not >= 1\".format(s))\n return i\n\n\ndef fraction(s):\n f = float(s)\n if 0 <= f <= 1:\n return f\n raise ValueError(\"{} should be a float between 0 and 1\".format(s))\n\n\nsubmissions = cycle(\n [\n \"This is a test submission without markup!\",\n 'This is a test submission with markup and characters such as \\, \\\\, \\', \" and \". '\n + \"<strong>This text should not be bold</strong>!\", # noqa: W605, E501\n ]\n)\n\n\nreplies = cycle(\n [\n \"This is a test reply without markup!\",\n 'This is a test reply with markup and characters such as \\, \\\\, \\', \" and \". 
'\n + \"<strong>This text should not be bold</strong>!\", # noqa: W605, E501\n ]\n)\n\n\nclass QaLoader(object):\n def __init__(\n self,\n config,\n journalist_count=10,\n source_count=50,\n submissions_per_source=1,\n replies_per_source=1,\n source_star_fraction=0.1,\n source_reply_fraction=0.5,\n ):\n \"\"\"\n source_star_fraction and source_reply_fraction are simply the\n fraction of sources starred or replied to.\n \"\"\"\n self.config = config\n self.app = create_app(config)\n\n self.journalist_count = journalist_count\n self.source_count = source_count\n self.submissions_per_source = submissions_per_source\n self.replies_per_source = replies_per_source\n self.source_star_fraction = source_star_fraction\n self.source_reply_fraction = source_reply_fraction\n\n self.journalists = []\n self.sources = []\n\n def new_journalist(self):\n # Make a diceware-like password\n pw = \" \".join(\n [random_chars(3, nullable=False, chars=DICEWARE_SAFE_CHARS) for _ in range(7)]\n )\n journalist = Journalist(\n username=random_chars(random.randint(3, 32), nullable=False),\n password=pw,\n is_admin=random_bool(),\n )\n if random_bool():\n # to add legacy passwords back in\n journalist.passphrase_hash = None\n journalist.pw_salt = random_chars(32, nullable=False).encode(\"utf-8\")\n journalist.pw_hash = random_chars(64, nullable=False).encode(\"utf-8\")\n\n journalist.is_admin = bool_or_none()\n\n journalist.is_totp = bool_or_none()\n journalist.hotp_counter = random.randint(-1000, 1000) if random_bool() else None\n journalist.created_on = random_datetime(nullable=True)\n journalist.last_access = random_datetime(nullable=True)\n\n db.session.add(journalist)\n db.session.flush()\n self.journalists.append(journalist.id)\n\n def new_source(self):\n codename = current_app.crypto_util.genrandomid()\n filesystem_id = current_app.crypto_util.hash_codename(codename)\n journalist_designation = current_app.crypto_util.display_id()\n source = Source(filesystem_id, journalist_designation)\n db.session.add(source)\n db.session.flush()\n\n # Generate submissions directory and generate source key\n os.mkdir(current_app.storage.path(source.filesystem_id))\n current_app.crypto_util.genkeypair(source.filesystem_id, codename)\n\n self.sources.append(source.id)\n\n def new_submission(self, source_id):\n source = Source.query.get(source_id)\n\n source.interaction_count += 1\n fpath = current_app.storage.save_message_submission(\n source.filesystem_id,\n source.interaction_count,\n source.journalist_filename,\n next(submissions),\n )\n submission = Submission(source, fpath)\n db.session.add(submission)\n\n source.pending = False\n source.last_updated = datetime.utcnow()\n\n db.session.flush()\n\n def new_source_star(self, source_id):\n source = Source.query.get(source_id)\n star = SourceStar(source, bool_or_none())\n db.session.add(star)\n\n def new_reply(self, journalist_id, source_id):\n source = Source.query.get(source_id)\n\n journalist = Journalist.query.get(journalist_id)\n\n source.interaction_count += 1\n source.last_updated = datetime.utcnow()\n\n fname = \"{}-{}-reply.gpg\".format(source.interaction_count, source.journalist_filename)\n current_app.crypto_util.encrypt(\n next(replies),\n [\n current_app.crypto_util.get_fingerprint(source.filesystem_id),\n sdconfig.JOURNALIST_KEY\n ],\n current_app.storage.path(source.filesystem_id, fname),\n )\n\n reply = Reply(journalist, source, fname)\n db.session.add(reply)\n db.session.flush()\n\n def new_journalist_login_attempt(self, journalist_id):\n journalist = 
Journalist.query.get(journalist_id)\n attempt = JournalistLoginAttempt(journalist)\n attempt.timestamp = random_datetime(nullable=True)\n db.session.add(attempt)\n\n def load(self):\n with self.app.app_context():\n print(\"Creating {:d} journalists...\".format(self.journalist_count))\n for i in range(1, self.journalist_count + 1):\n self.new_journalist()\n if i % min(10, max(1, int(self.journalist_count / 10))) == 0:\n sys.stdout.write(\"{}\\r{}\".format(\" \" * len(str(self.journalist_count + 1)), i))\n print(\"\\n\")\n db.session.commit()\n\n print(\"Creating {:d} sources...\".format(self.source_count))\n for i in range(1, self.source_count + 1):\n self.new_source()\n if i % min(10, max(1, int(self.source_count / 10))) == 0:\n sys.stdout.write(\"{}\\r{}\".format(\" \" * len(str(self.source_count + 1)), i))\n print(\"\\n\")\n db.session.commit()\n\n print(\n \"Creating submissions ({:d} each) for each source...\".format(\n self.submissions_per_source\n )\n )\n for sid in self.sources:\n for _ in range(1, self.submissions_per_source + 1):\n self.new_submission(sid)\n db.session.commit()\n\n print(\"Starring {:.2f}% of all sources...\".format(self.source_star_fraction * 100))\n for sid in random.sample(\n self.sources, int(self.source_count * self.source_star_fraction)\n ):\n self.new_source_star(sid)\n db.session.commit()\n\n print(\n \"Creating replies ({:d} each) for {:.2f}% of sources...\".format(\n self.replies_per_source, self.source_reply_fraction * 100\n )\n )\n for sid in random.sample(\n self.sources, int(self.source_count * self.source_reply_fraction)\n ):\n jid = random.choice(self.journalists)\n for _ in range(self.replies_per_source):\n self.new_reply(jid, sid)\n db.session.commit()\n\n for jid in self.journalists:\n self.new_journalist_login_attempt(jid)\n db.session.commit()\n\n\ndef arg_parser():\n parser = ArgumentParser(\n path.basename(__file__), description=\"Loads data into the database for testing upgrades\"\n )\n parser.add_argument(\n \"--journalist-count\",\n type=positive_int,\n default=10,\n help=(\"Number of journalists to create\"),\n )\n parser.add_argument(\n \"--source-count\", type=positive_int, default=50, help=(\"Number of sources to create\")\n )\n parser.add_argument(\n \"--submissions-per-source\",\n type=positive_int,\n default=1,\n help=(\"Number of submissions to create for each source\"),\n )\n parser.add_argument(\n \"--replies-per-source\",\n type=positive_int,\n default=1,\n help=(\"Number of replies to create for each source\"),\n )\n parser.add_argument(\n \"--source-star-fraction\",\n type=fraction,\n default=0.1,\n help=(\"Fraction of sources to star\"),\n )\n parser.add_argument(\n \"--source-reply-fraction\",\n type=fraction,\n default=0.5,\n help=(\"Fraction of sources to reply to\"),\n )\n return parser\n\n\ndef main():\n args = arg_parser().parse_args()\n print(\"Loading data. This may take a while.\")\n QaLoader(\n sdconfig,\n args.journalist_count,\n args.source_count,\n args.submissions_per_source,\n args.replies_per_source,\n args.source_star_fraction,\n args.source_reply_fraction,\n ).load()\n\n\nif __name__ == \"__main__\":\n try:\n main()\n except KeyboardInterrupt:\n print(\"\") # for prompt on a newline\n sys.exit(1)\n",
"path": "securedrop/qa_loader.py"
}
] | [
{
"content": "#!/opt/venvs/securedrop-app-code/bin/python\n# -*- coding: utf-8 -*-\n\nimport os\nimport random\nimport string\nimport sys\nfrom argparse import ArgumentParser\nfrom datetime import datetime\nfrom itertools import cycle\nfrom os import path\n\nfrom flask import current_app\n\nfrom crypto_util import DICEWARE_SAFE_CHARS\nfrom db import db\nfrom journalist_app import create_app\nfrom models import Journalist, JournalistLoginAttempt, Reply, Source, SourceStar, Submission\nfrom sdconfig import config as sdconfig\n\n\ndef random_bool():\n return bool(random.getrandbits(1))\n\n\ndef random_chars(len, nullable, chars=string.ascii_letters):\n if nullable and random_bool():\n return None\n else:\n return \"\".join([random.choice(chars) for _ in range(len)])\n\n\ndef bool_or_none():\n return random.choice([True, False, None])\n\n\ndef random_datetime(nullable):\n if nullable and random_bool():\n return None\n else:\n now = datetime.now()\n return datetime(\n year=random.randint(2013, now.year),\n month=random.randint(1, now.month),\n day=random.randint(1, now.day),\n hour=random.randint(0, 23),\n minute=random.randint(0, 59),\n second=random.randint(0, 59),\n microsecond=random.randint(0, 1000),\n )\n\n\ndef positive_int(s):\n i = int(s)\n if i < 1:\n raise ValueError(\"{} is not >= 1\".format(s))\n return i\n\n\ndef fraction(s):\n f = float(s)\n if 0 <= f <= 1:\n return f\n raise ValueError(\"{} should be a float between 0 and 1\".format(s))\n\n\nsubmissions = cycle(\n [\n \"This is a test submission without markup!\",\n 'This is a test submission with markup and characters such as \\, \\\\, \\', \" and \". '\n + \"<strong>This text should not be bold</strong>!\", # noqa: W605, E501\n ]\n)\n\n\nreplies = cycle(\n [\n \"This is a test reply without markup!\",\n 'This is a test reply with markup and characters such as \\, \\\\, \\', \" and \". 
'\n + \"<strong>This text should not be bold</strong>!\", # noqa: W605, E501\n ]\n)\n\n\nclass QaLoader(object):\n def __init__(\n self,\n config,\n journalist_count=10,\n source_count=50,\n submissions_per_source=1,\n replies_per_source=1,\n source_star_fraction=0.1,\n source_reply_fraction=0.5,\n ):\n \"\"\"\n source_star_fraction and source_reply_fraction are simply the\n fraction of sources starred or replied to.\n \"\"\"\n self.config = config\n self.app = create_app(config)\n\n self.journalist_count = journalist_count\n self.source_count = source_count\n self.submissions_per_source = submissions_per_source\n self.replies_per_source = replies_per_source\n self.source_star_fraction = source_star_fraction\n self.source_reply_fraction = source_reply_fraction\n\n self.journalists = []\n self.sources = []\n\n def new_journalist(self):\n # Make a diceware-like password\n pw = \" \".join(\n [random_chars(3, nullable=False, chars=DICEWARE_SAFE_CHARS) for _ in range(7)]\n )\n journalist = Journalist(\n username=random_chars(random.randint(3, 32), nullable=False),\n password=pw,\n is_admin=random_bool(),\n )\n if random_bool():\n # to add legacy passwords back in\n journalist.passphrase_hash = None\n journalist.pw_salt = random_chars(32, nullable=False).encode(\"utf-8\")\n journalist.pw_hash = random_chars(64, nullable=False).encode(\"utf-8\")\n\n journalist.is_admin = bool_or_none()\n\n journalist.is_totp = bool_or_none()\n journalist.hotp_counter = random.randint(-1000, 1000) if random_bool() else None\n journalist.created_on = random_datetime(nullable=True)\n journalist.last_access = random_datetime(nullable=True)\n\n db.session.add(journalist)\n db.session.flush()\n self.journalists.append(journalist.id)\n\n def new_source(self):\n codename = current_app.crypto_util.genrandomid()\n filesystem_id = current_app.crypto_util.hash_codename(codename)\n journalist_designation = current_app.crypto_util.display_id()\n source = Source(filesystem_id, journalist_designation)\n db.session.add(source)\n db.session.flush()\n\n # Generate submissions directory and generate source key\n os.mkdir(current_app.storage.path(source.filesystem_id))\n current_app.crypto_util.genkeypair(source.filesystem_id, codename)\n\n self.sources.append(source.id)\n\n def new_submission(self, source_id):\n source = Source.query.get(source_id)\n\n source.interaction_count += 1\n fpath = current_app.storage.save_message_submission(\n source.filesystem_id,\n source.interaction_count,\n source.journalist_filename,\n next(submissions),\n )\n submission = Submission(source, fpath)\n db.session.add(submission)\n\n source.pending = False\n source.last_updated = datetime.utcnow()\n\n db.session.flush()\n\n def new_source_star(self, source_id):\n source = Source.query.get(source_id)\n star = SourceStar(source, bool_or_none())\n db.session.add(star)\n\n def new_reply(self, journalist_id, source_id):\n source = Source.query.get(source_id)\n\n journalist = Journalist.query.get(journalist_id)\n\n source.interaction_count += 1\n source.last_updated = datetime.utcnow()\n\n fname = \"{}-{}-reply.gpg\".format(source.interaction_count, source.journalist_filename)\n current_app.crypto_util.encrypt(\n next(replies),\n [\n current_app.crypto_util.get_fingerprint(source.filesystem_id),\n sdconfig.JOURNALIST_KEY\n ],\n current_app.storage.path(source.filesystem_id, fname),\n )\n\n reply = Reply(journalist, source, fname)\n db.session.add(reply)\n db.session.flush()\n\n def new_journalist_login_attempt(self, journalist_id):\n journalist = 
Journalist.query.get(journalist_id)\n attempt = JournalistLoginAttempt(journalist)\n attempt.timestamp = random_datetime(nullable=True)\n db.session.add(attempt)\n\n def load(self):\n with self.app.app_context():\n print(\"Creating {:d} journalists...\".format(self.journalist_count))\n for i in range(1, self.journalist_count + 1):\n self.new_journalist()\n if i % min(10, max(1, int(self.journalist_count / 10))) == 0:\n sys.stdout.write(\"{}\\r{}\".format(\" \" * len(str(self.journalist_count + 1)), i))\n print(\"\\n\")\n db.session.commit()\n\n print(\"Creating {:d} sources...\".format(self.source_count))\n for i in range(1, self.source_count + 1):\n self.new_source()\n if i % min(10, max(1, int(self.source_count / 10))) == 0:\n sys.stdout.write(\"{}\\r{}\".format(\" \" * len(str(self.source_count + 1)), i))\n print(\"\\n\")\n db.session.commit()\n\n print(\n \"Creating submissions ({:d} each) for each source...\".format(\n self.submissions_per_source\n )\n )\n for sid in self.sources:\n for _ in range(1, self.submissions_per_source + 1):\n self.new_submission(sid)\n db.session.commit()\n\n print(\"Starring {:.2f}% of all sources...\".format(self.source_star_fraction * 100))\n for sid in random.sample(\n self.sources, int(self.source_count * self.source_star_fraction)\n ):\n self.new_source_star(sid)\n db.session.commit()\n\n print(\n \"Creating replies ({:d} each) for {:.2f}% of sources...\".format(\n self.replies_per_source, self.source_reply_fraction * 100\n )\n )\n for sid in random.sample(\n self.sources, int(self.source_count * self.source_reply_fraction)\n ):\n jid = random.choice(self.journalists)\n for _ in range(self.replies_per_source):\n self.new_reply(jid, sid)\n db.session.commit()\n\n for jid in self.journalists:\n self.new_journalist_login_attempt(jid)\n db.session.commit()\n\n\ndef arg_parser():\n parser = ArgumentParser(\n path.basename(__file__), description=\"Loads data into the database for testing upgrades\"\n )\n parser.add_argument(\n \"--journalist-count\",\n type=positive_int,\n default=10,\n help=(\"Number of journalists to create\"),\n )\n parser.add_argument(\n \"--source-count\", type=positive_int, default=50, help=(\"Number of sources to create\")\n )\n parser.add_argument(\n \"--submissions-per-source\",\n type=positive_int,\n default=1,\n help=(\"Number of submissions to create for each source\"),\n )\n parser.add_argument(\n \"--replies-per-source\",\n type=positive_int,\n default=1,\n help=(\"Number of replies to create for each source\"),\n )\n parser.add_argument(\n \"--source-star-fraction\",\n type=fraction,\n default=0.1,\n help=(\"Fraction of sources to star\"),\n )\n parser.add_argument(\n \"--source-reply-fraction\",\n type=fraction,\n default=0.5,\n help=(\"Fraction of sources to reply to\"),\n )\n return parser\n\n\ndef main():\n args = arg_parser().parse_args()\n print(\"Loading data. This may take a while.\")\n QaLoader(\n sdconfig,\n args.journalist_count,\n args.source_count,\n args.submissions_per_source,\n args.replies_per_source,\n args.source_star_fraction,\n args.source_reply_fraction,\n ).load()\n\n\nif __name__ == \"__main__\":\n try:\n main()\n except KeyboardInterrupt:\n print(\"\") # for prompt on a newline\n sys.exit(1)\n",
"path": "securedrop/qa_loader.py"
}
] | diff --git a/securedrop/qa_loader.py b/securedrop/qa_loader.py
index 8f76be13e1..c4fa054da9 100755
--- a/securedrop/qa_loader.py
+++ b/securedrop/qa_loader.py
@@ -19,9 +19,6 @@
from sdconfig import config as sdconfig
-random.seed("~(=^–^)") # mrow?
-
-
def random_bool():
return bool(random.getrandbits(1))
|
apache__airflow-14978 | Bump supported mysqlclient to <1.5
**Description**
Version 1.4.X was introduced in Jan 2019;
we should support it if we can.
**Use case / motivation**
The pin to <1.4 was added in https://github.com/apache/airflow/pull/4558 due to lack of Python 2 compatibility. Since master doesn't support Python 2 anymore, there is no need for that restriction.
**Related Issues**
Moved from https://issues.apache.org/jira/browse/AIRFLOW-4810
I tried to fix it in https://github.com/apache/airflow/pull/5430 but didn't get help with the tests, so if anyone wants to pick it up, be my guest.
| [
{
"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Setup.py for the Airflow project.\"\"\"\nimport glob\nimport logging\nimport os\nimport subprocess\nimport unittest\nfrom copy import deepcopy\nfrom distutils import log\nfrom os.path import dirname, relpath\nfrom textwrap import wrap\nfrom typing import Dict, List, Tuple\n\nfrom setuptools import Command, Distribution, find_namespace_packages, setup\nfrom setuptools.command.develop import develop as develop_orig\nfrom setuptools.command.install import install as install_orig\n\n# Controls whether providers are installed from packages or directly from sources\n# It is turned on by default in case of development environments such as Breeze\n# And it is particularly useful when you add a new provider and there is no\n# PyPI version to install the provider package from\nINSTALL_PROVIDERS_FROM_SOURCES = 'INSTALL_PROVIDERS_FROM_SOURCES'\n\nlogger = logging.getLogger(__name__)\n\nversion = '2.1.0.dev0'\n\nmy_dir = dirname(__file__)\n\n\ndef airflow_test_suite() -> unittest.TestSuite:\n \"\"\"Test suite for Airflow tests\"\"\"\n test_loader = unittest.TestLoader()\n test_suite = test_loader.discover(os.path.join(my_dir, 'tests'), pattern='test_*.py')\n return test_suite\n\n\nclass CleanCommand(Command):\n \"\"\"\n Command to tidy up the project root.\n Registered as cmdclass in setup() so it can be called with ``python setup.py extra_clean``.\n \"\"\"\n\n description = \"Tidy up the project root\"\n user_options: List[str] = []\n\n def initialize_options(self):\n \"\"\"Set default values for options.\"\"\"\n\n def finalize_options(self):\n \"\"\"Set final values for options.\"\"\"\n\n @staticmethod\n def rm_all_files(files: List[str]):\n \"\"\"Remove all files from the list\"\"\"\n for file in files:\n try:\n os.remove(file)\n except Exception as e: # noqa pylint: disable=broad-except\n logger.warning(\"Error when removing %s: %s\", file, e)\n\n def run(self):\n \"\"\"Remove temporary files and directories.\"\"\"\n os.chdir(my_dir)\n self.rm_all_files(glob.glob('./build/*'))\n self.rm_all_files(glob.glob('./**/__pycache__/*', recursive=True))\n self.rm_all_files(glob.glob('./**/*.pyc', recursive=True))\n self.rm_all_files(glob.glob('./dist/*'))\n self.rm_all_files(glob.glob('./*.egg-info'))\n self.rm_all_files(glob.glob('./docker-context-files/*.whl'))\n self.rm_all_files(glob.glob('./docker-context-files/*.tgz'))\n\n\nclass CompileAssets(Command):\n \"\"\"\n Compile and build the frontend assets using yarn and webpack.\n Registered as cmdclass in setup() so it can be called with ``python setup.py compile_assets``.\n \"\"\"\n\n description = \"Compile and build the frontend assets\"\n user_options: List[str] = []\n\n def initialize_options(self):\n \"\"\"Set default values for 
options.\"\"\"\n\n def finalize_options(self):\n \"\"\"Set final values for options.\"\"\"\n\n def run(self): # noqa\n \"\"\"Run a command to compile and build assets.\"\"\"\n subprocess.check_call('./airflow/www/compile_assets.sh')\n\n\nclass ListExtras(Command):\n \"\"\"\n List all available extras\n Registered as cmdclass in setup() so it can be called with ``python setup.py list_extras``.\n \"\"\"\n\n description = \"List available extras\"\n user_options: List[str] = []\n\n def initialize_options(self):\n \"\"\"Set default values for options.\"\"\"\n\n def finalize_options(self):\n \"\"\"Set final values for options.\"\"\"\n\n def run(self): # noqa\n \"\"\"List extras.\"\"\"\n print(\"\\n\".join(wrap(\", \".join(EXTRAS_REQUIREMENTS.keys()), 100)))\n\n\ndef git_version(version_: str) -> str:\n \"\"\"\n Return a version to identify the state of the underlying git repo. The version will\n indicate whether the head of the current git-backed working directory is tied to a\n release tag or not : it will indicate the former with a 'release:{version}' prefix\n and the latter with a 'dev0' prefix. Following the prefix will be a sha of the current\n branch head. Finally, a \"dirty\" suffix is appended to indicate that uncommitted\n changes are present.\n\n :param str version_: Semver version\n :return: Found Airflow version in Git repo\n :rtype: str\n \"\"\"\n try:\n import git\n\n try:\n repo = git.Repo(os.path.join(*[my_dir, '.git']))\n except git.NoSuchPathError:\n logger.warning('.git directory not found: Cannot compute the git version')\n return ''\n except git.InvalidGitRepositoryError:\n logger.warning('Invalid .git directory not found: Cannot compute the git version')\n return ''\n except ImportError:\n logger.warning('gitpython not found: Cannot compute the git version.')\n return ''\n if repo:\n sha = repo.head.commit.hexsha\n if repo.is_dirty():\n return f'.dev0+{sha}.dirty'\n # commit is clean\n return f'.release:{version_}+{sha}'\n return 'no_git_version'\n\n\ndef write_version(filename: str = os.path.join(*[my_dir, \"airflow\", \"git_version\"])):\n \"\"\"\n Write the Semver version + git hash to file, e.g. \".dev0+2f635dc265e78db6708f59f68e8009abb92c1e65\".\n\n :param str filename: Destination file to write\n \"\"\"\n text = f\"{git_version(version)}\"\n with open(filename, 'w') as file:\n file.write(text)\n\n\ndef get_sphinx_theme_version() -> str:\n \"\"\"\n Return sphinx theme version. If USE_THEME_FROM_GIT env variable is set, the theme is used from\n GitHub to allow dynamically update it during development. 
However for regular PIP release\n you cannot use @ package specification, so the latest available released theme package from\n PIP is used.\n :return: Version of sphinx theme to use.\n \"\"\"\n if os.environ.get('USE_THEME_FROM_GIT'):\n return (\n \"@ https://github.com/apache/airflow-site/releases/download/0.0.4/\"\n + \"sphinx_airflow_theme-0.0.4-py3-none-any.whl\"\n )\n return ''\n\n\n# 'Start dependencies group' and 'Start dependencies group' are mark for ./scripts/ci/check_order_setup.py\n# If you change this mark you should also change ./scripts/ci/check_order_setup.py\n# Start dependencies group\namazon = [\n 'boto3>=1.15.0,<1.18.0',\n 'botocore>=1.18.0,<1.19.0',\n 'watchtower~=0.7.3',\n]\napache_beam = [\n 'apache-beam[gcp]',\n]\nasync_packages = [\n 'eventlet>= 0.9.7',\n 'gevent>=0.13',\n 'greenlet>=0.4.9',\n]\natlas = [\n 'atlasclient>=0.1.2',\n]\nazure = [\n 'azure-batch>=8.0.0',\n 'azure-cosmos>=3.0.1,<4',\n 'azure-datalake-store>=0.0.45',\n 'azure-identity>=1.3.1',\n 'azure-keyvault>=4.1.0',\n 'azure-kusto-data>=0.0.43,<0.1',\n 'azure-mgmt-containerinstance>=1.5.0,<2.0',\n 'azure-mgmt-datafactory>=1.0.0,<2.0',\n 'azure-mgmt-datalake-store>=0.5.0',\n 'azure-mgmt-resource>=2.2.0',\n 'azure-storage-blob>=12.7.0',\n 'azure-storage-common>=2.1.0',\n 'azure-storage-file>=2.1.0',\n]\ncassandra = [\n 'cassandra-driver>=3.13.0,<3.21.0',\n]\ncelery = [\n 'celery~=4.4.2',\n 'flower>=0.7.3, <1.0',\n 'vine~=1.3', # https://stackoverflow.com/questions/32757259/celery-no-module-named-five\n]\ncgroups = [\n 'cgroupspy>=0.1.4',\n]\ncloudant = [\n 'cloudant>=2.0',\n]\ndask = ['cloudpickle>=1.4.1, <1.5.0', 'distributed>=2.11.1, <2.20']\ndatabricks = [\n 'requests>=2.20.0, <3',\n]\ndatadog = [\n 'datadog>=0.14.0',\n]\ndoc = [\n # Sphinx is limited to < 3.5.0 because of https://github.com/sphinx-doc/sphinx/issues/8880\n 'sphinx>=2.1.2, <3.5.0',\n f'sphinx-airflow-theme{get_sphinx_theme_version()}',\n 'sphinx-argparse>=0.1.13',\n 'sphinx-autoapi==1.0.0',\n 'sphinx-copybutton',\n 'sphinx-jinja~=1.1',\n 'sphinx-rtd-theme>=0.1.6',\n 'sphinxcontrib-httpdomain>=1.7.0',\n 'sphinxcontrib-redoc>=1.6.0',\n 'sphinxcontrib-spelling==5.2.1',\n]\ndocker = [\n 'docker~=3.0',\n]\ndruid = [\n 'pydruid>=0.4.1',\n]\nelasticsearch = [\n 'elasticsearch>7, <7.6.0',\n 'elasticsearch-dbapi==0.1.0',\n 'elasticsearch-dsl>=5.0.0',\n]\nexasol = [\n 'pyexasol>=0.5.1,<1.0.0',\n]\nfacebook = [\n 'facebook-business>=6.0.2',\n]\nflask_oauth = [\n 'Flask-OAuthlib>=0.9.1,<0.9.6', # Flask OAuthLib 0.9.6 requires Flask-Login 0.5.0 - breaks FAB\n 'oauthlib!=2.0.3,!=2.0.4,!=2.0.5,<3.0.0,>=1.1.2',\n 'requests-oauthlib<1.2.0',\n]\ngoogle = [\n 'PyOpenSSL',\n 'google-ads>=4.0.0,<8.0.0',\n 'google-api-core>=1.25.1,<2.0.0',\n 'google-api-python-client>=1.6.0,<2.0.0',\n 'google-auth>=1.0.0,<2.0.0',\n 'google-auth-httplib2>=0.0.1',\n 'google-cloud-automl>=2.1.0,<3.0.0',\n 'google-cloud-bigquery-datatransfer>=3.0.0,<4.0.0',\n 'google-cloud-bigtable>=1.0.0,<2.0.0',\n 'google-cloud-container>=0.1.1,<2.0.0',\n 'google-cloud-datacatalog>=3.0.0,<4.0.0',\n 'google-cloud-dataproc>=2.2.0,<3.0.0',\n 'google-cloud-dlp>=0.11.0,<2.0.0',\n 'google-cloud-kms>=2.0.0,<3.0.0',\n 'google-cloud-language>=1.1.1,<2.0.0',\n 'google-cloud-logging>=2.1.1,<3.0.0',\n 'google-cloud-memcache>=0.2.0',\n 'google-cloud-monitoring>=2.0.0,<3.0.0',\n 'google-cloud-os-login>=2.0.0,<3.0.0',\n 'google-cloud-pubsub>=2.0.0,<3.0.0',\n 'google-cloud-redis>=2.0.0,<3.0.0',\n 'google-cloud-secret-manager>=0.2.0,<2.0.0',\n 'google-cloud-spanner>=1.10.0,<2.0.0',\n 
'google-cloud-speech>=0.36.3,<2.0.0',\n 'google-cloud-storage>=1.30,<2.0.0',\n 'google-cloud-tasks>=2.0.0,<3.0.0',\n 'google-cloud-texttospeech>=0.4.0,<2.0.0',\n 'google-cloud-translate>=1.5.0,<2.0.0',\n 'google-cloud-videointelligence>=1.7.0,<2.0.0',\n 'google-cloud-vision>=0.35.2,<2.0.0',\n 'google-cloud-workflows>=0.1.0,<2.0.0',\n 'grpcio-gcp>=0.2.2',\n 'json-merge-patch~=0.2',\n 'pandas-gbq',\n 'plyvel',\n]\ngrpc = [\n 'google-auth>=1.0.0, <2.0.0dev',\n 'google-auth-httplib2>=0.0.1',\n 'grpcio>=1.15.0',\n]\nhashicorp = [\n 'hvac~=0.10',\n]\nhdfs = [\n 'snakebite-py3',\n]\nhive = [\n 'hmsclient>=0.1.0',\n 'pyhive[hive]>=0.6.0',\n 'thrift>=0.9.2',\n]\njdbc = [\n 'jaydebeapi>=1.1.1',\n]\njenkins = [\n 'python-jenkins>=1.0.0',\n]\njira = [\n 'JIRA>1.0.7',\n]\nkerberos = [\n 'pykerberos>=1.1.13',\n 'requests_kerberos>=0.10.0',\n 'thrift_sasl>=0.2.0',\n]\nkubernetes = [\n 'cryptography>=2.0.0',\n 'kubernetes>=3.0.0, <12.0.0',\n]\nkylin = ['kylinpy>=2.6']\nldap = [\n 'ldap3>=2.5.1',\n 'python-ldap',\n]\nmongo = [\n 'dnspython>=1.13.0,<2.0.0',\n 'pymongo>=3.6.0',\n]\nmssql = [\n 'pymssql~=2.1,>=2.1.5',\n]\nmysql = [\n 'mysql-connector-python>=8.0.11, <=8.0.22',\n 'mysqlclient>=1.3.6,<1.4',\n]\nneo4j = ['neo4j>=4.2.1']\nodbc = [\n 'pyodbc',\n]\noracle = [\n 'cx_Oracle>=5.1.2',\n]\npagerduty = [\n 'pdpyras>=4.1.2,<5',\n]\npapermill = [\n 'nteract-scrapbook[all]>=0.3.1',\n 'papermill[all]>=1.2.1',\n]\npassword = [\n 'bcrypt>=2.0.0',\n 'flask-bcrypt>=0.7.1',\n]\npinot = [\n # pinotdb v0.1.1 may still work with older versions of Apache Pinot, but we've confirmed that it\n # causes a problem with newer versions.\n 'pinotdb>0.1.2,<1.0.0',\n]\nplexus = [\n 'arrow>=0.16.0,<1.0.0',\n]\npostgres = [\n 'psycopg2-binary>=2.7.4',\n]\npresto = ['presto-python-client>=0.7.0,<0.8']\nqubole = [\n 'qds-sdk>=1.10.4',\n]\nrabbitmq = [\n 'amqp<5.0.0',\n]\nredis = [\n 'redis~=3.2',\n]\nsalesforce = [\n 'simple-salesforce>=1.0.0',\n 'tableauserverclient',\n]\nsamba = [\n 'pysmbclient>=0.1.3',\n]\nsegment = [\n 'analytics-python>=1.2.9',\n]\nsendgrid = [\n 'sendgrid>=6.0.0,<7',\n]\nsentry = [\n 'blinker>=1.1',\n 'sentry-sdk>=0.8.0',\n]\nsingularity = ['spython>=0.0.56']\nslack = [\n 'slack_sdk>=3.0.0,<4.0.0',\n]\nsnowflake = [\n 'snowflake-connector-python>=2.4.1',\n 'snowflake-sqlalchemy>=1.1.0',\n]\nspark = [\n 'pyspark',\n]\nssh = [\n 'paramiko>=2.6.0',\n 'pysftp>=0.2.9',\n 'sshtunnel>=0.1.4,<0.2',\n]\nstatsd = [\n 'statsd>=3.3.0, <4.0',\n]\ntableau = [\n 'tableauserverclient',\n]\ntelegram = [\n 'python-telegram-bot==13.0',\n]\nvertica = [\n 'vertica-python>=0.5.1',\n]\nvirtualenv = [\n 'virtualenv',\n]\nwebhdfs = [\n 'hdfs[avro,dataframe,kerberos]>=2.0.4',\n]\nwinrm = [\n 'pywinrm~=0.4',\n]\nyandex = [\n 'yandexcloud>=0.22.0',\n]\nzendesk = [\n 'zdesk',\n]\n# End dependencies group\n\ndevel = [\n 'beautifulsoup4~=4.7.1',\n 'black',\n 'blinker',\n 'bowler',\n 'click~=7.1',\n 'coverage',\n 'docutils',\n 'flake8>=3.6.0',\n 'flake8-colors',\n 'flaky',\n 'freezegun',\n 'github3.py',\n 'gitpython',\n 'importlib-resources~=1.4',\n 'ipdb',\n 'jira',\n 'jsonpath-ng',\n # HACK: Moto is not compatible with newer versions\n # See: https://github.com/spulec/moto/issues/3535\n 'mock<4.0.3',\n 'mongomock',\n 'moto<2',\n 'mypy==0.770',\n 'parameterized',\n 'paramiko',\n 'pipdeptree',\n 'pre-commit',\n 'pylint>=2.7.0',\n 'pysftp',\n 'pytest~=6.0',\n 'pytest-cov',\n 'pytest-instafail',\n 'pytest-rerunfailures~=9.1',\n 'pytest-timeouts',\n 'pytest-xdist',\n 'pywinrm',\n 'qds-sdk>=1.9.6',\n 'requests_mock',\n 'wheel',\n 
'yamllint',\n]\n\ndevel_minreq = cgroups + devel + doc + kubernetes + mysql + password\ndevel_hadoop = devel_minreq + hdfs + hive + kerberos + presto + webhdfs\n\n# Dict of all providers which are part of the Apache Airflow repository together with their requirements\nPROVIDERS_REQUIREMENTS: Dict[str, List[str]] = {\n 'airbyte': [],\n 'amazon': amazon,\n 'apache.beam': apache_beam,\n 'apache.cassandra': cassandra,\n 'apache.druid': druid,\n 'apache.hdfs': hdfs,\n 'apache.hive': hive,\n 'apache.kylin': kylin,\n 'apache.livy': [],\n 'apache.pig': [],\n 'apache.pinot': pinot,\n 'apache.spark': spark,\n 'apache.sqoop': [],\n 'celery': celery,\n 'cloudant': cloudant,\n 'cncf.kubernetes': kubernetes,\n 'databricks': databricks,\n 'datadog': datadog,\n 'dingding': [],\n 'discord': [],\n 'docker': docker,\n 'elasticsearch': elasticsearch,\n 'exasol': exasol,\n 'facebook': facebook,\n 'ftp': [],\n 'google': google,\n 'grpc': grpc,\n 'hashicorp': hashicorp,\n 'http': [],\n 'imap': [],\n 'jdbc': jdbc,\n 'jenkins': jenkins,\n 'jira': jira,\n 'microsoft.azure': azure,\n 'microsoft.mssql': mssql,\n 'microsoft.winrm': winrm,\n 'mongo': mongo,\n 'mysql': mysql,\n 'neo4j': neo4j,\n 'odbc': odbc,\n 'openfaas': [],\n 'opsgenie': [],\n 'oracle': oracle,\n 'pagerduty': pagerduty,\n 'papermill': papermill,\n 'plexus': plexus,\n 'postgres': postgres,\n 'presto': presto,\n 'qubole': qubole,\n 'redis': redis,\n 'salesforce': salesforce,\n 'samba': samba,\n 'segment': segment,\n 'sendgrid': sendgrid,\n 'sftp': ssh,\n 'singularity': singularity,\n 'slack': slack,\n 'snowflake': snowflake,\n 'sqlite': [],\n 'ssh': ssh,\n 'tableau': tableau,\n 'telegram': telegram,\n 'vertica': vertica,\n 'yandex': yandex,\n 'zendesk': zendesk,\n}\n\n# Those are all additional extras which do not have their own 'providers'\n# The 'apache.atlas' and 'apache.webhdfs' are extras that provide additional libraries\n# but they do not have separate providers (yet?), they are merely there to add extra libraries\n# That can be used in custom python/bash operators.\nADDITIONAL_EXTRAS_REQUIREMENTS: Dict[str, List[str]] = {\n 'apache.atlas': atlas,\n 'apache.webhdfs': webhdfs,\n}\n\n\n# Those are extras that are extensions of the 'core' Airflow. They provide additional features\n# To airflow core. 
They do not have separate providers because they do not have any operators/hooks etc.\nCORE_EXTRAS_REQUIREMENTS: Dict[str, List[str]] = {\n 'async': async_packages,\n 'celery': celery, # also has provider, but it extends the core with the Celery executor\n 'cgroups': cgroups,\n 'cncf.kubernetes': kubernetes, # also has provider, but it extends the core with the KubernetesExecutor\n 'dask': dask,\n 'github_enterprise': flask_oauth,\n 'google_auth': flask_oauth,\n 'kerberos': kerberos,\n 'ldap': ldap,\n 'password': password,\n 'rabbitmq': rabbitmq,\n 'sentry': sentry,\n 'statsd': statsd,\n 'virtualenv': virtualenv,\n}\n\n\nEXTRAS_REQUIREMENTS: Dict[str, List[str]] = deepcopy(CORE_EXTRAS_REQUIREMENTS)\n\n\ndef add_extras_for_all_providers() -> None:\n \"\"\"\n Adds extras for all providers.\n By default all providers have the same extra name as provider id, for example\n 'apache.hive' extra has 'apache.hive' provider requirement.\n \"\"\"\n for provider_name, provider_requirement in PROVIDERS_REQUIREMENTS.items():\n EXTRAS_REQUIREMENTS[provider_name] = provider_requirement\n\n\ndef add_additional_extras() -> None:\n \"\"\"Adds extras for all additional extras.\"\"\"\n for extra_name, extra_requirement in ADDITIONAL_EXTRAS_REQUIREMENTS.items():\n EXTRAS_REQUIREMENTS[extra_name] = extra_requirement\n\n\nadd_extras_for_all_providers()\nadd_additional_extras()\n\n#############################################################################################################\n# The whole section can be removed in Airflow 3.0 as those old aliases are deprecated in 2.* series\n#############################################################################################################\n\n# Dictionary of aliases from 1.10 - deprecated in Airflow 2.*\nEXTRAS_DEPRECATED_ALIASES: Dict[str, str] = {\n 'atlas': 'apache.atlas',\n 'aws': 'amazon',\n 'azure': 'microsoft.azure',\n 'cassandra': 'apache.cassandra',\n 'crypto': '', # All crypto requirements are installation requirements of core Airflow\n 'druid': 'apache.druid',\n 'gcp': 'google',\n 'gcp_api': 'google',\n 'hdfs': 'apache.hdfs',\n 'hive': 'apache.hive',\n 'kubernetes': 'cncf.kubernetes',\n 'mssql': 'microsoft.mssql',\n 'pinot': 'apache.pinot',\n 'qds': 'qubole',\n 's3': 'amazon',\n 'spark': 'apache.spark',\n 'webhdfs': 'apache.webhdfs',\n 'winrm': 'microsoft.winrm',\n}\n\n\ndef find_requirements_for_alias(alias_to_look_for: Tuple[str, str]) -> List[str]:\n \"\"\"Finds requirements for an alias\"\"\"\n deprecated_extra = alias_to_look_for[0]\n new_extra = alias_to_look_for[1]\n if new_extra == '': # Handle case for crypto\n return []\n try:\n return EXTRAS_REQUIREMENTS[new_extra]\n except KeyError: # noqa\n raise Exception(f\"The extra {new_extra} is missing for alias {deprecated_extra}\")\n\n\ndef add_extras_for_all_deprecated_aliases() -> None:\n \"\"\"\n Add extras for all deprecated aliases. Requirements for those deprecated aliases are the same\n as the extras they are replaced with.\n The requirements are not copies - those are the same lists as for the new extras. 
This is intended.\n Thanks to that if the original extras are later extended with providers, aliases are extended as well.\n \"\"\"\n for alias, extra in EXTRAS_DEPRECATED_ALIASES.items():\n requirements = EXTRAS_REQUIREMENTS.get(extra) if extra != '' else []\n if requirements is None:\n raise Exception(f\"The extra {extra} is missing for deprecated alias {alias}\")\n EXTRAS_REQUIREMENTS[alias] = requirements\n\n\nadd_extras_for_all_deprecated_aliases()\n\n#############################################################################################################\n# End of deprecated section\n#############################################################################################################\n\n# This is list of all providers. It's a shortcut for anyone who would like to easily get list of\n# All providers. It is used by pre-commits.\nALL_PROVIDERS = list(PROVIDERS_REQUIREMENTS.keys())\n\nALL_DB_PROVIDERS = [\n 'apache.cassandra',\n 'apache.druid',\n 'apache.hdfs',\n 'apache.hive',\n 'apache.pinot',\n 'cloudant',\n 'exasol',\n 'microsoft.mssql',\n 'mongo',\n 'mysql',\n 'neo4j',\n 'postgres',\n 'presto',\n 'vertica',\n]\n\n# Special requirements for all database-related providers. They are de-duplicated.\nall_dbs = list({req for db_provider in ALL_DB_PROVIDERS for req in PROVIDERS_REQUIREMENTS[db_provider]})\n\n# Requirements for all \"user\" extras (no devel). They are de-duplicated. Note that we do not need\n# to separately add providers requirements - they have been already added as 'providers' extras above\n_all_requirements = list({req for extras_reqs in EXTRAS_REQUIREMENTS.values() for req in extras_reqs})\n\n# All user extras here\nEXTRAS_REQUIREMENTS[\"all\"] = _all_requirements\n\n# All db user extras here\nEXTRAS_REQUIREMENTS[\"all_dbs\"] = all_dbs\n\n# This can be simplified to devel_hadoop + _all_requirements due to inclusions\n# but we keep it for explicit sake. 
We are de-duplicating it anyway.\ndevel_all = list(set(_all_requirements + doc + devel_minreq + devel_hadoop))\n\n# Those are packages excluded for \"all\" dependencies\nPACKAGES_EXCLUDED_FOR_ALL = []\nPACKAGES_EXCLUDED_FOR_ALL.extend(\n [\n 'snakebite',\n ]\n)\n\n# Those packages are excluded because they break tests and they are not needed to run our test suite.\n# This can be removed as soon as we get non-conflicting\n# requirements for the apache-beam as well.\n#\n# Currently Apache Beam has very narrow and old dependencies for 'dill' and 'mock' packages which\n# are required by our tests (but only for tests).\n#\nPACKAGES_EXCLUDED_FOR_CI = [\n 'apache-beam',\n]\n\n\ndef is_package_excluded(package: str, exclusion_list: List[str]):\n \"\"\"\n Checks if package should be excluded.\n\n :param package: package name (beginning of it)\n :param exclusion_list: list of excluded packages\n :return: true if package should be excluded\n \"\"\"\n return any(package.startswith(excluded_package) for excluded_package in exclusion_list)\n\n\ndevel_all = [\n package\n for package in devel_all\n if not is_package_excluded(package=package, exclusion_list=PACKAGES_EXCLUDED_FOR_ALL)\n]\n\ndevel_ci = [\n package\n for package in devel_all\n if not is_package_excluded(\n package=package, exclusion_list=PACKAGES_EXCLUDED_FOR_CI + PACKAGES_EXCLUDED_FOR_ALL\n )\n]\n\n\n# Those are extras that we have to add for development purposes\n# They can be use to install some predefined set of dependencies.\nEXTRAS_REQUIREMENTS[\"doc\"] = doc\nEXTRAS_REQUIREMENTS[\"devel\"] = devel_minreq # devel_minreq already includes doc\nEXTRAS_REQUIREMENTS[\"devel_hadoop\"] = devel_hadoop # devel_hadoop already includes devel_minreq\nEXTRAS_REQUIREMENTS[\"devel_all\"] = devel_all\nEXTRAS_REQUIREMENTS[\"devel_ci\"] = devel_ci\n\n\ndef sort_extras_requirements() -> Dict[str, List[str]]:\n \"\"\"\n For Python 3.6+ the dictionary order remains when keys() are retrieved.\n Sort both: extras and list of dependencies to make it easier to analyse problems\n external packages will be first, then if providers are added they are added at the end of the lists.\n \"\"\"\n sorted_requirements = dict(sorted(EXTRAS_REQUIREMENTS.items())) # noqa\n for extra_list in sorted_requirements.values():\n extra_list.sort()\n return sorted_requirements\n\n\nEXTRAS_REQUIREMENTS = sort_extras_requirements()\n\n# Those providers are pre-installed always when airflow is installed.\n# Those providers do not have dependency on airflow2.0 because that would lead to circular dependencies.\n# This is not a problem for PIP but some tools (pipdeptree) show those as a warning.\nPREINSTALLED_PROVIDERS = [\n 'ftp',\n 'http',\n 'imap',\n 'sqlite',\n]\n\n\ndef get_provider_package_from_package_id(package_id: str):\n \"\"\"\n Builds the name of provider package out of the package id provided/\n\n :param package_id: id of the package (like amazon or microsoft.azure)\n :return: full name of package in PyPI\n \"\"\"\n package_suffix = package_id.replace(\".\", \"-\")\n return f\"apache-airflow-providers-{package_suffix}\"\n\n\ndef get_all_provider_packages():\n \"\"\"Returns all provider packages configured in setup.py\"\"\"\n return \" \".join([get_provider_package_from_package_id(package) for package in PROVIDERS_REQUIREMENTS])\n\n\nclass AirflowDistribution(Distribution):\n \"\"\"\n The setuptools.Distribution subclass with Airflow specific behaviour\n\n The reason for pylint: disable=signature-differs of parse_config_files is explained here:\n 
https://github.com/PyCQA/pylint/issues/3737\n\n \"\"\"\n\n def parse_config_files(self, *args, **kwargs): # pylint: disable=signature-differs\n \"\"\"\n Ensure that when we have been asked to install providers from sources\n that we don't *also* try to install those providers from PyPI.\n Also we should make sure that in this case we copy provider.yaml files so that\n Providers manager can find package information.\n \"\"\"\n super().parse_config_files(*args, **kwargs)\n if os.getenv(INSTALL_PROVIDERS_FROM_SOURCES) == 'true':\n self.install_requires = [ # noqa pylint: disable=attribute-defined-outside-init\n req for req in self.install_requires if not req.startswith('apache-airflow-providers-')\n ]\n provider_yaml_files = glob.glob(\"airflow/providers/**/provider.yaml\", recursive=True)\n for provider_yaml_file in provider_yaml_files:\n provider_relative_path = relpath(provider_yaml_file, os.path.join(my_dir, \"airflow\"))\n self.package_data['airflow'].append(provider_relative_path)\n else:\n self.install_requires.extend(\n [get_provider_package_from_package_id(package_id) for package_id in PREINSTALLED_PROVIDERS]\n )\n\n\ndef replace_extra_requirement_with_provider_packages(extra: str, providers: List[str]) -> None:\n \"\"\"\n Replaces extra requirement with provider package. The intention here is that when\n the provider is added as dependency of extra, there is no need to add the dependencies\n separately. This is not needed and even harmful, because in case of future versions of\n the provider, the requirements might change, so hard-coding requirements from the version\n that was available at the release time might cause dependency conflicts in the future.\n\n Say for example that you have salesforce provider with those deps:\n\n { 'salesforce': ['simple-salesforce>=1.0.0', 'tableauserverclient'] }\n\n Initially ['salesforce'] extra has those requirements and it works like that when you install\n it when INSTALL_PROVIDERS_FROM_SOURCES is set to `true` (during the development). However, when\n the production installation is used, The dependencies are changed:\n\n { 'salesforce': ['apache-airflow-providers-salesforce'] }\n\n And then, 'apache-airflow-providers-salesforce' package has those 'install_requires' dependencies:\n ['simple-salesforce>=1.0.0', 'tableauserverclient']\n\n So transitively 'salesforce' extra has all the requirements it needs and in case the provider\n changes it's dependencies, they will transitively change as well.\n\n In the constraint mechanism we save both - provider versions and it's dependencies\n version, which means that installation using constraints is repeatable.\n\n :param extra: Name of the extra to add providers to\n :param providers: list of provider ids\n \"\"\"\n EXTRAS_REQUIREMENTS[extra] = [\n get_provider_package_from_package_id(package_name) for package_name in providers\n ]\n\n\ndef add_provider_packages_to_extra_requirements(extra: str, providers: List[str]) -> None:\n \"\"\"\n Adds provider packages as requirements to extra. This is used to add provider packages as requirements\n to the \"bulk\" kind of extras. 
Those bulk extras do not have the detailed 'extra' requirements as\n initial values, so instead of replacing them (see previous function) we can extend them.\n\n :param extra: Name of the extra to add providers to\n :param providers: list of provider ids\n \"\"\"\n EXTRAS_REQUIREMENTS[extra].extend(\n [get_provider_package_from_package_id(package_name) for package_name in providers]\n )\n\n\ndef add_all_provider_packages() -> None:\n \"\"\"\n In case of regular installation (providers installed from packages), we should add extra dependencies to\n Airflow - to get the providers automatically installed when those extras are installed.\n\n For providers installed from sources we skip that step. That helps to test and install airflow with\n all packages in CI - for example when new providers are added, otherwise the installation would fail\n as the new provider is not yet in PyPI.\n\n \"\"\"\n for provider in ALL_PROVIDERS:\n replace_extra_requirement_with_provider_packages(provider, [provider])\n add_provider_packages_to_extra_requirements(\"all\", ALL_PROVIDERS)\n add_provider_packages_to_extra_requirements(\"devel_ci\", ALL_PROVIDERS)\n add_provider_packages_to_extra_requirements(\"devel_all\", ALL_PROVIDERS)\n add_provider_packages_to_extra_requirements(\"all_dbs\", ALL_DB_PROVIDERS)\n add_provider_packages_to_extra_requirements(\"devel_hadoop\", [\"apache.hdfs\", \"apache.hive\", \"presto\"])\n\n\nclass Develop(develop_orig):\n \"\"\"Forces removal of providers in editable mode.\"\"\"\n\n def run(self):\n self.announce('Installing in editable mode. Uninstalling provider packages!', level=log.INFO)\n # We need to run \"python3 -m pip\" because it might be that older PIP binary is in the path\n # And it results with an error when running pip directly (cannot import pip module)\n # also PIP does not have a stable API so we have to run subprocesses ¯\\_(ツ)_/¯\n try:\n installed_packages = (\n subprocess.check_output([\"python3\", \"-m\", \"pip\", \"freeze\"]).decode().splitlines()\n )\n airflow_provider_packages = [\n package_line.split(\"=\")[0]\n for package_line in installed_packages\n if package_line.startswith(\"apache-airflow-providers\")\n ]\n self.announce(f'Uninstalling ${airflow_provider_packages}!', level=log.INFO)\n subprocess.check_call([\"python3\", \"-m\", \"pip\", \"uninstall\", \"--yes\", *airflow_provider_packages])\n except subprocess.CalledProcessError as e:\n self.announce(f'Error when uninstalling airflow provider packages: {e}!', level=log.WARN)\n super().run()\n\n\nclass Install(install_orig):\n \"\"\"Forces installation of providers from sources in editable mode.\"\"\"\n\n def run(self):\n self.announce('Standard installation. Providers are installed from packages', level=log.INFO)\n super().run()\n\n\ndef do_setup() -> None:\n \"\"\"\n Perform the Airflow package setup.\n\n Most values come from setup.cfg, only the dynamically calculated ones are passed to setup\n function call. 
See https://setuptools.readthedocs.io/en/latest/userguide/declarative_config.html\n \"\"\"\n setup_kwargs = {}\n\n def include_provider_namespace_packages_when_installing_from_sources() -> None:\n \"\"\"\n When installing providers from sources we install all namespace packages found below airflow,\n including airflow and provider packages, otherwise defaults from setup.cfg control this.\n The kwargs in setup() call override those that are specified in setup.cfg.\n \"\"\"\n if os.getenv(INSTALL_PROVIDERS_FROM_SOURCES) == 'true':\n setup_kwargs['packages'] = find_namespace_packages(include=['airflow*'])\n\n include_provider_namespace_packages_when_installing_from_sources()\n if os.getenv(INSTALL_PROVIDERS_FROM_SOURCES) == 'true':\n print(\"Installing providers from sources. Skip adding providers as dependencies\")\n else:\n add_all_provider_packages()\n\n write_version()\n setup(\n distclass=AirflowDistribution,\n version=version,\n extras_require=EXTRAS_REQUIREMENTS,\n download_url=('https://archive.apache.org/dist/airflow/' + version),\n cmdclass={\n 'extra_clean': CleanCommand,\n 'compile_assets': CompileAssets,\n 'list_extras': ListExtras,\n 'install': Install,\n 'develop': Develop,\n },\n test_suite='setup.airflow_test_suite',\n **setup_kwargs,\n )\n\n\nif __name__ == \"__main__\":\n do_setup()\n",
"path": "setup.py"
}
] | [
{
"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Setup.py for the Airflow project.\"\"\"\nimport glob\nimport logging\nimport os\nimport subprocess\nimport unittest\nfrom copy import deepcopy\nfrom distutils import log\nfrom os.path import dirname, relpath\nfrom textwrap import wrap\nfrom typing import Dict, List, Tuple\n\nfrom setuptools import Command, Distribution, find_namespace_packages, setup\nfrom setuptools.command.develop import develop as develop_orig\nfrom setuptools.command.install import install as install_orig\n\n# Controls whether providers are installed from packages or directly from sources\n# It is turned on by default in case of development environments such as Breeze\n# And it is particularly useful when you add a new provider and there is no\n# PyPI version to install the provider package from\nINSTALL_PROVIDERS_FROM_SOURCES = 'INSTALL_PROVIDERS_FROM_SOURCES'\n\nlogger = logging.getLogger(__name__)\n\nversion = '2.1.0.dev0'\n\nmy_dir = dirname(__file__)\n\n\ndef airflow_test_suite() -> unittest.TestSuite:\n \"\"\"Test suite for Airflow tests\"\"\"\n test_loader = unittest.TestLoader()\n test_suite = test_loader.discover(os.path.join(my_dir, 'tests'), pattern='test_*.py')\n return test_suite\n\n\nclass CleanCommand(Command):\n \"\"\"\n Command to tidy up the project root.\n Registered as cmdclass in setup() so it can be called with ``python setup.py extra_clean``.\n \"\"\"\n\n description = \"Tidy up the project root\"\n user_options: List[str] = []\n\n def initialize_options(self):\n \"\"\"Set default values for options.\"\"\"\n\n def finalize_options(self):\n \"\"\"Set final values for options.\"\"\"\n\n @staticmethod\n def rm_all_files(files: List[str]):\n \"\"\"Remove all files from the list\"\"\"\n for file in files:\n try:\n os.remove(file)\n except Exception as e: # noqa pylint: disable=broad-except\n logger.warning(\"Error when removing %s: %s\", file, e)\n\n def run(self):\n \"\"\"Remove temporary files and directories.\"\"\"\n os.chdir(my_dir)\n self.rm_all_files(glob.glob('./build/*'))\n self.rm_all_files(glob.glob('./**/__pycache__/*', recursive=True))\n self.rm_all_files(glob.glob('./**/*.pyc', recursive=True))\n self.rm_all_files(glob.glob('./dist/*'))\n self.rm_all_files(glob.glob('./*.egg-info'))\n self.rm_all_files(glob.glob('./docker-context-files/*.whl'))\n self.rm_all_files(glob.glob('./docker-context-files/*.tgz'))\n\n\nclass CompileAssets(Command):\n \"\"\"\n Compile and build the frontend assets using yarn and webpack.\n Registered as cmdclass in setup() so it can be called with ``python setup.py compile_assets``.\n \"\"\"\n\n description = \"Compile and build the frontend assets\"\n user_options: List[str] = []\n\n def initialize_options(self):\n \"\"\"Set default values for 
options.\"\"\"\n\n def finalize_options(self):\n \"\"\"Set final values for options.\"\"\"\n\n def run(self): # noqa\n \"\"\"Run a command to compile and build assets.\"\"\"\n subprocess.check_call('./airflow/www/compile_assets.sh')\n\n\nclass ListExtras(Command):\n \"\"\"\n List all available extras\n Registered as cmdclass in setup() so it can be called with ``python setup.py list_extras``.\n \"\"\"\n\n description = \"List available extras\"\n user_options: List[str] = []\n\n def initialize_options(self):\n \"\"\"Set default values for options.\"\"\"\n\n def finalize_options(self):\n \"\"\"Set final values for options.\"\"\"\n\n def run(self): # noqa\n \"\"\"List extras.\"\"\"\n print(\"\\n\".join(wrap(\", \".join(EXTRAS_REQUIREMENTS.keys()), 100)))\n\n\ndef git_version(version_: str) -> str:\n \"\"\"\n Return a version to identify the state of the underlying git repo. The version will\n indicate whether the head of the current git-backed working directory is tied to a\n release tag or not : it will indicate the former with a 'release:{version}' prefix\n and the latter with a 'dev0' prefix. Following the prefix will be a sha of the current\n branch head. Finally, a \"dirty\" suffix is appended to indicate that uncommitted\n changes are present.\n\n :param str version_: Semver version\n :return: Found Airflow version in Git repo\n :rtype: str\n \"\"\"\n try:\n import git\n\n try:\n repo = git.Repo(os.path.join(*[my_dir, '.git']))\n except git.NoSuchPathError:\n logger.warning('.git directory not found: Cannot compute the git version')\n return ''\n except git.InvalidGitRepositoryError:\n logger.warning('Invalid .git directory not found: Cannot compute the git version')\n return ''\n except ImportError:\n logger.warning('gitpython not found: Cannot compute the git version.')\n return ''\n if repo:\n sha = repo.head.commit.hexsha\n if repo.is_dirty():\n return f'.dev0+{sha}.dirty'\n # commit is clean\n return f'.release:{version_}+{sha}'\n return 'no_git_version'\n\n\ndef write_version(filename: str = os.path.join(*[my_dir, \"airflow\", \"git_version\"])):\n \"\"\"\n Write the Semver version + git hash to file, e.g. \".dev0+2f635dc265e78db6708f59f68e8009abb92c1e65\".\n\n :param str filename: Destination file to write\n \"\"\"\n text = f\"{git_version(version)}\"\n with open(filename, 'w') as file:\n file.write(text)\n\n\ndef get_sphinx_theme_version() -> str:\n \"\"\"\n Return sphinx theme version. If USE_THEME_FROM_GIT env variable is set, the theme is used from\n GitHub to allow dynamically update it during development. 
However for regular PIP release\n you cannot use @ package specification, so the latest available released theme package from\n PIP is used.\n :return: Version of sphinx theme to use.\n \"\"\"\n if os.environ.get('USE_THEME_FROM_GIT'):\n return (\n \"@ https://github.com/apache/airflow-site/releases/download/0.0.4/\"\n + \"sphinx_airflow_theme-0.0.4-py3-none-any.whl\"\n )\n return ''\n\n\n# 'Start dependencies group' and 'Start dependencies group' are mark for ./scripts/ci/check_order_setup.py\n# If you change this mark you should also change ./scripts/ci/check_order_setup.py\n# Start dependencies group\namazon = [\n 'boto3>=1.15.0,<1.18.0',\n 'botocore>=1.18.0,<1.19.0',\n 'watchtower~=0.7.3',\n]\napache_beam = [\n 'apache-beam[gcp]',\n]\nasync_packages = [\n 'eventlet>= 0.9.7',\n 'gevent>=0.13',\n 'greenlet>=0.4.9',\n]\natlas = [\n 'atlasclient>=0.1.2',\n]\nazure = [\n 'azure-batch>=8.0.0',\n 'azure-cosmos>=3.0.1,<4',\n 'azure-datalake-store>=0.0.45',\n 'azure-identity>=1.3.1',\n 'azure-keyvault>=4.1.0',\n 'azure-kusto-data>=0.0.43,<0.1',\n 'azure-mgmt-containerinstance>=1.5.0,<2.0',\n 'azure-mgmt-datafactory>=1.0.0,<2.0',\n 'azure-mgmt-datalake-store>=0.5.0',\n 'azure-mgmt-resource>=2.2.0',\n 'azure-storage-blob>=12.7.0',\n 'azure-storage-common>=2.1.0',\n 'azure-storage-file>=2.1.0',\n]\ncassandra = [\n 'cassandra-driver>=3.13.0,<3.21.0',\n]\ncelery = [\n 'celery~=4.4.2',\n 'flower>=0.7.3, <1.0',\n 'vine~=1.3', # https://stackoverflow.com/questions/32757259/celery-no-module-named-five\n]\ncgroups = [\n 'cgroupspy>=0.1.4',\n]\ncloudant = [\n 'cloudant>=2.0',\n]\ndask = ['cloudpickle>=1.4.1, <1.5.0', 'distributed>=2.11.1, <2.20']\ndatabricks = [\n 'requests>=2.20.0, <3',\n]\ndatadog = [\n 'datadog>=0.14.0',\n]\ndoc = [\n # Sphinx is limited to < 3.5.0 because of https://github.com/sphinx-doc/sphinx/issues/8880\n 'sphinx>=2.1.2, <3.5.0',\n f'sphinx-airflow-theme{get_sphinx_theme_version()}',\n 'sphinx-argparse>=0.1.13',\n 'sphinx-autoapi==1.0.0',\n 'sphinx-copybutton',\n 'sphinx-jinja~=1.1',\n 'sphinx-rtd-theme>=0.1.6',\n 'sphinxcontrib-httpdomain>=1.7.0',\n 'sphinxcontrib-redoc>=1.6.0',\n 'sphinxcontrib-spelling==5.2.1',\n]\ndocker = [\n 'docker~=3.0',\n]\ndruid = [\n 'pydruid>=0.4.1',\n]\nelasticsearch = [\n 'elasticsearch>7, <7.6.0',\n 'elasticsearch-dbapi==0.1.0',\n 'elasticsearch-dsl>=5.0.0',\n]\nexasol = [\n 'pyexasol>=0.5.1,<1.0.0',\n]\nfacebook = [\n 'facebook-business>=6.0.2',\n]\nflask_oauth = [\n 'Flask-OAuthlib>=0.9.1,<0.9.6', # Flask OAuthLib 0.9.6 requires Flask-Login 0.5.0 - breaks FAB\n 'oauthlib!=2.0.3,!=2.0.4,!=2.0.5,<3.0.0,>=1.1.2',\n 'requests-oauthlib<1.2.0',\n]\ngoogle = [\n 'PyOpenSSL',\n 'google-ads>=4.0.0,<8.0.0',\n 'google-api-core>=1.25.1,<2.0.0',\n 'google-api-python-client>=1.6.0,<2.0.0',\n 'google-auth>=1.0.0,<2.0.0',\n 'google-auth-httplib2>=0.0.1',\n 'google-cloud-automl>=2.1.0,<3.0.0',\n 'google-cloud-bigquery-datatransfer>=3.0.0,<4.0.0',\n 'google-cloud-bigtable>=1.0.0,<2.0.0',\n 'google-cloud-container>=0.1.1,<2.0.0',\n 'google-cloud-datacatalog>=3.0.0,<4.0.0',\n 'google-cloud-dataproc>=2.2.0,<3.0.0',\n 'google-cloud-dlp>=0.11.0,<2.0.0',\n 'google-cloud-kms>=2.0.0,<3.0.0',\n 'google-cloud-language>=1.1.1,<2.0.0',\n 'google-cloud-logging>=2.1.1,<3.0.0',\n 'google-cloud-memcache>=0.2.0',\n 'google-cloud-monitoring>=2.0.0,<3.0.0',\n 'google-cloud-os-login>=2.0.0,<3.0.0',\n 'google-cloud-pubsub>=2.0.0,<3.0.0',\n 'google-cloud-redis>=2.0.0,<3.0.0',\n 'google-cloud-secret-manager>=0.2.0,<2.0.0',\n 'google-cloud-spanner>=1.10.0,<2.0.0',\n 
'google-cloud-speech>=0.36.3,<2.0.0',\n 'google-cloud-storage>=1.30,<2.0.0',\n 'google-cloud-tasks>=2.0.0,<3.0.0',\n 'google-cloud-texttospeech>=0.4.0,<2.0.0',\n 'google-cloud-translate>=1.5.0,<2.0.0',\n 'google-cloud-videointelligence>=1.7.0,<2.0.0',\n 'google-cloud-vision>=0.35.2,<2.0.0',\n 'google-cloud-workflows>=0.1.0,<2.0.0',\n 'grpcio-gcp>=0.2.2',\n 'json-merge-patch~=0.2',\n 'pandas-gbq',\n 'plyvel',\n]\ngrpc = [\n 'google-auth>=1.0.0, <2.0.0dev',\n 'google-auth-httplib2>=0.0.1',\n 'grpcio>=1.15.0',\n]\nhashicorp = [\n 'hvac~=0.10',\n]\nhdfs = [\n 'snakebite-py3',\n]\nhive = [\n 'hmsclient>=0.1.0',\n 'pyhive[hive]>=0.6.0',\n 'thrift>=0.9.2',\n]\njdbc = [\n 'jaydebeapi>=1.1.1',\n]\njenkins = [\n 'python-jenkins>=1.0.0',\n]\njira = [\n 'JIRA>1.0.7',\n]\nkerberos = [\n 'pykerberos>=1.1.13',\n 'requests_kerberos>=0.10.0',\n 'thrift_sasl>=0.2.0',\n]\nkubernetes = [\n 'cryptography>=2.0.0',\n 'kubernetes>=3.0.0, <12.0.0',\n]\nkylin = ['kylinpy>=2.6']\nldap = [\n 'ldap3>=2.5.1',\n 'python-ldap',\n]\nmongo = [\n 'dnspython>=1.13.0,<2.0.0',\n 'pymongo>=3.6.0',\n]\nmssql = [\n 'pymssql~=2.1,>=2.1.5',\n]\nmysql = [\n 'mysql-connector-python>=8.0.11, <=8.0.22',\n 'mysqlclient>=1.3.6,<3',\n]\nneo4j = ['neo4j>=4.2.1']\nodbc = [\n 'pyodbc',\n]\noracle = [\n 'cx_Oracle>=5.1.2',\n]\npagerduty = [\n 'pdpyras>=4.1.2,<5',\n]\npapermill = [\n 'nteract-scrapbook[all]>=0.3.1',\n 'papermill[all]>=1.2.1',\n]\npassword = [\n 'bcrypt>=2.0.0',\n 'flask-bcrypt>=0.7.1',\n]\npinot = [\n # pinotdb v0.1.1 may still work with older versions of Apache Pinot, but we've confirmed that it\n # causes a problem with newer versions.\n 'pinotdb>0.1.2,<1.0.0',\n]\nplexus = [\n 'arrow>=0.16.0,<1.0.0',\n]\npostgres = [\n 'psycopg2-binary>=2.7.4',\n]\npresto = ['presto-python-client>=0.7.0,<0.8']\nqubole = [\n 'qds-sdk>=1.10.4',\n]\nrabbitmq = [\n 'amqp<5.0.0',\n]\nredis = [\n 'redis~=3.2',\n]\nsalesforce = [\n 'simple-salesforce>=1.0.0',\n 'tableauserverclient',\n]\nsamba = [\n 'pysmbclient>=0.1.3',\n]\nsegment = [\n 'analytics-python>=1.2.9',\n]\nsendgrid = [\n 'sendgrid>=6.0.0,<7',\n]\nsentry = [\n 'blinker>=1.1',\n 'sentry-sdk>=0.8.0',\n]\nsingularity = ['spython>=0.0.56']\nslack = [\n 'slack_sdk>=3.0.0,<4.0.0',\n]\nsnowflake = [\n 'snowflake-connector-python>=2.4.1',\n 'snowflake-sqlalchemy>=1.1.0',\n]\nspark = [\n 'pyspark',\n]\nssh = [\n 'paramiko>=2.6.0',\n 'pysftp>=0.2.9',\n 'sshtunnel>=0.1.4,<0.2',\n]\nstatsd = [\n 'statsd>=3.3.0, <4.0',\n]\ntableau = [\n 'tableauserverclient',\n]\ntelegram = [\n 'python-telegram-bot==13.0',\n]\nvertica = [\n 'vertica-python>=0.5.1',\n]\nvirtualenv = [\n 'virtualenv',\n]\nwebhdfs = [\n 'hdfs[avro,dataframe,kerberos]>=2.0.4',\n]\nwinrm = [\n 'pywinrm~=0.4',\n]\nyandex = [\n 'yandexcloud>=0.22.0',\n]\nzendesk = [\n 'zdesk',\n]\n# End dependencies group\n\ndevel = [\n 'beautifulsoup4~=4.7.1',\n 'black',\n 'blinker',\n 'bowler',\n 'click~=7.1',\n 'coverage',\n 'docutils',\n 'flake8>=3.6.0',\n 'flake8-colors',\n 'flaky',\n 'freezegun',\n 'github3.py',\n 'gitpython',\n 'importlib-resources~=1.4',\n 'ipdb',\n 'jira',\n 'jsonpath-ng',\n # HACK: Moto is not compatible with newer versions\n # See: https://github.com/spulec/moto/issues/3535\n 'mock<4.0.3',\n 'mongomock',\n 'moto<2',\n 'mypy==0.770',\n 'parameterized',\n 'paramiko',\n 'pipdeptree',\n 'pre-commit',\n 'pylint>=2.7.0',\n 'pysftp',\n 'pytest~=6.0',\n 'pytest-cov',\n 'pytest-instafail',\n 'pytest-rerunfailures~=9.1',\n 'pytest-timeouts',\n 'pytest-xdist',\n 'pywinrm',\n 'qds-sdk>=1.9.6',\n 'requests_mock',\n 'wheel',\n 
'yamllint',\n]\n\ndevel_minreq = cgroups + devel + doc + kubernetes + mysql + password\ndevel_hadoop = devel_minreq + hdfs + hive + kerberos + presto + webhdfs\n\n# Dict of all providers which are part of the Apache Airflow repository together with their requirements\nPROVIDERS_REQUIREMENTS: Dict[str, List[str]] = {\n 'airbyte': [],\n 'amazon': amazon,\n 'apache.beam': apache_beam,\n 'apache.cassandra': cassandra,\n 'apache.druid': druid,\n 'apache.hdfs': hdfs,\n 'apache.hive': hive,\n 'apache.kylin': kylin,\n 'apache.livy': [],\n 'apache.pig': [],\n 'apache.pinot': pinot,\n 'apache.spark': spark,\n 'apache.sqoop': [],\n 'celery': celery,\n 'cloudant': cloudant,\n 'cncf.kubernetes': kubernetes,\n 'databricks': databricks,\n 'datadog': datadog,\n 'dingding': [],\n 'discord': [],\n 'docker': docker,\n 'elasticsearch': elasticsearch,\n 'exasol': exasol,\n 'facebook': facebook,\n 'ftp': [],\n 'google': google,\n 'grpc': grpc,\n 'hashicorp': hashicorp,\n 'http': [],\n 'imap': [],\n 'jdbc': jdbc,\n 'jenkins': jenkins,\n 'jira': jira,\n 'microsoft.azure': azure,\n 'microsoft.mssql': mssql,\n 'microsoft.winrm': winrm,\n 'mongo': mongo,\n 'mysql': mysql,\n 'neo4j': neo4j,\n 'odbc': odbc,\n 'openfaas': [],\n 'opsgenie': [],\n 'oracle': oracle,\n 'pagerduty': pagerduty,\n 'papermill': papermill,\n 'plexus': plexus,\n 'postgres': postgres,\n 'presto': presto,\n 'qubole': qubole,\n 'redis': redis,\n 'salesforce': salesforce,\n 'samba': samba,\n 'segment': segment,\n 'sendgrid': sendgrid,\n 'sftp': ssh,\n 'singularity': singularity,\n 'slack': slack,\n 'snowflake': snowflake,\n 'sqlite': [],\n 'ssh': ssh,\n 'tableau': tableau,\n 'telegram': telegram,\n 'vertica': vertica,\n 'yandex': yandex,\n 'zendesk': zendesk,\n}\n\n# Those are all additional extras which do not have their own 'providers'\n# The 'apache.atlas' and 'apache.webhdfs' are extras that provide additional libraries\n# but they do not have separate providers (yet?), they are merely there to add extra libraries\n# That can be used in custom python/bash operators.\nADDITIONAL_EXTRAS_REQUIREMENTS: Dict[str, List[str]] = {\n 'apache.atlas': atlas,\n 'apache.webhdfs': webhdfs,\n}\n\n\n# Those are extras that are extensions of the 'core' Airflow. They provide additional features\n# To airflow core. 
They do not have separate providers because they do not have any operators/hooks etc.\nCORE_EXTRAS_REQUIREMENTS: Dict[str, List[str]] = {\n 'async': async_packages,\n 'celery': celery, # also has provider, but it extends the core with the Celery executor\n 'cgroups': cgroups,\n 'cncf.kubernetes': kubernetes, # also has provider, but it extends the core with the KubernetesExecutor\n 'dask': dask,\n 'github_enterprise': flask_oauth,\n 'google_auth': flask_oauth,\n 'kerberos': kerberos,\n 'ldap': ldap,\n 'password': password,\n 'rabbitmq': rabbitmq,\n 'sentry': sentry,\n 'statsd': statsd,\n 'virtualenv': virtualenv,\n}\n\n\nEXTRAS_REQUIREMENTS: Dict[str, List[str]] = deepcopy(CORE_EXTRAS_REQUIREMENTS)\n\n\ndef add_extras_for_all_providers() -> None:\n \"\"\"\n Adds extras for all providers.\n By default all providers have the same extra name as provider id, for example\n 'apache.hive' extra has 'apache.hive' provider requirement.\n \"\"\"\n for provider_name, provider_requirement in PROVIDERS_REQUIREMENTS.items():\n EXTRAS_REQUIREMENTS[provider_name] = provider_requirement\n\n\ndef add_additional_extras() -> None:\n \"\"\"Adds extras for all additional extras.\"\"\"\n for extra_name, extra_requirement in ADDITIONAL_EXTRAS_REQUIREMENTS.items():\n EXTRAS_REQUIREMENTS[extra_name] = extra_requirement\n\n\nadd_extras_for_all_providers()\nadd_additional_extras()\n\n#############################################################################################################\n# The whole section can be removed in Airflow 3.0 as those old aliases are deprecated in 2.* series\n#############################################################################################################\n\n# Dictionary of aliases from 1.10 - deprecated in Airflow 2.*\nEXTRAS_DEPRECATED_ALIASES: Dict[str, str] = {\n 'atlas': 'apache.atlas',\n 'aws': 'amazon',\n 'azure': 'microsoft.azure',\n 'cassandra': 'apache.cassandra',\n 'crypto': '', # All crypto requirements are installation requirements of core Airflow\n 'druid': 'apache.druid',\n 'gcp': 'google',\n 'gcp_api': 'google',\n 'hdfs': 'apache.hdfs',\n 'hive': 'apache.hive',\n 'kubernetes': 'cncf.kubernetes',\n 'mssql': 'microsoft.mssql',\n 'pinot': 'apache.pinot',\n 'qds': 'qubole',\n 's3': 'amazon',\n 'spark': 'apache.spark',\n 'webhdfs': 'apache.webhdfs',\n 'winrm': 'microsoft.winrm',\n}\n\n\ndef find_requirements_for_alias(alias_to_look_for: Tuple[str, str]) -> List[str]:\n \"\"\"Finds requirements for an alias\"\"\"\n deprecated_extra = alias_to_look_for[0]\n new_extra = alias_to_look_for[1]\n if new_extra == '': # Handle case for crypto\n return []\n try:\n return EXTRAS_REQUIREMENTS[new_extra]\n except KeyError: # noqa\n raise Exception(f\"The extra {new_extra} is missing for alias {deprecated_extra}\")\n\n\ndef add_extras_for_all_deprecated_aliases() -> None:\n \"\"\"\n Add extras for all deprecated aliases. Requirements for those deprecated aliases are the same\n as the extras they are replaced with.\n The requirements are not copies - those are the same lists as for the new extras. 
This is intended.\n Thanks to that if the original extras are later extended with providers, aliases are extended as well.\n \"\"\"\n for alias, extra in EXTRAS_DEPRECATED_ALIASES.items():\n requirements = EXTRAS_REQUIREMENTS.get(extra) if extra != '' else []\n if requirements is None:\n raise Exception(f\"The extra {extra} is missing for deprecated alias {alias}\")\n EXTRAS_REQUIREMENTS[alias] = requirements\n\n\nadd_extras_for_all_deprecated_aliases()\n\n#############################################################################################################\n# End of deprecated section\n#############################################################################################################\n\n# This is list of all providers. It's a shortcut for anyone who would like to easily get list of\n# All providers. It is used by pre-commits.\nALL_PROVIDERS = list(PROVIDERS_REQUIREMENTS.keys())\n\nALL_DB_PROVIDERS = [\n 'apache.cassandra',\n 'apache.druid',\n 'apache.hdfs',\n 'apache.hive',\n 'apache.pinot',\n 'cloudant',\n 'exasol',\n 'microsoft.mssql',\n 'mongo',\n 'mysql',\n 'neo4j',\n 'postgres',\n 'presto',\n 'vertica',\n]\n\n# Special requirements for all database-related providers. They are de-duplicated.\nall_dbs = list({req for db_provider in ALL_DB_PROVIDERS for req in PROVIDERS_REQUIREMENTS[db_provider]})\n\n# Requirements for all \"user\" extras (no devel). They are de-duplicated. Note that we do not need\n# to separately add providers requirements - they have been already added as 'providers' extras above\n_all_requirements = list({req for extras_reqs in EXTRAS_REQUIREMENTS.values() for req in extras_reqs})\n\n# All user extras here\nEXTRAS_REQUIREMENTS[\"all\"] = _all_requirements\n\n# All db user extras here\nEXTRAS_REQUIREMENTS[\"all_dbs\"] = all_dbs\n\n# This can be simplified to devel_hadoop + _all_requirements due to inclusions\n# but we keep it for explicit sake. 
We are de-duplicating it anyway.\ndevel_all = list(set(_all_requirements + doc + devel_minreq + devel_hadoop))\n\n# Those are packages excluded for \"all\" dependencies\nPACKAGES_EXCLUDED_FOR_ALL = []\nPACKAGES_EXCLUDED_FOR_ALL.extend(\n [\n 'snakebite',\n ]\n)\n\n# Those packages are excluded because they break tests and they are not needed to run our test suite.\n# This can be removed as soon as we get non-conflicting\n# requirements for the apache-beam as well.\n#\n# Currently Apache Beam has very narrow and old dependencies for 'dill' and 'mock' packages which\n# are required by our tests (but only for tests).\n#\nPACKAGES_EXCLUDED_FOR_CI = [\n 'apache-beam',\n]\n\n\ndef is_package_excluded(package: str, exclusion_list: List[str]):\n \"\"\"\n Checks if package should be excluded.\n\n :param package: package name (beginning of it)\n :param exclusion_list: list of excluded packages\n :return: true if package should be excluded\n \"\"\"\n return any(package.startswith(excluded_package) for excluded_package in exclusion_list)\n\n\ndevel_all = [\n package\n for package in devel_all\n if not is_package_excluded(package=package, exclusion_list=PACKAGES_EXCLUDED_FOR_ALL)\n]\n\ndevel_ci = [\n package\n for package in devel_all\n if not is_package_excluded(\n package=package, exclusion_list=PACKAGES_EXCLUDED_FOR_CI + PACKAGES_EXCLUDED_FOR_ALL\n )\n]\n\n\n# Those are extras that we have to add for development purposes\n# They can be use to install some predefined set of dependencies.\nEXTRAS_REQUIREMENTS[\"doc\"] = doc\nEXTRAS_REQUIREMENTS[\"devel\"] = devel_minreq # devel_minreq already includes doc\nEXTRAS_REQUIREMENTS[\"devel_hadoop\"] = devel_hadoop # devel_hadoop already includes devel_minreq\nEXTRAS_REQUIREMENTS[\"devel_all\"] = devel_all\nEXTRAS_REQUIREMENTS[\"devel_ci\"] = devel_ci\n\n\ndef sort_extras_requirements() -> Dict[str, List[str]]:\n \"\"\"\n For Python 3.6+ the dictionary order remains when keys() are retrieved.\n Sort both: extras and list of dependencies to make it easier to analyse problems\n external packages will be first, then if providers are added they are added at the end of the lists.\n \"\"\"\n sorted_requirements = dict(sorted(EXTRAS_REQUIREMENTS.items())) # noqa\n for extra_list in sorted_requirements.values():\n extra_list.sort()\n return sorted_requirements\n\n\nEXTRAS_REQUIREMENTS = sort_extras_requirements()\n\n# Those providers are pre-installed always when airflow is installed.\n# Those providers do not have dependency on airflow2.0 because that would lead to circular dependencies.\n# This is not a problem for PIP but some tools (pipdeptree) show those as a warning.\nPREINSTALLED_PROVIDERS = [\n 'ftp',\n 'http',\n 'imap',\n 'sqlite',\n]\n\n\ndef get_provider_package_from_package_id(package_id: str):\n \"\"\"\n Builds the name of provider package out of the package id provided/\n\n :param package_id: id of the package (like amazon or microsoft.azure)\n :return: full name of package in PyPI\n \"\"\"\n package_suffix = package_id.replace(\".\", \"-\")\n return f\"apache-airflow-providers-{package_suffix}\"\n\n\ndef get_all_provider_packages():\n \"\"\"Returns all provider packages configured in setup.py\"\"\"\n return \" \".join([get_provider_package_from_package_id(package) for package in PROVIDERS_REQUIREMENTS])\n\n\nclass AirflowDistribution(Distribution):\n \"\"\"\n The setuptools.Distribution subclass with Airflow specific behaviour\n\n The reason for pylint: disable=signature-differs of parse_config_files is explained here:\n 
https://github.com/PyCQA/pylint/issues/3737\n\n \"\"\"\n\n def parse_config_files(self, *args, **kwargs): # pylint: disable=signature-differs\n \"\"\"\n Ensure that when we have been asked to install providers from sources\n that we don't *also* try to install those providers from PyPI.\n Also we should make sure that in this case we copy provider.yaml files so that\n Providers manager can find package information.\n \"\"\"\n super().parse_config_files(*args, **kwargs)\n if os.getenv(INSTALL_PROVIDERS_FROM_SOURCES) == 'true':\n self.install_requires = [ # noqa pylint: disable=attribute-defined-outside-init\n req for req in self.install_requires if not req.startswith('apache-airflow-providers-')\n ]\n provider_yaml_files = glob.glob(\"airflow/providers/**/provider.yaml\", recursive=True)\n for provider_yaml_file in provider_yaml_files:\n provider_relative_path = relpath(provider_yaml_file, os.path.join(my_dir, \"airflow\"))\n self.package_data['airflow'].append(provider_relative_path)\n else:\n self.install_requires.extend(\n [get_provider_package_from_package_id(package_id) for package_id in PREINSTALLED_PROVIDERS]\n )\n\n\ndef replace_extra_requirement_with_provider_packages(extra: str, providers: List[str]) -> None:\n \"\"\"\n Replaces extra requirement with provider package. The intention here is that when\n the provider is added as dependency of extra, there is no need to add the dependencies\n separately. This is not needed and even harmful, because in case of future versions of\n the provider, the requirements might change, so hard-coding requirements from the version\n that was available at the release time might cause dependency conflicts in the future.\n\n Say for example that you have salesforce provider with those deps:\n\n { 'salesforce': ['simple-salesforce>=1.0.0', 'tableauserverclient'] }\n\n Initially ['salesforce'] extra has those requirements and it works like that when you install\n it when INSTALL_PROVIDERS_FROM_SOURCES is set to `true` (during the development). However, when\n the production installation is used, The dependencies are changed:\n\n { 'salesforce': ['apache-airflow-providers-salesforce'] }\n\n And then, 'apache-airflow-providers-salesforce' package has those 'install_requires' dependencies:\n ['simple-salesforce>=1.0.0', 'tableauserverclient']\n\n So transitively 'salesforce' extra has all the requirements it needs and in case the provider\n changes it's dependencies, they will transitively change as well.\n\n In the constraint mechanism we save both - provider versions and it's dependencies\n version, which means that installation using constraints is repeatable.\n\n :param extra: Name of the extra to add providers to\n :param providers: list of provider ids\n \"\"\"\n EXTRAS_REQUIREMENTS[extra] = [\n get_provider_package_from_package_id(package_name) for package_name in providers\n ]\n\n\ndef add_provider_packages_to_extra_requirements(extra: str, providers: List[str]) -> None:\n \"\"\"\n Adds provider packages as requirements to extra. This is used to add provider packages as requirements\n to the \"bulk\" kind of extras. 
Those bulk extras do not have the detailed 'extra' requirements as\n initial values, so instead of replacing them (see previous function) we can extend them.\n\n :param extra: Name of the extra to add providers to\n :param providers: list of provider ids\n \"\"\"\n EXTRAS_REQUIREMENTS[extra].extend(\n [get_provider_package_from_package_id(package_name) for package_name in providers]\n )\n\n\ndef add_all_provider_packages() -> None:\n \"\"\"\n In case of regular installation (providers installed from packages), we should add extra dependencies to\n Airflow - to get the providers automatically installed when those extras are installed.\n\n For providers installed from sources we skip that step. That helps to test and install airflow with\n all packages in CI - for example when new providers are added, otherwise the installation would fail\n as the new provider is not yet in PyPI.\n\n \"\"\"\n for provider in ALL_PROVIDERS:\n replace_extra_requirement_with_provider_packages(provider, [provider])\n add_provider_packages_to_extra_requirements(\"all\", ALL_PROVIDERS)\n add_provider_packages_to_extra_requirements(\"devel_ci\", ALL_PROVIDERS)\n add_provider_packages_to_extra_requirements(\"devel_all\", ALL_PROVIDERS)\n add_provider_packages_to_extra_requirements(\"all_dbs\", ALL_DB_PROVIDERS)\n add_provider_packages_to_extra_requirements(\"devel_hadoop\", [\"apache.hdfs\", \"apache.hive\", \"presto\"])\n\n\nclass Develop(develop_orig):\n \"\"\"Forces removal of providers in editable mode.\"\"\"\n\n def run(self):\n self.announce('Installing in editable mode. Uninstalling provider packages!', level=log.INFO)\n # We need to run \"python3 -m pip\" because it might be that older PIP binary is in the path\n # And it results with an error when running pip directly (cannot import pip module)\n # also PIP does not have a stable API so we have to run subprocesses ¯\\_(ツ)_/¯\n try:\n installed_packages = (\n subprocess.check_output([\"python3\", \"-m\", \"pip\", \"freeze\"]).decode().splitlines()\n )\n airflow_provider_packages = [\n package_line.split(\"=\")[0]\n for package_line in installed_packages\n if package_line.startswith(\"apache-airflow-providers\")\n ]\n self.announce(f'Uninstalling ${airflow_provider_packages}!', level=log.INFO)\n subprocess.check_call([\"python3\", \"-m\", \"pip\", \"uninstall\", \"--yes\", *airflow_provider_packages])\n except subprocess.CalledProcessError as e:\n self.announce(f'Error when uninstalling airflow provider packages: {e}!', level=log.WARN)\n super().run()\n\n\nclass Install(install_orig):\n \"\"\"Forces installation of providers from sources in editable mode.\"\"\"\n\n def run(self):\n self.announce('Standard installation. Providers are installed from packages', level=log.INFO)\n super().run()\n\n\ndef do_setup() -> None:\n \"\"\"\n Perform the Airflow package setup.\n\n Most values come from setup.cfg, only the dynamically calculated ones are passed to setup\n function call. 
See https://setuptools.readthedocs.io/en/latest/userguide/declarative_config.html\n \"\"\"\n setup_kwargs = {}\n\n def include_provider_namespace_packages_when_installing_from_sources() -> None:\n \"\"\"\n When installing providers from sources we install all namespace packages found below airflow,\n including airflow and provider packages, otherwise defaults from setup.cfg control this.\n The kwargs in setup() call override those that are specified in setup.cfg.\n \"\"\"\n if os.getenv(INSTALL_PROVIDERS_FROM_SOURCES) == 'true':\n setup_kwargs['packages'] = find_namespace_packages(include=['airflow*'])\n\n include_provider_namespace_packages_when_installing_from_sources()\n if os.getenv(INSTALL_PROVIDERS_FROM_SOURCES) == 'true':\n print(\"Installing providers from sources. Skip adding providers as dependencies\")\n else:\n add_all_provider_packages()\n\n write_version()\n setup(\n distclass=AirflowDistribution,\n version=version,\n extras_require=EXTRAS_REQUIREMENTS,\n download_url=('https://archive.apache.org/dist/airflow/' + version),\n cmdclass={\n 'extra_clean': CleanCommand,\n 'compile_assets': CompileAssets,\n 'list_extras': ListExtras,\n 'install': Install,\n 'develop': Develop,\n },\n test_suite='setup.airflow_test_suite',\n **setup_kwargs,\n )\n\n\nif __name__ == \"__main__\":\n do_setup()\n",
"path": "setup.py"
}
] | diff --git a/docs/apache-airflow-providers-mysql/index.rst b/docs/apache-airflow-providers-mysql/index.rst
index 12a21edab4f68..ad46fd87e928b 100644
--- a/docs/apache-airflow-providers-mysql/index.rst
+++ b/docs/apache-airflow-providers-mysql/index.rst
@@ -93,7 +93,7 @@ PIP requirements
PIP package Version required
========================== ======================
``mysql-connector-python`` ``>=8.0.11, <=8.0.22``
-``mysqlclient`` ``>=1.3.6,<1.4``
+``mysqlclient`` ``>=1.3.6,<3``
========================== ======================
Cross provider package dependencies
diff --git a/setup.py b/setup.py
index 332d43cc63b3c..79c37ec00deac 100644
--- a/setup.py
+++ b/setup.py
@@ -365,7 +365,7 @@ def get_sphinx_theme_version() -> str:
]
mysql = [
'mysql-connector-python>=8.0.11, <=8.0.22',
- 'mysqlclient>=1.3.6,<1.4',
+ 'mysqlclient>=1.3.6,<3',
]
neo4j = ['neo4j>=4.2.1']
odbc = [
|
ManimCommunity__manim-3166 | Not all arrow tips are accessible
## Description of bug / unexpected behavior
<!-- Add a clear and concise description of the problem you encountered. -->
The [manim.mobject.geometry.tips](https://docs.manim.community/en/stable/_modules/manim/mobject/geometry/tips.html#ArrowTriangleFilledTip) file provides a number of arrow tips to use. Its `__all__` list contains:
```py
__all__ = [
    "ArrowTip",
    "ArrowCircleFilledTip",
    "ArrowCircleTip",
    "ArrowSquareTip",
    "ArrowSquareFilledTip",
]
```
## Expected behavior
<!-- Add a clear and concise description of what you expected to happen. -->
Instead, it should have:
```py
__all__ = [
    "ArrowTip",
    "ArrowCircleFilledTip",
    "ArrowCircleTip",
    "ArrowSquareTip",
    "ArrowSquareFilledTip",
    "ArrowTriangleTip",  # added
    "ArrowTriangleFilledTip",  # added
]
```
## How to reproduce the issue
<!-- Provide a piece of code illustrating the undesired behavior. -->
<details><summary>Code for reproducing the problem</summary>
```py
class Test(Scene):
    def construct(self):
        my_line = Line()
        my_line.add_tip(ArrowTriangleFilledTip(fill_color=WHITE))
        self.add(my_line)
```
</details>
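As a workaround until `__all__` is extended, the triangle tips can still be imported from the submodule directly (the module's own docstring example uses the same import path). A minimal sketch, with the class name chosen only for illustration:

```py
# Workaround sketch: import the triangle tip from the submodule, since it is
# not re-exported through `from manim import *`.
from manim import Line, Scene, WHITE
from manim.mobject.geometry.tips import ArrowTriangleFilledTip


class TestWorkaround(Scene):
    def construct(self):
        my_line = Line()
        my_line.add_tip(ArrowTriangleFilledTip(fill_color=WHITE))
        self.add(my_line)
```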
## Additional media files
<!-- Paste in the files manim produced on rendering the code above. -->
None
<!-- Insert screenshots here (only when absolutely necessary, we prefer copy/pasted output!) -->
## System specifications
<details><summary>System Details</summary>
- OS: macOS 13.0.1 (Ventura)
- RAM: 8GB
- Python version: Python 3.10.9
- Installed modules: manim 0.17.2
| [
{
"content": "r\"\"\"A collection of tip mobjects for use with :class:`~.TipableVMobject`.\"\"\"\n\nfrom __future__ import annotations\n\n__all__ = [\n \"ArrowTip\",\n \"ArrowCircleFilledTip\",\n \"ArrowCircleTip\",\n \"ArrowSquareTip\",\n \"ArrowSquareFilledTip\",\n]\n\nimport numpy as np\n\nfrom manim.constants import *\nfrom manim.mobject.geometry.arc import Circle\nfrom manim.mobject.geometry.polygram import Square, Triangle\nfrom manim.mobject.opengl.opengl_compatibility import ConvertToOpenGL\nfrom manim.mobject.types.vectorized_mobject import VMobject\nfrom manim.utils.space_ops import angle_of_vector\n\n\nclass ArrowTip(VMobject, metaclass=ConvertToOpenGL):\n r\"\"\"Base class for arrow tips.\n\n .. seealso::\n :class:`ArrowTriangleTip`\n :class:`ArrowTriangleFilledTip`\n :class:`ArrowCircleTip`\n :class:`ArrowCircleFilledTip`\n :class:`ArrowSquareTip`\n :class:`ArrowSquareFilledTip`\n\n Examples\n --------\n Cannot be used directly, only intended for inheritance::\n\n >>> tip = ArrowTip()\n Traceback (most recent call last):\n ...\n NotImplementedError: Has to be implemented in inheriting subclasses.\n\n Instead, use one of the pre-defined ones, or make\n a custom one like this:\n\n .. manim:: CustomTipExample\n\n >>> from manim import RegularPolygon, Arrow\n >>> class MyCustomArrowTip(ArrowTip, RegularPolygon):\n ... def __init__(self, length=0.35, **kwargs):\n ... RegularPolygon.__init__(self, n=5, **kwargs)\n ... self.width = length\n ... self.stretch_to_fit_height(length)\n >>> arr = Arrow(np.array([-2, -2, 0]), np.array([2, 2, 0]),\n ... tip_shape=MyCustomArrowTip)\n >>> isinstance(arr.tip, RegularPolygon)\n True\n >>> from manim import Scene, Create\n >>> class CustomTipExample(Scene):\n ... def construct(self):\n ... self.play(Create(arr))\n\n Using a class inherited from :class:`ArrowTip` to get a non-filled\n tip is a shorthand to manually specifying the arrow tip style as follows::\n\n >>> arrow = Arrow(np.array([0, 0, 0]), np.array([1, 1, 0]),\n ... tip_style={'fill_opacity': 0, 'stroke_width': 3})\n\n The following example illustrates the usage of all of the predefined\n arrow tips.\n\n .. manim:: ArrowTipsShowcase\n :save_last_frame:\n\n from manim.mobject.geometry.tips import ArrowTriangleTip,\\\n ArrowSquareTip, ArrowSquareFilledTip,\\\n ArrowCircleTip, ArrowCircleFilledTip\n class ArrowTipsShowcase(Scene):\n def construct(self):\n a00 = Arrow(start=[-2, 3, 0], end=[2, 3, 0], color=YELLOW)\n a11 = Arrow(start=[-2, 2, 0], end=[2, 2, 0], tip_shape=ArrowTriangleTip)\n a12 = Arrow(start=[-2, 1, 0], end=[2, 1, 0])\n a21 = Arrow(start=[-2, 0, 0], end=[2, 0, 0], tip_shape=ArrowSquareTip)\n a22 = Arrow([-2, -1, 0], [2, -1, 0], tip_shape=ArrowSquareFilledTip)\n a31 = Arrow([-2, -2, 0], [2, -2, 0], tip_shape=ArrowCircleTip)\n a32 = Arrow([-2, -3, 0], [2, -3, 0], tip_shape=ArrowCircleFilledTip)\n b11 = a11.copy().scale(0.5, scale_tips=True).next_to(a11, RIGHT)\n b12 = a12.copy().scale(0.5, scale_tips=True).next_to(a12, RIGHT)\n b21 = a21.copy().scale(0.5, scale_tips=True).next_to(a21, RIGHT)\n self.add(a00, a11, a12, a21, a22, a31, a32, b11, b12, b21)\n\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n raise NotImplementedError(\"Has to be implemented in inheriting subclasses.\")\n\n @property\n def base(self):\n r\"\"\"The base point of the arrow tip.\n\n This is the point connecting to the arrow line.\n\n Examples\n --------\n ::\n\n >>> from manim import Arrow\n >>> arrow = Arrow(np.array([0, 0, 0]), np.array([2, 0, 0]), buff=0)\n >>> arrow.tip.base.round(2) + 0. # add 0. 
to avoid negative 0 in output\n array([1.65, 0. , 0. ])\n\n \"\"\"\n return self.point_from_proportion(0.5)\n\n @property\n def tip_point(self):\n r\"\"\"The tip point of the arrow tip.\n\n Examples\n --------\n ::\n\n >>> from manim import Arrow\n >>> arrow = Arrow(np.array([0, 0, 0]), np.array([2, 0, 0]), buff=0)\n >>> arrow.tip.tip_point.round(2) + 0.\n array([2., 0., 0.])\n\n \"\"\"\n return self.points[0]\n\n @property\n def vector(self):\n r\"\"\"The vector pointing from the base point to the tip point.\n\n Examples\n --------\n ::\n\n >>> from manim import Arrow\n >>> arrow = Arrow(np.array([0, 0, 0]), np.array([2, 2, 0]), buff=0)\n >>> arrow.tip.vector.round(2) + 0.\n array([0.25, 0.25, 0. ])\n\n \"\"\"\n return self.tip_point - self.base\n\n @property\n def tip_angle(self):\n r\"\"\"The angle of the arrow tip.\n\n Examples\n --------\n ::\n\n >>> from manim import Arrow\n >>> arrow = Arrow(np.array([0, 0, 0]), np.array([1, 1, 0]), buff=0)\n >>> round(arrow.tip.tip_angle, 5) == round(PI/4, 5)\n True\n\n \"\"\"\n return angle_of_vector(self.vector)\n\n @property\n def length(self):\n r\"\"\"The length of the arrow tip.\n\n Examples\n --------\n ::\n\n >>> from manim import Arrow\n >>> arrow = Arrow(np.array([0, 0, 0]), np.array([1, 2, 0]))\n >>> round(arrow.tip.length, 3)\n 0.35\n\n \"\"\"\n return np.linalg.norm(self.vector)\n\n\nclass ArrowTriangleTip(ArrowTip, Triangle):\n r\"\"\"Triangular arrow tip.\"\"\"\n\n def __init__(\n self,\n fill_opacity=0,\n stroke_width=3,\n length=DEFAULT_ARROW_TIP_LENGTH,\n width=DEFAULT_ARROW_TIP_LENGTH,\n start_angle=PI,\n **kwargs,\n ):\n Triangle.__init__(\n self,\n fill_opacity=fill_opacity,\n stroke_width=stroke_width,\n start_angle=start_angle,\n **kwargs,\n )\n self.width = width\n\n self.stretch_to_fit_width(length)\n self.stretch_to_fit_height(width)\n\n\nclass ArrowTriangleFilledTip(ArrowTriangleTip):\n r\"\"\"Triangular arrow tip with filled tip.\n\n This is the default arrow tip shape.\n \"\"\"\n\n def __init__(self, fill_opacity=1, stroke_width=0, **kwargs):\n super().__init__(fill_opacity=fill_opacity, stroke_width=stroke_width, **kwargs)\n\n\nclass ArrowCircleTip(ArrowTip, Circle):\n r\"\"\"Circular arrow tip.\"\"\"\n\n def __init__(\n self,\n fill_opacity=0,\n stroke_width=3,\n length=DEFAULT_ARROW_TIP_LENGTH,\n start_angle=PI,\n **kwargs,\n ):\n self.start_angle = start_angle\n Circle.__init__(\n self, fill_opacity=fill_opacity, stroke_width=stroke_width, **kwargs\n )\n self.width = length\n self.stretch_to_fit_height(length)\n\n\nclass ArrowCircleFilledTip(ArrowCircleTip):\n r\"\"\"Circular arrow tip with filled tip.\"\"\"\n\n def __init__(self, fill_opacity=1, stroke_width=0, **kwargs):\n super().__init__(fill_opacity=fill_opacity, stroke_width=stroke_width, **kwargs)\n\n\nclass ArrowSquareTip(ArrowTip, Square):\n r\"\"\"Square arrow tip.\"\"\"\n\n def __init__(\n self,\n fill_opacity=0,\n stroke_width=3,\n length=DEFAULT_ARROW_TIP_LENGTH,\n start_angle=PI,\n **kwargs,\n ):\n self.start_angle = start_angle\n Square.__init__(\n self,\n fill_opacity=fill_opacity,\n stroke_width=stroke_width,\n side_length=length,\n **kwargs,\n )\n self.width = length\n self.stretch_to_fit_height(length)\n\n\nclass ArrowSquareFilledTip(ArrowSquareTip):\n r\"\"\"Square arrow tip with filled tip.\"\"\"\n\n def __init__(self, fill_opacity=1, stroke_width=0, **kwargs):\n super().__init__(fill_opacity=fill_opacity, stroke_width=stroke_width, **kwargs)\n",
"path": "manim/mobject/geometry/tips.py"
}
] | [
{
"content": "r\"\"\"A collection of tip mobjects for use with :class:`~.TipableVMobject`.\"\"\"\n\nfrom __future__ import annotations\n\n__all__ = [\n \"ArrowTip\",\n \"ArrowCircleFilledTip\",\n \"ArrowCircleTip\",\n \"ArrowSquareTip\",\n \"ArrowSquareFilledTip\",\n \"ArrowTriangleTip\",\n \"ArrowTriangleFilledTip\",\n]\n\nimport numpy as np\n\nfrom manim.constants import *\nfrom manim.mobject.geometry.arc import Circle\nfrom manim.mobject.geometry.polygram import Square, Triangle\nfrom manim.mobject.opengl.opengl_compatibility import ConvertToOpenGL\nfrom manim.mobject.types.vectorized_mobject import VMobject\nfrom manim.utils.space_ops import angle_of_vector\n\n\nclass ArrowTip(VMobject, metaclass=ConvertToOpenGL):\n r\"\"\"Base class for arrow tips.\n\n .. seealso::\n :class:`ArrowTriangleTip`\n :class:`ArrowTriangleFilledTip`\n :class:`ArrowCircleTip`\n :class:`ArrowCircleFilledTip`\n :class:`ArrowSquareTip`\n :class:`ArrowSquareFilledTip`\n\n Examples\n --------\n Cannot be used directly, only intended for inheritance::\n\n >>> tip = ArrowTip()\n Traceback (most recent call last):\n ...\n NotImplementedError: Has to be implemented in inheriting subclasses.\n\n Instead, use one of the pre-defined ones, or make\n a custom one like this:\n\n .. manim:: CustomTipExample\n\n >>> from manim import RegularPolygon, Arrow\n >>> class MyCustomArrowTip(ArrowTip, RegularPolygon):\n ... def __init__(self, length=0.35, **kwargs):\n ... RegularPolygon.__init__(self, n=5, **kwargs)\n ... self.width = length\n ... self.stretch_to_fit_height(length)\n >>> arr = Arrow(np.array([-2, -2, 0]), np.array([2, 2, 0]),\n ... tip_shape=MyCustomArrowTip)\n >>> isinstance(arr.tip, RegularPolygon)\n True\n >>> from manim import Scene, Create\n >>> class CustomTipExample(Scene):\n ... def construct(self):\n ... self.play(Create(arr))\n\n Using a class inherited from :class:`ArrowTip` to get a non-filled\n tip is a shorthand to manually specifying the arrow tip style as follows::\n\n >>> arrow = Arrow(np.array([0, 0, 0]), np.array([1, 1, 0]),\n ... tip_style={'fill_opacity': 0, 'stroke_width': 3})\n\n The following example illustrates the usage of all of the predefined\n arrow tips.\n\n .. 
manim:: ArrowTipsShowcase\n :save_last_frame:\n\n from manim.mobject.geometry.tips import ArrowTriangleTip,\\\n ArrowSquareTip, ArrowSquareFilledTip,\\\n ArrowCircleTip, ArrowCircleFilledTip\n class ArrowTipsShowcase(Scene):\n def construct(self):\n a00 = Arrow(start=[-2, 3, 0], end=[2, 3, 0], color=YELLOW)\n a11 = Arrow(start=[-2, 2, 0], end=[2, 2, 0], tip_shape=ArrowTriangleTip)\n a12 = Arrow(start=[-2, 1, 0], end=[2, 1, 0])\n a21 = Arrow(start=[-2, 0, 0], end=[2, 0, 0], tip_shape=ArrowSquareTip)\n a22 = Arrow([-2, -1, 0], [2, -1, 0], tip_shape=ArrowSquareFilledTip)\n a31 = Arrow([-2, -2, 0], [2, -2, 0], tip_shape=ArrowCircleTip)\n a32 = Arrow([-2, -3, 0], [2, -3, 0], tip_shape=ArrowCircleFilledTip)\n b11 = a11.copy().scale(0.5, scale_tips=True).next_to(a11, RIGHT)\n b12 = a12.copy().scale(0.5, scale_tips=True).next_to(a12, RIGHT)\n b21 = a21.copy().scale(0.5, scale_tips=True).next_to(a21, RIGHT)\n self.add(a00, a11, a12, a21, a22, a31, a32, b11, b12, b21)\n\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n raise NotImplementedError(\"Has to be implemented in inheriting subclasses.\")\n\n @property\n def base(self):\n r\"\"\"The base point of the arrow tip.\n\n This is the point connecting to the arrow line.\n\n Examples\n --------\n ::\n\n >>> from manim import Arrow\n >>> arrow = Arrow(np.array([0, 0, 0]), np.array([2, 0, 0]), buff=0)\n >>> arrow.tip.base.round(2) + 0. # add 0. to avoid negative 0 in output\n array([1.65, 0. , 0. ])\n\n \"\"\"\n return self.point_from_proportion(0.5)\n\n @property\n def tip_point(self):\n r\"\"\"The tip point of the arrow tip.\n\n Examples\n --------\n ::\n\n >>> from manim import Arrow\n >>> arrow = Arrow(np.array([0, 0, 0]), np.array([2, 0, 0]), buff=0)\n >>> arrow.tip.tip_point.round(2) + 0.\n array([2., 0., 0.])\n\n \"\"\"\n return self.points[0]\n\n @property\n def vector(self):\n r\"\"\"The vector pointing from the base point to the tip point.\n\n Examples\n --------\n ::\n\n >>> from manim import Arrow\n >>> arrow = Arrow(np.array([0, 0, 0]), np.array([2, 2, 0]), buff=0)\n >>> arrow.tip.vector.round(2) + 0.\n array([0.25, 0.25, 0. 
])\n\n \"\"\"\n return self.tip_point - self.base\n\n @property\n def tip_angle(self):\n r\"\"\"The angle of the arrow tip.\n\n Examples\n --------\n ::\n\n >>> from manim import Arrow\n >>> arrow = Arrow(np.array([0, 0, 0]), np.array([1, 1, 0]), buff=0)\n >>> round(arrow.tip.tip_angle, 5) == round(PI/4, 5)\n True\n\n \"\"\"\n return angle_of_vector(self.vector)\n\n @property\n def length(self):\n r\"\"\"The length of the arrow tip.\n\n Examples\n --------\n ::\n\n >>> from manim import Arrow\n >>> arrow = Arrow(np.array([0, 0, 0]), np.array([1, 2, 0]))\n >>> round(arrow.tip.length, 3)\n 0.35\n\n \"\"\"\n return np.linalg.norm(self.vector)\n\n\nclass ArrowTriangleTip(ArrowTip, Triangle):\n r\"\"\"Triangular arrow tip.\"\"\"\n\n def __init__(\n self,\n fill_opacity=0,\n stroke_width=3,\n length=DEFAULT_ARROW_TIP_LENGTH,\n width=DEFAULT_ARROW_TIP_LENGTH,\n start_angle=PI,\n **kwargs,\n ):\n Triangle.__init__(\n self,\n fill_opacity=fill_opacity,\n stroke_width=stroke_width,\n start_angle=start_angle,\n **kwargs,\n )\n self.width = width\n\n self.stretch_to_fit_width(length)\n self.stretch_to_fit_height(width)\n\n\nclass ArrowTriangleFilledTip(ArrowTriangleTip):\n r\"\"\"Triangular arrow tip with filled tip.\n\n This is the default arrow tip shape.\n \"\"\"\n\n def __init__(self, fill_opacity=1, stroke_width=0, **kwargs):\n super().__init__(fill_opacity=fill_opacity, stroke_width=stroke_width, **kwargs)\n\n\nclass ArrowCircleTip(ArrowTip, Circle):\n r\"\"\"Circular arrow tip.\"\"\"\n\n def __init__(\n self,\n fill_opacity=0,\n stroke_width=3,\n length=DEFAULT_ARROW_TIP_LENGTH,\n start_angle=PI,\n **kwargs,\n ):\n self.start_angle = start_angle\n Circle.__init__(\n self, fill_opacity=fill_opacity, stroke_width=stroke_width, **kwargs\n )\n self.width = length\n self.stretch_to_fit_height(length)\n\n\nclass ArrowCircleFilledTip(ArrowCircleTip):\n r\"\"\"Circular arrow tip with filled tip.\"\"\"\n\n def __init__(self, fill_opacity=1, stroke_width=0, **kwargs):\n super().__init__(fill_opacity=fill_opacity, stroke_width=stroke_width, **kwargs)\n\n\nclass ArrowSquareTip(ArrowTip, Square):\n r\"\"\"Square arrow tip.\"\"\"\n\n def __init__(\n self,\n fill_opacity=0,\n stroke_width=3,\n length=DEFAULT_ARROW_TIP_LENGTH,\n start_angle=PI,\n **kwargs,\n ):\n self.start_angle = start_angle\n Square.__init__(\n self,\n fill_opacity=fill_opacity,\n stroke_width=stroke_width,\n side_length=length,\n **kwargs,\n )\n self.width = length\n self.stretch_to_fit_height(length)\n\n\nclass ArrowSquareFilledTip(ArrowSquareTip):\n r\"\"\"Square arrow tip with filled tip.\"\"\"\n\n def __init__(self, fill_opacity=1, stroke_width=0, **kwargs):\n super().__init__(fill_opacity=fill_opacity, stroke_width=stroke_width, **kwargs)\n",
"path": "manim/mobject/geometry/tips.py"
}
] | diff --git a/manim/mobject/geometry/tips.py b/manim/mobject/geometry/tips.py
index b25a5a5380..0c82697fdc 100644
--- a/manim/mobject/geometry/tips.py
+++ b/manim/mobject/geometry/tips.py
@@ -8,6 +8,8 @@
"ArrowCircleTip",
"ArrowSquareTip",
"ArrowSquareFilledTip",
+ "ArrowTriangleTip",
+ "ArrowTriangleFilledTip",
]
import numpy as np
|
NVIDIA__apex-564 | RuntimeError: "GeluCUDAKernelImpl" not implemented for 'Half'
PyTorch 1.2 introduced the `gelu` activation function. Unfortunately, this leads to fatal errors when it is used with AMP.
Trace (`self.activation` is `gelu`):
```
Traceback (most recent call last):
  File "predict.py", line 282, in <module>
    predictor.predict()
  File "predict.py", line 74, in predict
    fig = trainer.train()
  File "/home/bram/Python/projects/transformer-classifiers/transformer_classifiers/TransformerTrainer.py", line 232, in train
    self._process('train', epoch)
  File "/home/bram/Python/projects/transformer-classifiers/transformer_classifiers/TransformerTrainer.py", line 124, in _process
    preds = self.model(input_ids, attention_mask=input_mask)
  File "/home/bram/.local/share/virtualenvs/transformer-classifiers-x27iJBv7/lib/python3.7/site-packages/torch/nn/modules/module.py", line 541, in __call__
    result = self.forward(*input, **kwargs)
  File "/home/bram/.local/share/virtualenvs/transformer-classifiers-x27iJBv7/lib/python3.7/site-packages/torch/nn/parallel/distributed.py", line 442, in forward
    output = self.module(*inputs[0], **kwargs[0])
  File "/home/bram/.local/share/virtualenvs/transformer-classifiers-x27iJBv7/lib/python3.7/site-packages/torch/nn/modules/module.py", line 541, in __call__
    result = self.forward(*input, **kwargs)
  File "/home/bram/Python/projects/transformer-classifiers/transformer_classifiers/models.py", line 140, in forward
    cls_output = self.activation(cls_output)
  File "/home/bram/.local/share/virtualenvs/transformer-classifiers-x27iJBv7/lib/python3.7/site-packages/torch/nn/functional.py", line 1126, in gelu
    return torch._C._nn.gelu(input)
RuntimeError: "GeluCUDAKernelImpl" not implemented for 'Half'
```
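The failure is not specific to this model. A minimal sketch of the underlying problem (assuming a CUDA build of PyTorch 1.2/1.3, where the half-precision CUDA kernel for `gelu` is missing):

```python
import torch
import torch.nn.functional as F

# fp16 CUDA tensor, as produced inside a model running under amp O1/O2
x = torch.randn(4, 8, device="cuda").half()

# On PyTorch 1.2/1.3 this raises:
#   RuntimeError: "GeluCUDAKernelImpl" not implemented for 'Half'
y = F.gelu(x)
```

Registering `gelu` as an fp32 function for amp (the change below adds `'gelu'` to `FP32_FUNCS`) makes amp cast the input to fp32 around the call, which sidesteps the missing half-precision kernel.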
| [
{
"content": "\n# TODO: think about the following two. They do weird things.\n# - torch.nn.utils.clip_grad (but it should always be fp32 anyway)\n# - torch.nn.utils.weight_norm\n\n# Notes:\n# F.instance_norm uses batch_norm internally. Which correctly handles\n# fp16 in/out with fp32 weights. So we shouldn't do anything for\n# either of these.\n# F.normalize calls `input.norm()` internally, so it's redundant, but\n# kept here in case impl. changes.\n# F.cosine_similarity is same: calls `x.norm()` internally.\n\nimport torch.nn.functional\n\nMODULE = torch.nn.functional\n\nFP16_FUNCS = [\n 'conv1d',\n 'conv2d',\n 'conv3d',\n 'conv_transpose1d',\n 'conv_transpose2d',\n 'conv_transpose3d',\n 'conv_tbc', # Undocumented / maybe new?\n 'linear',\n]\n\nFP32_FUNCS = [\n\n # Interpolation/Upsampling TODO: Remove for 1.2\n 'interpolate',\n 'grid_sample',\n\n # Pointwise\n 'softplus',\n 'softmin',\n 'log_softmax',\n 'softmax',\n\n # Normalization\n 'layer_norm',\n 'group_norm',\n 'local_response_norm',\n 'normalize',\n 'cosine_similarity',\n\n # Loss functions\n # TODO: which of these can be fp16?\n 'poisson_nll_loss',\n 'cosine_embedding_loss',\n 'cross_entropy',\n 'hinge_embedding_loss',\n 'kl_div',\n 'l1_loss',\n 'mse_loss',\n 'margin_ranking_loss',\n 'multilabel_margin_loss',\n 'multilabel_soft_margin_loss',\n 'multi_margin_loss',\n 'nll_loss',\n 'binary_cross_entropy_with_logits',\n 'smooth_l1_loss',\n 'soft_margin_loss',\n 'triplet_margin_loss'\n]\n\nBANNED_FUNCS = [\n ('binary_cross_entropy',\n (\"\\namp does not work out-of-the-box with `F.binary_cross_entropy` or `torch.nn.BCELoss.` \"\n \"It requires that the output of the previous function be already a FloatTensor. \\n\\n\"\n \"Most models have a Sigmoid right before BCELoss. In that case, you can use\\n\"\n \" torch.nn.BCEWithLogitsLoss\\nto combine Sigmoid+BCELoss into a single layer \"\n \"that is compatible with amp.\\nAnother option is to add\\n\"\n \" amp.register_float_function(torch, 'sigmoid')\\nbefore calling `amp.init()`.\\n\"\n \"If you _really_ know what you are doing, you can disable this warning by passing \"\n \"allow_banned=True to `amp.init()`.\"))\n]\n",
"path": "apex/amp/lists/functional_overrides.py"
}
] | [
{
"content": "\n# TODO: think about the following two. They do weird things.\n# - torch.nn.utils.clip_grad (but it should always be fp32 anyway)\n# - torch.nn.utils.weight_norm\n\n# Notes:\n# F.instance_norm uses batch_norm internally. Which correctly handles\n# fp16 in/out with fp32 weights. So we shouldn't do anything for\n# either of these.\n# F.normalize calls `input.norm()` internally, so it's redundant, but\n# kept here in case impl. changes.\n# F.cosine_similarity is same: calls `x.norm()` internally.\n\nimport torch.nn.functional\n\nMODULE = torch.nn.functional\n\nFP16_FUNCS = [\n 'conv1d',\n 'conv2d',\n 'conv3d',\n 'conv_transpose1d',\n 'conv_transpose2d',\n 'conv_transpose3d',\n 'conv_tbc', # Undocumented / maybe new?\n 'linear',\n]\n\nFP32_FUNCS = [\n\n # Interpolation/Upsampling TODO: Remove for 1.2\n 'interpolate',\n 'grid_sample',\n\n # Pointwise\n 'softplus',\n 'softmin',\n 'log_softmax',\n 'softmax',\n 'gelu',\n \n # Normalization\n 'layer_norm',\n 'group_norm',\n 'local_response_norm',\n 'normalize',\n 'cosine_similarity',\n\n # Loss functions\n # TODO: which of these can be fp16?\n 'poisson_nll_loss',\n 'cosine_embedding_loss',\n 'cross_entropy',\n 'hinge_embedding_loss',\n 'kl_div',\n 'l1_loss',\n 'mse_loss',\n 'margin_ranking_loss',\n 'multilabel_margin_loss',\n 'multilabel_soft_margin_loss',\n 'multi_margin_loss',\n 'nll_loss',\n 'binary_cross_entropy_with_logits',\n 'smooth_l1_loss',\n 'soft_margin_loss',\n 'triplet_margin_loss'\n]\n\nBANNED_FUNCS = [\n ('binary_cross_entropy',\n (\"\\namp does not work out-of-the-box with `F.binary_cross_entropy` or `torch.nn.BCELoss.` \"\n \"It requires that the output of the previous function be already a FloatTensor. \\n\\n\"\n \"Most models have a Sigmoid right before BCELoss. In that case, you can use\\n\"\n \" torch.nn.BCEWithLogitsLoss\\nto combine Sigmoid+BCELoss into a single layer \"\n \"that is compatible with amp.\\nAnother option is to add\\n\"\n \" amp.register_float_function(torch, 'sigmoid')\\nbefore calling `amp.init()`.\\n\"\n \"If you _really_ know what you are doing, you can disable this warning by passing \"\n \"allow_banned=True to `amp.init()`.\"))\n]\n",
"path": "apex/amp/lists/functional_overrides.py"
}
] | diff --git a/apex/amp/lists/functional_overrides.py b/apex/amp/lists/functional_overrides.py
index 3ea6a4918..d1dfcd0ea 100644
--- a/apex/amp/lists/functional_overrides.py
+++ b/apex/amp/lists/functional_overrides.py
@@ -37,7 +37,8 @@
'softmin',
'log_softmax',
'softmax',
-
+ 'gelu',
+
# Normalization
'layer_norm',
'group_norm',
|
vyperlang__vyper-3207 | variables named `UNREACHABLE` can be shadowed by `raise` and `assert` when used with `UNREACHABLE`
### Version Information
* vyper Version (output of `vyper --version`): 0.3.8+commit.6020b8bb
* OS: OSX
* Python Version (output of `python --version`): 3.8.0
### What's your issue about?
`UNREACHABLE` is not a reserved keyword. For someone who is not familiar with the custom semantics of `UNREACHABLE` when it is used as the reason string of a `raise` or `assert` statement, it might be confusing that, in this context, any previously defined variable named `UNREACHABLE` is shadowed by this special meaning.
To illustrate this, in the following contract, a call to `bar` reverts with `invalid opcode` and not with `this is unreachable`, as one might expect.
```Vyper
@external
def bar():
    UNREACHABLE: String[20] = "this is unreachable"
    x: uint256 = 3
    assert 2 > x, UNREACHABLE
```
### How can it be fixed?
One option would be to make `UNREACHABLE` a reserved keyword, but a less restrictive alternative might be better.
| [
{
"content": "import contextlib\nimport re\n\nfrom vyper.evm.opcodes import OPCODES\nfrom vyper.exceptions import (\n CompilerPanic,\n NamespaceCollision,\n StructureException,\n UndeclaredDefinition,\n)\nfrom vyper.semantics.analysis.levenshtein_utils import get_levenshtein_error_suggestions\n\n\nclass Namespace(dict):\n \"\"\"\n Dictionary subclass that represents the namespace of a contract.\n\n Attributes\n ----------\n _scopes : List[Set]\n List of sets containing the key names for each scope\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self._scopes = []\n # NOTE cyclic imports!\n # TODO: break this cycle by providing an `init_vyper_namespace` in 3rd module\n from vyper.builtins.functions import get_builtin_functions\n from vyper.semantics import environment\n from vyper.semantics.analysis.base import VarInfo\n from vyper.semantics.types import get_types\n\n self.update(get_types())\n self.update(environment.get_constant_vars())\n self.update({k: VarInfo(b) for (k, b) in get_builtin_functions().items()})\n\n def __eq__(self, other):\n return self is other\n\n def __setitem__(self, attr, obj):\n if self._scopes:\n self.validate_assignment(attr)\n self._scopes[-1].add(attr)\n assert isinstance(attr, str), f\"not a string: {attr}\"\n super().__setitem__(attr, obj)\n\n def __getitem__(self, key):\n if key not in self:\n suggestions_str = get_levenshtein_error_suggestions(key, self, 0.2)\n raise UndeclaredDefinition(f\"'{key}' has not been declared. {suggestions_str}\")\n return super().__getitem__(key)\n\n def __enter__(self):\n if not self._scopes:\n raise CompilerPanic(\"Context manager must be invoked via namespace.enter_scope()\")\n\n def __exit__(self, exc_type, exc_value, traceback):\n if not self._scopes:\n raise CompilerPanic(\"Bad use of namespace as a context manager\")\n for key in self._scopes.pop():\n del self[key]\n\n def enter_scope(self):\n \"\"\"\n Enter a new scope within the namespace.\n\n Called as a context manager, e.g. 
`with namespace.enter_scope():`\n All items that are added within the context are removed upon exit.\n \"\"\"\n # NOTE cyclic imports!\n from vyper.semantics import environment\n\n self._scopes.append(set())\n\n if len(self._scopes) == 1:\n # add mutable vars (`self`) to the initial scope\n self.update(environment.get_mutable_vars())\n\n return self\n\n def update(self, other):\n for key, value in other.items():\n self.__setitem__(key, value)\n\n def clear(self):\n super().clear()\n self.__init__()\n\n def validate_assignment(self, attr):\n validate_identifier(attr)\n if attr in self:\n obj = super().__getitem__(attr)\n raise NamespaceCollision(f\"'{attr}' has already been declared as a {obj}\")\n\n\ndef get_namespace():\n \"\"\"\n Get the active namespace object.\n \"\"\"\n global _namespace\n try:\n return _namespace\n except NameError:\n _namespace = Namespace()\n return _namespace\n\n\[email protected]\ndef override_global_namespace(ns):\n global _namespace\n tmp = _namespace\n try:\n # clobber global namespace\n _namespace = ns\n yield\n finally:\n # unclobber\n _namespace = tmp\n\n\ndef validate_identifier(attr):\n namespace = get_namespace()\n if attr in namespace and attr not in [x for i in namespace._scopes for x in i]:\n raise NamespaceCollision(f\"Cannot assign to '{attr}', it is a builtin\")\n if attr.lower() in RESERVED_KEYWORDS or attr.upper() in OPCODES:\n raise StructureException(f\"'{attr}' is a reserved keyword\")\n if not re.match(\"^[_a-zA-Z][a-zA-Z0-9_]*$\", attr):\n raise StructureException(f\"'{attr}' contains invalid character(s)\")\n\n\n# Cannot be used for variable or member naming\nRESERVED_KEYWORDS = {\n # decorators\n \"public\",\n \"external\",\n \"nonpayable\",\n \"constant\",\n \"immutable\",\n \"internal\",\n \"payable\",\n \"nonreentrant\",\n # \"class\" keywords\n \"interface\",\n \"struct\",\n \"event\",\n \"enum\",\n # control flow\n \"if\",\n \"for\",\n \"while\",\n \"until\",\n \"pass\",\n \"def\",\n # EVM operations\n \"send\",\n \"selfdestruct\",\n \"assert\",\n \"raise\",\n \"throw\",\n # special functions (no name mangling)\n \"init\",\n \"_init_\",\n \"___init___\",\n \"____init____\",\n \"default\",\n \"_default_\",\n \"___default___\",\n \"____default____\",\n # environment variables\n \"chainid\",\n \"blockhash\",\n \"timestamp\",\n \"timedelta\",\n # boolean literals\n \"true\",\n \"false\",\n # more control flow and special operations\n \"this\",\n \"continue\",\n \"range\",\n # None sentinal value\n \"none\",\n # more special operations\n \"indexed\",\n # denominations\n \"ether\",\n \"wei\",\n \"finney\",\n \"szabo\",\n \"shannon\",\n \"lovelace\",\n \"ada\",\n \"babbage\",\n \"gwei\",\n \"kwei\",\n \"mwei\",\n \"twei\",\n \"pwei\",\n # `address` members\n \"balance\",\n \"codesize\",\n \"codehash\",\n \"code\",\n \"is_contract\",\n # units\n \"units\",\n # sentinal constant values\n \"zero_address\",\n \"empty_bytes32\",\n \"max_int128\",\n \"min_int128\",\n \"max_decimal\",\n \"min_decimal\",\n \"max_uint256\",\n \"zero_wei\",\n}\n",
"path": "vyper/semantics/namespace.py"
}
] | [
{
"content": "import contextlib\nimport re\n\nfrom vyper.evm.opcodes import OPCODES\nfrom vyper.exceptions import (\n CompilerPanic,\n NamespaceCollision,\n StructureException,\n UndeclaredDefinition,\n)\nfrom vyper.semantics.analysis.levenshtein_utils import get_levenshtein_error_suggestions\n\n\nclass Namespace(dict):\n \"\"\"\n Dictionary subclass that represents the namespace of a contract.\n\n Attributes\n ----------\n _scopes : List[Set]\n List of sets containing the key names for each scope\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self._scopes = []\n # NOTE cyclic imports!\n # TODO: break this cycle by providing an `init_vyper_namespace` in 3rd module\n from vyper.builtins.functions import get_builtin_functions\n from vyper.semantics import environment\n from vyper.semantics.analysis.base import VarInfo\n from vyper.semantics.types import get_types\n\n self.update(get_types())\n self.update(environment.get_constant_vars())\n self.update({k: VarInfo(b) for (k, b) in get_builtin_functions().items()})\n\n def __eq__(self, other):\n return self is other\n\n def __setitem__(self, attr, obj):\n if self._scopes:\n self.validate_assignment(attr)\n self._scopes[-1].add(attr)\n assert isinstance(attr, str), f\"not a string: {attr}\"\n super().__setitem__(attr, obj)\n\n def __getitem__(self, key):\n if key not in self:\n suggestions_str = get_levenshtein_error_suggestions(key, self, 0.2)\n raise UndeclaredDefinition(f\"'{key}' has not been declared. {suggestions_str}\")\n return super().__getitem__(key)\n\n def __enter__(self):\n if not self._scopes:\n raise CompilerPanic(\"Context manager must be invoked via namespace.enter_scope()\")\n\n def __exit__(self, exc_type, exc_value, traceback):\n if not self._scopes:\n raise CompilerPanic(\"Bad use of namespace as a context manager\")\n for key in self._scopes.pop():\n del self[key]\n\n def enter_scope(self):\n \"\"\"\n Enter a new scope within the namespace.\n\n Called as a context manager, e.g. 
`with namespace.enter_scope():`\n All items that are added within the context are removed upon exit.\n \"\"\"\n # NOTE cyclic imports!\n from vyper.semantics import environment\n\n self._scopes.append(set())\n\n if len(self._scopes) == 1:\n # add mutable vars (`self`) to the initial scope\n self.update(environment.get_mutable_vars())\n\n return self\n\n def update(self, other):\n for key, value in other.items():\n self.__setitem__(key, value)\n\n def clear(self):\n super().clear()\n self.__init__()\n\n def validate_assignment(self, attr):\n validate_identifier(attr)\n if attr in self:\n obj = super().__getitem__(attr)\n raise NamespaceCollision(f\"'{attr}' has already been declared as a {obj}\")\n\n\ndef get_namespace():\n \"\"\"\n Get the active namespace object.\n \"\"\"\n global _namespace\n try:\n return _namespace\n except NameError:\n _namespace = Namespace()\n return _namespace\n\n\[email protected]\ndef override_global_namespace(ns):\n global _namespace\n tmp = _namespace\n try:\n # clobber global namespace\n _namespace = ns\n yield\n finally:\n # unclobber\n _namespace = tmp\n\n\ndef validate_identifier(attr):\n namespace = get_namespace()\n if attr in namespace and attr not in [x for i in namespace._scopes for x in i]:\n raise NamespaceCollision(f\"Cannot assign to '{attr}', it is a builtin\")\n if attr.lower() in RESERVED_KEYWORDS or attr.upper() in OPCODES:\n raise StructureException(f\"'{attr}' is a reserved keyword\")\n if not re.match(\"^[_a-zA-Z][a-zA-Z0-9_]*$\", attr):\n raise StructureException(f\"'{attr}' contains invalid character(s)\")\n\n\n# Cannot be used for variable or member naming\nRESERVED_KEYWORDS = {\n # decorators\n \"public\",\n \"external\",\n \"nonpayable\",\n \"constant\",\n \"immutable\",\n \"internal\",\n \"payable\",\n \"nonreentrant\",\n # \"class\" keywords\n \"interface\",\n \"struct\",\n \"event\",\n \"enum\",\n # control flow\n \"if\",\n \"for\",\n \"while\",\n \"until\",\n \"pass\",\n \"def\",\n # EVM operations\n \"send\",\n \"selfdestruct\",\n \"assert\",\n \"raise\",\n \"throw\",\n \"unreachable\",\n # special functions (no name mangling)\n \"init\",\n \"_init_\",\n \"___init___\",\n \"____init____\",\n \"default\",\n \"_default_\",\n \"___default___\",\n \"____default____\",\n # environment variables\n \"chainid\",\n \"blockhash\",\n \"timestamp\",\n \"timedelta\",\n # boolean literals\n \"true\",\n \"false\",\n # more control flow and special operations\n \"this\",\n \"continue\",\n \"range\",\n # None sentinal value\n \"none\",\n # more special operations\n \"indexed\",\n # denominations\n \"ether\",\n \"wei\",\n \"finney\",\n \"szabo\",\n \"shannon\",\n \"lovelace\",\n \"ada\",\n \"babbage\",\n \"gwei\",\n \"kwei\",\n \"mwei\",\n \"twei\",\n \"pwei\",\n # `address` members\n \"balance\",\n \"codesize\",\n \"codehash\",\n \"code\",\n \"is_contract\",\n # units\n \"units\",\n # sentinal constant values\n \"zero_address\",\n \"empty_bytes32\",\n \"max_int128\",\n \"min_int128\",\n \"max_decimal\",\n \"min_decimal\",\n \"max_uint256\",\n \"zero_wei\",\n}\n",
"path": "vyper/semantics/namespace.py"
}
] | diff --git a/vyper/semantics/namespace.py b/vyper/semantics/namespace.py
index 5aa530ea01..752ef7ad96 100644
--- a/vyper/semantics/namespace.py
+++ b/vyper/semantics/namespace.py
@@ -158,6 +158,7 @@ def validate_identifier(attr):
"assert",
"raise",
"throw",
+ "unreachable",
# special functions (no name mangling)
"init",
"_init_",
|
django__channels-1223 | InMemoryChannelLayer: unused variable local_poll_interval
https://github.com/django/channels/blob/5feecdb6a0df720651851f377aee3587d07eceef/channels/layers.py#L199
| [
{
"content": "from __future__ import unicode_literals\n\nimport asyncio\nimport fnmatch\nimport random\nimport re\nimport string\nimport time\nfrom copy import deepcopy\n\nfrom django.conf import settings\nfrom django.core.signals import setting_changed\nfrom django.utils.module_loading import import_string\n\nfrom channels import DEFAULT_CHANNEL_LAYER\n\nfrom .exceptions import ChannelFull, InvalidChannelLayerError\n\n\nclass ChannelLayerManager:\n \"\"\"\n Takes a settings dictionary of backends and initialises them on request.\n \"\"\"\n\n def __init__(self):\n self.backends = {}\n setting_changed.connect(self._reset_backends)\n\n def _reset_backends(self, setting, **kwargs):\n \"\"\"\n Removes cached channel layers when the CHANNEL_LAYERS setting changes.\n \"\"\"\n if setting == \"CHANNEL_LAYERS\":\n self.backends = {}\n\n @property\n def configs(self):\n # Lazy load settings so we can be imported\n return getattr(settings, \"CHANNEL_LAYERS\", {})\n\n def make_backend(self, name):\n \"\"\"\n Instantiate channel layer.\n \"\"\"\n config = self.configs[name].get(\"CONFIG\", {})\n return self._make_backend(name, config)\n\n def make_test_backend(self, name):\n \"\"\"\n Instantiate channel layer using its test config.\n \"\"\"\n try:\n config = self.configs[name][\"TEST_CONFIG\"]\n except KeyError:\n raise InvalidChannelLayerError(\"No TEST_CONFIG specified for %s\" % name)\n return self._make_backend(name, config)\n\n def _make_backend(self, name, config):\n # Check for old format config\n if \"ROUTING\" in self.configs[name]:\n raise InvalidChannelLayerError(\n \"ROUTING key found for %s - this is no longer needed in Channels 2.\"\n % name\n )\n # Load the backend class\n try:\n backend_class = import_string(self.configs[name][\"BACKEND\"])\n except KeyError:\n raise InvalidChannelLayerError(\"No BACKEND specified for %s\" % name)\n except ImportError:\n raise InvalidChannelLayerError(\n \"Cannot import BACKEND %r specified for %s\"\n % (self.configs[name][\"BACKEND\"], name)\n )\n # Initialise and pass config\n return backend_class(**config)\n\n def __getitem__(self, key):\n if key not in self.backends:\n self.backends[key] = self.make_backend(key)\n return self.backends[key]\n\n def __contains__(self, key):\n return key in self.configs\n\n def set(self, key, layer):\n \"\"\"\n Sets an alias to point to a new ChannelLayerWrapper instance, and\n returns the old one that it replaced. 
Useful for swapping out the\n backend during tests.\n \"\"\"\n old = self.backends.get(key, None)\n self.backends[key] = layer\n return old\n\n\nclass BaseChannelLayer:\n \"\"\"\n Base channel layer class that others can inherit from, with useful\n common functionality.\n \"\"\"\n\n def __init__(self, expiry=60, capacity=100, channel_capacity=None):\n self.expiry = expiry\n self.capacity = capacity\n self.channel_capacity = channel_capacity or {}\n\n def compile_capacities(self, channel_capacity):\n \"\"\"\n Takes an input channel_capacity dict and returns the compiled list\n of regexes that get_capacity will look for as self.channel_capacity\n \"\"\"\n result = []\n for pattern, value in channel_capacity.items():\n # If they passed in a precompiled regex, leave it, else intepret\n # it as a glob.\n if hasattr(pattern, \"match\"):\n result.append((pattern, value))\n else:\n result.append((re.compile(fnmatch.translate(pattern)), value))\n return result\n\n def get_capacity(self, channel):\n \"\"\"\n Gets the correct capacity for the given channel; either the default,\n or a matching result from channel_capacity. Returns the first matching\n result; if you want to control the order of matches, use an ordered dict\n as input.\n \"\"\"\n for pattern, capacity in self.channel_capacity:\n if pattern.match(channel):\n return capacity\n return self.capacity\n\n def match_type_and_length(self, name):\n if isinstance(name, str) and (len(name) < 100):\n return True\n return False\n\n ### Name validation functions\n\n channel_name_regex = re.compile(r\"^[a-zA-Z\\d\\-_.]+(\\![\\d\\w\\-_.]*)?$\")\n group_name_regex = re.compile(r\"^[a-zA-Z\\d\\-_.]+$\")\n invalid_name_error = (\n \"{} name must be a valid unicode string containing only ASCII \"\n + \"alphanumerics, hyphens, underscores, or periods.\"\n )\n\n def valid_channel_name(self, name, receive=False):\n if self.match_type_and_length(name):\n if bool(self.channel_name_regex.match(name)):\n # Check cases for special channels\n if \"!\" in name and not name.endswith(\"!\") and receive:\n raise TypeError(\n \"Specific channel names in receive() must end at the !\"\n )\n return True\n raise TypeError(\n \"Channel name must be a valid unicode string containing only ASCII \"\n + \"alphanumerics, hyphens, or periods, not '{}'.\".format(name)\n )\n\n def valid_group_name(self, name):\n if self.match_type_and_length(name):\n if bool(self.group_name_regex.match(name)):\n return True\n raise TypeError(\n \"Group name must be a valid unicode string containing only ASCII \"\n + \"alphanumerics, hyphens, or periods.\"\n )\n\n def valid_channel_names(self, names, receive=False):\n _non_empty_list = True if names else False\n _names_type = isinstance(names, list)\n assert _non_empty_list and _names_type, \"names must be a non-empty list\"\n\n assert all(\n self.valid_channel_name(channel, receive=receive) for channel in names\n )\n return True\n\n def non_local_name(self, name):\n \"\"\"\n Given a channel name, returns the \"non-local\" part. If the channel name\n is a process-specific channel (contains !) 
this means the part up to\n and including the !; if it is anything else, this means the full name.\n \"\"\"\n if \"!\" in name:\n return name[: name.find(\"!\") + 1]\n else:\n return name\n\n\nclass InMemoryChannelLayer(BaseChannelLayer):\n \"\"\"\n In-memory channel layer implementation\n \"\"\"\n\n local_poll_interval = 0.01\n\n def __init__(\n self,\n expiry=60,\n group_expiry=86400,\n capacity=100,\n channel_capacity=None,\n **kwargs\n ):\n super().__init__(\n expiry=expiry,\n capacity=capacity,\n channel_capacity=channel_capacity,\n **kwargs\n )\n self.channels = {}\n self.groups = {}\n self.group_expiry = group_expiry\n\n ### Channel layer API ###\n\n extensions = [\"groups\", \"flush\"]\n\n async def send(self, channel, message):\n \"\"\"\n Send a message onto a (general or specific) channel.\n \"\"\"\n # Typecheck\n assert isinstance(message, dict), \"message is not a dict\"\n assert self.valid_channel_name(channel), \"Channel name not valid\"\n # If it's a process-local channel, strip off local part and stick full name in message\n assert \"__asgi_channel__\" not in message\n\n queue = self.channels.setdefault(channel, asyncio.Queue())\n # Are we full\n if queue.qsize() >= self.capacity:\n raise ChannelFull(channel)\n\n # Add message\n await queue.put((time.time() + self.expiry, deepcopy(message)))\n\n async def receive(self, channel):\n \"\"\"\n Receive the first message that arrives on the channel.\n If more than one coroutine waits on the same channel, a random one\n of the waiting coroutines will get the result.\n \"\"\"\n assert self.valid_channel_name(channel)\n self._clean_expired()\n\n queue = self.channels.setdefault(channel, asyncio.Queue())\n\n # Do a plain direct receive\n _, message = await queue.get()\n\n # Delete if empty\n if queue.empty():\n del self.channels[channel]\n\n return message\n\n async def new_channel(self, prefix=\"specific.\"):\n \"\"\"\n Returns a new channel name that can be used by something in our\n process as a specific channel.\n \"\"\"\n return \"%s.inmemory!%s\" % (\n prefix,\n \"\".join(random.choice(string.ascii_letters) for i in range(12)),\n )\n\n ### Expire cleanup ###\n\n def _clean_expired(self):\n \"\"\"\n Goes through all messages and groups and removes those that are expired.\n Any channel with an expired message is removed from all groups.\n \"\"\"\n # Channel cleanup\n for channel, queue in list(self.channels.items()):\n remove = False\n # See if it's expired\n while not queue.empty() and queue._queue[0][0] < time.time():\n queue.get_nowait()\n remove = True\n # Any removal prompts group discard\n if remove:\n self._remove_from_groups(channel)\n # Is the channel now empty and needs deleting?\n if not queue:\n del self.channels[channel]\n\n # Group Expiration\n timeout = int(time.time()) - self.group_expiry\n for group in self.groups:\n for channel in list(self.groups.get(group, set())):\n # If join time is older than group_expiry end the group membership\n if (\n self.groups[group][channel]\n and int(self.groups[group][channel]) < timeout\n ):\n # Delete from group\n del self.groups[group][channel]\n\n ### Flush extension ###\n\n async def flush(self):\n self.channels = {}\n self.groups = {}\n\n async def close(self):\n # Nothing to go\n pass\n\n def _remove_from_groups(self, channel):\n \"\"\"\n Removes a channel from all groups. 
Used when a message on it expires.\n \"\"\"\n for channels in self.groups.values():\n if channel in channels:\n del channels[channel]\n\n ### Groups extension ###\n\n async def group_add(self, group, channel):\n \"\"\"\n Adds the channel name to a group.\n \"\"\"\n # Check the inputs\n assert self.valid_group_name(group), \"Group name not valid\"\n assert self.valid_channel_name(channel), \"Channel name not valid\"\n # Add to group dict\n self.groups.setdefault(group, {})\n self.groups[group][channel] = time.time()\n\n async def group_discard(self, group, channel):\n # Both should be text and valid\n assert self.valid_channel_name(channel), \"Invalid channel name\"\n assert self.valid_group_name(group), \"Invalid group name\"\n # Remove from group set\n if group in self.groups:\n if channel in self.groups[group]:\n del self.groups[group][channel]\n if not self.groups[group]:\n del self.groups[group]\n\n async def group_send(self, group, message):\n # Check types\n assert isinstance(message, dict), \"Message is not a dict\"\n assert self.valid_group_name(group), \"Invalid group name\"\n # Run clean\n self._clean_expired()\n # Send to each channel\n for channel in self.groups.get(group, set()):\n try:\n await self.send(channel, message)\n except ChannelFull:\n pass\n\n\ndef get_channel_layer(alias=DEFAULT_CHANNEL_LAYER):\n \"\"\"\n Returns a channel layer by alias, or None if it is not configured.\n \"\"\"\n try:\n return channel_layers[alias]\n except KeyError:\n return None\n\n\n# Default global instance of the channel layer manager\nchannel_layers = ChannelLayerManager()\n",
"path": "channels/layers.py"
}
] | [
{
"content": "from __future__ import unicode_literals\n\nimport asyncio\nimport fnmatch\nimport random\nimport re\nimport string\nimport time\nfrom copy import deepcopy\n\nfrom django.conf import settings\nfrom django.core.signals import setting_changed\nfrom django.utils.module_loading import import_string\n\nfrom channels import DEFAULT_CHANNEL_LAYER\n\nfrom .exceptions import ChannelFull, InvalidChannelLayerError\n\n\nclass ChannelLayerManager:\n \"\"\"\n Takes a settings dictionary of backends and initialises them on request.\n \"\"\"\n\n def __init__(self):\n self.backends = {}\n setting_changed.connect(self._reset_backends)\n\n def _reset_backends(self, setting, **kwargs):\n \"\"\"\n Removes cached channel layers when the CHANNEL_LAYERS setting changes.\n \"\"\"\n if setting == \"CHANNEL_LAYERS\":\n self.backends = {}\n\n @property\n def configs(self):\n # Lazy load settings so we can be imported\n return getattr(settings, \"CHANNEL_LAYERS\", {})\n\n def make_backend(self, name):\n \"\"\"\n Instantiate channel layer.\n \"\"\"\n config = self.configs[name].get(\"CONFIG\", {})\n return self._make_backend(name, config)\n\n def make_test_backend(self, name):\n \"\"\"\n Instantiate channel layer using its test config.\n \"\"\"\n try:\n config = self.configs[name][\"TEST_CONFIG\"]\n except KeyError:\n raise InvalidChannelLayerError(\"No TEST_CONFIG specified for %s\" % name)\n return self._make_backend(name, config)\n\n def _make_backend(self, name, config):\n # Check for old format config\n if \"ROUTING\" in self.configs[name]:\n raise InvalidChannelLayerError(\n \"ROUTING key found for %s - this is no longer needed in Channels 2.\"\n % name\n )\n # Load the backend class\n try:\n backend_class = import_string(self.configs[name][\"BACKEND\"])\n except KeyError:\n raise InvalidChannelLayerError(\"No BACKEND specified for %s\" % name)\n except ImportError:\n raise InvalidChannelLayerError(\n \"Cannot import BACKEND %r specified for %s\"\n % (self.configs[name][\"BACKEND\"], name)\n )\n # Initialise and pass config\n return backend_class(**config)\n\n def __getitem__(self, key):\n if key not in self.backends:\n self.backends[key] = self.make_backend(key)\n return self.backends[key]\n\n def __contains__(self, key):\n return key in self.configs\n\n def set(self, key, layer):\n \"\"\"\n Sets an alias to point to a new ChannelLayerWrapper instance, and\n returns the old one that it replaced. 
Useful for swapping out the\n backend during tests.\n \"\"\"\n old = self.backends.get(key, None)\n self.backends[key] = layer\n return old\n\n\nclass BaseChannelLayer:\n \"\"\"\n Base channel layer class that others can inherit from, with useful\n common functionality.\n \"\"\"\n\n def __init__(self, expiry=60, capacity=100, channel_capacity=None):\n self.expiry = expiry\n self.capacity = capacity\n self.channel_capacity = channel_capacity or {}\n\n def compile_capacities(self, channel_capacity):\n \"\"\"\n Takes an input channel_capacity dict and returns the compiled list\n of regexes that get_capacity will look for as self.channel_capacity\n \"\"\"\n result = []\n for pattern, value in channel_capacity.items():\n # If they passed in a precompiled regex, leave it, else intepret\n # it as a glob.\n if hasattr(pattern, \"match\"):\n result.append((pattern, value))\n else:\n result.append((re.compile(fnmatch.translate(pattern)), value))\n return result\n\n def get_capacity(self, channel):\n \"\"\"\n Gets the correct capacity for the given channel; either the default,\n or a matching result from channel_capacity. Returns the first matching\n result; if you want to control the order of matches, use an ordered dict\n as input.\n \"\"\"\n for pattern, capacity in self.channel_capacity:\n if pattern.match(channel):\n return capacity\n return self.capacity\n\n def match_type_and_length(self, name):\n if isinstance(name, str) and (len(name) < 100):\n return True\n return False\n\n ### Name validation functions\n\n channel_name_regex = re.compile(r\"^[a-zA-Z\\d\\-_.]+(\\![\\d\\w\\-_.]*)?$\")\n group_name_regex = re.compile(r\"^[a-zA-Z\\d\\-_.]+$\")\n invalid_name_error = (\n \"{} name must be a valid unicode string containing only ASCII \"\n + \"alphanumerics, hyphens, underscores, or periods.\"\n )\n\n def valid_channel_name(self, name, receive=False):\n if self.match_type_and_length(name):\n if bool(self.channel_name_regex.match(name)):\n # Check cases for special channels\n if \"!\" in name and not name.endswith(\"!\") and receive:\n raise TypeError(\n \"Specific channel names in receive() must end at the !\"\n )\n return True\n raise TypeError(\n \"Channel name must be a valid unicode string containing only ASCII \"\n + \"alphanumerics, hyphens, or periods, not '{}'.\".format(name)\n )\n\n def valid_group_name(self, name):\n if self.match_type_and_length(name):\n if bool(self.group_name_regex.match(name)):\n return True\n raise TypeError(\n \"Group name must be a valid unicode string containing only ASCII \"\n + \"alphanumerics, hyphens, or periods.\"\n )\n\n def valid_channel_names(self, names, receive=False):\n _non_empty_list = True if names else False\n _names_type = isinstance(names, list)\n assert _non_empty_list and _names_type, \"names must be a non-empty list\"\n\n assert all(\n self.valid_channel_name(channel, receive=receive) for channel in names\n )\n return True\n\n def non_local_name(self, name):\n \"\"\"\n Given a channel name, returns the \"non-local\" part. If the channel name\n is a process-specific channel (contains !) 
this means the part up to\n and including the !; if it is anything else, this means the full name.\n \"\"\"\n if \"!\" in name:\n return name[: name.find(\"!\") + 1]\n else:\n return name\n\n\nclass InMemoryChannelLayer(BaseChannelLayer):\n \"\"\"\n In-memory channel layer implementation\n \"\"\"\n\n def __init__(\n self,\n expiry=60,\n group_expiry=86400,\n capacity=100,\n channel_capacity=None,\n **kwargs\n ):\n super().__init__(\n expiry=expiry,\n capacity=capacity,\n channel_capacity=channel_capacity,\n **kwargs\n )\n self.channels = {}\n self.groups = {}\n self.group_expiry = group_expiry\n\n ### Channel layer API ###\n\n extensions = [\"groups\", \"flush\"]\n\n async def send(self, channel, message):\n \"\"\"\n Send a message onto a (general or specific) channel.\n \"\"\"\n # Typecheck\n assert isinstance(message, dict), \"message is not a dict\"\n assert self.valid_channel_name(channel), \"Channel name not valid\"\n # If it's a process-local channel, strip off local part and stick full name in message\n assert \"__asgi_channel__\" not in message\n\n queue = self.channels.setdefault(channel, asyncio.Queue())\n # Are we full\n if queue.qsize() >= self.capacity:\n raise ChannelFull(channel)\n\n # Add message\n await queue.put((time.time() + self.expiry, deepcopy(message)))\n\n async def receive(self, channel):\n \"\"\"\n Receive the first message that arrives on the channel.\n If more than one coroutine waits on the same channel, a random one\n of the waiting coroutines will get the result.\n \"\"\"\n assert self.valid_channel_name(channel)\n self._clean_expired()\n\n queue = self.channels.setdefault(channel, asyncio.Queue())\n\n # Do a plain direct receive\n _, message = await queue.get()\n\n # Delete if empty\n if queue.empty():\n del self.channels[channel]\n\n return message\n\n async def new_channel(self, prefix=\"specific.\"):\n \"\"\"\n Returns a new channel name that can be used by something in our\n process as a specific channel.\n \"\"\"\n return \"%s.inmemory!%s\" % (\n prefix,\n \"\".join(random.choice(string.ascii_letters) for i in range(12)),\n )\n\n ### Expire cleanup ###\n\n def _clean_expired(self):\n \"\"\"\n Goes through all messages and groups and removes those that are expired.\n Any channel with an expired message is removed from all groups.\n \"\"\"\n # Channel cleanup\n for channel, queue in list(self.channels.items()):\n remove = False\n # See if it's expired\n while not queue.empty() and queue._queue[0][0] < time.time():\n queue.get_nowait()\n remove = True\n # Any removal prompts group discard\n if remove:\n self._remove_from_groups(channel)\n # Is the channel now empty and needs deleting?\n if not queue:\n del self.channels[channel]\n\n # Group Expiration\n timeout = int(time.time()) - self.group_expiry\n for group in self.groups:\n for channel in list(self.groups.get(group, set())):\n # If join time is older than group_expiry end the group membership\n if (\n self.groups[group][channel]\n and int(self.groups[group][channel]) < timeout\n ):\n # Delete from group\n del self.groups[group][channel]\n\n ### Flush extension ###\n\n async def flush(self):\n self.channels = {}\n self.groups = {}\n\n async def close(self):\n # Nothing to go\n pass\n\n def _remove_from_groups(self, channel):\n \"\"\"\n Removes a channel from all groups. 
Used when a message on it expires.\n \"\"\"\n for channels in self.groups.values():\n if channel in channels:\n del channels[channel]\n\n ### Groups extension ###\n\n async def group_add(self, group, channel):\n \"\"\"\n Adds the channel name to a group.\n \"\"\"\n # Check the inputs\n assert self.valid_group_name(group), \"Group name not valid\"\n assert self.valid_channel_name(channel), \"Channel name not valid\"\n # Add to group dict\n self.groups.setdefault(group, {})\n self.groups[group][channel] = time.time()\n\n async def group_discard(self, group, channel):\n # Both should be text and valid\n assert self.valid_channel_name(channel), \"Invalid channel name\"\n assert self.valid_group_name(group), \"Invalid group name\"\n # Remove from group set\n if group in self.groups:\n if channel in self.groups[group]:\n del self.groups[group][channel]\n if not self.groups[group]:\n del self.groups[group]\n\n async def group_send(self, group, message):\n # Check types\n assert isinstance(message, dict), \"Message is not a dict\"\n assert self.valid_group_name(group), \"Invalid group name\"\n # Run clean\n self._clean_expired()\n # Send to each channel\n for channel in self.groups.get(group, set()):\n try:\n await self.send(channel, message)\n except ChannelFull:\n pass\n\n\ndef get_channel_layer(alias=DEFAULT_CHANNEL_LAYER):\n \"\"\"\n Returns a channel layer by alias, or None if it is not configured.\n \"\"\"\n try:\n return channel_layers[alias]\n except KeyError:\n return None\n\n\n# Default global instance of the channel layer manager\nchannel_layers = ChannelLayerManager()\n",
"path": "channels/layers.py"
}
] | diff --git a/channels/layers.py b/channels/layers.py
index d8d4ff591..5223d69b1 100644
--- a/channels/layers.py
+++ b/channels/layers.py
@@ -196,8 +196,6 @@ class InMemoryChannelLayer(BaseChannelLayer):
In-memory channel layer implementation
"""
- local_poll_interval = 0.01
-
def __init__(
self,
expiry=60,
|
kivy__python-for-android-2436 | "diff" files are ignored during "pip install ."
In `setup.py` the `diff` extension is not listed:
https://github.com/kivy/python-for-android/blob/develop/setup.py
```python
package_data = {'': ['*.tmpl',
'*.patch', ], }
```
and therefore this `diff` patch:
https://github.com/kivy/python-for-android/blob/develop/pythonforandroid/recipes/python3/patches/reproducible-buildinfo.diff
is not installed during `pip` invocation:
```sh
cd /tmp
git clone --depth 1 https://github.com/kivy/python-for-android.git
cd python-for-android
pip install .
```
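A minimal sketch of the kind of change that addresses this, assuming the missing pattern only needs to be added to the top-level `package_data` (the patch below does exactly that):
```python
# setup.py (sketch): ship *.diff files alongside the templates and patches
package_data = {'': ['*.tmpl',
                     '*.patch',
                     '*.diff', ], }
```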
| [
{
"content": "\nimport glob\nfrom io import open # for open(..,encoding=...) parameter in python 2\nfrom os import walk\nfrom os.path import join, dirname, sep\nimport re\nfrom setuptools import setup, find_packages\n\n# NOTE: All package data should also be set in MANIFEST.in\n\npackages = find_packages()\n\npackage_data = {'': ['*.tmpl',\n '*.patch', ], }\n\ndata_files = []\n\n\n# must be a single statement since buildozer is currently parsing it, refs:\n# https://github.com/kivy/buildozer/issues/722\ninstall_reqs = [\n 'appdirs', 'colorama>=0.3.3', 'jinja2', 'six',\n 'enum34; python_version<\"3.4\"', 'sh>=1.10; sys_platform!=\"nt\"',\n 'pep517<0.7.0', 'toml',\n]\n# (pep517 and toml are used by pythonpackage.py)\n\n\n# By specifying every file manually, package_data will be able to\n# include them in binary distributions. Note that we have to add\n# everything as a 'pythonforandroid' rule, using '' apparently doesn't\n# work.\ndef recursively_include(results, directory, patterns):\n for root, subfolders, files in walk(directory):\n for fn in files:\n if not any(glob.fnmatch.fnmatch(fn, pattern) for pattern in patterns):\n continue\n filename = join(root, fn)\n directory = 'pythonforandroid'\n if directory not in results:\n results[directory] = []\n results[directory].append(join(*filename.split(sep)[1:]))\n\n\nrecursively_include(package_data, 'pythonforandroid/recipes',\n ['*.patch', 'Setup*', '*.pyx', '*.py', '*.c', '*.h',\n '*.mk', '*.jam', ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps',\n ['*.properties', '*.xml', '*.java', '*.tmpl', '*.txt', '*.png',\n '*.mk', '*.c', '*.h', '*.py', '*.sh', '*.jpg', '*.aidl',\n '*.gradle', '.gitkeep', 'gradlew*', '*.jar', \"*.patch\", ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps',\n ['sdl-config', ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps/webview',\n ['*.html', ])\nrecursively_include(package_data, 'pythonforandroid',\n ['liblink', 'biglink', 'liblink.sh'])\n\nwith open(join(dirname(__file__), 'README.md'),\n encoding=\"utf-8\",\n errors=\"replace\",\n ) as fileh:\n long_description = fileh.read()\n\ninit_filen = join(dirname(__file__), 'pythonforandroid', '__init__.py')\nversion = None\ntry:\n with open(init_filen,\n encoding=\"utf-8\",\n errors=\"replace\"\n ) as fileh:\n lines = fileh.readlines()\nexcept IOError:\n pass\nelse:\n for line in lines:\n line = line.strip()\n if line.startswith('__version__ = '):\n matches = re.findall(r'[\"\\'].+[\"\\']', line)\n if matches:\n version = matches[0].strip(\"'\").strip('\"')\n break\nif version is None:\n raise Exception('Error: version could not be loaded from {}'.format(init_filen))\n\nsetup(name='python-for-android',\n version=version,\n description='Android APK packager for Python scripts and apps',\n long_description=long_description,\n long_description_content_type='text/markdown',\n python_requires=\">=3.6.0\",\n author='The Kivy team',\n author_email='[email protected]',\n url='https://github.com/kivy/python-for-android',\n license='MIT',\n install_requires=install_reqs,\n entry_points={\n 'console_scripts': [\n 'python-for-android = pythonforandroid.entrypoints:main',\n 'p4a = pythonforandroid.entrypoints:main',\n ],\n 'distutils.commands': [\n 'apk = pythonforandroid.bdistapk:BdistAPK',\n 'aar = pythonforandroid.bdistapk:BdistAAR',\n ],\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: Microsoft :: 
Windows',\n 'Operating System :: OS Independent',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Android',\n 'Programming Language :: C',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Topic :: Software Development',\n 'Topic :: Utilities',\n ],\n packages=packages,\n package_data=package_data,\n )\n",
"path": "setup.py"
}
] | [
{
"content": "\nimport glob\nfrom io import open # for open(..,encoding=...) parameter in python 2\nfrom os import walk\nfrom os.path import join, dirname, sep\nimport re\nfrom setuptools import setup, find_packages\n\n# NOTE: All package data should also be set in MANIFEST.in\n\npackages = find_packages()\n\npackage_data = {'': ['*.tmpl',\n '*.patch',\n '*.diff', ], }\n\ndata_files = []\n\n\n# must be a single statement since buildozer is currently parsing it, refs:\n# https://github.com/kivy/buildozer/issues/722\ninstall_reqs = [\n 'appdirs', 'colorama>=0.3.3', 'jinja2', 'six',\n 'enum34; python_version<\"3.4\"', 'sh>=1.10; sys_platform!=\"nt\"',\n 'pep517<0.7.0', 'toml',\n]\n# (pep517 and toml are used by pythonpackage.py)\n\n\n# By specifying every file manually, package_data will be able to\n# include them in binary distributions. Note that we have to add\n# everything as a 'pythonforandroid' rule, using '' apparently doesn't\n# work.\ndef recursively_include(results, directory, patterns):\n for root, subfolders, files in walk(directory):\n for fn in files:\n if not any(glob.fnmatch.fnmatch(fn, pattern) for pattern in patterns):\n continue\n filename = join(root, fn)\n directory = 'pythonforandroid'\n if directory not in results:\n results[directory] = []\n results[directory].append(join(*filename.split(sep)[1:]))\n\n\nrecursively_include(package_data, 'pythonforandroid/recipes',\n ['*.patch', 'Setup*', '*.pyx', '*.py', '*.c', '*.h',\n '*.mk', '*.jam', ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps',\n ['*.properties', '*.xml', '*.java', '*.tmpl', '*.txt', '*.png',\n '*.mk', '*.c', '*.h', '*.py', '*.sh', '*.jpg', '*.aidl',\n '*.gradle', '.gitkeep', 'gradlew*', '*.jar', \"*.patch\", ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps',\n ['sdl-config', ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps/webview',\n ['*.html', ])\nrecursively_include(package_data, 'pythonforandroid',\n ['liblink', 'biglink', 'liblink.sh'])\n\nwith open(join(dirname(__file__), 'README.md'),\n encoding=\"utf-8\",\n errors=\"replace\",\n ) as fileh:\n long_description = fileh.read()\n\ninit_filen = join(dirname(__file__), 'pythonforandroid', '__init__.py')\nversion = None\ntry:\n with open(init_filen,\n encoding=\"utf-8\",\n errors=\"replace\"\n ) as fileh:\n lines = fileh.readlines()\nexcept IOError:\n pass\nelse:\n for line in lines:\n line = line.strip()\n if line.startswith('__version__ = '):\n matches = re.findall(r'[\"\\'].+[\"\\']', line)\n if matches:\n version = matches[0].strip(\"'\").strip('\"')\n break\nif version is None:\n raise Exception('Error: version could not be loaded from {}'.format(init_filen))\n\nsetup(name='python-for-android',\n version=version,\n description='Android APK packager for Python scripts and apps',\n long_description=long_description,\n long_description_content_type='text/markdown',\n python_requires=\">=3.6.0\",\n author='The Kivy team',\n author_email='[email protected]',\n url='https://github.com/kivy/python-for-android',\n license='MIT',\n install_requires=install_reqs,\n entry_points={\n 'console_scripts': [\n 'python-for-android = pythonforandroid.entrypoints:main',\n 'p4a = pythonforandroid.entrypoints:main',\n ],\n 'distutils.commands': [\n 'apk = pythonforandroid.bdistapk:BdistAPK',\n 'aar = pythonforandroid.bdistapk:BdistAAR',\n ],\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: Microsoft 
:: Windows',\n 'Operating System :: OS Independent',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Android',\n 'Programming Language :: C',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Topic :: Software Development',\n 'Topic :: Utilities',\n ],\n packages=packages,\n package_data=package_data,\n )\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index 25e4a0d041..2d056124a1 100644
--- a/setup.py
+++ b/setup.py
@@ -11,7 +11,8 @@
packages = find_packages()
package_data = {'': ['*.tmpl',
- '*.patch', ], }
+ '*.patch',
+ '*.diff', ], }
data_files = []
|
ranaroussi__yfinance-295 | Deprecation warning due to invalid escape sequences
Deprecation warnings are raised due to invalid escape sequences. This can be fixed by using raw strings or by escaping the literals. pyupgrade also helps with automatic conversion: https://github.com/asottile/pyupgrade/
```
find . -iname '*.py' | grep -Ev 'test.py' | xargs -P4 -I{} python3.8 -Wall -m py_compile {}
./yfinance/utils.py:67: DeprecationWarning: invalid escape sequence \g
return [_re.sub("([a-z])([A-Z])", "\g<1> \g<2>", i).title() for i in o]
```
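A minimal standalone sketch of the raw-string fix for that line (using plain `re` instead of the module's `_re` alias); with `r"..."` the `\g<1>`/`\g<2>` backreferences are no longer parsed as string escapes:
```python
import re

def camel2title(o):
    # raw string: \g<1> and \g<2> reach re.sub as backreferences, not escape sequences
    return [re.sub("([a-z])([A-Z])", r"\g<1> \g<2>", i).title() for i in o]

print(camel2title(["trailingAnnualDividendRate"]))  # ['Trailing Annual Dividend Rate']
```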
| [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Yahoo! Finance market data downloader (+fix for Pandas Datareader)\n# https://github.com/ranaroussi/yfinance\n#\n# Copyright 2017-2019 Ran Aroussi\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom __future__ import print_function\n\nimport requests as _requests\nimport re as _re\nimport pandas as _pd\nimport numpy as _np\nimport sys as _sys\nimport re as _re\n\ntry:\n import ujson as _json\nexcept ImportError:\n import json as _json\n\n\ndef empty_df(index=[]):\n empty = _pd.DataFrame(index=index, data={\n 'Open': _np.nan, 'High': _np.nan, 'Low': _np.nan,\n 'Close': _np.nan, 'Adj Close': _np.nan, 'Volume': _np.nan})\n empty.index.name = 'Date'\n return empty\n\n\ndef get_json(url, proxy=None):\n html = _requests.get(url=url, proxies=proxy).text\n\n if \"QuoteSummaryStore\" not in html:\n html = _requests.get(url=url, proxies=proxy).text\n if \"QuoteSummaryStore\" not in html:\n return {}\n\n json_str = html.split('root.App.main =')[1].split(\n '(this)')[0].split(';\\n}')[0].strip()\n data = _json.loads(json_str)[\n 'context']['dispatcher']['stores']['QuoteSummaryStore']\n\n # return data\n new_data = _json.dumps(data).replace('{}', 'null')\n new_data = _re.sub(\n r'\\{[\\'|\\\"]raw[\\'|\\\"]:(.*?),(.*?)\\}', r'\\1', new_data)\n\n return _json.loads(new_data)\n\n\ndef camel2title(o):\n return [_re.sub(\"([a-z])([A-Z])\", \"\\g<1> \\g<2>\", i).title() for i in o]\n\n\ndef auto_adjust(data):\n df = data.copy()\n ratio = df[\"Close\"] / df[\"Adj Close\"]\n df[\"Adj Open\"] = df[\"Open\"] / ratio\n df[\"Adj High\"] = df[\"High\"] / ratio\n df[\"Adj Low\"] = df[\"Low\"] / ratio\n\n df.drop(\n [\"Open\", \"High\", \"Low\", \"Close\"],\n axis=1, inplace=True)\n\n df.rename(columns={\n \"Adj Open\": \"Open\", \"Adj High\": \"High\",\n \"Adj Low\": \"Low\", \"Adj Close\": \"Close\"\n }, inplace=True)\n\n df = df[[\"Open\", \"High\", \"Low\", \"Close\", \"Volume\"]]\n return df[[\"Open\", \"High\", \"Low\", \"Close\", \"Volume\"]]\n\n\ndef back_adjust(data):\n \"\"\" back-adjusted data to mimic true historical prices \"\"\"\n\n df = data.copy()\n ratio = df[\"Adj Close\"] / df[\"Close\"]\n df[\"Adj Open\"] = df[\"Open\"] * ratio\n df[\"Adj High\"] = df[\"High\"] * ratio\n df[\"Adj Low\"] = df[\"Low\"] * ratio\n\n df.drop(\n [\"Open\", \"High\", \"Low\", \"Adj Close\"],\n axis=1, inplace=True)\n\n df.rename(columns={\n \"Adj Open\": \"Open\", \"Adj High\": \"High\",\n \"Adj Low\": \"Low\"\n }, inplace=True)\n\n return df[[\"Open\", \"High\", \"Low\", \"Close\", \"Volume\"]]\n\n\ndef parse_quotes(data, tz=None):\n timestamps = data[\"timestamp\"]\n ohlc = data[\"indicators\"][\"quote\"][0]\n volumes = ohlc[\"volume\"]\n opens = ohlc[\"open\"]\n closes = ohlc[\"close\"]\n lows = ohlc[\"low\"]\n highs = ohlc[\"high\"]\n\n adjclose = closes\n if \"adjclose\" in data[\"indicators\"]:\n adjclose = data[\"indicators\"][\"adjclose\"][0][\"adjclose\"]\n\n quotes = _pd.DataFrame({\"Open\": opens,\n \"High\": highs,\n \"Low\": lows,\n 
\"Close\": closes,\n \"Adj Close\": adjclose,\n \"Volume\": volumes})\n\n quotes.index = _pd.to_datetime(timestamps, unit=\"s\")\n quotes.sort_index(inplace=True)\n\n if tz is not None:\n quotes.index = quotes.index.tz_localize(tz)\n\n return quotes\n\n\ndef parse_actions(data, tz=None):\n dividends = _pd.DataFrame(columns=[\"Dividends\"])\n splits = _pd.DataFrame(columns=[\"Stock Splits\"])\n\n if \"events\" in data:\n if \"dividends\" in data[\"events\"]:\n dividends = _pd.DataFrame(\n data=list(data[\"events\"][\"dividends\"].values()))\n dividends.set_index(\"date\", inplace=True)\n dividends.index = _pd.to_datetime(dividends.index, unit=\"s\")\n dividends.sort_index(inplace=True)\n if tz is not None:\n dividends.index = dividends.index.tz_localize(tz)\n\n dividends.columns = [\"Dividends\"]\n\n if \"splits\" in data[\"events\"]:\n splits = _pd.DataFrame(\n data=list(data[\"events\"][\"splits\"].values()))\n splits.set_index(\"date\", inplace=True)\n splits.index = _pd.to_datetime(splits.index, unit=\"s\")\n splits.sort_index(inplace=True)\n if tz is not None:\n splits.index = splits.index.tz_localize(tz)\n splits[\"Stock Splits\"] = splits[\"numerator\"] / \\\n splits[\"denominator\"]\n splits = splits[\"Stock Splits\"]\n\n return dividends, splits\n\n\nclass ProgressBar:\n def __init__(self, iterations, text='completed'):\n self.text = text\n self.iterations = iterations\n self.prog_bar = '[]'\n self.fill_char = '*'\n self.width = 50\n self.__update_amount(0)\n self.elapsed = 1\n\n def completed(self):\n if self.elapsed > self.iterations:\n self.elapsed = self.iterations\n self.update_iteration(1)\n print('\\r' + str(self), end='')\n _sys.stdout.flush()\n print()\n\n def animate(self, iteration=None):\n if iteration is None:\n self.elapsed += 1\n iteration = self.elapsed\n else:\n self.elapsed += iteration\n\n print('\\r' + str(self), end='')\n _sys.stdout.flush()\n self.update_iteration()\n\n def update_iteration(self, val=None):\n val = val if val is not None else self.elapsed / float(self.iterations)\n self.__update_amount(val * 100.0)\n self.prog_bar += ' %s of %s %s' % (\n self.elapsed, self.iterations, self.text)\n\n def __update_amount(self, new_amount):\n percent_done = int(round((new_amount / 100.0) * 100.0))\n all_full = self.width - 2\n num_hashes = int(round((percent_done / 100.0) * all_full))\n self.prog_bar = '[' + self.fill_char * \\\n num_hashes + ' ' * (all_full - num_hashes) + ']'\n pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))\n pct_string = '%d%%' % percent_done\n self.prog_bar = self.prog_bar[0:pct_place] + \\\n (pct_string + self.prog_bar[pct_place + len(pct_string):])\n\n def __str__(self):\n return str(self.prog_bar)\n",
"path": "yfinance/utils.py"
}
] | [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Yahoo! Finance market data downloader (+fix for Pandas Datareader)\n# https://github.com/ranaroussi/yfinance\n#\n# Copyright 2017-2019 Ran Aroussi\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom __future__ import print_function\n\nimport requests as _requests\nimport re as _re\nimport pandas as _pd\nimport numpy as _np\nimport sys as _sys\nimport re as _re\n\ntry:\n import ujson as _json\nexcept ImportError:\n import json as _json\n\n\ndef empty_df(index=[]):\n empty = _pd.DataFrame(index=index, data={\n 'Open': _np.nan, 'High': _np.nan, 'Low': _np.nan,\n 'Close': _np.nan, 'Adj Close': _np.nan, 'Volume': _np.nan})\n empty.index.name = 'Date'\n return empty\n\n\ndef get_json(url, proxy=None):\n html = _requests.get(url=url, proxies=proxy).text\n\n if \"QuoteSummaryStore\" not in html:\n html = _requests.get(url=url, proxies=proxy).text\n if \"QuoteSummaryStore\" not in html:\n return {}\n\n json_str = html.split('root.App.main =')[1].split(\n '(this)')[0].split(';\\n}')[0].strip()\n data = _json.loads(json_str)[\n 'context']['dispatcher']['stores']['QuoteSummaryStore']\n\n # return data\n new_data = _json.dumps(data).replace('{}', 'null')\n new_data = _re.sub(\n r'\\{[\\'|\\\"]raw[\\'|\\\"]:(.*?),(.*?)\\}', r'\\1', new_data)\n\n return _json.loads(new_data)\n\n\ndef camel2title(o):\n return [_re.sub(\"([a-z])([A-Z])\", r\"\\g<1> \\g<2>\", i).title() for i in o]\n\n\ndef auto_adjust(data):\n df = data.copy()\n ratio = df[\"Close\"] / df[\"Adj Close\"]\n df[\"Adj Open\"] = df[\"Open\"] / ratio\n df[\"Adj High\"] = df[\"High\"] / ratio\n df[\"Adj Low\"] = df[\"Low\"] / ratio\n\n df.drop(\n [\"Open\", \"High\", \"Low\", \"Close\"],\n axis=1, inplace=True)\n\n df.rename(columns={\n \"Adj Open\": \"Open\", \"Adj High\": \"High\",\n \"Adj Low\": \"Low\", \"Adj Close\": \"Close\"\n }, inplace=True)\n\n df = df[[\"Open\", \"High\", \"Low\", \"Close\", \"Volume\"]]\n return df[[\"Open\", \"High\", \"Low\", \"Close\", \"Volume\"]]\n\n\ndef back_adjust(data):\n \"\"\" back-adjusted data to mimic true historical prices \"\"\"\n\n df = data.copy()\n ratio = df[\"Adj Close\"] / df[\"Close\"]\n df[\"Adj Open\"] = df[\"Open\"] * ratio\n df[\"Adj High\"] = df[\"High\"] * ratio\n df[\"Adj Low\"] = df[\"Low\"] * ratio\n\n df.drop(\n [\"Open\", \"High\", \"Low\", \"Adj Close\"],\n axis=1, inplace=True)\n\n df.rename(columns={\n \"Adj Open\": \"Open\", \"Adj High\": \"High\",\n \"Adj Low\": \"Low\"\n }, inplace=True)\n\n return df[[\"Open\", \"High\", \"Low\", \"Close\", \"Volume\"]]\n\n\ndef parse_quotes(data, tz=None):\n timestamps = data[\"timestamp\"]\n ohlc = data[\"indicators\"][\"quote\"][0]\n volumes = ohlc[\"volume\"]\n opens = ohlc[\"open\"]\n closes = ohlc[\"close\"]\n lows = ohlc[\"low\"]\n highs = ohlc[\"high\"]\n\n adjclose = closes\n if \"adjclose\" in data[\"indicators\"]:\n adjclose = data[\"indicators\"][\"adjclose\"][0][\"adjclose\"]\n\n quotes = _pd.DataFrame({\"Open\": opens,\n \"High\": highs,\n \"Low\": lows,\n 
\"Close\": closes,\n \"Adj Close\": adjclose,\n \"Volume\": volumes})\n\n quotes.index = _pd.to_datetime(timestamps, unit=\"s\")\n quotes.sort_index(inplace=True)\n\n if tz is not None:\n quotes.index = quotes.index.tz_localize(tz)\n\n return quotes\n\n\ndef parse_actions(data, tz=None):\n dividends = _pd.DataFrame(columns=[\"Dividends\"])\n splits = _pd.DataFrame(columns=[\"Stock Splits\"])\n\n if \"events\" in data:\n if \"dividends\" in data[\"events\"]:\n dividends = _pd.DataFrame(\n data=list(data[\"events\"][\"dividends\"].values()))\n dividends.set_index(\"date\", inplace=True)\n dividends.index = _pd.to_datetime(dividends.index, unit=\"s\")\n dividends.sort_index(inplace=True)\n if tz is not None:\n dividends.index = dividends.index.tz_localize(tz)\n\n dividends.columns = [\"Dividends\"]\n\n if \"splits\" in data[\"events\"]:\n splits = _pd.DataFrame(\n data=list(data[\"events\"][\"splits\"].values()))\n splits.set_index(\"date\", inplace=True)\n splits.index = _pd.to_datetime(splits.index, unit=\"s\")\n splits.sort_index(inplace=True)\n if tz is not None:\n splits.index = splits.index.tz_localize(tz)\n splits[\"Stock Splits\"] = splits[\"numerator\"] / \\\n splits[\"denominator\"]\n splits = splits[\"Stock Splits\"]\n\n return dividends, splits\n\n\nclass ProgressBar:\n def __init__(self, iterations, text='completed'):\n self.text = text\n self.iterations = iterations\n self.prog_bar = '[]'\n self.fill_char = '*'\n self.width = 50\n self.__update_amount(0)\n self.elapsed = 1\n\n def completed(self):\n if self.elapsed > self.iterations:\n self.elapsed = self.iterations\n self.update_iteration(1)\n print('\\r' + str(self), end='')\n _sys.stdout.flush()\n print()\n\n def animate(self, iteration=None):\n if iteration is None:\n self.elapsed += 1\n iteration = self.elapsed\n else:\n self.elapsed += iteration\n\n print('\\r' + str(self), end='')\n _sys.stdout.flush()\n self.update_iteration()\n\n def update_iteration(self, val=None):\n val = val if val is not None else self.elapsed / float(self.iterations)\n self.__update_amount(val * 100.0)\n self.prog_bar += ' %s of %s %s' % (\n self.elapsed, self.iterations, self.text)\n\n def __update_amount(self, new_amount):\n percent_done = int(round((new_amount / 100.0) * 100.0))\n all_full = self.width - 2\n num_hashes = int(round((percent_done / 100.0) * all_full))\n self.prog_bar = '[' + self.fill_char * \\\n num_hashes + ' ' * (all_full - num_hashes) + ']'\n pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))\n pct_string = '%d%%' % percent_done\n self.prog_bar = self.prog_bar[0:pct_place] + \\\n (pct_string + self.prog_bar[pct_place + len(pct_string):])\n\n def __str__(self):\n return str(self.prog_bar)\n",
"path": "yfinance/utils.py"
}
] | diff --git a/yfinance/utils.py b/yfinance/utils.py
index 53fdc0808..79ee9cfc5 100644
--- a/yfinance/utils.py
+++ b/yfinance/utils.py
@@ -64,7 +64,7 @@ def get_json(url, proxy=None):
def camel2title(o):
- return [_re.sub("([a-z])([A-Z])", "\g<1> \g<2>", i).title() for i in o]
+ return [_re.sub("([a-z])([A-Z])", r"\g<1> \g<2>", i).title() for i in o]
def auto_adjust(data):
|
google__turbinia-1070 | Missing sys module import in logger.py
The logger module (`turbinia/config/logger.py`) is missing an import statement for `sys`, even though `setup()` calls `sys.exit(1)` when the config cannot be loaded.
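A minimal sketch of the import block at the top of `turbinia/config/logger.py` with the missing line added (this mirrors the patch below; it is an excerpt, not runnable outside the turbinia package):
```python
from __future__ import unicode_literals
import logging

import warnings
import logging.handlers
import os
import sys  # needed for the sys.exit(1) call in setup() when config loading fails

from turbinia import config
from turbinia import TurbiniaException
```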
| [
{
"content": "# -*- coding: utf-8 -*-\n# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Sets up logging.\"\"\"\n\nfrom __future__ import unicode_literals\nimport logging\n\nimport warnings\nimport logging.handlers\nimport os\n\nfrom turbinia import config\nfrom turbinia import TurbiniaException\n\n# Environment variable to look for node name in\nENVNODENAME = 'NODE_NAME'\n\n\ndef setup(need_file_handler=True, need_stream_handler=True, log_file_path=None):\n \"\"\"Set up logging parameters.\n\n This will also set the root logger, which is the default logger when a named\n logger is not specified. We currently use 'turbinia' as the named logger,\n however some external modules that are called by Turbinia can use the root\n logger, so we want to be able to optionally configure that as well.\n \"\"\"\n # Remove known warning about credentials\n warnings.filterwarnings(\n 'ignore', 'Your application has authenticated using end user credentials')\n\n logger = logging.getLogger('turbinia')\n # Eliminate double logging from root logger\n logger.propagate = False\n\n # We only need a handler if one of that type doesn't exist already\n if logger.handlers:\n for handler in logger.handlers:\n # Want to do strict type-checking here because is instance will include\n # subclasses and so won't distinguish between StreamHandlers and\n # FileHandlers.\n # pylint: disable=unidiomatic-typecheck\n if type(handler) == logging.FileHandler:\n need_file_handler = False\n\n # pylint: disable=unidiomatic-typecheck\n if type(handler) == logging.StreamHandler:\n need_stream_handler = False\n\n if need_file_handler:\n try:\n config.LoadConfig()\n except TurbiniaException as exception:\n print(\n 'Could not load config file ({0!s}).\\n{1:s}'.format(\n exception, config.CONFIG_MSG))\n sys.exit(1)\n\n # Check if a user specified log path was provided else create default path\n if not log_file_path:\n log_name = os.uname().nodename\n # Check if NODE_NAME available for GKE setups\n if ENVNODENAME in os.environ:\n log_name = log_name + '.{0!s}'.format(os.environ[ENVNODENAME])\n log_file_path = os.path.join(config.LOG_DIR, log_name) + '.log'\n\n file_handler = logging.FileHandler(log_file_path)\n formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')\n file_handler.setFormatter(formatter)\n file_handler.setLevel(logging.DEBUG)\n logger.addHandler(file_handler)\n\n console_handler = logging.StreamHandler()\n formatter = logging.Formatter(\n '%(asctime)s [%(levelname)s] %(message)s', \"%Y-%m-%d %H:%M:%S\")\n console_handler.setFormatter(formatter)\n if need_stream_handler:\n logger.addHandler(console_handler)\n\n # Configure the root logger to use exactly our handlers because other modules\n # like PSQ use this, and we want to see log messages from it when executing\n # from CLI.\n root_log = logging.getLogger()\n for handler in root_log.handlers:\n root_log.removeHandler(handler)\n root_log.addHandler(console_handler)\n if need_file_handler:\n 
root_log.addHandler(file_handler)\n\n # Set filelock logging to ERROR due to log spam\n logging.getLogger(\"filelock\").setLevel(logging.ERROR)\n",
"path": "turbinia/config/logger.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Sets up logging.\"\"\"\n\nfrom __future__ import unicode_literals\nimport logging\n\nimport warnings\nimport logging.handlers\nimport os\nimport sys\n\nfrom turbinia import config\nfrom turbinia import TurbiniaException\n\n# Environment variable to look for node name in\nENVNODENAME = 'NODE_NAME'\n\n\ndef setup(need_file_handler=True, need_stream_handler=True, log_file_path=None):\n \"\"\"Set up logging parameters.\n\n This will also set the root logger, which is the default logger when a named\n logger is not specified. We currently use 'turbinia' as the named logger,\n however some external modules that are called by Turbinia can use the root\n logger, so we want to be able to optionally configure that as well.\n \"\"\"\n # Remove known warning about credentials\n warnings.filterwarnings(\n 'ignore', 'Your application has authenticated using end user credentials')\n\n logger = logging.getLogger('turbinia')\n # Eliminate double logging from root logger\n logger.propagate = False\n\n # We only need a handler if one of that type doesn't exist already\n if logger.handlers:\n for handler in logger.handlers:\n # Want to do strict type-checking here because is instance will include\n # subclasses and so won't distinguish between StreamHandlers and\n # FileHandlers.\n # pylint: disable=unidiomatic-typecheck\n if type(handler) == logging.FileHandler:\n need_file_handler = False\n\n # pylint: disable=unidiomatic-typecheck\n if type(handler) == logging.StreamHandler:\n need_stream_handler = False\n\n if need_file_handler:\n try:\n config.LoadConfig()\n except TurbiniaException as exception:\n print(\n 'Could not load config file ({0!s}).\\n{1:s}'.format(\n exception, config.CONFIG_MSG))\n sys.exit(1)\n\n # Check if a user specified log path was provided else create default path\n if not log_file_path:\n log_name = os.uname().nodename\n # Check if NODE_NAME available for GKE setups\n if ENVNODENAME in os.environ:\n log_name = log_name + '.{0!s}'.format(os.environ[ENVNODENAME])\n log_file_path = os.path.join(config.LOG_DIR, log_name) + '.log'\n\n file_handler = logging.FileHandler(log_file_path)\n formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')\n file_handler.setFormatter(formatter)\n file_handler.setLevel(logging.DEBUG)\n logger.addHandler(file_handler)\n\n console_handler = logging.StreamHandler()\n formatter = logging.Formatter(\n '%(asctime)s [%(levelname)s] %(message)s', \"%Y-%m-%d %H:%M:%S\")\n console_handler.setFormatter(formatter)\n if need_stream_handler:\n logger.addHandler(console_handler)\n\n # Configure the root logger to use exactly our handlers because other modules\n # like PSQ use this, and we want to see log messages from it when executing\n # from CLI.\n root_log = logging.getLogger()\n for handler in root_log.handlers:\n root_log.removeHandler(handler)\n root_log.addHandler(console_handler)\n if need_file_handler:\n 
root_log.addHandler(file_handler)\n\n # Set filelock logging to ERROR due to log spam\n logging.getLogger(\"filelock\").setLevel(logging.ERROR)\n",
"path": "turbinia/config/logger.py"
}
] | diff --git a/turbinia/config/logger.py b/turbinia/config/logger.py
index 2d10830bf..f15f39756 100644
--- a/turbinia/config/logger.py
+++ b/turbinia/config/logger.py
@@ -20,6 +20,7 @@
import warnings
import logging.handlers
import os
+import sys
from turbinia import config
from turbinia import TurbiniaException
|
scikit-image__scikit-image-1124 | NameError on del version when init has ImportError
In `__init__.py`, `del version` raises a `NameError` when the `ImportError` branch is taken.
```
try:
from .version import version as __version__
except ImportError:
__version__ = "unbuilt-dev"
del version
```
should be
```
try:
from .version import version as __version__
except ImportError:
__version__ = "unbuilt-dev"
else:
del version
```
| [
{
"content": "\"\"\"Image Processing SciKit (Toolbox for SciPy)\n\n``scikit-image`` (a.k.a. ``skimage``) is a collection of algorithms for image\nprocessing and computer vision.\n\nThe main package of ``skimage`` only provides a few utilities for converting\nbetween image data types; for most features, you need to import one of the\nfollowing subpackages:\n\nSubpackages\n-----------\ncolor\n Color space conversion.\ndata\n Test images and example data.\ndraw\n Drawing primitives (lines, text, etc.) that operate on NumPy arrays.\nexposure\n Image intensity adjustment, e.g., histogram equalization, etc.\nfeature\n Feature detection and extraction, e.g., texture analysis corners, etc.\nfilter\n Sharpening, edge finding, rank filters, thresholding, etc.\ngraph\n Graph-theoretic operations, e.g., shortest paths.\nio\n Reading, saving, and displaying images and video.\nmeasure\n Measurement of image properties, e.g., similarity and contours.\nmorphology\n Morphological operations, e.g., opening or skeletonization.\nnovice\n Simplified interface for teaching purposes.\nrestoration\n Restoration algorithms, e.g., deconvolution algorithms, denoising, etc.\nsegmentation\n Partitioning an image into multiple regions.\ntransform\n Geometric and other transforms, e.g., rotation or the Radon transform.\nutil\n Generic utilities.\nviewer\n A simple graphical user interface for visualizing results and exploring\n parameters.\n\nUtility Functions\n-----------------\nimg_as_float\n Convert an image to floating point format, with values in [0, 1].\nimg_as_uint\n Convert an image to unsigned integer format, with values in [0, 65535].\nimg_as_int\n Convert an image to signed integer format, with values in [-32768, 32767].\nimg_as_ubyte\n Convert an image to unsigned byte format, with values in [0, 255].\n\n\"\"\"\n\nimport os.path as _osp\nimport imp as _imp\nimport functools as _functools\nimport warnings as _warnings\nfrom skimage._shared.utils import deprecated as _deprecated\n\npkg_dir = _osp.abspath(_osp.dirname(__file__))\ndata_dir = _osp.join(pkg_dir, 'data')\n\ntry:\n from .version import version as __version__\nexcept ImportError:\n __version__ = \"unbuilt-dev\"\ndel version\n\n\ntry:\n _imp.find_module('nose')\nexcept ImportError:\n def _test(verbose=False):\n \"\"\"This would run all unit tests, but nose couldn't be\n imported so the test suite can not run.\n \"\"\"\n raise ImportError(\"Could not load nose. Unit tests not available.\")\n\n def _doctest(verbose=False):\n \"\"\"This would run all doc tests, but nose couldn't be\n imported so the test suite can not run.\n \"\"\"\n raise ImportError(\"Could not load nose. 
Doctests not available.\")\nelse:\n def _test(doctest=False, verbose=False):\n \"\"\"Run all unit tests.\"\"\"\n import nose\n args = ['', pkg_dir, '--exe', '--ignore-files=^_test']\n if verbose:\n args.extend(['-v', '-s'])\n if doctest:\n args.extend(['--with-doctest', '--ignore-files=^\\.',\n '--ignore-files=^setup\\.py$$', '--ignore-files=test'])\n # Make sure warnings do not break the doc tests\n with _warnings.catch_warnings():\n _warnings.simplefilter(\"ignore\")\n success = nose.run('skimage', argv=args)\n else:\n success = nose.run('skimage', argv=args)\n # Return sys.exit code\n if success:\n return 0\n else:\n return 1\n\n\n# do not use `test` as function name as this leads to a recursion problem with\n# the nose test suite\ntest = _test\ntest_verbose = _functools.partial(test, verbose=True)\ntest_verbose.__doc__ = test.__doc__\ndoctest = _functools.partial(test, doctest=True)\ndoctest.__doc__ = doctest.__doc__\ndoctest_verbose = _functools.partial(test, doctest=True, verbose=True)\ndoctest_verbose.__doc__ = doctest.__doc__\n\n\nclass _Log(Warning):\n pass\n\n\nclass _FakeLog(object):\n def __init__(self, name):\n \"\"\"\n Parameters\n ----------\n name : str\n Name of the log.\n repeat : bool\n Whether to print repeating messages more than once (False by\n default).\n \"\"\"\n self._name = name\n\n warnings.simplefilter(\"always\", _Log)\n\n self._warnings = _warnings\n\n def _warn(self, msg, wtype):\n self._warnings.warn('%s: %s' % (wtype, msg), _Log)\n\n def debug(self, msg):\n self._warn(msg, 'DEBUG')\n\n def info(self, msg):\n self._warn(msg, 'INFO')\n\n def warning(self, msg):\n self._warn(msg, 'WARNING')\n\n warn = warning\n\n def error(self, msg):\n self._warn(msg, 'ERROR')\n\n def critical(self, msg):\n self._warn(msg, 'CRITICAL')\n\n def addHandler(*args):\n pass\n\n def setLevel(*args):\n pass\n\n\nfrom .util.dtype import *\n",
"path": "skimage/__init__.py"
}
] | [
{
"content": "\"\"\"Image Processing SciKit (Toolbox for SciPy)\n\n``scikit-image`` (a.k.a. ``skimage``) is a collection of algorithms for image\nprocessing and computer vision.\n\nThe main package of ``skimage`` only provides a few utilities for converting\nbetween image data types; for most features, you need to import one of the\nfollowing subpackages:\n\nSubpackages\n-----------\ncolor\n Color space conversion.\ndata\n Test images and example data.\ndraw\n Drawing primitives (lines, text, etc.) that operate on NumPy arrays.\nexposure\n Image intensity adjustment, e.g., histogram equalization, etc.\nfeature\n Feature detection and extraction, e.g., texture analysis corners, etc.\nfilter\n Sharpening, edge finding, rank filters, thresholding, etc.\ngraph\n Graph-theoretic operations, e.g., shortest paths.\nio\n Reading, saving, and displaying images and video.\nmeasure\n Measurement of image properties, e.g., similarity and contours.\nmorphology\n Morphological operations, e.g., opening or skeletonization.\nnovice\n Simplified interface for teaching purposes.\nrestoration\n Restoration algorithms, e.g., deconvolution algorithms, denoising, etc.\nsegmentation\n Partitioning an image into multiple regions.\ntransform\n Geometric and other transforms, e.g., rotation or the Radon transform.\nutil\n Generic utilities.\nviewer\n A simple graphical user interface for visualizing results and exploring\n parameters.\n\nUtility Functions\n-----------------\nimg_as_float\n Convert an image to floating point format, with values in [0, 1].\nimg_as_uint\n Convert an image to unsigned integer format, with values in [0, 65535].\nimg_as_int\n Convert an image to signed integer format, with values in [-32768, 32767].\nimg_as_ubyte\n Convert an image to unsigned byte format, with values in [0, 255].\n\n\"\"\"\n\nimport os.path as _osp\nimport imp as _imp\nimport functools as _functools\nimport warnings as _warnings\nfrom skimage._shared.utils import deprecated as _deprecated\n\npkg_dir = _osp.abspath(_osp.dirname(__file__))\ndata_dir = _osp.join(pkg_dir, 'data')\n\ntry:\n from .version import version as __version__\nexcept ImportError:\n __version__ = \"unbuilt-dev\"\nelse:\n del version\n\n\ntry:\n _imp.find_module('nose')\nexcept ImportError:\n def _test(verbose=False):\n \"\"\"This would run all unit tests, but nose couldn't be\n imported so the test suite can not run.\n \"\"\"\n raise ImportError(\"Could not load nose. Unit tests not available.\")\n\n def _doctest(verbose=False):\n \"\"\"This would run all doc tests, but nose couldn't be\n imported so the test suite can not run.\n \"\"\"\n raise ImportError(\"Could not load nose. 
Doctests not available.\")\nelse:\n def _test(doctest=False, verbose=False):\n \"\"\"Run all unit tests.\"\"\"\n import nose\n args = ['', pkg_dir, '--exe', '--ignore-files=^_test']\n if verbose:\n args.extend(['-v', '-s'])\n if doctest:\n args.extend(['--with-doctest', '--ignore-files=^\\.',\n '--ignore-files=^setup\\.py$$', '--ignore-files=test'])\n # Make sure warnings do not break the doc tests\n with _warnings.catch_warnings():\n _warnings.simplefilter(\"ignore\")\n success = nose.run('skimage', argv=args)\n else:\n success = nose.run('skimage', argv=args)\n # Return sys.exit code\n if success:\n return 0\n else:\n return 1\n\n\n# do not use `test` as function name as this leads to a recursion problem with\n# the nose test suite\ntest = _test\ntest_verbose = _functools.partial(test, verbose=True)\ntest_verbose.__doc__ = test.__doc__\ndoctest = _functools.partial(test, doctest=True)\ndoctest.__doc__ = doctest.__doc__\ndoctest_verbose = _functools.partial(test, doctest=True, verbose=True)\ndoctest_verbose.__doc__ = doctest.__doc__\n\n\nclass _Log(Warning):\n pass\n\n\nclass _FakeLog(object):\n def __init__(self, name):\n \"\"\"\n Parameters\n ----------\n name : str\n Name of the log.\n repeat : bool\n Whether to print repeating messages more than once (False by\n default).\n \"\"\"\n self._name = name\n\n warnings.simplefilter(\"always\", _Log)\n\n self._warnings = _warnings\n\n def _warn(self, msg, wtype):\n self._warnings.warn('%s: %s' % (wtype, msg), _Log)\n\n def debug(self, msg):\n self._warn(msg, 'DEBUG')\n\n def info(self, msg):\n self._warn(msg, 'INFO')\n\n def warning(self, msg):\n self._warn(msg, 'WARNING')\n\n warn = warning\n\n def error(self, msg):\n self._warn(msg, 'ERROR')\n\n def critical(self, msg):\n self._warn(msg, 'CRITICAL')\n\n def addHandler(*args):\n pass\n\n def setLevel(*args):\n pass\n\n\nfrom .util.dtype import *\n",
"path": "skimage/__init__.py"
}
] | diff --git a/skimage/__init__.py b/skimage/__init__.py
index 59c80ed2c15..d1f98ff7484 100644
--- a/skimage/__init__.py
+++ b/skimage/__init__.py
@@ -69,7 +69,8 @@
from .version import version as __version__
except ImportError:
__version__ = "unbuilt-dev"
-del version
+else:
+ del version
try:
|
ethereum__web3.py-3083 | RuntimeError: release unlocked lock
* Version: 6.8.0
* Python: 3.11.1
* OS: linux
* `pip freeze` output
```
pip freeze
aiofiles==23.1.0
aiohttp==3.8.5
aiosignal==1.3.1
alembic==1.11.3
async-timeout==4.0.2
asyncpg==0.28.0
attrs==23.1.0
base58==2.1.1
bitarray==2.7.5
certifi==2023.5.7
charset-normalizer==3.1.0
cytoolz==0.12.1
ecs-logging==2.1.0
eth-abi==4.1.0
eth-account==0.9.0
eth-hash==0.5.2
eth-keyfile==0.6.1
eth-keys==0.4.0
eth-rlp==0.3.0
eth-typing==3.4.0
eth-utils==2.2.0
frozenlist==1.3.3
grpcio==1.57.0
grpcio-tools==1.57.0
hexbytes==0.3.1
html5tagger==1.3.0
httptools==0.5.0
idna==3.4
jsonschema==4.17.3
lru-dict==1.2.0
Mako==1.2.4
MarkupSafe==2.1.2
multidict==6.0.4
numpy==1.25.2
parsimonious==0.9.0
prometheus-client==0.17.1
protobuf==4.23.0
pycryptodome==3.18.0
pydantic==1.10.12
pyrsistent==0.19.3
pyunormalize==15.0.0
PyYAML==6.0
redis==5.0.0
regex==2023.6.3
requests==2.31.0
rlp==3.0.0
sanic==23.6.0
sanic-ext==23.6.0
sanic-routing==23.6.0
SQLAlchemy==2.0.20
toolz==0.12.0
tracerite==1.1.0
typing_extensions==4.5.0
ujson==5.7.0
urllib3==2.0.2
uvloop==0.17.0
web3==6.8.0
websockets==11.0.3
yarl==1.9.2
```
### What was wrong?
* The code which produced the error
```py
provider = AsyncHTTPProvider(request.app.config.get("ETHEREUM_MAINNET_URL"))
w3 = AsyncWeb3(provider)
contract = w3.eth.contract(
address=MAINNET_TOKEN_ADDRESS_DETECTION,
abi=single_call_balance_checker_abi,
)
address_keys = list(TOKEN_METADATA_MAP.keys())
(native_balance, balance_values) = await asyncio.gather(
w3.eth.get_balance(to_checksum_address(address)),
contract.functions.balances(
[to_checksum_address(address)],
address_keys,
).call(),
)
```
* The full output of the error
```py
File "handle_request", line 97, in handle_request
File "/app/data_service/ethereum/views/balances.py", line 54, in get_balances
(native_balance, balance_values) = await asyncio.gather(
^^^^^^^^^^^^^^^^^^^^^
File "/app/.venv/lib/python3.11/site-packages/web3/eth/async_eth.py", line 435, in get_balance
return await self._get_balance(account, block_identifier)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/app/.venv/lib/python3.11/site-packages/web3/module.py", line 114, in caller
result = await async_w3.manager.coro_request(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/app/.venv/lib/python3.11/site-packages/web3/manager.py", line 264, in coro_request
response = await self._coro_make_request(method, params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/app/.venv/lib/python3.11/site-packages/web3/manager.py", line 199, in _coro_make_request
return await request_func(method, params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/app/.venv/lib/python3.11/site-packages/web3/middleware/gas_price_strategy.py", line 126, in middleware
return await make_request(method, params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/app/.venv/lib/python3.11/site-packages/web3/middleware/names.py", line 139, in middleware
return await make_request(method, params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/app/.venv/lib/python3.11/site-packages/web3/middleware/attrdict.py", line 69, in middleware
response = await make_request(method, params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/app/.venv/lib/python3.11/site-packages/web3/middleware/formatting.py", line 165, in middleware
response = await make_request(method, params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/app/.venv/lib/python3.11/site-packages/web3/middleware/buffered_gas_estimate.py", line 58, in middleware
return await make_request(method, params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/app/.venv/lib/python3.11/site-packages/web3/middleware/exception_retry_request.py", line 151, in middleware
return await make_request(method, params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/app/.venv/lib/python3.11/site-packages/web3/providers/async_rpc.py", line 91, in make_request
raw_response = await async_make_post_request(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/app/.venv/lib/python3.11/site-packages/web3/_utils/request.py", line 239, in async_make_post_request
response = await async_get_response_from_post_request(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/app/.venv/lib/python3.11/site-packages/web3/_utils/request.py", line 231, in async_get_response_from_post_request
session = await async_cache_and_return_session(endpoint_uri)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/app/.venv/lib/python3.11/site-packages/web3/_utils/request.py", line 141, in async_cache_and_return_session
async with async_lock(_async_session_pool, _async_session_cache_lock):
File "/usr/local/lib/python3.11/contextlib.py", line 204, in __aenter__
return await anext(self.gen)
^^^^^^^^^^^^^^^^^^^^^
File "/app/.venv/lib/python3.11/site-packages/web3/_utils/async_caching.py", line 21, in async_lock
lock.release()
```
* What type of node you were connecting to.
AsyncHTTPProvider (Infura)
### How can it be fixed?
I tried to reproduce this error locally, but it only occurs in production.
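For what it's worth, a minimal sketch of a guard in `web3/_utils/async_caching.py` that avoids releasing a lock that is not held (this mirrors the patch below):
```py
import asyncio
import contextlib
import threading
from concurrent.futures import ThreadPoolExecutor
from typing import AsyncGenerator


@contextlib.asynccontextmanager
async def async_lock(
    thread_pool: ThreadPoolExecutor, lock: threading.Lock
) -> AsyncGenerator[None, None]:
    loop = asyncio.get_event_loop()
    try:
        await loop.run_in_executor(thread_pool, lock.acquire)
        yield
    finally:
        # releasing an unlocked threading.Lock raises RuntimeError,
        # so only release it when it is actually held
        if lock.locked():
            lock.release()
```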
| [
{
"content": "import asyncio\nfrom concurrent.futures import (\n ThreadPoolExecutor,\n)\nimport contextlib\nimport threading\nfrom typing import (\n AsyncGenerator,\n)\n\n\[email protected]\nasync def async_lock(\n thread_pool: ThreadPoolExecutor, lock: threading.Lock\n) -> AsyncGenerator[None, None]:\n loop = asyncio.get_event_loop()\n try:\n await loop.run_in_executor(thread_pool, lock.acquire)\n yield\n finally:\n lock.release()\n",
"path": "web3/_utils/async_caching.py"
}
] | [
{
"content": "import asyncio\nfrom concurrent.futures import (\n ThreadPoolExecutor,\n)\nimport contextlib\nimport threading\nfrom typing import (\n AsyncGenerator,\n)\n\n\[email protected]\nasync def async_lock(\n thread_pool: ThreadPoolExecutor, lock: threading.Lock\n) -> AsyncGenerator[None, None]:\n loop = asyncio.get_event_loop()\n try:\n await loop.run_in_executor(thread_pool, lock.acquire)\n yield\n finally:\n if lock.locked():\n lock.release()\n",
"path": "web3/_utils/async_caching.py"
}
] | diff --git a/newsfragments/3083.bugfix.rst b/newsfragments/3083.bugfix.rst
new file mode 100644
index 0000000000..4845959fd0
--- /dev/null
+++ b/newsfragments/3083.bugfix.rst
@@ -0,0 +1 @@
+Only release ``async_lock`` if it's locked to begin with.
diff --git a/web3/_utils/async_caching.py b/web3/_utils/async_caching.py
index 4997a162f2..42a7e1aaa0 100644
--- a/web3/_utils/async_caching.py
+++ b/web3/_utils/async_caching.py
@@ -18,4 +18,5 @@ async def async_lock(
await loop.run_in_executor(thread_pool, lock.acquire)
yield
finally:
- lock.release()
+ if lock.locked():
+ lock.release()
|
edgedb__edgedb-1057 | Bad pretty printing of datetime
Here is what I get:
```
edgedb> SELECT <datetime>'2020-01-08T17:03:06.026178+00:00';
{<local_date>'2020-01-08T17:03:06.026178+00:00'}
```
Well, `datetime` in Python is a subclass of `date`, but `singledispatch` is supposed to handle that well. Do we have a patched `singledispatch` now?
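For reference, a quick standalone check (illustrative names, not edgedb code) that `functools.singledispatch` does prefer the most specific registered type: a `datetime` value only falls back to the `date` renderer when no renderer was ever registered for `datetime` itself, which appears to be what the fix below addresses by registering `_datetime` with the dispatcher.
```python
import datetime
import functools

@functools.singledispatch
def render(o):
    return "default"

@render.register
def _date(o: datetime.date):
    return "<local_date>"

@render.register
def _datetime(o: datetime.datetime):
    return "<datetime>" if o.tzinfo else "<local_datetime>"

# datetime is a subclass of date, but the more specific handler wins once registered
print(render(datetime.datetime(2020, 1, 8, 17, 3, tzinfo=datetime.timezone.utc)))  # <datetime>
print(render(datetime.date(2020, 1, 8)))                                           # <local_date>
```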
| [
{
"content": "#\n# This source file is part of the EdgeDB open source project.\n#\n# Copyright 2019-present MagicStack Inc. and the EdgeDB authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nfrom __future__ import annotations\nfrom typing import * # NoQA\n\nimport datetime\nimport decimal\nimport functools\nimport uuid\n\nimport edgedb\nfrom edgedb import introspect\n\nfrom edb.common.markup.renderers import terminal\nfrom edb.common.markup.renderers import styles\n\nfrom . import context\n\n\nstyle = styles.Dark256\n\n\[email protected]\ndef walk(\n o: Any,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n # The default renderer. Shouldn't be ever called,\n # but if for some reason we haven't defined a renderer\n # for some edgedb type it's better to render something\n # than crash.\n buf.write(str(o))\n\n\ndef _object_guts(\n o: edgedb.Object,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer,\n *,\n include_id_when_empty: bool\n) -> bool:\n pointers = introspect.introspect_object(o).pointers\n if not repl_ctx.show_implicit_fields:\n pointers = tuple(ptr for ptr in pointers if not ptr.implicit)\n pointers_len = len(pointers)\n\n pointers_rendered = 0\n for ptr in pointers:\n buf.write(ptr.name, style.key)\n buf.write(': ')\n\n if ptr.kind is introspect.PointerKind.LINK:\n link = o[ptr.name]\n walk(link, repl_ctx, buf)\n else:\n val = getattr(o, ptr.name)\n walk(val, repl_ctx, buf)\n\n pointers_rendered += 1\n if pointers_rendered < pointers_len:\n buf.write(',')\n buf.mark_line_break()\n\n if pointers_rendered == 0 and include_id_when_empty:\n buf.write('id', style.key)\n buf.write(': ')\n walk(o.id, repl_ctx, buf)\n pointers_rendered = 1\n\n return pointers_rendered > 0\n\n\ndef _object_name(o: edgedb.Object, repl_ctx: context.ReplContext) -> str:\n if not repl_ctx.introspect_types:\n return 'Object'\n assert repl_ctx.typenames\n return repl_ctx.typenames.get(o.__tid__, 'Object')\n\n\[email protected]\ndef _link(\n o: edgedb.Link,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer,\n) -> None:\n with buf.foldable_lines():\n buf.write(_object_name(o.target, repl_ctx), style.tree_node)\n buf.write(' {', style.tree_node)\n buf.folded_space()\n with buf.indent():\n pointers = o.__dir__()\n pointers = tuple(ptr for ptr in pointers\n if ptr not in {'source', 'target'})\n pointers_len = len(pointers)\n\n non_empty = _object_guts(\n o.target, repl_ctx, buf,\n include_id_when_empty=pointers_len == 0)\n\n if pointers_len > 0:\n if non_empty:\n buf.write(',')\n buf.mark_line_break()\n\n i = 0\n for name in pointers:\n val = getattr(o, name)\n\n buf.write(f'@{name}', style.code_tag)\n buf.write(': ')\n walk(val, repl_ctx, buf)\n non_empty = True\n\n i += 1\n if i < pointers_len:\n buf.write(',')\n buf.mark_line_break()\n\n if non_empty:\n buf.folded_space()\n buf.write('}', style.tree_node)\n\n\[email protected]\ndef _object(\n o: edgedb.Object,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n with buf.foldable_lines():\n 
buf.write(_object_name(o, repl_ctx), style.tree_node)\n buf.write(' {', style.tree_node)\n buf.folded_space()\n with buf.indent():\n non_empty = _object_guts(\n o, repl_ctx, buf, include_id_when_empty=True)\n if non_empty:\n buf.folded_space()\n buf.write('}', style.tree_node)\n\n\[email protected]\ndef _namedtuple(\n o: edgedb.NamedTuple,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n with buf.foldable_lines():\n buf.write('(', style.bracket)\n with buf.indent():\n # Call __dir__ directly as dir() scrambles the order.\n for idx, name in enumerate(o.__dir__()):\n val = getattr(o, name)\n\n buf.write(name)\n buf.write(' := ')\n walk(val, repl_ctx, buf)\n\n if idx < (len(o) - 1):\n buf.write(',')\n buf.mark_line_break()\n buf.write(')', style.bracket)\n\n\[email protected](edgedb.Array)\[email protected](edgedb.Tuple)\[email protected](edgedb.Set)\[email protected](edgedb.LinkSet)\ndef _set(\n o: Union[edgedb.Array, edgedb.Tuple, edgedb.Set, edgedb.LinkSet],\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n if isinstance(o, edgedb.Array):\n begin, end = '[', ']'\n elif isinstance(o, edgedb.Tuple):\n begin, end = '(', ')'\n else:\n begin, end = '{', '}'\n\n with buf.foldable_lines():\n buf.write(begin, style.bracket)\n with buf.indent():\n for idx, el in enumerate(o):\n walk(el, repl_ctx, buf)\n if idx < (len(o) - 1):\n buf.write(',')\n buf.mark_line_break()\n buf.write(end, style.bracket)\n\n\[email protected]\ndef _uuid(\n o: uuid.UUID,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n buf.write(f'<uuid>{repr(str(o))}', style.code_comment)\n\n\[email protected](int)\[email protected](float)\ndef _numeric(\n o: Union[int, float],\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n buf.write(str(o), style.code_number)\n\n\[email protected]\ndef _str(\n o: str,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n if \"'\" in o:\n rs = '\"' + o.replace('\"', r'\\\"') + '\"'\n else:\n rs = \"'\" + o.replace(\"'\", r\"\\'\") + \"'\"\n buf.write(rs, style.code_string)\n\n\[email protected]\ndef _bytes(\n o: bytes,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n buf.write(repr(o), style.code_string)\n\n\[email protected]\ndef _bool(\n o: bool,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n buf.write(str(o).lower(), style.code_constant)\n\n\[email protected]\ndef _decimal(\n o: decimal.Decimal,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n buf.write(f'{o}n', style.code_number)\n\n\[email protected]\ndef _empty(\n o: None,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n buf.write('{}', style.bracket)\n\n\ndef _datetime(\n o: datetime.datetime,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n if o.tzinfo:\n buf.write(\"<datetime>\", style.code_comment)\n else:\n buf.write(\"<local_datetime>\", style.code_comment)\n\n buf.write(repr(o.isoformat()), style.code_string)\n\n\[email protected]\ndef _date(\n o: datetime.date,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n buf.write(\"<local_date>\", style.code_comment)\n buf.write(repr(o.isoformat()), style.code_string)\n\n\[email protected]\ndef _time(\n o: datetime.time,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n buf.write(\"<local_time>\", style.code_comment)\n buf.write(repr(o.isoformat()), style.code_string)\n\n\[email protected]\ndef _duration(\n o: datetime.timedelta,\n repl_ctx: 
context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n buf.write(\"<duration>\", style.code_comment)\n buf.write(repr(str(o)), style.code_string)\n\n\[email protected]\ndef _enum(\n o: edgedb.EnumValue,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n if not repl_ctx.introspect_types:\n typename = 'enum'\n else:\n assert repl_ctx.typenames\n typename = repl_ctx.typenames.get(o.__tid__, 'enum')\n\n buf.write(f\"<{typename}>\", style.code_comment)\n buf.write(f\"'{o}'\", style.code_string)\n",
"path": "edb/repl/render_binary.py"
}
] | [
{
"content": "#\n# This source file is part of the EdgeDB open source project.\n#\n# Copyright 2019-present MagicStack Inc. and the EdgeDB authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nfrom __future__ import annotations\nfrom typing import * # NoQA\n\nimport datetime\nimport decimal\nimport functools\nimport uuid\n\nimport edgedb\nfrom edgedb import introspect\n\nfrom edb.common.markup.renderers import terminal\nfrom edb.common.markup.renderers import styles\n\nfrom . import context\n\n\nstyle = styles.Dark256\n\n\[email protected]\ndef walk(\n o: Any,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n # The default renderer. Shouldn't be ever called,\n # but if for some reason we haven't defined a renderer\n # for some edgedb type it's better to render something\n # than crash.\n buf.write(str(o))\n\n\ndef _object_guts(\n o: edgedb.Object,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer,\n *,\n include_id_when_empty: bool\n) -> bool:\n pointers = introspect.introspect_object(o).pointers\n if not repl_ctx.show_implicit_fields:\n pointers = tuple(ptr for ptr in pointers if not ptr.implicit)\n pointers_len = len(pointers)\n\n pointers_rendered = 0\n for ptr in pointers:\n buf.write(ptr.name, style.key)\n buf.write(': ')\n\n if ptr.kind is introspect.PointerKind.LINK:\n link = o[ptr.name]\n walk(link, repl_ctx, buf)\n else:\n val = getattr(o, ptr.name)\n walk(val, repl_ctx, buf)\n\n pointers_rendered += 1\n if pointers_rendered < pointers_len:\n buf.write(',')\n buf.mark_line_break()\n\n if pointers_rendered == 0 and include_id_when_empty:\n buf.write('id', style.key)\n buf.write(': ')\n walk(o.id, repl_ctx, buf)\n pointers_rendered = 1\n\n return pointers_rendered > 0\n\n\ndef _object_name(o: edgedb.Object, repl_ctx: context.ReplContext) -> str:\n if not repl_ctx.introspect_types:\n return 'Object'\n assert repl_ctx.typenames\n return repl_ctx.typenames.get(o.__tid__, 'Object')\n\n\[email protected]\ndef _link(\n o: edgedb.Link,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer,\n) -> None:\n with buf.foldable_lines():\n buf.write(_object_name(o.target, repl_ctx), style.tree_node)\n buf.write(' {', style.tree_node)\n buf.folded_space()\n with buf.indent():\n pointers = o.__dir__()\n pointers = tuple(ptr for ptr in pointers\n if ptr not in {'source', 'target'})\n pointers_len = len(pointers)\n\n non_empty = _object_guts(\n o.target, repl_ctx, buf,\n include_id_when_empty=pointers_len == 0)\n\n if pointers_len > 0:\n if non_empty:\n buf.write(',')\n buf.mark_line_break()\n\n i = 0\n for name in pointers:\n val = getattr(o, name)\n\n buf.write(f'@{name}', style.code_tag)\n buf.write(': ')\n walk(val, repl_ctx, buf)\n non_empty = True\n\n i += 1\n if i < pointers_len:\n buf.write(',')\n buf.mark_line_break()\n\n if non_empty:\n buf.folded_space()\n buf.write('}', style.tree_node)\n\n\[email protected]\ndef _object(\n o: edgedb.Object,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n with buf.foldable_lines():\n 
buf.write(_object_name(o, repl_ctx), style.tree_node)\n buf.write(' {', style.tree_node)\n buf.folded_space()\n with buf.indent():\n non_empty = _object_guts(\n o, repl_ctx, buf, include_id_when_empty=True)\n if non_empty:\n buf.folded_space()\n buf.write('}', style.tree_node)\n\n\[email protected]\ndef _namedtuple(\n o: edgedb.NamedTuple,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n with buf.foldable_lines():\n buf.write('(', style.bracket)\n with buf.indent():\n # Call __dir__ directly as dir() scrambles the order.\n for idx, name in enumerate(o.__dir__()):\n val = getattr(o, name)\n\n buf.write(name)\n buf.write(' := ')\n walk(val, repl_ctx, buf)\n\n if idx < (len(o) - 1):\n buf.write(',')\n buf.mark_line_break()\n buf.write(')', style.bracket)\n\n\[email protected](edgedb.Array)\[email protected](edgedb.Tuple)\[email protected](edgedb.Set)\[email protected](edgedb.LinkSet)\ndef _set(\n o: Union[edgedb.Array, edgedb.Tuple, edgedb.Set, edgedb.LinkSet],\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n if isinstance(o, edgedb.Array):\n begin, end = '[', ']'\n elif isinstance(o, edgedb.Tuple):\n begin, end = '(', ')'\n else:\n begin, end = '{', '}'\n\n with buf.foldable_lines():\n buf.write(begin, style.bracket)\n with buf.indent():\n for idx, el in enumerate(o):\n walk(el, repl_ctx, buf)\n if idx < (len(o) - 1):\n buf.write(',')\n buf.mark_line_break()\n buf.write(end, style.bracket)\n\n\[email protected]\ndef _uuid(\n o: uuid.UUID,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n buf.write(f'<uuid>{repr(str(o))}', style.code_comment)\n\n\[email protected](int)\[email protected](float)\ndef _numeric(\n o: Union[int, float],\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n buf.write(str(o), style.code_number)\n\n\[email protected]\ndef _str(\n o: str,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n if \"'\" in o:\n rs = '\"' + o.replace('\"', r'\\\"') + '\"'\n else:\n rs = \"'\" + o.replace(\"'\", r\"\\'\") + \"'\"\n buf.write(rs, style.code_string)\n\n\[email protected]\ndef _bytes(\n o: bytes,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n buf.write(repr(o), style.code_string)\n\n\[email protected]\ndef _bool(\n o: bool,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n buf.write(str(o).lower(), style.code_constant)\n\n\[email protected]\ndef _decimal(\n o: decimal.Decimal,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n buf.write(f'{o}n', style.code_number)\n\n\[email protected]\ndef _empty(\n o: None,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n buf.write('{}', style.bracket)\n\n\[email protected]\ndef _datetime(\n o: datetime.datetime,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n if o.tzinfo:\n buf.write(\"<datetime>\", style.code_comment)\n else:\n buf.write(\"<local_datetime>\", style.code_comment)\n\n buf.write(repr(o.isoformat()), style.code_string)\n\n\[email protected]\ndef _date(\n o: datetime.date,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n buf.write(\"<local_date>\", style.code_comment)\n buf.write(repr(o.isoformat()), style.code_string)\n\n\[email protected]\ndef _time(\n o: datetime.time,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n buf.write(\"<local_time>\", style.code_comment)\n buf.write(repr(o.isoformat()), style.code_string)\n\n\[email protected]\ndef _duration(\n o: datetime.timedelta,\n 
repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n buf.write(\"<duration>\", style.code_comment)\n buf.write(repr(str(o)), style.code_string)\n\n\[email protected]\ndef _enum(\n o: edgedb.EnumValue,\n repl_ctx: context.ReplContext,\n buf: terminal.Buffer\n) -> None:\n if not repl_ctx.introspect_types:\n typename = 'enum'\n else:\n assert repl_ctx.typenames\n typename = repl_ctx.typenames.get(o.__tid__, 'enum')\n\n buf.write(f\"<{typename}>\", style.code_comment)\n buf.write(f\"'{o}'\", style.code_string)\n",
"path": "edb/repl/render_binary.py"
}
] | diff --git a/edb/repl/render_binary.py b/edb/repl/render_binary.py
index 134f4fd2d94..54a61f5feb4 100644
--- a/edb/repl/render_binary.py
+++ b/edb/repl/render_binary.py
@@ -275,6 +275,7 @@ def _empty(
buf.write('{}', style.bracket)
+@walk.register
def _datetime(
o: datetime.datetime,
repl_ctx: context.ReplContext,
|
sopel-irc__sopel-1044 | [announce] Send confirmation to caller after all channels announced
When Sopel is in many channels, announces are likely to be rate-limited. This makes it hard to know, for example, when it's safe to shut down the bot if announce is being used to broadcast an upgrade notice.
It's an easy fix, and I'll open a PR for it tomorrow if there are no objections.
I am as yet undecided whether it's best to use `bot.reply()` or `bot.notice()` for this (or even `bot.msg()` via PM to the caller), but I'll think about it between now and when I open the PR, and it can always be changed before merging.
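A minimal sketch of the proposed behavior, based only on the module already shown in this record (`bot.msg`, `bot.reply`); the confirmation after the loop is the whole change, and the private-message variant is left commented out:
```python
# coding=utf-8
# Sketch of the proposal, not necessarily the merged change: announce to
# every channel, then confirm to the caller once the loop has finished.
from sopel.module import commands, example


@commands('announce')
@example('.announce Some important message here')
def announce(bot, trigger):
    """Send an announcement to all channels the bot is in."""
    if not trigger.admin:
        bot.reply('Sorry, I can\'t let you do that')
        return
    for channel in bot.channels:
        bot.msg(channel, '[ANNOUNCEMENT] %s' % trigger.group(2))
    # Option 1: reply where the command was issued (channel or PM).
    bot.reply('Announce complete.')
    # Option 2 (alternative): private message to the caller instead.
    # bot.msg(trigger.nick, 'Announce complete.')
```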
| [
{
"content": "# coding=utf-8\n\"\"\"\nannounce.py - Send a message to all channels\nCopyright © 2013, Elad Alfassa, <[email protected]>\nLicensed under the Eiffel Forum License 2.\n\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nfrom sopel.module import commands, example\n\n\n@commands('announce')\n@example('.announce Some important message here')\ndef announce(bot, trigger):\n \"\"\"\n Send an announcement to all channels the bot is in\n \"\"\"\n if not trigger.admin:\n bot.reply('Sorry, I can\\'t let you do that')\n return\n for channel in bot.channels:\n bot.msg(channel, '[ANNOUNCEMENT] %s' % trigger.group(2))\n",
"path": "sopel/modules/announce.py"
}
] | [
{
"content": "# coding=utf-8\n\"\"\"\nannounce.py - Send a message to all channels\nCopyright © 2013, Elad Alfassa, <[email protected]>\nLicensed under the Eiffel Forum License 2.\n\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nfrom sopel.module import commands, example\n\n\n@commands('announce')\n@example('.announce Some important message here')\ndef announce(bot, trigger):\n \"\"\"\n Send an announcement to all channels the bot is in\n \"\"\"\n if not trigger.admin:\n bot.reply('Sorry, I can\\'t let you do that')\n return\n for channel in bot.channels:\n bot.msg(channel, '[ANNOUNCEMENT] %s' % trigger.group(2))\n bot.reply('Announce complete.')\n",
"path": "sopel/modules/announce.py"
}
] | diff --git a/sopel/modules/announce.py b/sopel/modules/announce.py
index 2b3df4bc60..0f3f217ace 100644
--- a/sopel/modules/announce.py
+++ b/sopel/modules/announce.py
@@ -21,3 +21,4 @@ def announce(bot, trigger):
return
for channel in bot.channels:
bot.msg(channel, '[ANNOUNCEMENT] %s' % trigger.group(2))
+ bot.reply('Announce complete.')
|
sanic-org__sanic-1343 | Pin versions for LTS release
I think that the versions of (some) dependencies should be allowed to float, but when we are ready for an LTS release, the versions should be pinned at that time.
@r0fls @ahopkins @seemethere @ashleysommer @yunstanford @ahopkins
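For illustration of what "pinned at that time" could look like in `setup.py`'s `install_requires` (the exact versions below are hypothetical placeholders, not the project's actual choices):
```python
# Hypothetical example of exact pins for an LTS release; version numbers
# here are illustrative placeholders only.
requirements = [
    'httptools==0.0.10',
    'uvloop==0.11.2; sys_platform != "win32" and implementation_name == "cpython"',
    'ujson==1.35; sys_platform != "win32" and implementation_name == "cpython"',
    'aiofiles==0.3.2',
    'websockets==5.0.1',
    'multidict==4.3.1',
]
```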
| [
{
"content": "\"\"\"\nSanic\n\"\"\"\nimport codecs\nimport os\nimport re\nfrom distutils.errors import DistutilsPlatformError\nfrom distutils.util import strtobool\n\nfrom setuptools import setup\n\n\ndef open_local(paths, mode='r', encoding='utf8'):\n path = os.path.join(\n os.path.abspath(os.path.dirname(__file__)),\n *paths\n )\n\n return codecs.open(path, mode, encoding)\n\n\nwith open_local(['sanic', '__init__.py'], encoding='latin1') as fp:\n try:\n version = re.findall(r\"^__version__ = '([^']+)'\\r?$\",\n fp.read(), re.M)[0]\n except IndexError:\n raise RuntimeError('Unable to determine version.')\n\n\nwith open_local(['README.rst']) as rm:\n long_description = rm.read()\n\nsetup_kwargs = {\n 'name': 'sanic',\n 'version': version,\n 'url': 'http://github.com/channelcat/sanic/',\n 'license': 'MIT',\n 'author': 'Channel Cat',\n 'author_email': '[email protected]',\n 'description': (\n 'A microframework based on uvloop, httptools, and learnings of flask'),\n 'long_description': long_description,\n 'packages': ['sanic'],\n 'platforms': 'any',\n 'classifiers': [\n 'Development Status :: 4 - Beta',\n 'Environment :: Web Environment',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n}\n\nenv_dependency = '; sys_platform != \"win32\" and implementation_name == \"cpython\"'\nujson = 'ujson>=1.35' + env_dependency\nuvloop = 'uvloop>=0.5.3' + env_dependency\n\nrequirements = [\n 'httptools>=0.0.9',\n uvloop,\n ujson,\n 'aiofiles>=0.3.0',\n 'websockets>=5.0,<6.0',\n 'multidict>=4.0,<5.0',\n]\nif strtobool(os.environ.get(\"SANIC_NO_UJSON\", \"no\")):\n print(\"Installing without uJSON\")\n requirements.remove(ujson)\n\n# 'nt' means windows OS\nif strtobool(os.environ.get(\"SANIC_NO_UVLOOP\", \"no\")):\n print(\"Installing without uvLoop\")\n requirements.remove(uvloop)\n\nsetup_kwargs['install_requires'] = requirements\nsetup(**setup_kwargs)\n",
"path": "setup.py"
}
] | [
{
"content": "\"\"\"\nSanic\n\"\"\"\nimport codecs\nimport os\nimport re\nfrom distutils.errors import DistutilsPlatformError\nfrom distutils.util import strtobool\n\nfrom setuptools import setup\n\n\ndef open_local(paths, mode='r', encoding='utf8'):\n path = os.path.join(\n os.path.abspath(os.path.dirname(__file__)),\n *paths\n )\n\n return codecs.open(path, mode, encoding)\n\n\nwith open_local(['sanic', '__init__.py'], encoding='latin1') as fp:\n try:\n version = re.findall(r\"^__version__ = '([^']+)'\\r?$\",\n fp.read(), re.M)[0]\n except IndexError:\n raise RuntimeError('Unable to determine version.')\n\n\nwith open_local(['README.rst']) as rm:\n long_description = rm.read()\n\nsetup_kwargs = {\n 'name': 'sanic',\n 'version': version,\n 'url': 'http://github.com/channelcat/sanic/',\n 'license': 'MIT',\n 'author': 'Channel Cat',\n 'author_email': '[email protected]',\n 'description': (\n 'A microframework based on uvloop, httptools, and learnings of flask'),\n 'long_description': long_description,\n 'packages': ['sanic'],\n 'platforms': 'any',\n 'classifiers': [\n 'Development Status :: 4 - Beta',\n 'Environment :: Web Environment',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n}\n\nenv_dependency = '; sys_platform != \"win32\" and implementation_name == \"cpython\"'\nujson = 'ujson>=1.35' + env_dependency\nuvloop = 'uvloop>=0.5.3' + env_dependency\n\nrequirements = [\n 'httptools>=0.0.10',\n uvloop,\n ujson,\n 'aiofiles>=0.3.0',\n 'websockets>=5.0,<6.0',\n 'multidict>=4.0,<5.0',\n]\nif strtobool(os.environ.get(\"SANIC_NO_UJSON\", \"no\")):\n print(\"Installing without uJSON\")\n requirements.remove(ujson)\n\n# 'nt' means windows OS\nif strtobool(os.environ.get(\"SANIC_NO_UVLOOP\", \"no\")):\n print(\"Installing without uvLoop\")\n requirements.remove(uvloop)\n\nsetup_kwargs['install_requires'] = requirements\nsetup(**setup_kwargs)\n",
"path": "setup.py"
}
] | diff --git a/environment.yml b/environment.yml
index 9f416c0e33..e13c76fe9a 100644
--- a/environment.yml
+++ b/environment.yml
@@ -12,7 +12,7 @@ dependencies:
- zlib=1.2.8=0
- pip:
- uvloop>=0.5.3
- - httptools>=0.0.9
+ - httptools>=0.0.10
- ujson>=1.35
- aiofiles>=0.3.0
- websockets>=3.2
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 004f6f9ec8..12b29a2b87 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -3,7 +3,7 @@ aiohttp>=2.3.0,<=3.2.1
chardet<=2.3.0
beautifulsoup4
coverage
-httptools
+httptools>=0.0.10
flake8
pytest==3.3.2
tox
diff --git a/requirements.txt b/requirements.txt
index e320e78181..74d9bf8353 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,5 @@
aiofiles
-httptools
+httptools>=0.0.10
ujson; sys_platform != "win32" and implementation_name == "cpython"
uvloop; sys_platform != "win32" and implementation_name == "cpython"
websockets>=5.0,<6.0
diff --git a/setup.py b/setup.py
index 34703ab4d3..2ce1510f16 100644
--- a/setup.py
+++ b/setup.py
@@ -56,7 +56,7 @@ def open_local(paths, mode='r', encoding='utf8'):
uvloop = 'uvloop>=0.5.3' + env_dependency
requirements = [
- 'httptools>=0.0.9',
+ 'httptools>=0.0.10',
uvloop,
ujson,
'aiofiles>=0.3.0',
|
AnalogJ__lexicon-479 | GoDaddy provider should recognize domaincontrol.com as its nameserver
The auto provider should recognize that domains managed by GoDaddy often have nameservers under the *.domaincontrol.com namespace. You can verify that these nameservers belong to GoDaddy via whois, and I also tested this by adding 'domaincontrol.com' to the recognized nameservers with the following patch.
```
--- providers/godaddy.py.orig 2020-01-09 08:58:26.160360574 +0000
+++ providers/godaddy.py 2020-01-10 19:27:29.292030195 +0000
@@ -14,5 +14,5 @@
LOGGER = logging.getLogger(__name__)
-NAMESERVER_DOMAINS = ['godaddy.com']
+NAMESERVER_DOMAINS = ['godaddy.com','domaincontrol.com']
```
And the current whois excerpt:
```
$ whois domaincontrol.com
Domain Name: DOMAINCONTROL.COM
...
Updated Date: 2018-08-07T19:25:37Z
...
Registrant Organization: Go Daddy Operating Company, LLC
Registrant State/Province: Arizona
Registrant Country: US
```
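For context, the nameserver match this change enables can be sketched stand-alone as below; this is illustrative only (the helper name is hypothetical), not lexicon's actual auto-provider code:
```python
# Illustrative sketch: match a domain's nameservers against a provider's
# NAMESERVER_DOMAINS list by exact or subdomain suffix.
NAMESERVER_DOMAINS = ['godaddy.com', 'domaincontrol.com']


def is_godaddy_nameserver(nameserver):
    # e.g. 'ns37.domaincontrol.com.' -> drop the trailing dot, lowercase,
    # then accept an exact match or any subdomain of a known suffix.
    host = nameserver.rstrip('.').lower()
    return any(host == d or host.endswith('.' + d) for d in NAMESERVER_DOMAINS)


assert is_godaddy_nameserver('ns37.domaincontrol.com.')
assert not is_godaddy_nameserver('ns1.examplehost.net')
```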
| [
{
"content": "\"\"\"Module provider for Godaddy\"\"\"\nfrom __future__ import absolute_import\nimport hashlib\nimport json\nimport logging\n\nimport requests\nfrom requests.adapters import HTTPAdapter\nfrom urllib3.util.retry import Retry\n\nfrom lexicon.providers.base import Provider as BaseProvider\n\n\nLOGGER = logging.getLogger(__name__)\n\nNAMESERVER_DOMAINS = ['godaddy.com']\n\n\ndef provider_parser(subparser):\n \"\"\"Generate a subparser for Godaddy\"\"\"\n subparser.add_argument(\n '--auth-key', help='specify the key to access the API')\n subparser.add_argument(\n '--auth-secret', help='specify the secret to access the API')\n\n\nclass Provider(BaseProvider):\n \"\"\"\n Implements the DNS GoDaddy provider.\n Some general remarks about this provider, because it uses a weirdly designed API.\n Indeed, there is no direct way to insert, update or delete a specific record.\n Furthermore, there is no unique identifier for a record.\n Instead GoDaddy use a replace approach: for a given set of records one\n can replace this set with a new set sent through API.\n For the sake of simplicity and consistency across the provider edit methods,\n the set will be always all records in the DNS zone.\n With this approach:\n - adding a record consists in appending a record to the obtained set and call\n replace with the updated set,\n - updating a record consists in modifying a record in the obtained set and call\n replace with the updated set,\n - deleting a record consists in removing a record in the obtained set and call\n replace with the updated set.\n In parallel, as said before, there is no unique identifier.\n This provider then implement a pseudo-identifier, to allow an easy update or delete\n using the '--identifier' lexicon parameter.\n But you need to call the 'list' command just before executing and update/delete action,\n because identifier value is tied to the content of the record, and will change anytime\n something is changed in the record.\n \"\"\"\n\n def __init__(self, config):\n super(Provider, self).__init__(config)\n self.domain_id = None\n self.api_endpoint = 'https://api.godaddy.com/v1'\n\n def _authenticate(self):\n domain = self.domain\n\n result = self._get('/domains/{0}'.format(domain))\n self.domain_id = result['domainId']\n\n def _list_records(self, rtype=None, name=None, content=None):\n domain = self.domain\n\n url = '/domains/{0}/records'.format(domain)\n if rtype:\n url += '/{0}'.format(rtype)\n if name:\n url += '/{0}'.format(self._relative_name(name))\n\n raws = self._get(url)\n\n records = []\n for raw in raws:\n records.append({\n 'id': Provider._identifier(raw),\n 'type': raw['type'],\n 'name': self._full_name(raw['name']),\n 'ttl': raw['ttl'],\n 'content': raw['data']\n })\n\n if content:\n records = [\n record for record in records if record['data'] == content]\n\n LOGGER.debug('list_records: %s', records)\n\n return records\n\n def _create_record(self, rtype, name, content):\n domain = self.domain\n relative_name = self._relative_name(name)\n ttl = self._get_lexicon_option('ttl')\n\n # Retrieve existing data in DNS zone.\n records = self._get('/domains/{0}/records'.format(domain))\n\n # Check if a record already matches given parameters\n for record in records:\n if (record['type'] == rtype and self._relative_name(record['name']) == relative_name\n and record['data'] == content):\n LOGGER.debug(\n 'create_record (ignored, duplicate): %s %s %s', rtype, name, content)\n return True\n\n # Append a new entry corresponding to given parameters.\n data = {'type': 
rtype, 'name': relative_name, 'data': content}\n if ttl:\n data['ttl'] = ttl\n\n records.append(data)\n\n # Synchronize data with inserted record into DNS zone.\n self._put('/domains/{0}/records'.format(domain), records)\n\n LOGGER.debug('create_record: %s %s %s', rtype, name, content)\n\n return True\n\n def _update_record(self, identifier, rtype=None, name=None, content=None):\n # No identifier is used with GoDaddy.\n # We can rely either:\n # - only on rtype/name to get the relevant records, both of them are required\n # or we will could update to much records ...,\n # - or by the pseudo-identifier provided\n # Furthermore for rtype/name approach, we cannot update all matching records, as it\n # would lead o an error (two entries of same rtype + name cannot have the same content).\n # So for rtype/name approach, we search first matching record for rtype/name on which\n # content is different, and we update it before synchronizing the DNS zone.\n if not identifier and not rtype:\n raise Exception('ERROR: rtype is required')\n if not identifier and not name:\n raise Exception('ERROR: name is required')\n\n domain = self.domain\n relative_name = None\n if name:\n relative_name = self._relative_name(name)\n\n # Retrieve existing data in DNS zone.\n records = self._get('/domains/{0}/records'.format(domain))\n\n # Get the record to update:\n # - either explicitly by its identifier,\n # - or the first matching by its rtype+name where content does not match\n # (first match, see first method comment for explanation).\n for record in records:\n if ((identifier and Provider._identifier(record) == identifier) or # pylint: disable=too-many-boolean-expressions\n (not identifier and record['type'] == rtype\n and self._relative_name(record['name']) == relative_name\n and record['data'] != content)):\n record['data'] = content\n break\n\n # Synchronize data with updated records into DNS zone.\n self._put('/domains/{0}/records'.format(domain), records)\n\n LOGGER.debug('update_record: %s %s %s', rtype, name, content)\n\n return True\n\n def _delete_record(self, identifier=None, rtype=None, name=None, content=None):\n # For the LOL. 
GoDaddy does not accept an empty array\n # when updating a particular set of records.\n # It means that you cannot request to remove all records\n # matching a particular rtype and/or name.\n # Instead, we get ALL records in the DNS zone, update the set,\n # and replace EVERYTHING in the DNS zone.\n # You will always have at minimal NS/SRV entries in the array,\n # otherwise your DNS zone is broken, and updating the zone is the least of your problem ...\n domain = self.domain\n\n # Retrieve all records in the DNS zone\n records = self._get('/domains/{0}/records'.format(domain))\n\n relative_name = None\n if name:\n relative_name = self._relative_name(name)\n\n # Filter out all records which matches the pattern (either identifier\n # or some combination of rtype/name/content).\n filtered_records = []\n if identifier:\n filtered_records = [\n record for record in records if Provider._identifier(record) != identifier]\n else:\n for record in records:\n if ((not rtype and not relative_name and not content) # pylint: disable=too-many-boolean-expressions\n or (rtype and not relative_name and not content and record['type'] != rtype)\n or (not rtype and relative_name and not content\n and self._relative_name(record['name']) != relative_name)\n or (not rtype and not relative_name and content\n and record['data'] != content)\n or (rtype and relative_name and not content\n and (record['type'] != rtype\n or self._relative_name(record['name']) != relative_name))\n or (rtype and not relative_name and content\n and (record['type'] != rtype or record['data'] != content))\n or (not rtype and relative_name and content\n and (self._relative_name(record['name']) != relative_name\n or record['data'] != content))\n or (rtype and relative_name and content\n and (record['type'] != rtype\n or self._relative_name(record['name']) != relative_name\n or record['data'] != content))):\n filtered_records.append(record)\n\n # Synchronize data with expurged entries into DNS zone.\n self._put('/domains/{0}/records'.format(domain), filtered_records)\n\n LOGGER.debug('delete_records: %s %s %s', rtype, name, content)\n\n return True\n\n # GoDaddy provides no identifier for a record, which is a problem\n # where identifiers can be used (delete and update).\n # To circumvent this, we implement a pseudo-identifier,which is basically\n # a hash of type+name+content of a given record.\n # It is far from perfect, as the identifier will change each time\n # we change something in the record ...\n # But at least, one can use 'lexicon godaddy list ...' then\n # 'lexicon godaddy update --identifier ...' to modify specific record.\n # However, 'lexicon godaddy list ...' 
should be called each time DNS\n # zone had been changed to calculate new identifiers.\n @staticmethod\n def _identifier(record):\n sha256 = hashlib.sha256()\n sha256.update(('type=' + record.get('type', '') + ',').encode('utf-8'))\n sha256.update(('name=' + record.get('name', '') + ',').encode('utf-8'))\n sha256.update(('data=' + record.get('data', '') + ',').encode('utf-8'))\n return sha256.hexdigest()[0:7]\n\n def _request(self, action='GET', url='/', data=None, query_params=None):\n if not data:\n data = {}\n if not query_params:\n query_params = {}\n\n # When editing DNS zone, API is unavailable for few seconds\n # (until modifications are propagated).\n # In this case, call to API will return 409 HTTP error.\n # We use the Retry extension to retry the requests until\n # we get a processable reponse (402 HTTP status, or an HTTP error != 409)\n retries = Retry(\n total=10,\n backoff_factor=0.5,\n status_forcelist=[409],\n method_whitelist=frozenset(\n ['GET', 'PUT', 'POST', 'DELETE', 'PATCH'])\n )\n\n session = requests.Session()\n session.mount('https://', HTTPAdapter(max_retries=retries))\n\n result = session.request(action, self.api_endpoint + url,\n params=query_params,\n data=json.dumps(data),\n headers={\n 'Content-Type': 'application/json',\n 'Accept': 'application/json',\n # GoDaddy use a key/secret pair to authenticate\n 'Authorization': 'sso-key {0}:{1}'.format(\n self._get_provider_option(\n 'auth_key'),\n self._get_provider_option('auth_secret'))\n })\n\n result.raise_for_status()\n\n try:\n # Return the JSON body response if exists.\n return result.json()\n except ValueError:\n # For some requests command (eg. PUT), GoDaddy will not\n # return any JSON, just an HTTP status without body.\n return None\n",
"path": "lexicon/providers/godaddy.py"
}
] | [
{
"content": "\"\"\"Module provider for Godaddy\"\"\"\nfrom __future__ import absolute_import\nimport hashlib\nimport json\nimport logging\n\nimport requests\nfrom requests.adapters import HTTPAdapter\nfrom urllib3.util.retry import Retry\n\nfrom lexicon.providers.base import Provider as BaseProvider\n\n\nLOGGER = logging.getLogger(__name__)\n\nNAMESERVER_DOMAINS = ['godaddy.com', 'domaincontrol.com']\n\n\ndef provider_parser(subparser):\n \"\"\"Generate a subparser for Godaddy\"\"\"\n subparser.add_argument(\n '--auth-key', help='specify the key to access the API')\n subparser.add_argument(\n '--auth-secret', help='specify the secret to access the API')\n\n\nclass Provider(BaseProvider):\n \"\"\"\n Implements the DNS GoDaddy provider.\n Some general remarks about this provider, because it uses a weirdly designed API.\n Indeed, there is no direct way to insert, update or delete a specific record.\n Furthermore, there is no unique identifier for a record.\n Instead GoDaddy use a replace approach: for a given set of records one\n can replace this set with a new set sent through API.\n For the sake of simplicity and consistency across the provider edit methods,\n the set will be always all records in the DNS zone.\n With this approach:\n - adding a record consists in appending a record to the obtained set and call\n replace with the updated set,\n - updating a record consists in modifying a record in the obtained set and call\n replace with the updated set,\n - deleting a record consists in removing a record in the obtained set and call\n replace with the updated set.\n In parallel, as said before, there is no unique identifier.\n This provider then implement a pseudo-identifier, to allow an easy update or delete\n using the '--identifier' lexicon parameter.\n But you need to call the 'list' command just before executing and update/delete action,\n because identifier value is tied to the content of the record, and will change anytime\n something is changed in the record.\n \"\"\"\n\n def __init__(self, config):\n super(Provider, self).__init__(config)\n self.domain_id = None\n self.api_endpoint = 'https://api.godaddy.com/v1'\n\n def _authenticate(self):\n domain = self.domain\n\n result = self._get('/domains/{0}'.format(domain))\n self.domain_id = result['domainId']\n\n def _list_records(self, rtype=None, name=None, content=None):\n domain = self.domain\n\n url = '/domains/{0}/records'.format(domain)\n if rtype:\n url += '/{0}'.format(rtype)\n if name:\n url += '/{0}'.format(self._relative_name(name))\n\n raws = self._get(url)\n\n records = []\n for raw in raws:\n records.append({\n 'id': Provider._identifier(raw),\n 'type': raw['type'],\n 'name': self._full_name(raw['name']),\n 'ttl': raw['ttl'],\n 'content': raw['data']\n })\n\n if content:\n records = [\n record for record in records if record['data'] == content]\n\n LOGGER.debug('list_records: %s', records)\n\n return records\n\n def _create_record(self, rtype, name, content):\n domain = self.domain\n relative_name = self._relative_name(name)\n ttl = self._get_lexicon_option('ttl')\n\n # Retrieve existing data in DNS zone.\n records = self._get('/domains/{0}/records'.format(domain))\n\n # Check if a record already matches given parameters\n for record in records:\n if (record['type'] == rtype and self._relative_name(record['name']) == relative_name\n and record['data'] == content):\n LOGGER.debug(\n 'create_record (ignored, duplicate): %s %s %s', rtype, name, content)\n return True\n\n # Append a new entry corresponding to given 
parameters.\n data = {'type': rtype, 'name': relative_name, 'data': content}\n if ttl:\n data['ttl'] = ttl\n\n records.append(data)\n\n # Synchronize data with inserted record into DNS zone.\n self._put('/domains/{0}/records'.format(domain), records)\n\n LOGGER.debug('create_record: %s %s %s', rtype, name, content)\n\n return True\n\n def _update_record(self, identifier, rtype=None, name=None, content=None):\n # No identifier is used with GoDaddy.\n # We can rely either:\n # - only on rtype/name to get the relevant records, both of them are required\n # or we will could update to much records ...,\n # - or by the pseudo-identifier provided\n # Furthermore for rtype/name approach, we cannot update all matching records, as it\n # would lead o an error (two entries of same rtype + name cannot have the same content).\n # So for rtype/name approach, we search first matching record for rtype/name on which\n # content is different, and we update it before synchronizing the DNS zone.\n if not identifier and not rtype:\n raise Exception('ERROR: rtype is required')\n if not identifier and not name:\n raise Exception('ERROR: name is required')\n\n domain = self.domain\n relative_name = None\n if name:\n relative_name = self._relative_name(name)\n\n # Retrieve existing data in DNS zone.\n records = self._get('/domains/{0}/records'.format(domain))\n\n # Get the record to update:\n # - either explicitly by its identifier,\n # - or the first matching by its rtype+name where content does not match\n # (first match, see first method comment for explanation).\n for record in records:\n if ((identifier and Provider._identifier(record) == identifier) or # pylint: disable=too-many-boolean-expressions\n (not identifier and record['type'] == rtype\n and self._relative_name(record['name']) == relative_name\n and record['data'] != content)):\n record['data'] = content\n break\n\n # Synchronize data with updated records into DNS zone.\n self._put('/domains/{0}/records'.format(domain), records)\n\n LOGGER.debug('update_record: %s %s %s', rtype, name, content)\n\n return True\n\n def _delete_record(self, identifier=None, rtype=None, name=None, content=None):\n # For the LOL. 
GoDaddy does not accept an empty array\n # when updating a particular set of records.\n # It means that you cannot request to remove all records\n # matching a particular rtype and/or name.\n # Instead, we get ALL records in the DNS zone, update the set,\n # and replace EVERYTHING in the DNS zone.\n # You will always have at minimal NS/SRV entries in the array,\n # otherwise your DNS zone is broken, and updating the zone is the least of your problem ...\n domain = self.domain\n\n # Retrieve all records in the DNS zone\n records = self._get('/domains/{0}/records'.format(domain))\n\n relative_name = None\n if name:\n relative_name = self._relative_name(name)\n\n # Filter out all records which matches the pattern (either identifier\n # or some combination of rtype/name/content).\n filtered_records = []\n if identifier:\n filtered_records = [\n record for record in records if Provider._identifier(record) != identifier]\n else:\n for record in records:\n if ((not rtype and not relative_name and not content) # pylint: disable=too-many-boolean-expressions\n or (rtype and not relative_name and not content and record['type'] != rtype)\n or (not rtype and relative_name and not content\n and self._relative_name(record['name']) != relative_name)\n or (not rtype and not relative_name and content\n and record['data'] != content)\n or (rtype and relative_name and not content\n and (record['type'] != rtype\n or self._relative_name(record['name']) != relative_name))\n or (rtype and not relative_name and content\n and (record['type'] != rtype or record['data'] != content))\n or (not rtype and relative_name and content\n and (self._relative_name(record['name']) != relative_name\n or record['data'] != content))\n or (rtype and relative_name and content\n and (record['type'] != rtype\n or self._relative_name(record['name']) != relative_name\n or record['data'] != content))):\n filtered_records.append(record)\n\n # Synchronize data with expurged entries into DNS zone.\n self._put('/domains/{0}/records'.format(domain), filtered_records)\n\n LOGGER.debug('delete_records: %s %s %s', rtype, name, content)\n\n return True\n\n # GoDaddy provides no identifier for a record, which is a problem\n # where identifiers can be used (delete and update).\n # To circumvent this, we implement a pseudo-identifier,which is basically\n # a hash of type+name+content of a given record.\n # It is far from perfect, as the identifier will change each time\n # we change something in the record ...\n # But at least, one can use 'lexicon godaddy list ...' then\n # 'lexicon godaddy update --identifier ...' to modify specific record.\n # However, 'lexicon godaddy list ...' 
should be called each time DNS\n # zone had been changed to calculate new identifiers.\n @staticmethod\n def _identifier(record):\n sha256 = hashlib.sha256()\n sha256.update(('type=' + record.get('type', '') + ',').encode('utf-8'))\n sha256.update(('name=' + record.get('name', '') + ',').encode('utf-8'))\n sha256.update(('data=' + record.get('data', '') + ',').encode('utf-8'))\n return sha256.hexdigest()[0:7]\n\n def _request(self, action='GET', url='/', data=None, query_params=None):\n if not data:\n data = {}\n if not query_params:\n query_params = {}\n\n # When editing DNS zone, API is unavailable for few seconds\n # (until modifications are propagated).\n # In this case, call to API will return 409 HTTP error.\n # We use the Retry extension to retry the requests until\n # we get a processable reponse (402 HTTP status, or an HTTP error != 409)\n retries = Retry(\n total=10,\n backoff_factor=0.5,\n status_forcelist=[409],\n method_whitelist=frozenset(\n ['GET', 'PUT', 'POST', 'DELETE', 'PATCH'])\n )\n\n session = requests.Session()\n session.mount('https://', HTTPAdapter(max_retries=retries))\n\n result = session.request(action, self.api_endpoint + url,\n params=query_params,\n data=json.dumps(data),\n headers={\n 'Content-Type': 'application/json',\n 'Accept': 'application/json',\n # GoDaddy use a key/secret pair to authenticate\n 'Authorization': 'sso-key {0}:{1}'.format(\n self._get_provider_option(\n 'auth_key'),\n self._get_provider_option('auth_secret'))\n })\n\n result.raise_for_status()\n\n try:\n # Return the JSON body response if exists.\n return result.json()\n except ValueError:\n # For some requests command (eg. PUT), GoDaddy will not\n # return any JSON, just an HTTP status without body.\n return None\n",
"path": "lexicon/providers/godaddy.py"
}
] | diff --git a/lexicon/providers/godaddy.py b/lexicon/providers/godaddy.py
index cb1703f68..26c3da0f3 100644
--- a/lexicon/providers/godaddy.py
+++ b/lexicon/providers/godaddy.py
@@ -13,7 +13,7 @@
LOGGER = logging.getLogger(__name__)
-NAMESERVER_DOMAINS = ['godaddy.com']
+NAMESERVER_DOMAINS = ['godaddy.com', 'domaincontrol.com']
def provider_parser(subparser):
|
zigpy__zha-device-handlers-4 | Missing import for types breaking LocalDataCluster
| [
{
"content": "import importlib\nimport pkgutil\nfrom zigpy.quirks import CustomCluster\nfrom zigpy.util import ListenableMixin\n\nUNKNOWN = 'Unknown'\n\n\nclass Bus(ListenableMixin):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._listeners = {}\n\n\nclass LocalDataCluster(CustomCluster):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n async def read_attributes_raw(self, attributes, manufacturer=None):\n attributes = [types.uint16_t(a) for a in attributes]\n v = [self._attr_cache.get(attr) for attr in attributes]\n return v\n\n def _update_attribute(self, attrid, value):\n super()._update_attribute(attrid, value)\n\n\nclass EventableCluster(CustomCluster):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def handle_cluster_request(self, tsn, command_id, args):\n super().handle_cluster_request(tsn, command_id, args)\n if self.server_commands is not None and\\\n self.server_commands.get(command_id) is not None:\n self.listener_event(\n 'zha_send_event',\n self,\n self.server_commands.get(command_id)[0],\n args\n )\n\n def _update_attribute(self, attrid, value):\n super()._update_attribute(attrid, value)\n self.listener_event(\n 'zha_send_event',\n self,\n 'attribute_updated',\n {\n 'attribute_id': attrid,\n 'attribute_name': self.attributes.get(attrid, [UNKNOWN])[0],\n 'value': value\n }\n )\n\nname = __name__\npath = __path__\nfor importer, modname, ispkg in pkgutil.walk_packages(\n path=path,\n prefix=name +'.'\n ):\n importlib.import_module(modname)\n",
"path": "zhaquirks/__init__.py"
}
] | [
{
"content": "import importlib\nimport pkgutil\nfrom zigpy.quirks import CustomCluster\nfrom zigpy.util import ListenableMixin\nimport zigpy.types as types\n\nUNKNOWN = 'Unknown'\n\n\nclass Bus(ListenableMixin):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._listeners = {}\n\n\nclass LocalDataCluster(CustomCluster):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n async def read_attributes_raw(self, attributes, manufacturer=None):\n attributes = [types.uint16_t(a) for a in attributes]\n v = [self._attr_cache.get(attr) for attr in attributes]\n return v\n\n def _update_attribute(self, attrid, value):\n super()._update_attribute(attrid, value)\n\n\nclass EventableCluster(CustomCluster):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def handle_cluster_request(self, tsn, command_id, args):\n super().handle_cluster_request(tsn, command_id, args)\n if self.server_commands is not None and\\\n self.server_commands.get(command_id) is not None:\n self.listener_event(\n 'zha_send_event',\n self,\n self.server_commands.get(command_id)[0],\n args\n )\n\n def _update_attribute(self, attrid, value):\n super()._update_attribute(attrid, value)\n self.listener_event(\n 'zha_send_event',\n self,\n 'attribute_updated',\n {\n 'attribute_id': attrid,\n 'attribute_name': self.attributes.get(attrid, [UNKNOWN])[0],\n 'value': value\n }\n )\n\nname = __name__\npath = __path__\nfor importer, modname, ispkg in pkgutil.walk_packages(\n path=path,\n prefix=name +'.'\n ):\n importlib.import_module(modname)\n",
"path": "zhaquirks/__init__.py"
}
] | diff --git a/zhaquirks/__init__.py b/zhaquirks/__init__.py
index 571c1f5605..36bf731e71 100644
--- a/zhaquirks/__init__.py
+++ b/zhaquirks/__init__.py
@@ -2,6 +2,7 @@
import pkgutil
from zigpy.quirks import CustomCluster
from zigpy.util import ListenableMixin
+import zigpy.types as types
UNKNOWN = 'Unknown'
|
scikit-image__scikit-image-2821 | Performance regression in morphology.watershed from 0.12.3 to 0.13.0
## Description
Significant performance degradation in skimage.morphology.watershed in ver. 0.13.0 compared with ver. 0.12.3.
3D images of size about (500, 500, 500) were tested.
## Way to reproduce
```
import numpy as np
from scipy import ndimage
from skimage import feature
from skimage import morphology
import time
dimensions = 500
z, y, x = np.indices((dimensions, dimensions + 5, dimensions + 10))
radius = 100
xc, yc, zc = dimensions // 3 - 6, dimensions // 3, dimensions // 3 + 6
test_image = np.int32((x - xc) ** 2 + (y - yc) ** 2 + (z - zc) ** 2 <= radius ** 2)
test_image[zc - 5, yc - 5, xc - 5] = 0
test_image[zc - 5, yc + 5, xc - 5] = 0
test_image[zc - 5, yc + 4, xc - 5] = 0
xc *= 2
yc *= 2
zc *= 2
test_image[zc:zc + 100, yc:yc + 100, xc:xc + 100] = 2
test_image[zc + 10 + 3, yc + 10 + 3, xc + 10 - 3] = 0
test_image[zc + 10 - 3, yc + 10 - 3, xc + 10 + 3] = 0
test_image[zc + 10 + 3, yc + 10 - 3, xc + 10 + 3] = 0
test_image[zc-5, yc-5, xc-5] = 3
dist = ndimage.distance_transform_edt(test_image)
local_maxi = feature.peak_local_max(dist, min_distance=2, indices=False)
labels, num_obj = ndimage.label(local_maxi)
start_t = time.clock()
labels = morphology.watershed(-dist, labels, connectivity=ndimage.generate_binary_structure(3, 3), mask=test_image)
print(time.clock() - start_t)
```
Processing time for ver. 0.12.3 is about 16 s on my computer.
Processing time for ver. 0.13.0 is about 90 s.
That is a slowdown of more than 5 times!
Python version 3.5 for Windows
scikit-image versions 0.12.3 and 0.13.0
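As a side note on the measurement (not part of the original report): `time.clock()` is deprecated in Python 3, so the wall-clock comparison is more reliably reproduced with `time.perf_counter()`. A sketch, assuming `dist`, `labels` and `test_image` from the script above:
```python
# Timing sketch; reuses dist, labels and test_image built by the
# reproduction script above. perf_counter() gives monotonic wall-clock time.
import time

from scipy import ndimage
from skimage import morphology

start = time.perf_counter()
labels = morphology.watershed(
    -dist, labels,
    connectivity=ndimage.generate_binary_structure(3, 3),
    mask=test_image)
print('watershed took {:.1f} s'.format(time.perf_counter() - start))
```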
| [
{
"content": "\"\"\"watershed.py - watershed algorithm\n\nThis module implements a watershed algorithm that apportions pixels into\nmarked basins. The algorithm uses a priority queue to hold the pixels\nwith the metric for the priority queue being pixel value, then the time\nof entry into the queue - this settles ties in favor of the closest marker.\n\nSome ideas taken from\nSoille, \"Automated Basin Delineation from Digital Elevation Models Using\nMathematical Morphology\", Signal Processing 20 (1990) 171-182.\n\nThe most important insight in the paper is that entry time onto the queue\nsolves two problems: a pixel should be assigned to the neighbor with the\nlargest gradient or, if there is no gradient, pixels on a plateau should\nbe split between markers on opposite sides.\n\nOriginally part of CellProfiler, code licensed under both GPL and BSD licenses.\nWebsite: http://www.cellprofiler.org\n\nCopyright (c) 2003-2009 Massachusetts Institute of Technology\nCopyright (c) 2009-2011 Broad Institute\nAll rights reserved.\n\nOriginal author: Lee Kamentsky\n\"\"\"\n\nimport numpy as np\nfrom scipy import ndimage as ndi\n\nfrom . import _watershed\nfrom ..util import crop, regular_seeds\n\n\ndef _validate_inputs(image, markers, mask):\n \"\"\"Ensure that all inputs to watershed have matching shapes and types.\n\n Parameters\n ----------\n image : array\n The input image.\n markers : int or array of int\n The marker image.\n mask : array, or None\n A boolean mask, True where we want to compute the watershed.\n\n Returns\n -------\n image, markers, mask : arrays\n The validated and formatted arrays. Image will have dtype float64,\n markers int32, and mask int8. If ``None`` was given for the mask,\n it is a volume of all 1s.\n\n Raises\n ------\n ValueError\n If the shapes of the given arrays don't match.\n \"\"\"\n if not isinstance(markers, (np.ndarray, list, tuple)):\n # not array-like, assume int\n markers = regular_seeds(image.shape, markers)\n elif markers.shape != image.shape:\n raise ValueError(\"`markers` (shape {}) must have same shape \"\n \"as `image` (shape {})\".format(markers.shape, image.shape))\n if mask is not None and mask.shape != image.shape:\n raise ValueError(\"`mask` must have same shape as `image`\")\n if mask is None:\n # Use a complete `True` mask if none is provided\n mask = np.ones(image.shape, bool)\n return (image.astype(np.float64),\n markers.astype(np.int32),\n mask.astype(np.int8))\n\n\ndef _validate_connectivity(image_dim, connectivity, offset):\n \"\"\"Convert any valid connectivity to a structuring element and offset.\n\n Parameters\n ----------\n image_dim : int\n The number of dimensions of the input image.\n connectivity : int, array, or None\n The neighborhood connectivity. An integer is interpreted as in\n ``scipy.ndimage.generate_binary_structure``, as the maximum number\n of orthogonal steps to reach a neighbor. An array is directly\n interpreted as a structuring element and its shape is validated against\n the input image shape. 
``None`` is interpreted as a connectivity of 1.\n offset : tuple of int, or None\n The coordinates of the center of the structuring element.\n\n Returns\n -------\n c_connectivity : array of bool\n The structuring element corresponding to the input `connectivity`.\n offset : array of int\n The offset corresponding to the center of the structuring element.\n\n Raises\n ------\n ValueError:\n If the image dimension and the connectivity or offset dimensions don't\n match.\n \"\"\"\n if connectivity is None:\n connectivity = 1\n if np.isscalar(connectivity):\n c_connectivity = ndi.generate_binary_structure(image_dim, connectivity)\n else:\n c_connectivity = np.array(connectivity, bool)\n if c_connectivity.ndim != image_dim:\n raise ValueError(\"Connectivity dimension must be same as image\")\n if offset is None:\n if any([x % 2 == 0 for x in c_connectivity.shape]):\n raise ValueError(\"Connectivity array must have an unambiguous \"\n \"center\")\n offset = np.array(c_connectivity.shape) // 2\n return c_connectivity, offset\n\n\ndef _compute_neighbors(image, structure, offset):\n \"\"\"Compute neighborhood as an array of linear offsets into the image.\n\n These are sorted according to Euclidean distance from the center (given\n by `offset`), ensuring that immediate neighbors are visited first.\n \"\"\"\n structure[tuple(offset)] = 0 # ignore the center; it's not a neighbor\n locations = np.transpose(np.nonzero(structure))\n sqdistances = np.sum((locations - offset)**2, axis=1)\n neighborhood = (np.ravel_multi_index(locations.T, image.shape) -\n np.ravel_multi_index(offset, image.shape)).astype(np.int32)\n sorted_neighborhood = neighborhood[np.argsort(sqdistances)]\n return sorted_neighborhood\n\n\ndef watershed(image, markers, connectivity=1, offset=None, mask=None,\n compactness=0, watershed_line=False):\n \"\"\"Find watershed basins in `image` flooded from given `markers`.\n\n Parameters\n ----------\n image: ndarray (2-D, 3-D, ...) of integers\n Data array where the lowest value points are labeled first.\n markers: int, or ndarray of int, same shape as `image`\n The desired number of markers, or an array marking the basins with the\n values to be assigned in the label matrix. Zero means not a marker.\n connectivity: ndarray, optional\n An array with the same number of dimensions as `image` whose\n non-zero elements indicate neighbors for connection.\n Following the scipy convention, default is a one-connected array of\n the dimension of the image.\n offset: array_like of shape image.ndim, optional\n offset of the connectivity (one offset per dimension)\n mask: ndarray of bools or 0s and 1s, optional\n Array of same shape as `image`. Only points at which mask == True\n will be labeled.\n compactness : float, optional\n Use compact watershed [3]_ with given compactness parameter.\n Higher values result in more regularly-shaped watershed basins.\n watershed_line : bool, optional\n If watershed_line is True, a one-pixel wide line separates the regions\n obtained by the watershed algorithm. The line has the label 0.\n\n Returns\n -------\n out: ndarray\n A labeled matrix of the same type and shape as markers\n\n See also\n --------\n skimage.segmentation.random_walker: random walker segmentation\n A segmentation algorithm based on anisotropic diffusion, usually\n slower than the watershed but with good results on noisy data and\n boundaries with holes.\n\n Notes\n -----\n This function implements a watershed algorithm [1]_ [2]_ that apportions\n pixels into marked basins. 
The algorithm uses a priority queue to hold\n the pixels with the metric for the priority queue being pixel value, then\n the time of entry into the queue - this settles ties in favor of the\n closest marker.\n\n Some ideas taken from\n Soille, \"Automated Basin Delineation from Digital Elevation Models Using\n Mathematical Morphology\", Signal Processing 20 (1990) 171-182\n\n The most important insight in the paper is that entry time onto the queue\n solves two problems: a pixel should be assigned to the neighbor with the\n largest gradient or, if there is no gradient, pixels on a plateau should\n be split between markers on opposite sides.\n\n This implementation converts all arguments to specific, lowest common\n denominator types, then passes these to a C algorithm.\n\n Markers can be determined manually, or automatically using for example\n the local minima of the gradient of the image, or the local maxima of the\n distance function to the background for separating overlapping objects\n (see example).\n\n References\n ----------\n .. [1] http://en.wikipedia.org/wiki/Watershed_%28image_processing%29\n\n .. [2] http://cmm.ensmp.fr/~beucher/wtshed.html\n\n .. [3] Peer Neubert & Peter Protzel (2014). Compact Watershed and\n Preemptive SLIC: On Improving Trade-offs of Superpixel Segmentation\n Algorithms. ICPR 2014, pp 996-1001. DOI:10.1109/ICPR.2014.181\n https://www.tu-chemnitz.de/etit/proaut/forschung/rsrc/cws_pSLIC_ICPR.pdf\n\n Examples\n --------\n The watershed algorithm is useful to separate overlapping objects.\n\n We first generate an initial image with two overlapping circles:\n\n >>> x, y = np.indices((80, 80))\n >>> x1, y1, x2, y2 = 28, 28, 44, 52\n >>> r1, r2 = 16, 20\n >>> mask_circle1 = (x - x1)**2 + (y - y1)**2 < r1**2\n >>> mask_circle2 = (x - x2)**2 + (y - y2)**2 < r2**2\n >>> image = np.logical_or(mask_circle1, mask_circle2)\n\n Next, we want to separate the two circles. We generate markers at the\n maxima of the distance to the background:\n\n >>> from scipy import ndimage as ndi\n >>> distance = ndi.distance_transform_edt(image)\n >>> from skimage.feature import peak_local_max\n >>> local_maxi = peak_local_max(distance, labels=image,\n ... footprint=np.ones((3, 3)),\n ... indices=False)\n >>> markers = ndi.label(local_maxi)[0]\n\n Finally, we run the watershed on the image and markers:\n\n >>> labels = watershed(-distance, markers, mask=image)\n\n The algorithm works also for 3-D images, and can be used for example to\n separate overlapping spheres.\n \"\"\"\n image, markers, mask = _validate_inputs(image, markers, mask)\n connectivity, offset = _validate_connectivity(image.ndim, connectivity,\n offset)\n\n # pad the image, markers, and mask so that we can use the mask to\n # keep from running off the edges\n pad_width = [(p, p) for p in offset]\n image = np.pad(image, pad_width, mode='constant')\n mask = np.pad(mask, pad_width, mode='constant').ravel()\n output = np.pad(markers, pad_width, mode='constant')\n\n flat_neighborhood = _compute_neighbors(image, connectivity, offset)\n marker_locations = np.flatnonzero(output).astype(np.int32)\n image_strides = np.array(image.strides, dtype=np.int32) // image.itemsize\n\n _watershed.watershed_raveled(image.ravel(),\n marker_locations, flat_neighborhood,\n mask, image_strides, compactness,\n output.ravel(),\n watershed_line)\n\n output = crop(output, pad_width, copy=True)\n\n if watershed_line:\n min_val = output.min()\n output[output == min_val] = 0\n\n return output\n",
"path": "skimage/morphology/watershed.py"
}
] | [
{
"content": "\"\"\"watershed.py - watershed algorithm\n\nThis module implements a watershed algorithm that apportions pixels into\nmarked basins. The algorithm uses a priority queue to hold the pixels\nwith the metric for the priority queue being pixel value, then the time\nof entry into the queue - this settles ties in favor of the closest marker.\n\nSome ideas taken from\nSoille, \"Automated Basin Delineation from Digital Elevation Models Using\nMathematical Morphology\", Signal Processing 20 (1990) 171-182.\n\nThe most important insight in the paper is that entry time onto the queue\nsolves two problems: a pixel should be assigned to the neighbor with the\nlargest gradient or, if there is no gradient, pixels on a plateau should\nbe split between markers on opposite sides.\n\nOriginally part of CellProfiler, code licensed under both GPL and BSD licenses.\nWebsite: http://www.cellprofiler.org\n\nCopyright (c) 2003-2009 Massachusetts Institute of Technology\nCopyright (c) 2009-2011 Broad Institute\nAll rights reserved.\n\nOriginal author: Lee Kamentsky\n\"\"\"\n\nimport numpy as np\nfrom scipy import ndimage as ndi\n\nfrom . import _watershed\nfrom ..util import crop, regular_seeds\n\n\ndef _validate_inputs(image, markers, mask):\n \"\"\"Ensure that all inputs to watershed have matching shapes and types.\n\n Parameters\n ----------\n image : array\n The input image.\n markers : int or array of int\n The marker image.\n mask : array, or None\n A boolean mask, True where we want to compute the watershed.\n\n Returns\n -------\n image, markers, mask : arrays\n The validated and formatted arrays. Image will have dtype float64,\n markers int32, and mask int8. If ``None`` was given for the mask,\n it is a volume of all 1s.\n\n Raises\n ------\n ValueError\n If the shapes of the given arrays don't match.\n \"\"\"\n if not isinstance(markers, (np.ndarray, list, tuple)):\n # not array-like, assume int\n markers = regular_seeds(image.shape, markers)\n elif markers.shape != image.shape:\n raise ValueError(\"`markers` (shape {}) must have same shape \"\n \"as `image` (shape {})\".format(markers.shape, image.shape))\n if mask is not None and mask.shape != image.shape:\n raise ValueError(\"`mask` must have same shape as `image`\")\n if mask is None:\n # Use a complete `True` mask if none is provided\n mask = np.ones(image.shape, bool)\n return (image.astype(np.float64),\n markers.astype(np.int32),\n mask.astype(np.int8))\n\n\ndef _validate_connectivity(image_dim, connectivity, offset):\n \"\"\"Convert any valid connectivity to a structuring element and offset.\n\n Parameters\n ----------\n image_dim : int\n The number of dimensions of the input image.\n connectivity : int, array, or None\n The neighborhood connectivity. An integer is interpreted as in\n ``scipy.ndimage.generate_binary_structure``, as the maximum number\n of orthogonal steps to reach a neighbor. An array is directly\n interpreted as a structuring element and its shape is validated against\n the input image shape. 
``None`` is interpreted as a connectivity of 1.\n offset : tuple of int, or None\n The coordinates of the center of the structuring element.\n\n Returns\n -------\n c_connectivity : array of bool\n The structuring element corresponding to the input `connectivity`.\n offset : array of int\n The offset corresponding to the center of the structuring element.\n\n Raises\n ------\n ValueError:\n If the image dimension and the connectivity or offset dimensions don't\n match.\n \"\"\"\n if connectivity is None:\n connectivity = 1\n if np.isscalar(connectivity):\n c_connectivity = ndi.generate_binary_structure(image_dim, connectivity)\n else:\n c_connectivity = np.array(connectivity, bool)\n if c_connectivity.ndim != image_dim:\n raise ValueError(\"Connectivity dimension must be same as image\")\n if offset is None:\n if any([x % 2 == 0 for x in c_connectivity.shape]):\n raise ValueError(\"Connectivity array must have an unambiguous \"\n \"center\")\n offset = np.array(c_connectivity.shape) // 2\n return c_connectivity, offset\n\n\ndef _compute_neighbors(image, structure, offset):\n \"\"\"Compute neighborhood as an array of linear offsets into the image.\n\n These are sorted according to Euclidean distance from the center (given\n by `offset`), ensuring that immediate neighbors are visited first.\n \"\"\"\n structure[tuple(offset)] = 0 # ignore the center; it's not a neighbor\n locations = np.transpose(np.nonzero(structure))\n sqdistances = np.sum((locations - offset)**2, axis=1)\n neighborhood = (np.ravel_multi_index(locations.T, image.shape) -\n np.ravel_multi_index(offset, image.shape)).astype(np.int32)\n sorted_neighborhood = neighborhood[np.argsort(sqdistances)]\n return sorted_neighborhood\n\n\ndef watershed(image, markers, connectivity=1, offset=None, mask=None,\n compactness=0, watershed_line=False):\n \"\"\"Find watershed basins in `image` flooded from given `markers`.\n\n Parameters\n ----------\n image: ndarray (2-D, 3-D, ...) of integers\n Data array where the lowest value points are labeled first.\n markers: int, or ndarray of int, same shape as `image`\n The desired number of markers, or an array marking the basins with the\n values to be assigned in the label matrix. Zero means not a marker.\n connectivity: ndarray, optional\n An array with the same number of dimensions as `image` whose\n non-zero elements indicate neighbors for connection.\n Following the scipy convention, default is a one-connected array of\n the dimension of the image.\n offset: array_like of shape image.ndim, optional\n offset of the connectivity (one offset per dimension)\n mask: ndarray of bools or 0s and 1s, optional\n Array of same shape as `image`. Only points at which mask == True\n will be labeled.\n compactness : float, optional\n Use compact watershed [3]_ with given compactness parameter.\n Higher values result in more regularly-shaped watershed basins.\n watershed_line : bool, optional\n If watershed_line is True, a one-pixel wide line separates the regions\n obtained by the watershed algorithm. The line has the label 0.\n\n Returns\n -------\n out: ndarray\n A labeled matrix of the same type and shape as markers\n\n See also\n --------\n skimage.segmentation.random_walker: random walker segmentation\n A segmentation algorithm based on anisotropic diffusion, usually\n slower than the watershed but with good results on noisy data and\n boundaries with holes.\n\n Notes\n -----\n This function implements a watershed algorithm [1]_ [2]_ that apportions\n pixels into marked basins. 
The algorithm uses a priority queue to hold\n the pixels with the metric for the priority queue being pixel value, then\n the time of entry into the queue - this settles ties in favor of the\n closest marker.\n\n Some ideas taken from\n Soille, \"Automated Basin Delineation from Digital Elevation Models Using\n Mathematical Morphology\", Signal Processing 20 (1990) 171-182\n\n The most important insight in the paper is that entry time onto the queue\n solves two problems: a pixel should be assigned to the neighbor with the\n largest gradient or, if there is no gradient, pixels on a plateau should\n be split between markers on opposite sides.\n\n This implementation converts all arguments to specific, lowest common\n denominator types, then passes these to a C algorithm.\n\n Markers can be determined manually, or automatically using for example\n the local minima of the gradient of the image, or the local maxima of the\n distance function to the background for separating overlapping objects\n (see example).\n\n References\n ----------\n .. [1] http://en.wikipedia.org/wiki/Watershed_%28image_processing%29\n\n .. [2] http://cmm.ensmp.fr/~beucher/wtshed.html\n\n .. [3] Peer Neubert & Peter Protzel (2014). Compact Watershed and\n Preemptive SLIC: On Improving Trade-offs of Superpixel Segmentation\n Algorithms. ICPR 2014, pp 996-1001. DOI:10.1109/ICPR.2014.181\n https://www.tu-chemnitz.de/etit/proaut/forschung/rsrc/cws_pSLIC_ICPR.pdf\n\n Examples\n --------\n The watershed algorithm is useful to separate overlapping objects.\n\n We first generate an initial image with two overlapping circles:\n\n >>> x, y = np.indices((80, 80))\n >>> x1, y1, x2, y2 = 28, 28, 44, 52\n >>> r1, r2 = 16, 20\n >>> mask_circle1 = (x - x1)**2 + (y - y1)**2 < r1**2\n >>> mask_circle2 = (x - x2)**2 + (y - y2)**2 < r2**2\n >>> image = np.logical_or(mask_circle1, mask_circle2)\n\n Next, we want to separate the two circles. We generate markers at the\n maxima of the distance to the background:\n\n >>> from scipy import ndimage as ndi\n >>> distance = ndi.distance_transform_edt(image)\n >>> from skimage.feature import peak_local_max\n >>> local_maxi = peak_local_max(distance, labels=image,\n ... footprint=np.ones((3, 3)),\n ... indices=False)\n >>> markers = ndi.label(local_maxi)[0]\n\n Finally, we run the watershed on the image and markers:\n\n >>> labels = watershed(-distance, markers, mask=image)\n\n The algorithm works also for 3-D images, and can be used for example to\n separate overlapping spheres.\n \"\"\"\n image, markers, mask = _validate_inputs(image, markers, mask)\n connectivity, offset = _validate_connectivity(image.ndim, connectivity,\n offset)\n\n # pad the image, markers, and mask so that we can use the mask to\n # keep from running off the edges\n pad_width = [(p, p) for p in offset]\n image = np.pad(image, pad_width, mode='constant')\n mask = np.pad(mask, pad_width, mode='constant').ravel()\n output = np.pad(markers, pad_width, mode='constant')\n\n flat_neighborhood = _compute_neighbors(image, connectivity, offset)\n marker_locations = np.flatnonzero(output).astype(np.int32)\n image_strides = np.array(image.strides, dtype=np.int32) // image.itemsize\n\n _watershed.watershed_raveled(image.ravel(),\n marker_locations, flat_neighborhood,\n mask, image_strides, compactness,\n output.ravel(),\n watershed_line)\n\n output = crop(output, pad_width, copy=True)\n\n return output\n",
"path": "skimage/morphology/watershed.py"
}
] | diff --git a/skimage/morphology/_watershed.pyx b/skimage/morphology/_watershed.pyx
index 5d31fe76560..f7f9e1f1f5f 100644
--- a/skimage/morphology/_watershed.pyx
+++ b/skimage/morphology/_watershed.pyx
@@ -23,6 +23,7 @@ ctypedef cnp.int8_t DTYPE_BOOL_t
include "heap_watershed.pxi"
[email protected](False)
@cython.boundscheck(False)
@cython.cdivision(True)
@cython.overflowcheck(False)
@@ -40,7 +41,42 @@ cdef inline double _euclid_dist(cnp.int32_t pt0, cnp.int32_t pt1,
return sqrt(result)
[email protected](False)
@cython.boundscheck(False)
[email protected](True)
[email protected]_tracebacks(False)
+cdef inline DTYPE_BOOL_t _diff_neighbors(DTYPE_INT32_t[::1] output,
+ DTYPE_INT32_t[::1] structure,
+ DTYPE_BOOL_t[::1] mask,
+ Py_ssize_t index):
+ """
+ Return ``True`` and set ``mask[index]`` to ``False`` if the neighbors of
+ ``index`` (as given by the offsets in ``structure``) have more than one
+ distinct nonzero label.
+ """
+ cdef:
+ Py_ssize_t i, neighbor_index
+ DTYPE_INT32_t neighbor_label0, neighbor_label1
+ Py_ssize_t nneighbors = structure.shape[0]
+
+ if not mask[index]:
+ return True
+
+ neighbor_label0, neighbor_label1 = 0, 0
+ for i in range(nneighbors):
+ neighbor_index = structure[i] + index
+ if mask[neighbor_index]: # neighbor not a watershed line
+ if not neighbor_label0:
+ neighbor_label0 = output[neighbor_index]
+ else:
+ neighbor_label1 = output[neighbor_index]
+ if neighbor_label1 and neighbor_label1 != neighbor_label0:
+ mask[index] = False
+ return True
+ return False
+
[email protected](False)
[email protected](False)
def watershed_raveled(cnp.float64_t[::1] image,
DTYPE_INT32_t[::1] marker_locations,
DTYPE_INT32_t[::1] structure,
@@ -89,7 +125,7 @@ def watershed_raveled(cnp.float64_t[::1] image,
cdef Py_ssize_t i = 0
cdef Py_ssize_t age = 1
cdef Py_ssize_t index = 0
- cdef DTYPE_INT32_t wsl_label = -1
+ cdef DTYPE_BOOL_t compact = (compactness > 0)
cdef Heap *hp = <Heap *> heap_from_numpy2()
@@ -100,52 +136,58 @@ def watershed_raveled(cnp.float64_t[::1] image,
elem.index = index
elem.source = index
heappush(hp, &elem)
- if wsl and wsl_label >= output[index]:
- wsl_label = output[index] - 1
while hp.items > 0:
heappop(hp, &elem)
- # this can happen if the same pixel entered the queue
- # several times before being processed.
- if wsl and output[elem.index] == wsl_label:
- # wsl labels are not propagated.
- continue
-
- if output[elem.index] and elem.index != elem.source:
- # non-marker, already visited from another neighbor
- continue
+ if compact or wsl:
+ # in the compact case, we need to label pixels as they come off
+ # the heap, because the same pixel can be pushed twice, *and* the
+ # later push can have lower cost because of the compactness.
+ #
+ # In the case of preserving watershed lines, a similar argument
+ # applies: we can only observe that all neighbors have been labeled
+ # as the pixel comes off the heap. Trying to do so at push time
+ # is a bug.
+ if output[elem.index] and elem.index != elem.source:
+ # non-marker, already visited from another neighbor
+ continue
+ if wsl:
+ # if the current element has different-labeled neighbors and we
+ # want to preserve watershed lines, we mask it and move on
+ if _diff_neighbors(output, structure, mask, elem.index):
+ continue
+ output[elem.index] = output[elem.source]
- output[elem.index] = output[elem.source]
for i in range(nneighbors):
# get the flattened address of the neighbor
- index = structure[i] + elem.index
+ neighbor_index = structure[i] + elem.index
- if not mask[index]:
+ if not mask[neighbor_index]:
+ # this branch includes basin boundaries, aka watershed lines
# neighbor is not in mask
continue
- if wsl and output[index] == wsl_label:
- continue
-
- if output[index]:
- # neighbor has a label (but not wsl_label):
- # the neighbor is not added to the queue.
- if wsl:
- # if the label of the neighbor is different
- # from the label of the pixel taken from the queue,
- # the latter takes the WSL label.
- if output[index] != output[elem.index]:
- output[elem.index] = wsl_label
+ if output[neighbor_index]:
+ # pre-labeled neighbor is not added to the queue.
continue
age += 1
- new_elem.value = image[index]
- if compactness > 0:
+ new_elem.value = image[neighbor_index]
+ if compact:
new_elem.value += (compactness *
- _euclid_dist(index, elem.source, strides))
+ _euclid_dist(neighbor_index, elem.source,
+ strides))
+ elif not wsl:
+ # in the simplest watershed case (no compactness and no
+ # watershed lines), we can label a pixel at the time that
+ # we push it onto the heap, because it can't be reached with
+ # lower cost later.
+ # This results in a very significant performance gain, see:
+ # https://github.com/scikit-image/scikit-image/issues/2636
+ output[neighbor_index] = output[elem.index]
new_elem.age = age
- new_elem.index = index
+ new_elem.index = neighbor_index
new_elem.source = elem.source
heappush(hp, &new_elem)
diff --git a/skimage/morphology/watershed.py b/skimage/morphology/watershed.py
index d98c245e5a8..ff30c2c8314 100644
--- a/skimage/morphology/watershed.py
+++ b/skimage/morphology/watershed.py
@@ -261,8 +261,4 @@ def watershed(image, markers, connectivity=1, offset=None, mask=None,
output = crop(output, pad_width, copy=True)
- if watershed_line:
- min_val = output.min()
- output[output == min_val] = 0
-
return output
|
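The scikit-image record above turns on one idea spelled out in the watershed docstring and diff: flood outward from the markers with a priority queue keyed on pixel value, break ties by age of entry, and only defer labelling to pop time when compactness or watershed lines make a later, cheaper visit possible. A minimal pure-Python sketch of the simple case (no compactness, no watershed lines), with an illustrative `neighbors` callable standing in for the raveled-offset machinery of the Cython code:

```python
import heapq
import numpy as np

def flood_watershed(image, markers, neighbors):
    """Toy priority-queue flooding; `image` and `markers` are raveled 1-D arrays."""
    output = markers.copy()
    heap, age = [], 0
    # Seed the queue with every marker pixel; the heap orders entries by
    # (pixel value, age of entry), so ties go to the earliest push.
    for idx in np.flatnonzero(markers):
        heapq.heappush(heap, (image[idx], age, idx))
        age += 1
    while heap:
        _, _, idx = heapq.heappop(heap)
        for nbr in neighbors(idx):
            if output[nbr]:                # marker or already claimed
                continue
            output[nbr] = output[idx]      # push-time labelling, simple case only
            age += 1
            heapq.heappush(heap, (image[nbr], age, nbr))
    return output
```

As the comments added in the Cython diff explain, push-time labelling only holds in this simple case: with compactness the same pixel can be pushed again at lower cost, and with watershed lines the differing-neighbour test only makes sense once the pixel comes off the heap, so the patched loop labels at pop time in both situations.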
spacetelescope__jwql-550 | Cron jobs for monitors currently failing
Traceback (most recent call last):
File "/home/jwqladm/repositories/jwql/jwql/instrument_monitors/common_monitors/bias_monitor.py", line 58, in <module>
from jwql.instrument_monitors.common_monitors.dark_monitor import mast_query_darks
File "/home/jwqladm/repositories/jwql/jwql/instrument_monitors/common_monitors/dark_monitor.py", line 77, in <module>
from jwql.jwql_monitors import monitor_mast
File "/home/jwqladm/repositories/jwql/jwql/jwql_monitors/monitor_mast.py", line 25, in <module>
from bokeh.embed import components
File "/grp/jwst/ins/jwql/envs/miniconda3/envs/jwql-3.6/lib/python3.6/site-packages/bokeh/__init__.py", line 81, in <module>
from .util import logconfig
File "/grp/jwst/ins/jwql/envs/miniconda3/envs/jwql-3.6/lib/python3.6/site-packages/bokeh/util/logconfig.py", line 87, in <module>
level = settings.py_log_level()
File "/grp/jwst/ins/jwql/envs/miniconda3/envs/jwql-3.6/lib/python3.6/site-packages/bokeh/settings.py", line 310, in __call__
return self._convert(os.environ[self._env_var])
File "/grp/jwst/ins/jwql/envs/miniconda3/envs/jwql-3.6/lib/python3.6/site-packages/bokeh/settings.py", line 236, in convert_logging
raise ValueError("Cannot convert {} to log level, valid values are: {}".format(value, ", ".join(_log_levels)))
ValueError: Cannot convert WARN to log level, valid values are: CRITICAL, ERROR, WARNING, INFO, DEBUG, TRACE, NONE
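Stripped of the import chain, the failure is bokeh's settings code rejecting the abbreviated level name `WARN` read from its logging environment variable; only the full names listed in the error message are accepted. A rough, illustrative mimic of that check (not bokeh's actual implementation):

```python
_LOG_LEVELS = ("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "TRACE", "NONE")

def convert_logging(value):
    # Mimics the validation that raised in bokeh/settings.py above:
    # short forms such as "WARN" are simply not in the allowed set.
    if value not in _LOG_LEVELS:
        raise ValueError("Cannot convert {} to log level, valid values are: {}"
                         .format(value, ", ".join(_LOG_LEVELS)))
    return value

convert_logging("WARNING")        # accepted
try:
    convert_logging("WARN")       # the value the cron environment supplied
except ValueError as err:
    print(err)                    # same message as in the traceback above
```

The fix recorded below sidesteps the stricter check by pinning `bokeh>=1.0,<1.4` (the environment file previously required bokeh 1.4.0); setting the environment variable to the full name `WARNING` would presumably have worked as well.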
| [
{
"content": "import numpy as np\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nVERSION = '0.22.0'\n\nAUTHORS = 'Matthew Bourque, Misty Cracraft, Joe Filippazzo, Bryan Hilbert, '\nAUTHORS += 'Graham Kanarek, Catherine Martlin, Johannes Sahlmann, Ben Sunnquist'\n\nDESCRIPTION = 'The James Webb Space Telescope Quicklook Project'\n\nDEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/jwst#0.13.0']\nREQUIRES = [\n 'asdf>=2.3.3',\n 'astropy>=3.2.1',\n 'astroquery>=0.3.9',\n 'authlib',\n 'bokeh>=1.0',\n 'codecov',\n 'django>=2.0',\n 'flake8',\n 'inflection',\n 'ipython',\n 'jinja2',\n 'jsonschema==2.6.0',\n 'jwedb>=0.0.3',\n 'matplotlib',\n 'numpy',\n 'numpydoc',\n 'pandas',\n 'psycopg2',\n 'pysiaf',\n 'pytest',\n 'pytest-cov',\n 'scipy',\n 'sphinx',\n 'sqlalchemy',\n 'stsci_rtd_theme',\n 'twine'\n]\n\nsetup(\n name='jwql',\n version=VERSION,\n description=DESCRIPTION,\n url='https://github.com/spacetelescope/jwql.git',\n author=AUTHORS,\n author_email='[email protected]',\n license='BSD',\n keywords=['astronomy', 'python'],\n classifiers=['Programming Language :: Python'],\n packages=find_packages(),\n install_requires=REQUIRES,\n dependency_links=DEPENDENCY_LINKS,\n include_package_data=True,\n include_dirs=[np.get_include()],\n)\n",
"path": "setup.py"
}
] | [
{
"content": "import numpy as np\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nVERSION = '0.22.0'\n\nAUTHORS = 'Matthew Bourque, Misty Cracraft, Joe Filippazzo, Bryan Hilbert, '\nAUTHORS += 'Graham Kanarek, Catherine Martlin, Johannes Sahlmann, Ben Sunnquist'\n\nDESCRIPTION = 'The James Webb Space Telescope Quicklook Project'\n\nDEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/jwst#0.13.0']\nREQUIRES = [\n 'asdf>=2.3.3',\n 'astropy>=3.2.1',\n 'astroquery>=0.3.9',\n 'authlib',\n 'bokeh>=1.0,<1.4',\n 'codecov',\n 'django>=2.0',\n 'flake8',\n 'inflection',\n 'ipython',\n 'jinja2',\n 'jsonschema==2.6.0',\n 'jwedb>=0.0.3',\n 'matplotlib',\n 'numpy',\n 'numpydoc',\n 'pandas',\n 'psycopg2',\n 'pysiaf',\n 'pytest',\n 'pytest-cov',\n 'scipy',\n 'sphinx',\n 'sqlalchemy',\n 'stsci_rtd_theme',\n 'twine'\n]\n\nsetup(\n name='jwql',\n version=VERSION,\n description=DESCRIPTION,\n url='https://github.com/spacetelescope/jwql.git',\n author=AUTHORS,\n author_email='[email protected]',\n license='BSD',\n keywords=['astronomy', 'python'],\n classifiers=['Programming Language :: Python'],\n packages=find_packages(),\n install_requires=REQUIRES,\n dependency_links=DEPENDENCY_LINKS,\n include_package_data=True,\n include_dirs=[np.get_include()],\n)\n",
"path": "setup.py"
}
] | diff --git a/environment_python_3_6.yml b/environment_python_3_6.yml
index d00a53d5f..2d7ca225a 100644
--- a/environment_python_3_6.yml
+++ b/environment_python_3_6.yml
@@ -3,7 +3,7 @@ channels:
- http://ssb.stsci.edu/astroconda
dependencies:
- astroquery=0.3.10
-- bokeh=1.4.0
+- bokeh>=1.0,<1.4
- django=2.2.5
- flake8=3.7.8
- inflection=0.3.1
diff --git a/requirements.txt b/requirements.txt
index 691234c14..305c99ffb 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,7 +2,7 @@ asdf==2.4.2
astropy==4.0
astroquery==0.4
authlib==0.13
-bokeh==1.4.0
+bokeh>=1.0,<1.4
codecov==2.0.15
django==3.0.3
flake8==3.7.9
diff --git a/setup.py b/setup.py
index de0d0f3ee..b61b1caea 100644
--- a/setup.py
+++ b/setup.py
@@ -15,7 +15,7 @@
'astropy>=3.2.1',
'astroquery>=0.3.9',
'authlib',
- 'bokeh>=1.0',
+ 'bokeh>=1.0,<1.4',
'codecov',
'django>=2.0',
'flake8',
|
blaze__blaze-873 | Pandas Boolean Arithmetic Display
Boolean arithmetic with a `Data(pandas.DataFrame)` seems to mess up its interactive display, but the actual expression is fully functional.
``` Python
>>> import blaze as bz
>>> import pandas as pd
>>> data = bz.Data(pd.read_csv("iris.csv"))
>>> data
SepalLength SepalWidth PetalLength PetalWidth Name
0 5.1 3.5 1.4 0.2 Iris-setosa
1 4.9 3.0 1.4 0.2 Iris-setosa
2 4.7 3.2 1.3 0.2 Iris-setosa
3 4.6 3.1 1.5 0.2 Iris-setosa
4 5.0 3.6 1.4 0.2 Iris-setosa
5 5.4 3.9 1.7 0.4 Iris-setosa
6 4.6 3.4 1.4 0.3 Iris-setosa
7 5.0 3.4 1.5 0.2 Iris-setosa
8 4.4 2.9 1.4 0.2 Iris-setosa
9 4.9 3.1 1.5 0.1 Iris-setosa
...
>>> (data.SepalLength > 5.0) & (data.SepalLength < 5.5)
Empty DataFrame
Columns: [None]
Index: []
>>> bz.compute( (data.SepalLength > 5.0) & (data.SepalLength < 5.5) )
0 True
1 False
2 False
3 False
4 False
...
145 False
146 False
147 False
148 False
149 False
Name: SepalLength, Length: 150, dtype: bool
>>> data[(data.SepalLength > 5.0) & (data.SepalLength < 5.5)]
SepalLength SepalWidth PetalLength PetalWidth Name
0 5.1 3.5 1.4 0.2 Iris-setosa
5 5.4 3.9 1.7 0.4 Iris-setosa
10 5.4 3.7 1.5 0.2 Iris-setosa
16 5.4 3.9 1.3 0.4 Iris-setosa
17 5.1 3.5 1.4 0.3 Iris-setosa
19 5.1 3.8 1.5 0.3 Iris-setosa
20 5.4 3.4 1.7 0.2 Iris-setosa
21 5.1 3.7 1.5 0.4 Iris-setosa
23 5.1 3.3 1.7 0.5 Iris-setosa
27 5.2 3.5 1.5 0.2 Iris-setosa
...
```
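The two-line fix in the diff below targets `BinOp._name`: when both operands carry the same field name, the old code fell through and returned `None`, so an expression such as `(data.SepalLength > 5.0) & (data.SepalLength < 5.5)` ended up nameless, which is what the `Columns: [None]` in the empty interactive display above reflects even though `compute` works fine. A standalone sketch of the name-propagation rule before and after the patch (illustrative helper, not blaze's class):

```python
def binop_name(lhs_name, rhs_name, patched=True):
    """Name given to a binary expression built from two (possibly named) operands."""
    if lhs_name and not rhs_name:
        return lhs_name
    if rhs_name and not lhs_name:
        return rhs_name
    if patched and lhs_name == rhs_name:
        return lhs_name    # the added case: both sides share one column
    return None            # differing names; the old code landed here even when they matched

binop_name("SepalLength", None)                           # 'SepalLength'
binop_name("SepalLength", "SepalLength", patched=False)   # None -> blank display
binop_name("SepalLength", "SepalLength")                  # 'SepalLength'
```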
| [
{
"content": "from __future__ import absolute_import, division, print_function\n\nimport operator\nfrom toolz import first\nimport numpy as np\nfrom datashape import dshape, var, DataShape\nfrom dateutil.parser import parse as dt_parse\nfrom datashape.predicates import isscalar\nfrom datashape import coretypes as ct\n\nfrom .core import parenthesize, eval_str\nfrom .expressions import Expr, shape, ElemWise\nfrom ..dispatch import dispatch\nfrom ..compatibility import _strtypes\n\n\n__all__ = '''BinOp UnaryOp Arithmetic Add Mult Sub Div FloorDiv Pow Mod USub\nRelational Eq Ne Ge Lt Le Gt Gt And Or Not'''.split()\n\n\ndef name(o):\n if hasattr(o, '_name'):\n return o._name\n else:\n return None\n\nclass BinOp(ElemWise):\n __slots__ = '_hash', 'lhs', 'rhs'\n __inputs__ = 'lhs', 'rhs'\n\n def __init__(self, lhs, rhs):\n self.lhs = lhs\n self.rhs = rhs\n\n def __str__(self):\n lhs = parenthesize(eval_str(self.lhs))\n rhs = parenthesize(eval_str(self.rhs))\n return '%s %s %s' % (lhs, self.symbol, rhs)\n\n @property\n def _name(self):\n if not isscalar(self.dshape.measure):\n return None\n l, r = name(self.lhs), name(self.rhs)\n if l and not r:\n return l\n if r and not l:\n return r\n if l == r:\n return l\n\n @property\n def _inputs(self):\n result = []\n if isinstance(self.lhs, Expr):\n result.append(self.lhs)\n if isinstance(self.rhs, Expr):\n result.append(self.rhs)\n return tuple(result)\n\n\ndef maxvar(L):\n \"\"\"\n\n >>> maxvar([1, 2, var])\n Var()\n\n >>> maxvar([1, 2, 3])\n 3\n \"\"\"\n if var in L:\n return var\n else:\n return max(L)\n\n\ndef maxshape(shapes):\n \"\"\"\n\n >>> maxshape([(10, 1), (1, 10), ()])\n (10, 10)\n \"\"\"\n shapes = [shape for shape in shapes if shape]\n if not shapes:\n return ()\n if len(set(map(len, shapes))) != 1:\n raise ValueError(\"Only support arithmetic on expressions with equal \"\n \"number of dimensions.\")\n return tuple(map(maxvar, zip(*shapes)))\n\n\nclass UnaryOp(ElemWise):\n __slots__ = '_hash', '_child',\n\n def __init__(self, child):\n self._child = child\n\n def __str__(self):\n return '%s(%s)' % (self.symbol, eval_str(self._child))\n\n @property\n def symbol(self):\n return type(self).__name__\n\n @property\n def dshape(self):\n return DataShape(*(shape(self._child) + (self._dtype,)))\n\n @property\n def _name(self):\n return self._child._name\n\n\nclass Arithmetic(BinOp):\n \"\"\" Super class for arithmetic operators like add or mul \"\"\"\n _dtype = ct.real\n\n @property\n def dshape(self):\n # TODO: better inference. e.g. int + int -> int\n return DataShape(*(maxshape([shape(self.lhs), shape(self.rhs)]) + (self._dtype,)))\n\n\nclass Add(Arithmetic):\n symbol = '+'\n op = operator.add\n\n\nclass Mult(Arithmetic):\n symbol = '*'\n op = operator.mul\n\n\nclass Sub(Arithmetic):\n symbol = '-'\n op = operator.sub\n\n\nclass Div(Arithmetic):\n symbol = '/'\n op = operator.truediv\n\n\nclass FloorDiv(Arithmetic):\n symbol = '//'\n op = operator.floordiv\n\n\nclass Pow(Arithmetic):\n symbol = '**'\n op = operator.pow\n\n\nclass Mod(Arithmetic):\n symbol = '%'\n op = operator.mod\n\n\nclass USub(UnaryOp):\n op = operator.neg\n symbol = '-'\n\n def __str__(self):\n return '-%s' % parenthesize(eval_str(self._child))\n\n @property\n def _dtype(self):\n # TODO: better inference. 
-uint -> int\n return self._child.schema\n\n\n@dispatch(ct.Option, object)\ndef scalar_coerce(ds, val):\n if val or val == 0:\n return scalar_coerce(ds.ty, val)\n else:\n return None\n\n\n@dispatch(ct.Date, _strtypes)\ndef scalar_coerce(_, val):\n dt = dt_parse(val)\n if dt.time():\n raise ValueError(\"Can not coerce %s to type Date, \"\n \"contains time information\")\n return dt.date()\n\n\n@dispatch(ct.DateTime, _strtypes)\ndef scalar_coerce(_, val):\n return dt_parse(val)\n\n\n@dispatch(ct.CType, _strtypes)\ndef scalar_coerce(dt, val):\n return np.asscalar(np.asarray(val, dtype=dt.to_numpy_dtype()))\n\n\n@dispatch(ct.Record, object)\ndef scalar_coerce(rec, val):\n if len(rec.fields) == 1:\n return scalar_coerce(first(rec.types), val)\n else:\n raise TypeError(\"Trying to coerce complex datashape\\n\"\n \"got dshape: %s\\n\"\n \"scalar_coerce only intended for scalar values\" % rec)\n\n\n@dispatch(ct.DataShape, object)\ndef scalar_coerce(ds, val):\n return scalar_coerce(ds.measure, val)\n\n\n@dispatch(object, object)\ndef scalar_coerce(dtype, val):\n return val\n\n\n@dispatch(_strtypes, object)\ndef scalar_coerce(ds, val):\n return scalar_coerce(dshape(ds), val)\n\n\ndef _neg(self):\n return USub(self)\n\ndef _add(self, other):\n return Add(self, scalar_coerce(self.dshape, other))\n\ndef _radd(self, other):\n return Add(scalar_coerce(self.dshape, other), self)\n\ndef _mul(self, other):\n return Mult(self, scalar_coerce(self.dshape, other))\n\ndef _rmul(self, other):\n return Mult(scalar_coerce(self.dshape, other), self)\n\ndef _div(self, other):\n return Div(self, scalar_coerce(self.dshape, other))\n\ndef _rdiv(self, other):\n return Div(scalar_coerce(self.dshape, other), self)\n\ndef _floordiv(self, other):\n return FloorDiv(self, scalar_coerce(self.dshape, other))\n\ndef _rfloordiv(self, other):\n return FloorDiv(scalar_coerce(self.dshape, other), self)\n\ndef _sub(self, other):\n return Sub(self, scalar_coerce(self.dshape, other))\n\ndef _rsub(self, other):\n return Sub(scalar_coerce(self.dshape, other), self)\n\ndef _pow(self, other):\n return Pow(self, scalar_coerce(self.dshape, other))\n\ndef _rpow(self, other):\n return Pow(scalar_coerce(self.dshape, other), self)\n\ndef _mod(self, other):\n return Mod(self, scalar_coerce(self.dshape, other))\n\ndef _rmod(self, other):\n return Mod(scalar_coerce(self.dshape, other), self)\n\n\nclass Relational(Arithmetic):\n _dtype = ct.bool_\n\n\nclass Eq(Relational):\n symbol = '=='\n op = operator.eq\n\n\nclass Ne(Relational):\n symbol = '!='\n op = operator.ne\n\n\nclass Ge(Relational):\n symbol = '>='\n op = operator.ge\n\n\nclass Le(Relational):\n symbol = '<='\n op = operator.le\n\n\nclass Gt(Relational):\n symbol = '>'\n op = operator.gt\n\n\nclass Lt(Relational):\n symbol = '<'\n op = operator.lt\n\n\nclass And(Arithmetic):\n symbol = '&'\n op = operator.and_\n _dtype = ct.bool_\n\n\nclass Or(Arithmetic):\n symbol = '|'\n op = operator.or_\n _dtype = ct.bool_\n\n\nclass Not(UnaryOp):\n symbol = '~'\n op = operator.invert\n _dtype = ct.bool_\n def __str__(self):\n return '~%s' % parenthesize(eval_str(self._child))\n\n\ndef _eq(self, other):\n return Eq(self, scalar_coerce(self.dshape, other))\n\ndef _ne(self, other):\n return Ne(self, scalar_coerce(self.dshape, other))\n\ndef _lt(self, other):\n return Lt(self, scalar_coerce(self.dshape, other))\n\ndef _le(self, other):\n return Le(self, scalar_coerce(self.dshape, other))\n\ndef _gt(self, other):\n return Gt(self, scalar_coerce(self.dshape, other))\n\ndef _ge(self, other):\n return 
Ge(self, scalar_coerce(self.dshape, other))\n\ndef _invert(self):\n return Invert(self)\n\ndef _and(self, other):\n return And(self, other)\n\ndef _rand(self, other):\n return And(other, self)\n\ndef _or(self, other):\n return Or(self, other)\n\ndef _ror(self, other):\n return Or(other, self)\n\ndef _invert(self):\n return Not(self)\n\nInvert = Not\nBitAnd = And\nBitOr = Or\n\n\nfrom .expressions import schema_method_list\n\nschema_method_list.extend([\n (isscalar,\n set([_add, _radd, _mul,\n _rmul, _div, _rdiv, _floordiv, _rfloordiv, _sub, _rsub, _pow,\n _rpow, _mod, _rmod, _neg])),\n (isscalar, set([_eq, _ne, _lt, _le, _gt, _ge])),\n (isscalar, set([_or, _ror, _and, _rand, _invert])),\n ])\n",
"path": "blaze/expr/arithmetic.py"
}
] | [
{
"content": "from __future__ import absolute_import, division, print_function\n\nimport operator\nfrom toolz import first\nimport numpy as np\nfrom datashape import dshape, var, DataShape\nfrom dateutil.parser import parse as dt_parse\nfrom datashape.predicates import isscalar\nfrom datashape import coretypes as ct\n\nfrom .core import parenthesize, eval_str\nfrom .expressions import Expr, shape, ElemWise\nfrom ..dispatch import dispatch\nfrom ..compatibility import _strtypes\n\n\n__all__ = '''BinOp UnaryOp Arithmetic Add Mult Sub Div FloorDiv Pow Mod USub\nRelational Eq Ne Ge Lt Le Gt Gt And Or Not'''.split()\n\n\ndef name(o):\n if hasattr(o, '_name'):\n return o._name\n else:\n return None\n\nclass BinOp(ElemWise):\n __slots__ = '_hash', 'lhs', 'rhs'\n __inputs__ = 'lhs', 'rhs'\n\n def __init__(self, lhs, rhs):\n self.lhs = lhs\n self.rhs = rhs\n\n def __str__(self):\n lhs = parenthesize(eval_str(self.lhs))\n rhs = parenthesize(eval_str(self.rhs))\n return '%s %s %s' % (lhs, self.symbol, rhs)\n\n @property\n def _name(self):\n if not isscalar(self.dshape.measure):\n return None\n l, r = name(self.lhs), name(self.rhs)\n if l and not r:\n return l\n if r and not l:\n return r\n if l == r:\n return l\n\n @property\n def _inputs(self):\n result = []\n if isinstance(self.lhs, Expr):\n result.append(self.lhs)\n if isinstance(self.rhs, Expr):\n result.append(self.rhs)\n return tuple(result)\n\n\ndef maxvar(L):\n \"\"\"\n\n >>> maxvar([1, 2, var])\n Var()\n\n >>> maxvar([1, 2, 3])\n 3\n \"\"\"\n if var in L:\n return var\n else:\n return max(L)\n\n\ndef maxshape(shapes):\n \"\"\"\n\n >>> maxshape([(10, 1), (1, 10), ()])\n (10, 10)\n \"\"\"\n shapes = [shape for shape in shapes if shape]\n if not shapes:\n return ()\n if len(set(map(len, shapes))) != 1:\n raise ValueError(\"Only support arithmetic on expressions with equal \"\n \"number of dimensions.\")\n return tuple(map(maxvar, zip(*shapes)))\n\n\nclass UnaryOp(ElemWise):\n __slots__ = '_hash', '_child',\n\n def __init__(self, child):\n self._child = child\n\n def __str__(self):\n return '%s(%s)' % (self.symbol, eval_str(self._child))\n\n @property\n def symbol(self):\n return type(self).__name__\n\n @property\n def dshape(self):\n return DataShape(*(shape(self._child) + (self._dtype,)))\n\n @property\n def _name(self):\n return self._child._name\n\n\nclass Arithmetic(BinOp):\n \"\"\" Super class for arithmetic operators like add or mul \"\"\"\n _dtype = ct.real\n\n @property\n def dshape(self):\n # TODO: better inference. e.g. int + int -> int\n return DataShape(*(maxshape([shape(self.lhs), shape(self.rhs)]) + (self._dtype,)))\n\n\nclass Add(Arithmetic):\n symbol = '+'\n op = operator.add\n\n\nclass Mult(Arithmetic):\n symbol = '*'\n op = operator.mul\n\n\nclass Sub(Arithmetic):\n symbol = '-'\n op = operator.sub\n\n\nclass Div(Arithmetic):\n symbol = '/'\n op = operator.truediv\n\n\nclass FloorDiv(Arithmetic):\n symbol = '//'\n op = operator.floordiv\n\n\nclass Pow(Arithmetic):\n symbol = '**'\n op = operator.pow\n\n\nclass Mod(Arithmetic):\n symbol = '%'\n op = operator.mod\n\n\nclass USub(UnaryOp):\n op = operator.neg\n symbol = '-'\n\n def __str__(self):\n return '-%s' % parenthesize(eval_str(self._child))\n\n @property\n def _dtype(self):\n # TODO: better inference. 
-uint -> int\n return self._child.schema\n\n\n@dispatch(ct.Option, object)\ndef scalar_coerce(ds, val):\n if val or val == 0:\n return scalar_coerce(ds.ty, val)\n else:\n return None\n\n\n@dispatch(ct.Date, _strtypes)\ndef scalar_coerce(_, val):\n dt = dt_parse(val)\n if dt.time():\n raise ValueError(\"Can not coerce %s to type Date, \"\n \"contains time information\")\n return dt.date()\n\n\n@dispatch(ct.DateTime, _strtypes)\ndef scalar_coerce(_, val):\n return dt_parse(val)\n\n\n@dispatch(ct.CType, _strtypes)\ndef scalar_coerce(dt, val):\n return np.asscalar(np.asarray(val, dtype=dt.to_numpy_dtype()))\n\n\n@dispatch(ct.Record, object)\ndef scalar_coerce(rec, val):\n if len(rec.fields) == 1:\n return scalar_coerce(first(rec.types), val)\n else:\n raise TypeError(\"Trying to coerce complex datashape\\n\"\n \"got dshape: %s\\n\"\n \"scalar_coerce only intended for scalar values\" % rec)\n\n\n@dispatch(ct.DataShape, object)\ndef scalar_coerce(ds, val):\n return scalar_coerce(ds.measure, val)\n\n\n@dispatch(object, object)\ndef scalar_coerce(dtype, val):\n return val\n\n\n@dispatch(_strtypes, object)\ndef scalar_coerce(ds, val):\n return scalar_coerce(dshape(ds), val)\n\n\ndef _neg(self):\n return USub(self)\n\ndef _add(self, other):\n return Add(self, scalar_coerce(self.dshape, other))\n\ndef _radd(self, other):\n return Add(scalar_coerce(self.dshape, other), self)\n\ndef _mul(self, other):\n return Mult(self, scalar_coerce(self.dshape, other))\n\ndef _rmul(self, other):\n return Mult(scalar_coerce(self.dshape, other), self)\n\ndef _div(self, other):\n return Div(self, scalar_coerce(self.dshape, other))\n\ndef _rdiv(self, other):\n return Div(scalar_coerce(self.dshape, other), self)\n\ndef _floordiv(self, other):\n return FloorDiv(self, scalar_coerce(self.dshape, other))\n\ndef _rfloordiv(self, other):\n return FloorDiv(scalar_coerce(self.dshape, other), self)\n\ndef _sub(self, other):\n return Sub(self, scalar_coerce(self.dshape, other))\n\ndef _rsub(self, other):\n return Sub(scalar_coerce(self.dshape, other), self)\n\ndef _pow(self, other):\n return Pow(self, scalar_coerce(self.dshape, other))\n\ndef _rpow(self, other):\n return Pow(scalar_coerce(self.dshape, other), self)\n\ndef _mod(self, other):\n return Mod(self, scalar_coerce(self.dshape, other))\n\ndef _rmod(self, other):\n return Mod(scalar_coerce(self.dshape, other), self)\n\n\nclass Relational(Arithmetic):\n _dtype = ct.bool_\n\n\nclass Eq(Relational):\n symbol = '=='\n op = operator.eq\n\n\nclass Ne(Relational):\n symbol = '!='\n op = operator.ne\n\n\nclass Ge(Relational):\n symbol = '>='\n op = operator.ge\n\n\nclass Le(Relational):\n symbol = '<='\n op = operator.le\n\n\nclass Gt(Relational):\n symbol = '>'\n op = operator.gt\n\n\nclass Lt(Relational):\n symbol = '<'\n op = operator.lt\n\n\nclass And(Arithmetic):\n symbol = '&'\n op = operator.and_\n _dtype = ct.bool_\n\n\nclass Or(Arithmetic):\n symbol = '|'\n op = operator.or_\n _dtype = ct.bool_\n\n\nclass Not(UnaryOp):\n symbol = '~'\n op = operator.invert\n _dtype = ct.bool_\n def __str__(self):\n return '~%s' % parenthesize(eval_str(self._child))\n\n\ndef _eq(self, other):\n return Eq(self, scalar_coerce(self.dshape, other))\n\ndef _ne(self, other):\n return Ne(self, scalar_coerce(self.dshape, other))\n\ndef _lt(self, other):\n return Lt(self, scalar_coerce(self.dshape, other))\n\ndef _le(self, other):\n return Le(self, scalar_coerce(self.dshape, other))\n\ndef _gt(self, other):\n return Gt(self, scalar_coerce(self.dshape, other))\n\ndef _ge(self, other):\n return 
Ge(self, scalar_coerce(self.dshape, other))\n\ndef _invert(self):\n return Invert(self)\n\ndef _and(self, other):\n return And(self, other)\n\ndef _rand(self, other):\n return And(other, self)\n\ndef _or(self, other):\n return Or(self, other)\n\ndef _ror(self, other):\n return Or(other, self)\n\ndef _invert(self):\n return Not(self)\n\nInvert = Not\nBitAnd = And\nBitOr = Or\n\n\nfrom .expressions import schema_method_list\n\nschema_method_list.extend([\n (isscalar,\n set([_add, _radd, _mul,\n _rmul, _div, _rdiv, _floordiv, _rfloordiv, _sub, _rsub, _pow,\n _rpow, _mod, _rmod, _neg])),\n (isscalar, set([_eq, _ne, _lt, _le, _gt, _ge])),\n (isscalar, set([_or, _ror, _and, _rand, _invert])),\n ])\n",
"path": "blaze/expr/arithmetic.py"
}
] | diff --git a/blaze/expr/arithmetic.py b/blaze/expr/arithmetic.py
index d829b2e64..40a40dd0f 100644
--- a/blaze/expr/arithmetic.py
+++ b/blaze/expr/arithmetic.py
@@ -46,6 +46,8 @@ def _name(self):
return l
if r and not l:
return r
+ if l == r:
+ return l
@property
def _inputs(self):
diff --git a/blaze/expr/tests/test_arithmetic.py b/blaze/expr/tests/test_arithmetic.py
index cf8eb0ed9..50a076605 100644
--- a/blaze/expr/tests/test_arithmetic.py
+++ b/blaze/expr/tests/test_arithmetic.py
@@ -32,6 +32,8 @@ def test_names():
assert Add(y, x)._name != x._name
assert Add(y, x)._name != y._name
+ assert Add(x, x)._name == x._name
+
def test_inputs():
assert (x + y)._inputs == (x, y)
assert (x + 1)._inputs == (x,)
|
OctoPrint__OctoPrint-407 | Support circular beds in g-code visualiser
With delta printers the center of the bed is at the center of the coordinate system, so in the G-code visualizer my objects end up in the corner of the bed. Pronterface has an offset/center setting for this case, and Repetier-Host has just a checkbox, "origin in center of bed" or similar.
It would also be nice to have a round grid, like the one in Pronterface.
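The renderer change in the diff below draws the round grid by clipping each grid line to a chord of the bed circle: a vertical line at offset `x` from the centre ends at `y = ±sqrt(r² - x²)`, and the horizontal lines are the mirrored case. A small Python sketch of that chord computation (illustrative helper, not part of OctoPrint):

```python
import math

def circular_grid_chords(radius, step):
    """Yield ((x1, y1), (x2, y2)) endpoints of grid lines clipped to a circle."""
    x = -radius
    while x <= radius:
        half = math.sqrt(max(radius ** 2 - x ** 2, 0.0))
        yield ((x, -half), (x, half))     # vertical chord at offset x
        yield ((-half, x), (half, x))     # mirrored horizontal chord
        x += step

# e.g. a 100 mm-radius delta bed with a 10 mm grid:
# list(circular_grid_chords(100, 10))
```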
| [
{
"content": "# coding=utf-8\n__author__ = \"Gina Häußge <[email protected]>\"\n__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'\n\nimport sys\nimport os\nimport yaml\nimport logging\nimport re\nimport uuid\n\nAPPNAME=\"OctoPrint\"\n\ninstance = None\n\ndef settings(init=False, configfile=None, basedir=None):\n\tglobal instance\n\tif instance is None:\n\t\tif init:\n\t\t\tinstance = Settings(configfile, basedir)\n\t\telse:\n\t\t\traise ValueError(\"Settings not initialized yet\")\n\treturn instance\n\ndefault_settings = {\n\t\"serial\": {\n\t\t\"port\": None,\n\t\t\"baudrate\": None,\n\t\t\"autoconnect\": False,\n\t\t\"log\": False,\n\t\t\"timeout\": {\n\t\t\t\"detection\": 0.5,\n\t\t\t\"connection\": 2,\n\t\t\t\"communication\": 5,\n\t\t\t\"temperature\": 5,\n\t\t\t\"sdStatus\": 1\n\t\t},\n\t\t\"additionalPorts\": []\n\t},\n\t\"server\": {\n\t\t\"host\": \"0.0.0.0\",\n\t\t\"port\": 5000,\n\t\t\"firstRun\": True,\n\t\t\"baseUrl\": \"\",\n\t\t\"scheme\": \"\"\n\t},\n\t\"webcam\": {\n\t\t\"stream\": None,\n\t\t\"snapshot\": None,\n\t\t\"ffmpeg\": None,\n\t\t\"bitrate\": \"5000k\",\n\t\t\"watermark\": True,\n\t\t\"flipH\": False,\n\t\t\"flipV\": False,\n\t\t\"timelapse\": {\n\t\t\t\"type\": \"off\",\n\t\t\t\"options\": {},\n\t\t\t\"postRoll\": 0\n\t\t}\n\t},\n\t\"gcodeViewer\": {\n\t\t\"enabled\": True,\n\t\t\"mobileSizeThreshold\": 2 * 1024 * 1024, # 2MB\n\t\t\"sizeThreshold\": 20 * 1024 * 1024, # 20MB\n\t},\n\t\"feature\": {\n\t\t\"temperatureGraph\": True,\n\t\t\"waitForStartOnConnect\": False,\n\t\t\"alwaysSendChecksum\": False,\n\t\t\"sdSupport\": True,\n\t\t\"swallowOkAfterResend\": True,\n\t\t\"repetierTargetTemp\": False\n\t},\n\t\"folder\": {\n\t\t\"uploads\": None,\n\t\t\"timelapse\": None,\n\t\t\"timelapse_tmp\": None,\n\t\t\"logs\": None,\n\t\t\"virtualSd\": None\n\t},\n\t\"temperature\": {\n\t\t\"profiles\":\n\t\t\t[\n\t\t\t\t{\"name\": \"ABS\", \"extruder\" : 210, \"bed\" : 100 },\n\t\t\t\t{\"name\": \"PLA\", \"extruder\" : 180, \"bed\" : 60 }\n\t\t\t]\n\t},\n\t\"printerParameters\": {\n\t\t\"movementSpeed\": {\n\t\t\t\"x\": 6000,\n\t\t\t\"y\": 6000,\n\t\t\t\"z\": 200,\n\t\t\t\"e\": 300\n\t\t},\n\t\t\"pauseTriggers\": [],\n\t\t\"invertAxes\": [],\n\t\t\"numExtruders\": 1,\n\t\t\"extruderOffsets\": [\n\t\t\t{\"x\": 0.0, \"y\": 0.0}\n\t\t],\n\t\t\"bedDimensions\": {\n\t\t\t\"x\": 200.0, \"y\": 200.0\n\t\t}\n\t},\n\t\"appearance\": {\n\t\t\"name\": \"\",\n\t\t\"color\": \"default\"\n\t},\n\t\"controls\": [],\n\t\"system\": {\n\t\t\"actions\": []\n\t},\n\t\"accessControl\": {\n\t\t\"enabled\": True,\n\t\t\"userManager\": \"octoprint.users.FilebasedUserManager\",\n\t\t\"userfile\": None,\n\t\t\"autologinLocal\": False,\n\t\t\"localNetworks\": [\"127.0.0.0/8\"],\n\t\t\"autologinAs\": None\n\t},\n\t\"cura\": {\n\t\t\"enabled\": False,\n\t\t\"path\": \"/default/path/to/cura\",\n\t\t\"config\": \"/default/path/to/your/cura/config.ini\"\n\t},\n\t\"events\": {\n\t\t\"systemCommandTrigger\": {\n\t\t\t\"enabled\": False\n\t\t},\n\t\t\"gcodeCommandTrigger\": {\n\t\t\t\"enabled\": False\n\t\t}\n\t},\n\t\"api\": {\n\t\t\"enabled\": False,\n\t\t\"key\": ''.join('%02X' % ord(z) for z in uuid.uuid4().bytes)\n\t},\n\t\"terminalFilters\": [\n\t\t{ \"name\": \"Suppress M105 requests/responses\", \"regex\": \"(Send: M105)|(Recv: ok T\\d*:)\" },\n\t\t{ \"name\": \"Suppress M27 requests/responses\", \"regex\": \"(Send: M27)|(Recv: SD printing byte)\" }\n\t],\n\t\"devel\": {\n\t\t\"stylesheet\": \"css\",\n\t\t\"virtualPrinter\": {\n\t\t\t\"enabled\": 
False,\n\t\t\t\"okAfterResend\": False,\n\t\t\t\"forceChecksum\": False,\n\t\t\t\"okWithLinenumber\": False,\n\t\t\t\"numExtruders\": 1,\n\t\t\t\"includeCurrentToolInTemps\": True,\n\t\t\t\"hasBed\": True,\n\t\t\t\"repetierStyleTargetTemperature\": False\n\t\t}\n\t}\n}\n\nvalid_boolean_trues = [True, \"true\", \"yes\", \"y\", \"1\"]\n\nclass Settings(object):\n\n\tdef __init__(self, configfile=None, basedir=None):\n\t\tself._logger = logging.getLogger(__name__)\n\n\t\tself.settings_dir = None\n\n\t\tself._config = None\n\t\tself._dirty = False\n\n\t\tself._init_settings_dir(basedir)\n\n\t\tif configfile is not None:\n\t\t\tself._configfile = configfile\n\t\telse:\n\t\t\tself._configfile = os.path.join(self.settings_dir, \"config.yaml\")\n\t\tself.load()\n\n\tdef _init_settings_dir(self, basedir):\n\t\tif basedir is not None:\n\t\t\tself.settings_dir = basedir\n\t\telse:\n\t\t\tself.settings_dir = _resolveSettingsDir(APPNAME)\n\n\tdef _getDefaultFolder(self, type):\n\t\tfolder = default_settings[\"folder\"][type]\n\t\tif folder is None:\n\t\t\tfolder = os.path.join(self.settings_dir, type.replace(\"_\", os.path.sep))\n\t\treturn folder\n\n\t#~~ load and save\n\n\tdef load(self):\n\t\tif os.path.exists(self._configfile) and os.path.isfile(self._configfile):\n\t\t\twith open(self._configfile, \"r\") as f:\n\t\t\t\tself._config = yaml.safe_load(f)\n\t\t# chamged from else to handle cases where the file exists, but is empty / 0 bytes\n\t\tif not self._config:\n\t\t\tself._config = {}\n\n\tdef save(self, force=False):\n\t\tif not self._dirty and not force:\n\t\t\treturn\n\n\t\twith open(self._configfile, \"wb\") as configFile:\n\t\t\tyaml.safe_dump(self._config, configFile, default_flow_style=False, indent=\" \", allow_unicode=True)\n\t\t\tself._dirty = False\n\t\tself.load()\n\n\t#~~ getter\n\n\tdef get(self, path, asdict=False):\n\t\tif len(path) == 0:\n\t\t\treturn None\n\n\t\tconfig = self._config\n\t\tdefaults = default_settings\n\n\t\twhile len(path) > 1:\n\t\t\tkey = path.pop(0)\n\t\t\tif key in config.keys() and key in defaults.keys():\n\t\t\t\tconfig = config[key]\n\t\t\t\tdefaults = defaults[key]\n\t\t\telif key in defaults.keys():\n\t\t\t\tconfig = {}\n\t\t\t\tdefaults = defaults[key]\n\t\t\telse:\n\t\t\t\treturn None\n\n\t\tk = path.pop(0)\n\t\tif not isinstance(k, (list, tuple)):\n\t\t\tkeys = [k]\n\t\telse:\n\t\t\tkeys = k\n\n\t\tif asdict:\n\t\t\tresults = {}\n\t\telse:\n\t\t\tresults = []\n\t\tfor key in keys:\n\t\t\tif key in config.keys():\n\t\t\t\tvalue = config[key]\n\t\t\telif key in defaults:\n\t\t\t\tvalue = defaults[key]\n\t\t\telse:\n\t\t\t\tvalue = None\n\n\t\t\tif asdict:\n\t\t\t\tresults[key] = value\n\t\t\telse:\n\t\t\t\tresults.append(value)\n\n\t\tif not isinstance(k, (list, tuple)):\n\t\t\tif asdict:\n\t\t\t\treturn results.values().pop()\n\t\t\telse:\n\t\t\t\treturn results.pop()\n\t\telse:\n\t\t\treturn results\n\n\tdef getInt(self, path):\n\t\tvalue = self.get(path)\n\t\tif value is None:\n\t\t\treturn None\n\n\t\ttry:\n\t\t\treturn int(value)\n\t\texcept ValueError:\n\t\t\tself._logger.warn(\"Could not convert %r to a valid integer when getting option %r\" % (value, path))\n\t\t\treturn None\n\n\tdef getFloat(self, path):\n\t\tvalue = self.get(path)\n\t\tif value is None:\n\t\t\treturn None\n\n\t\ttry:\n\t\t\treturn float(value)\n\t\texcept ValueError:\n\t\t\tself._logger.warn(\"Could not convert %r to a valid integer when getting option %r\" % (value, path))\n\t\t\treturn None\n\n\tdef getBoolean(self, path):\n\t\tvalue = self.get(path)\n\t\tif value is 
None:\n\t\t\treturn None\n\t\tif isinstance(value, bool):\n\t\t\treturn value\n\t\treturn value.lower() in valid_boolean_trues\n\n\tdef getBaseFolder(self, type):\n\t\tif type not in default_settings[\"folder\"].keys():\n\t\t\treturn None\n\n\t\tfolder = self.get([\"folder\", type])\n\t\tif folder is None:\n\t\t\tfolder = self._getDefaultFolder(type)\n\n\t\tif not os.path.isdir(folder):\n\t\t\tos.makedirs(folder)\n\n\t\treturn folder\n\n\tdef getFeedbackControls(self):\n\t\tfeedbackControls = []\n\t\tfor control in self.get([\"controls\"]):\n\t\t\tfeedbackControls.extend(self._getFeedbackControls(control))\n\t\treturn feedbackControls\n\n\tdef _getFeedbackControls(self, control=None):\n\t\tif control[\"type\"] == \"feedback_command\" or control[\"type\"] == \"feedback\":\n\t\t\tpattern = control[\"regex\"]\n\t\t\ttry:\n\t\t\t\tmatcher = re.compile(pattern)\n\t\t\t\treturn [(control[\"name\"], matcher, control[\"template\"])]\n\t\t\texcept:\n\t\t\t\t# invalid regex or something like this, we'll just skip this entry\n\t\t\t\tpass\n\t\telif control[\"type\"] == \"section\":\n\t\t\tresult = []\n\t\t\tfor c in control[\"children\"]:\n\t\t\t\tresult.extend(self._getFeedbackControls(c))\n\t\t\treturn result\n\t\telse:\n\t\t\treturn []\n\n\tdef getPauseTriggers(self):\n\t\ttriggers = {\n\t\t\t\"enable\": [],\n\t\t\t\"disable\": [],\n\t\t\t\"toggle\": []\n\t\t}\n\t\tfor trigger in self.get([\"printerParameters\", \"pauseTriggers\"]):\n\t\t\ttry:\n\t\t\t\tregex = trigger[\"regex\"]\n\t\t\t\ttype = trigger[\"type\"]\n\t\t\t\tif type in triggers.keys():\n\t\t\t\t\t# make sure regex is valid\n\t\t\t\t\tre.compile(regex)\n\t\t\t\t\t# add to type list\n\t\t\t\t\ttriggers[type].append(regex)\n\t\t\texcept:\n\t\t\t\t# invalid regex or something like this, we'll just skip this entry\n\t\t\t\tpass\n\n\t\tresult = {}\n\t\tfor type in triggers.keys():\n\t\t\tif len(triggers[type]) > 0:\n\t\t\t\tresult[type] = re.compile(\"|\".join(map(lambda x: \"(%s)\" % x, triggers[type])))\n\t\treturn result\n\n\t#~~ setter\n\n\tdef set(self, path, value, force=False):\n\t\tif len(path) == 0:\n\t\t\treturn\n\n\t\tconfig = self._config\n\t\tdefaults = default_settings\n\n\t\twhile len(path) > 1:\n\t\t\tkey = path.pop(0)\n\t\t\tif key in config.keys() and key in defaults.keys():\n\t\t\t\tconfig = config[key]\n\t\t\t\tdefaults = defaults[key]\n\t\t\telif key in defaults.keys():\n\t\t\t\tconfig[key] = {}\n\t\t\t\tconfig = config[key]\n\t\t\t\tdefaults = defaults[key]\n\t\t\telse:\n\t\t\t\treturn\n\n\t\tkey = path.pop(0)\n\t\tif not force and key in defaults.keys() and key in config.keys() and defaults[key] == value:\n\t\t\tdel config[key]\n\t\t\tself._dirty = True\n\t\telif force or (not key in config.keys() and defaults[key] != value) or (key in config.keys() and config[key] != value):\n\t\t\tif value is None:\n\t\t\t\tdel config[key]\n\t\t\telse:\n\t\t\t\tconfig[key] = value\n\t\t\tself._dirty = True\n\n\tdef setInt(self, path, value, force=False):\n\t\tif value is None:\n\t\t\tself.set(path, None, force)\n\t\t\treturn\n\n\t\ttry:\n\t\t\tintValue = int(value)\n\t\texcept ValueError:\n\t\t\tself._logger.warn(\"Could not convert %r to a valid integer when setting option %r\" % (value, path))\n\t\t\treturn\n\n\t\tself.set(path, intValue, force)\n\n\tdef setFloat(self, path, value, force=False):\n\t\tif value is None:\n\t\t\tself.set(path, None, force)\n\t\t\treturn\n\n\t\ttry:\n\t\t\tfloatValue = float(value)\n\t\texcept ValueError:\n\t\t\tself._logger.warn(\"Could not convert %r to a valid integer when setting option %r\" % 
(value, path))\n\t\t\treturn\n\n\t\tself.set(path, floatValue, force)\n\n\tdef setBoolean(self, path, value, force=False):\n\t\tif value is None or isinstance(value, bool):\n\t\t\tself.set(path, value, force)\n\t\telif value.lower() in valid_boolean_trues:\n\t\t\tself.set(path, True, force)\n\t\telse:\n\t\t\tself.set(path, False, force)\n\n\tdef setBaseFolder(self, type, path, force=False):\n\t\tif type not in default_settings[\"folder\"].keys():\n\t\t\treturn None\n\n\t\tcurrentPath = self.getBaseFolder(type)\n\t\tdefaultPath = self._getDefaultFolder(type)\n\t\tif (path is None or path == defaultPath) and \"folder\" in self._config.keys() and type in self._config[\"folder\"].keys():\n\t\t\tdel self._config[\"folder\"][type]\n\t\t\tif not self._config[\"folder\"]:\n\t\t\t\tdel self._config[\"folder\"]\n\t\t\tself._dirty = True\n\t\telif (path != currentPath and path != defaultPath) or force:\n\t\t\tif not \"folder\" in self._config.keys():\n\t\t\t\tself._config[\"folder\"] = {}\n\t\t\tself._config[\"folder\"][type] = path\n\t\t\tself._dirty = True\n\ndef _resolveSettingsDir(applicationName):\n\t# taken from http://stackoverflow.com/questions/1084697/how-do-i-store-desktop-application-data-in-a-cross-platform-way-for-python\n\tif sys.platform == \"darwin\":\n\t\tfrom AppKit import NSSearchPathForDirectoriesInDomains\n\t\t# http://developer.apple.com/DOCUMENTATION/Cocoa/Reference/Foundation/Miscellaneous/Foundation_Functions/Reference/reference.html#//apple_ref/c/func/NSSearchPathForDirectoriesInDomains\n\t\t# NSApplicationSupportDirectory = 14\n\t\t# NSUserDomainMask = 1\n\t\t# True for expanding the tilde into a fully qualified path\n\t\treturn os.path.join(NSSearchPathForDirectoriesInDomains(14, 1, True)[0], applicationName)\n\telif sys.platform == \"win32\":\n\t\treturn os.path.join(os.environ[\"APPDATA\"], applicationName)\n\telse:\n\t\treturn os.path.expanduser(os.path.join(\"~\", \".\" + applicationName.lower()))\n",
"path": "src/octoprint/settings.py"
}
] | [
{
"content": "# coding=utf-8\n__author__ = \"Gina Häußge <[email protected]>\"\n__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'\n\nimport sys\nimport os\nimport yaml\nimport logging\nimport re\nimport uuid\n\nAPPNAME=\"OctoPrint\"\n\ninstance = None\n\ndef settings(init=False, configfile=None, basedir=None):\n\tglobal instance\n\tif instance is None:\n\t\tif init:\n\t\t\tinstance = Settings(configfile, basedir)\n\t\telse:\n\t\t\traise ValueError(\"Settings not initialized yet\")\n\treturn instance\n\ndefault_settings = {\n\t\"serial\": {\n\t\t\"port\": None,\n\t\t\"baudrate\": None,\n\t\t\"autoconnect\": False,\n\t\t\"log\": False,\n\t\t\"timeout\": {\n\t\t\t\"detection\": 0.5,\n\t\t\t\"connection\": 2,\n\t\t\t\"communication\": 5\n\t\t},\n\t\t\"additionalPorts\": []\n\t},\n\t\"server\": {\n\t\t\"host\": \"0.0.0.0\",\n\t\t\"port\": 5000,\n\t\t\"firstRun\": True,\n\t\t\"baseUrl\": \"\",\n\t\t\"scheme\": \"\"\n\t},\n\t\"webcam\": {\n\t\t\"stream\": None,\n\t\t\"snapshot\": None,\n\t\t\"ffmpeg\": None,\n\t\t\"bitrate\": \"5000k\",\n\t\t\"watermark\": True,\n\t\t\"flipH\": False,\n\t\t\"flipV\": False,\n\t\t\"timelapse\": {\n\t\t\t\"type\": \"off\",\n\t\t\t\"options\": {},\n\t\t\t\"postRoll\": 0\n\t\t}\n\t},\n\t\"gcodeViewer\": {\n\t\t\"enabled\": True,\n\t\t\"mobileSizeThreshold\": 2 * 1024 * 1024, # 2MB\n\t\t\"sizeThreshold\": 20 * 1024 * 1024, # 20MB\n\t},\n\t\"feature\": {\n\t\t\"temperatureGraph\": True,\n\t\t\"waitForStartOnConnect\": False,\n\t\t\"alwaysSendChecksum\": False,\n\t\t\"sdSupport\": True,\n\t\t\"swallowOkAfterResend\": True,\n\t\t\"repetierTargetTemp\": False\n\t},\n\t\"folder\": {\n\t\t\"uploads\": None,\n\t\t\"timelapse\": None,\n\t\t\"timelapse_tmp\": None,\n\t\t\"logs\": None,\n\t\t\"virtualSd\": None\n\t},\n\t\"temperature\": {\n\t\t\"profiles\":\n\t\t\t[\n\t\t\t\t{\"name\": \"ABS\", \"extruder\" : 210, \"bed\" : 100 },\n\t\t\t\t{\"name\": \"PLA\", \"extruder\" : 180, \"bed\" : 60 }\n\t\t\t]\n\t},\n\t\"printerParameters\": {\n\t\t\"movementSpeed\": {\n\t\t\t\"x\": 6000,\n\t\t\t\"y\": 6000,\n\t\t\t\"z\": 200,\n\t\t\t\"e\": 300\n\t\t},\n\t\t\"pauseTriggers\": [],\n\t\t\"invertAxes\": [],\n\t\t\"numExtruders\": 1,\n\t\t\"extruderOffsets\": [\n\t\t\t{\"x\": 0.0, \"y\": 0.0}\n\t\t],\n\t\t\"bedDimensions\": {\n\t\t\t\"x\": 200.0, \"y\": 200.0, \"r\": 100\n\t\t}\n\t},\n\t\"appearance\": {\n\t\t\"name\": \"\",\n\t\t\"color\": \"default\"\n\t},\n\t\"controls\": [],\n\t\"system\": {\n\t\t\"actions\": []\n\t},\n\t\"accessControl\": {\n\t\t\"enabled\": True,\n\t\t\"userManager\": \"octoprint.users.FilebasedUserManager\",\n\t\t\"userfile\": None,\n\t\t\"autologinLocal\": False,\n\t\t\"localNetworks\": [\"127.0.0.0/8\"],\n\t\t\"autologinAs\": None\n\t},\n\t\"cura\": {\n\t\t\"enabled\": False,\n\t\t\"path\": \"/default/path/to/cura\",\n\t\t\"config\": \"/default/path/to/your/cura/config.ini\"\n\t},\n\t\"events\": {\n\t\t\"systemCommandTrigger\": {\n\t\t\t\"enabled\": False\n\t\t},\n\t\t\"gcodeCommandTrigger\": {\n\t\t\t\"enabled\": False\n\t\t}\n\t},\n\t\"api\": {\n\t\t\"enabled\": False,\n\t\t\"key\": ''.join('%02X' % ord(z) for z in uuid.uuid4().bytes)\n\t},\n\t\"terminalFilters\": [\n\t\t{ \"name\": \"Suppress M105 requests/responses\", \"regex\": \"(Send: M105)|(Recv: ok T\\d*:)\" },\n\t\t{ \"name\": \"Suppress M27 requests/responses\", \"regex\": \"(Send: M27)|(Recv: SD printing byte)\" }\n\t],\n\t\"devel\": {\n\t\t\"stylesheet\": \"css\",\n\t\t\"virtualPrinter\": {\n\t\t\t\"enabled\": False,\n\t\t\t\"okAfterResend\": 
False,\n\t\t\t\"forceChecksum\": False,\n\t\t\t\"okWithLinenumber\": False,\n\t\t\t\"numExtruders\": 1,\n\t\t\t\"includeCurrentToolInTemps\": True,\n\t\t\t\"hasBed\": True\n\t\t}\n\t}\n}\n\nvalid_boolean_trues = [True, \"true\", \"yes\", \"y\", \"1\"]\n\nclass Settings(object):\n\n\tdef __init__(self, configfile=None, basedir=None):\n\t\tself._logger = logging.getLogger(__name__)\n\n\t\tself.settings_dir = None\n\n\t\tself._config = None\n\t\tself._dirty = False\n\n\t\tself._init_settings_dir(basedir)\n\n\t\tif configfile is not None:\n\t\t\tself._configfile = configfile\n\t\telse:\n\t\t\tself._configfile = os.path.join(self.settings_dir, \"config.yaml\")\n\t\tself.load()\n\n\tdef _init_settings_dir(self, basedir):\n\t\tif basedir is not None:\n\t\t\tself.settings_dir = basedir\n\t\telse:\n\t\t\tself.settings_dir = _resolveSettingsDir(APPNAME)\n\n\tdef _getDefaultFolder(self, type):\n\t\tfolder = default_settings[\"folder\"][type]\n\t\tif folder is None:\n\t\t\tfolder = os.path.join(self.settings_dir, type.replace(\"_\", os.path.sep))\n\t\treturn folder\n\n\t#~~ load and save\n\n\tdef load(self):\n\t\tif os.path.exists(self._configfile) and os.path.isfile(self._configfile):\n\t\t\twith open(self._configfile, \"r\") as f:\n\t\t\t\tself._config = yaml.safe_load(f)\n\t\t# chamged from else to handle cases where the file exists, but is empty / 0 bytes\n\t\tif not self._config:\n\t\t\tself._config = {}\n\n\tdef save(self, force=False):\n\t\tif not self._dirty and not force:\n\t\t\treturn\n\n\t\twith open(self._configfile, \"wb\") as configFile:\n\t\t\tyaml.safe_dump(self._config, configFile, default_flow_style=False, indent=\" \", allow_unicode=True)\n\t\t\tself._dirty = False\n\t\tself.load()\n\n\t#~~ getter\n\n\tdef get(self, path, asdict=False):\n\t\tif len(path) == 0:\n\t\t\treturn None\n\n\t\tconfig = self._config\n\t\tdefaults = default_settings\n\n\t\twhile len(path) > 1:\n\t\t\tkey = path.pop(0)\n\t\t\tif key in config.keys() and key in defaults.keys():\n\t\t\t\tconfig = config[key]\n\t\t\t\tdefaults = defaults[key]\n\t\t\telif key in defaults.keys():\n\t\t\t\tconfig = {}\n\t\t\t\tdefaults = defaults[key]\n\t\t\telse:\n\t\t\t\treturn None\n\n\t\tk = path.pop(0)\n\t\tif not isinstance(k, (list, tuple)):\n\t\t\tkeys = [k]\n\t\telse:\n\t\t\tkeys = k\n\n\t\tif asdict:\n\t\t\tresults = {}\n\t\telse:\n\t\t\tresults = []\n\t\tfor key in keys:\n\t\t\tif key in config.keys():\n\t\t\t\tvalue = config[key]\n\t\t\telif key in defaults:\n\t\t\t\tvalue = defaults[key]\n\t\t\telse:\n\t\t\t\tvalue = None\n\n\t\t\tif asdict:\n\t\t\t\tresults[key] = value\n\t\t\telse:\n\t\t\t\tresults.append(value)\n\n\t\tif not isinstance(k, (list, tuple)):\n\t\t\tif asdict:\n\t\t\t\treturn results.values().pop()\n\t\t\telse:\n\t\t\t\treturn results.pop()\n\t\telse:\n\t\t\treturn results\n\n\tdef getInt(self, path):\n\t\tvalue = self.get(path)\n\t\tif value is None:\n\t\t\treturn None\n\n\t\ttry:\n\t\t\treturn int(value)\n\t\texcept ValueError:\n\t\t\tself._logger.warn(\"Could not convert %r to a valid integer when getting option %r\" % (value, path))\n\t\t\treturn None\n\n\tdef getFloat(self, path):\n\t\tvalue = self.get(path)\n\t\tif value is None:\n\t\t\treturn None\n\n\t\ttry:\n\t\t\treturn float(value)\n\t\texcept ValueError:\n\t\t\tself._logger.warn(\"Could not convert %r to a valid integer when getting option %r\" % (value, path))\n\t\t\treturn None\n\n\tdef getBoolean(self, path):\n\t\tvalue = self.get(path)\n\t\tif value is None:\n\t\t\treturn None\n\t\tif isinstance(value, bool):\n\t\t\treturn 
value\n\t\treturn value.lower() in valid_boolean_trues\n\n\tdef getBaseFolder(self, type):\n\t\tif type not in default_settings[\"folder\"].keys():\n\t\t\treturn None\n\n\t\tfolder = self.get([\"folder\", type])\n\t\tif folder is None:\n\t\t\tfolder = self._getDefaultFolder(type)\n\n\t\tif not os.path.isdir(folder):\n\t\t\tos.makedirs(folder)\n\n\t\treturn folder\n\n\tdef getFeedbackControls(self):\n\t\tfeedbackControls = []\n\t\tfor control in self.get([\"controls\"]):\n\t\t\tfeedbackControls.extend(self._getFeedbackControls(control))\n\t\treturn feedbackControls\n\n\tdef _getFeedbackControls(self, control=None):\n\t\tif control[\"type\"] == \"feedback_command\" or control[\"type\"] == \"feedback\":\n\t\t\tpattern = control[\"regex\"]\n\t\t\ttry:\n\t\t\t\tmatcher = re.compile(pattern)\n\t\t\t\treturn [(control[\"name\"], matcher, control[\"template\"])]\n\t\t\texcept:\n\t\t\t\t# invalid regex or something like this, we'll just skip this entry\n\t\t\t\tpass\n\t\telif control[\"type\"] == \"section\":\n\t\t\tresult = []\n\t\t\tfor c in control[\"children\"]:\n\t\t\t\tresult.extend(self._getFeedbackControls(c))\n\t\t\treturn result\n\t\telse:\n\t\t\treturn []\n\n\tdef getPauseTriggers(self):\n\t\ttriggers = {\n\t\t\t\"enable\": [],\n\t\t\t\"disable\": [],\n\t\t\t\"toggle\": []\n\t\t}\n\t\tfor trigger in self.get([\"printerParameters\", \"pauseTriggers\"]):\n\t\t\ttry:\n\t\t\t\tregex = trigger[\"regex\"]\n\t\t\t\ttype = trigger[\"type\"]\n\t\t\t\tif type in triggers.keys():\n\t\t\t\t\t# make sure regex is valid\n\t\t\t\t\tre.compile(regex)\n\t\t\t\t\t# add to type list\n\t\t\t\t\ttriggers[type].append(regex)\n\t\t\texcept:\n\t\t\t\t# invalid regex or something like this, we'll just skip this entry\n\t\t\t\tpass\n\n\t\tresult = {}\n\t\tfor type in triggers.keys():\n\t\t\tif len(triggers[type]) > 0:\n\t\t\t\tresult[type] = re.compile(\"|\".join(map(lambda x: \"(%s)\" % x, triggers[type])))\n\t\treturn result\n\n\t#~~ setter\n\n\tdef set(self, path, value, force=False):\n\t\tif len(path) == 0:\n\t\t\treturn\n\n\t\tconfig = self._config\n\t\tdefaults = default_settings\n\n\t\twhile len(path) > 1:\n\t\t\tkey = path.pop(0)\n\t\t\tif key in config.keys() and key in defaults.keys():\n\t\t\t\tconfig = config[key]\n\t\t\t\tdefaults = defaults[key]\n\t\t\telif key in defaults.keys():\n\t\t\t\tconfig[key] = {}\n\t\t\t\tconfig = config[key]\n\t\t\t\tdefaults = defaults[key]\n\t\t\telse:\n\t\t\t\treturn\n\n\t\tkey = path.pop(0)\n\t\tif not force and key in defaults.keys() and key in config.keys() and defaults[key] == value:\n\t\t\tdel config[key]\n\t\t\tself._dirty = True\n\t\telif force or (not key in config.keys() and defaults[key] != value) or (key in config.keys() and config[key] != value):\n\t\t\tif value is None:\n\t\t\t\tdel config[key]\n\t\t\telse:\n\t\t\t\tconfig[key] = value\n\t\t\tself._dirty = True\n\n\tdef setInt(self, path, value, force=False):\n\t\tif value is None:\n\t\t\tself.set(path, None, force)\n\t\t\treturn\n\n\t\ttry:\n\t\t\tintValue = int(value)\n\t\texcept ValueError:\n\t\t\tself._logger.warn(\"Could not convert %r to a valid integer when setting option %r\" % (value, path))\n\t\t\treturn\n\n\t\tself.set(path, intValue, force)\n\n\tdef setFloat(self, path, value, force=False):\n\t\tif value is None:\n\t\t\tself.set(path, None, force)\n\t\t\treturn\n\n\t\ttry:\n\t\t\tfloatValue = float(value)\n\t\texcept ValueError:\n\t\t\tself._logger.warn(\"Could not convert %r to a valid integer when setting option %r\" % (value, path))\n\t\t\treturn\n\n\t\tself.set(path, floatValue, 
force)\n\n\tdef setBoolean(self, path, value, force=False):\n\t\tif value is None or isinstance(value, bool):\n\t\t\tself.set(path, value, force)\n\t\telif value.lower() in valid_boolean_trues:\n\t\t\tself.set(path, True, force)\n\t\telse:\n\t\t\tself.set(path, False, force)\n\n\tdef setBaseFolder(self, type, path, force=False):\n\t\tif type not in default_settings[\"folder\"].keys():\n\t\t\treturn None\n\n\t\tcurrentPath = self.getBaseFolder(type)\n\t\tdefaultPath = self._getDefaultFolder(type)\n\t\tif (path is None or path == defaultPath) and \"folder\" in self._config.keys() and type in self._config[\"folder\"].keys():\n\t\t\tdel self._config[\"folder\"][type]\n\t\t\tif not self._config[\"folder\"]:\n\t\t\t\tdel self._config[\"folder\"]\n\t\t\tself._dirty = True\n\t\telif (path != currentPath and path != defaultPath) or force:\n\t\t\tif not \"folder\" in self._config.keys():\n\t\t\t\tself._config[\"folder\"] = {}\n\t\t\tself._config[\"folder\"][type] = path\n\t\t\tself._dirty = True\n\ndef _resolveSettingsDir(applicationName):\n\t# taken from http://stackoverflow.com/questions/1084697/how-do-i-store-desktop-application-data-in-a-cross-platform-way-for-python\n\tif sys.platform == \"darwin\":\n\t\tfrom AppKit import NSSearchPathForDirectoriesInDomains\n\t\t# http://developer.apple.com/DOCUMENTATION/Cocoa/Reference/Foundation/Miscellaneous/Foundation_Functions/Reference/reference.html#//apple_ref/c/func/NSSearchPathForDirectoriesInDomains\n\t\t# NSApplicationSupportDirectory = 14\n\t\t# NSUserDomainMask = 1\n\t\t# True for expanding the tilde into a fully qualified path\n\t\treturn os.path.join(NSSearchPathForDirectoriesInDomains(14, 1, True)[0], applicationName)\n\telif sys.platform == \"win32\":\n\t\treturn os.path.join(os.environ[\"APPDATA\"], applicationName)\n\telse:\n\t\treturn os.path.expanduser(os.path.join(\"~\", \".\" + applicationName.lower()))\n",
"path": "src/octoprint/settings.py"
}
] | diff --git a/src/octoprint/settings.py b/src/octoprint/settings.py
index 3c796e678b..8c2684bf0f 100644
--- a/src/octoprint/settings.py
+++ b/src/octoprint/settings.py
@@ -97,7 +97,7 @@ def settings(init=False, configfile=None, basedir=None):
{"x": 0.0, "y": 0.0}
],
"bedDimensions": {
- "x": 200.0, "y": 200.0
+ "x": 200.0, "y": 200.0, "r": 100
}
},
"appearance": {
diff --git a/src/octoprint/static/gcodeviewer/js/renderer.js b/src/octoprint/static/gcodeviewer/js/renderer.js
index ac5a53d2ce..7fe9c032bc 100644
--- a/src/octoprint/static/gcodeviewer/js/renderer.js
+++ b/src/octoprint/static/gcodeviewer/js/renderer.js
@@ -186,37 +186,68 @@ GCODE.renderer = (function(){
ctx.translate(offsetBedX, offsetBedY);
- ctx.beginPath();
- var width = renderOptions["bed"]["x"] * zoomFactor;
- var height = renderOptions["bed"]["y"] * zoomFactor;
- var origin = {
- x: 0,
- y: -1 * renderOptions["bed"]["y"] * zoomFactor
- };
- ctx.strokeStyle = renderOptions["colorGrid"];
- ctx.fillStyle = "#ffffff";
- ctx.lineWidth = 2;
- ctx.rect(origin.x, origin.y, width, height);
- ctx.fill();
- ctx.stroke();
-
var i;
- ctx.strokeStyle = renderOptions["colorGrid"];
- ctx.lineWidth = 1;
- ctx.beginPath();
- for (i = 0; i <= renderOptions["bed"]["x"]; i += gridStep) {
- ctx.moveTo(i * zoomFactor, 0);
- ctx.lineTo(i * zoomFactor, -1 * renderOptions["bed"]["y"] * zoomFactor);
- }
- ctx.stroke();
+ if(renderOptions["bed"]["circular"]) {
+ ctx.strokeStyle = renderOptions["colorGrid"];
+ ctx.fillStyle = "#ffffff";
+ ctx.lineWidth = 2;
+
+ ctx.beginPath();
+ ctx.arc(0, 0, renderOptions["bed"]["r"] * zoomFactor, 0, Math.PI * 2, true);
+ ctx.fill();
+ ctx.stroke();
+
+ ctx.strokeStyle = renderOptions["colorGrid"];
+ ctx.lineWidth = 1;
+
+ ctx.beginPath();
+ for (i = -renderOptions["bed"]["r"]; i <= renderOptions["bed"]["r"]; i += gridStep) {
+ var x = i;
+ var y = Math.sqrt(Math.pow(renderOptions["bed"]["r"], 2) - Math.pow(x, 2));
+
+ ctx.moveTo(x * zoomFactor, y * zoomFactor);
+ ctx.lineTo(x * zoomFactor, -1 * y * zoomFactor);
+
+ ctx.moveTo(y * zoomFactor, x * zoomFactor);
+ ctx.lineTo(-1 * y * zoomFactor, x * zoomFactor);
+ }
+ ctx.stroke();
+ } else {
+ var width = renderOptions["bed"]["x"] * zoomFactor;
+ var height = renderOptions["bed"]["y"] * zoomFactor;
+ var origin = {
+ x: 0,
+ y: -1 * renderOptions["bed"]["y"] * zoomFactor
+ };
- ctx.beginPath();
- for (i = 0; i <= renderOptions["bed"]["y"]; i += gridStep) {
- ctx.moveTo(0, -1 * i * zoomFactor);
- ctx.lineTo(renderOptions["bed"]["x"] * zoomFactor, -1 * i * zoomFactor);
+ ctx.beginPath();
+ ctx.strokeStyle = renderOptions["colorGrid"];
+ ctx.fillStyle = "#ffffff";
+ ctx.lineWidth = 2;
+
+ ctx.rect(origin.x, origin.y, width, height);
+
+ ctx.fill();
+ ctx.stroke();
+
+ ctx.strokeStyle = renderOptions["colorGrid"];
+ ctx.lineWidth = 1;
+
+ ctx.beginPath();
+ for (i = 0; i <= renderOptions["bed"]["x"]; i += gridStep) {
+ ctx.moveTo(i * zoomFactor, 0);
+ ctx.lineTo(i * zoomFactor, -1 * renderOptions["bed"]["y"] * zoomFactor);
+ }
+ ctx.stroke();
+
+ ctx.beginPath();
+ for (i = 0; i <= renderOptions["bed"]["y"]; i += gridStep) {
+ ctx.moveTo(0, -1 * i * zoomFactor);
+ ctx.lineTo(renderOptions["bed"]["x"] * zoomFactor, -1 * i * zoomFactor);
+ }
+ ctx.stroke();
}
- ctx.stroke();
ctx.translate(-offsetBedX, -offsetBedY);
};
@@ -358,6 +389,12 @@ GCODE.renderer = (function(){
offsetModelY = -1 * (renderOptions["bed"]["y"] / 2 - (mdlInfo.min.y + mdlInfo.modelSize.y / 2)) * zoomFactor;
offsetBedX = -1 * (renderOptions["bed"]["x"] / 2 - (mdlInfo.min.x + mdlInfo.modelSize.x / 2)) * zoomFactor;
offsetBedY = (renderOptions["bed"]["y"] / 2 - (mdlInfo.min.y + mdlInfo.modelSize.y / 2)) * zoomFactor;
+ } else if (renderOptions["bed"]["circular"]) {
+ var canvasCenter = ctx.transformedPoint(canvas.width / 2, canvas.height / 2);
+ offsetModelX = canvasCenter.x;
+ offsetModelY = canvasCenter.y;
+ offsetBedX = 0;
+ offsetBedY = 0;
} else {
offsetModelX = 0;
offsetModelY = 0;
@@ -397,8 +434,13 @@ GCODE.renderer = (function(){
init: function(){
startCanvas();
initialized = true;
- zoomFactor = Math.min((canvas.width - 10) / renderOptions["bed"]["x"], (canvas.height - 10) / renderOptions["bed"]["y"]);
- ctx.translate((canvas.width - renderOptions["bed"]["x"] * zoomFactor) / 2, renderOptions["bed"]["y"] * zoomFactor + (canvas.height - renderOptions["bed"]["y"] * zoomFactor) / 2);
+ var bedWidth = renderOptions["bed"]["x"];
+ var bedHeight = renderOptions["bed"]["y"];;
+ if(renderOptions["bed"]["circular"]) {
+ bedWidth = bedHeight = renderOptions["bed"]["r"] *2;
+ }
+ zoomFactor = Math.min((canvas.width - 10) / bedWidth, (canvas.height - 10) / bedHeight);
+ ctx.translate((canvas.width - bedWidth * zoomFactor) / 2, bedHeight * zoomFactor + (canvas.height - bedHeight * zoomFactor) / 2);
offsetModelX = 0;
offsetModelY = 0;
diff --git a/src/octoprint/static/js/app/viewmodels/settings.js b/src/octoprint/static/js/app/viewmodels/settings.js
index eef8d58473..8037974302 100644
--- a/src/octoprint/static/js/app/viewmodels/settings.js
+++ b/src/octoprint/static/js/app/viewmodels/settings.js
@@ -68,16 +68,22 @@ function SettingsViewModel(loginStateViewModel, usersViewModel) {
self.printer_bedDimensionX = ko.observable(undefined);
self.printer_bedDimensionY = ko.observable(undefined);
+ self.printer_bedDimensionR = ko.observable(undefined);
+ self.printer_bedCircular = ko.observable(undefined);
self.printer_bedDimensions = ko.computed({
read: function () {
return {
x: parseFloat(self.printer_bedDimensionX()),
- y: parseFloat(self.printer_bedDimensionY())
+ y: parseFloat(self.printer_bedDimensionY()),
+ r: parseFloat(self.printer_bedDimensionR()),
+ circular: self.printer_bedCircular()
};
},
write: function(value) {
self.printer_bedDimensionX(value.x);
self.printer_bedDimensionY(value.y);
+ self.printer_bedDimensionR(value.r);
+ self.printer_bedCircular(value.circular);
},
owner: self
});
diff --git a/src/octoprint/templates/settings.jinja2 b/src/octoprint/templates/settings.jinja2
index 4c29e6b179..1a3d8ace8b 100644
--- a/src/octoprint/templates/settings.jinja2
+++ b/src/octoprint/templates/settings.jinja2
@@ -148,7 +148,7 @@
</div>
<div class="control-group">
<label class="control-label" for="settings-bedSize">Bed Size</label>
- <div class="controls form-inline">
+ <div class="controls form-inline" data-bind="ifnot: printer_bedCircular">
<label>X:</label>
<div class="input-append">
<input type="number" step="0.01" class="input-mini text-right" data-bind="value: printer_bedDimensionX" id="settings-bedX">
@@ -160,6 +160,18 @@
<span class="add-on">mm</span>
</div>
</div>
+ <div class="controls form-inline" data-bind="if: printer_bedCircular">
+ <label>Radius:</label>
+ <div class="input-append">
+ <input type="number" step="0.01" class="input-mini text-right" data-bind="value: printer_bedDimensionR" id="settings-bedR">
+ <span class="add-on">mm</span>
+ </div>
+ </div>
+ <div class="controls form-inline">
+ <label class="checkbox">
+ <input type="checkbox" data-bind="checked: printer_bedCircular" id="settings-bedCircular">Circular
+ </label>
+ </div>
</div>
</form>
</div>
|
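The renderer patch above derives each grid line on a circular bed from the chord length at its offset: a line at offset `i` only spans ±sqrt(r² − i²) on the perpendicular axis. Below is a minimal Python sketch of that geometry for reference — the function name and return format are my own and are not part of OctoPrint.

```python
import math


def circular_bed_grid(radius, grid_step=10.0):
    """Compute grid segments for a circular print bed centred on the origin.

    A grid line at offset ``i`` only spans the chord of the circle, i.e. from
    -sqrt(r^2 - i^2) to +sqrt(r^2 - i^2) on the perpendicular axis, which is
    the same per-step computation the renderer patch performs.
    """
    vertical, horizontal = [], []
    i = -radius
    while i <= radius:
        half_chord = math.sqrt(max(radius ** 2 - i ** 2, 0.0))
        vertical.append(((i, -half_chord), (i, half_chord)))    # line at x = i
        horizontal.append(((-half_chord, i), (half_chord, i)))  # line at y = i
        i += grid_step
    return vertical, horizontal


if __name__ == "__main__":
    v, h = circular_bed_grid(100.0, grid_step=20.0)
    print(len(v), "vertical and", len(h), "horizontal grid segments")
```

Drawing both the `(i, ±half_chord)` segments and their mirrored counterparts is what produces the square grid clipped to the circular bed outline in the viewer.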
NVIDIA__NVFlare-1314 | [BUG] Command "nvflare" not found
Unfortunately I can't run nvflare on Ubuntu: the command `nvflare simulator -h` is reported as not found. The same happens with the Docker solution; inside the container it also shows "command not found". As a result I am unable to run local experiments.
I tried the MWE from https://nvflare.readthedocs.io/en/2.2.1/getting_started.html
Used OS: Ubuntu 18.04.5 LTS
| [
{
"content": "# Copyright (c) 2021, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport sphinx_rtd_theme\nimport os\nimport sys\nfrom sphinx.domains.python import PythonDomain\nimport subprocess\n\n\nclass PatchedPythonDomain(PythonDomain):\n def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):\n if \"refspecific\" in node:\n del node[\"refspecific\"]\n return super(PatchedPythonDomain, self).resolve_xref(env, fromdocname, builder, typ, target, node, contnode)\n\n\nsys.path.insert(0, os.path.abspath(\"..\"))\nprint(sys.path)\n\n# -- Project information -----------------------------------------------------\n\nproject = \"NVIDIA FLARE\"\ncopyright = \"2022, NVIDIA\"\nauthor = \"NVIDIA\"\n\n# The full version, including alpha/beta/rc tags\nrelease = \"2.2.1\"\nversion = \"2.2.1\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\n# Add napoleon to the extensions list\n# source_parsers = {'.md': CommonMarkParser}\n\ntemplates_path = [\"templates\"]\n\nsource_suffix = {\n \".rst\": \"restructuredtext\",\n \".txt\": \"restructuredtext\",\n \".md\": \"markdown\",\n}\n\nextensions = [\n \"recommonmark\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.autosectionlabel\",\n]\n\nautoclass_content = \"both\"\nadd_module_names = False\nautosectionlabel_prefix_document = True\n\n# Add any paths that contain templates here, relative to this directory.\n# templates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\nhtml_theme_options = {\n \"collapse_navigation\": True,\n \"display_version\": True,\n \"navigation_depth\": 5,\n \"sticky_navigation\": True, # Set to False to disable the sticky nav while scrolling.\n # 'logo_only': True, # if we have a html_logo below, this shows /only/ the logo with no title text\n}\nhtml_scaled_image_link = False\nhtml_show_sourcelink = True\nhtml_favicon = \"favicon.ico\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n\ndef generate_apidocs(*args):\n \"\"\"Generate API docs automatically by trawling the available modules\"\"\"\n module_path = os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\", \"nvflare\"))\n output_path = os.path.abspath(os.path.join(os.path.dirname(__file__), \"apidocs\"))\n print(f\"output_path {output_path}\")\n print(f\"module_path {module_path}\")\n subprocess.check_call(\n [sys.executable, \"-m\", \"sphinx.ext.apidoc\", \"-f\", \"-e\"]\n + [\"-o\", output_path]\n + [module_path]\n + [os.path.join(module_path, p) for p in exclude_patterns]\n )\n\n\ndef setup(app):\n app.connect(\"builder-inited\", generate_apidocs)\n app.add_domain(PatchedPythonDomain, override=True)\n app.add_css_file(\"css/additions.css\")\n",
"path": "docs/conf.py"
}
] | [
{
"content": "# Copyright (c) 2021, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport sphinx_rtd_theme\nimport os\nimport sys\nfrom sphinx.domains.python import PythonDomain\nimport subprocess\n\n\nclass PatchedPythonDomain(PythonDomain):\n def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):\n if \"refspecific\" in node:\n del node[\"refspecific\"]\n return super(PatchedPythonDomain, self).resolve_xref(env, fromdocname, builder, typ, target, node, contnode)\n\n\nsys.path.insert(0, os.path.abspath(\"..\"))\nprint(sys.path)\n\n# -- Project information -----------------------------------------------------\n\nproject = \"NVIDIA FLARE\"\ncopyright = \"2022, NVIDIA\"\nauthor = \"NVIDIA\"\n\n# The full version, including alpha/beta/rc tags\nrelease = \"2.2.1\"\nversion = \"2.2.1\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\n# Add napoleon to the extensions list\n# source_parsers = {'.md': CommonMarkParser}\n\ntemplates_path = [\"templates\"]\n\nsource_suffix = {\n \".rst\": \"restructuredtext\",\n \".txt\": \"restructuredtext\",\n \".md\": \"markdown\",\n}\n\nextensions = [\n \"recommonmark\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.autosectionlabel\",\n \"sphinx_copybutton\",\n]\n\nautoclass_content = \"both\"\nadd_module_names = False\nautosectionlabel_prefix_document = True\n\n# Add any paths that contain templates here, relative to this directory.\n# templates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\nhtml_theme_options = {\n \"collapse_navigation\": True,\n \"display_version\": True,\n \"navigation_depth\": 5,\n \"sticky_navigation\": True, # Set to False to disable the sticky nav while scrolling.\n # 'logo_only': True, # if we have a html_logo below, this shows /only/ the logo with no title text\n}\nhtml_scaled_image_link = False\nhtml_show_sourcelink = True\nhtml_favicon = \"favicon.ico\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n\ndef generate_apidocs(*args):\n \"\"\"Generate API docs automatically by trawling the available modules\"\"\"\n module_path = os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\", \"nvflare\"))\n output_path = os.path.abspath(os.path.join(os.path.dirname(__file__), \"apidocs\"))\n print(f\"output_path {output_path}\")\n print(f\"module_path {module_path}\")\n subprocess.check_call(\n [sys.executable, \"-m\", \"sphinx.ext.apidoc\", \"-f\", \"-e\"]\n + [\"-o\", output_path]\n + [module_path]\n + [os.path.join(module_path, p) for p in exclude_patterns]\n )\n\n\ndef setup(app):\n app.connect(\"builder-inited\", generate_apidocs)\n app.add_domain(PatchedPythonDomain, override=True)\n app.add_css_file(\"css/additions.css\")\n",
"path": "docs/conf.py"
}
] | diff --git a/docs/conf.py b/docs/conf.py
index 8c2b6cb40f..e3989174fd 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -76,6 +76,7 @@ def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
"sphinx.ext.autodoc",
"sphinx.ext.viewcode",
"sphinx.ext.autosectionlabel",
+ "sphinx_copybutton",
]
autoclass_content = "both"
diff --git a/docs/example_applications_algorithms.rst b/docs/example_applications_algorithms.rst
index 0fd523e450..633bdf01f8 100644
--- a/docs/example_applications_algorithms.rst
+++ b/docs/example_applications_algorithms.rst
@@ -41,15 +41,18 @@ The following quickstart guides walk you through some of these examples:
* :ref:`Hello TensorFlow <hello_tf2>` - Example image classifier using FedAvg and TensorFlow as the deep learning training frameworks
2. **FL algorithms**
+
* `Federated Learning with CIFAR-10 (GitHub) <https://github.com/NVIDIA/NVFlare/tree/main/examples/cifar10>`_ - Includes examples of using FedAvg, FedProx, FedOpt, SCAFFOLD, homomorphic encryption, and streaming of TensorBoard metrics to the server during training
* `Federated XGBoost (GitHub) <https://github.com/NVIDIA/NVFlare/tree/main/examples/xgboost>`_ - Includes examples of histogram-based and tree-based algorithms. Tree-based algorithms also includes bagging and cyclic approaches
3. **Medical Image Analysis**
+
* `Hello MONAI Bundle (GitHub) <https://github.com/NVIDIA/NVFlare/tree/main/examples/hello-monai-bundle>`_ - For an example of using NVIDIA FLARE to train a 3D medical image analysis model using federated averaging (FedAvg) and MONAI Bundle `MONAI <https://monai.io/>`_
* `Federated Learning with Differential Privacy for BraTS18 segmentation (GitHub) <https://github.com/NVIDIA/NVFlare/tree/main/examples/brats18>`_ - Illustrates the use of differential privacy for training brain tumor segmentation models using federated learning
* `Federated Learning for Prostate Segmentation from Multi-source Data (GitHub) <https://github.com/NVIDIA/NVFlare/tree/main/examples/prostate>`_ - Example of training a multi-institutional prostate segmentation model using `FedAvg <https://arxiv.org/abs/1602.05629>`_, `FedProx <https://arxiv.org/abs/1812.06127>`_, and `Ditto <https://arxiv.org/abs/2012.04221>`_
4. **Federated Statistics**
+
* :ref:`Federated Statistic Overview <federated_statistics>` - Discuss the overall federated statistics features
* `Federated Statistics for medical imaging (Github) <https://github.com/NVIDIA/NVFlare/tree/main/examples/federated_statistics/image_stats/README.md>`_ - Example of gathering local image histogram to compute the global dataset histograms.
* `Federated Statistics for tabular data with DataFrame (Github) <https://github.com/NVIDIA/NVFlare/tree/main/examples/federated_statistics/df_stats/README.md>`_ - Example of gathering local statistics summary from Pandas DataFrame to compute the global dataset statistics.
@@ -115,10 +118,12 @@ for an example with ditto in addition to FedProx, FedAvg, and centralized traini
Federated XGBoost
^^^^^^^^^^^^^^^^^
+
* `Federated XGBoost (GitHub) <https://github.com/NVIDIA/NVFlare/tree/main/examples/xgboost>`_ - Includes examples of histogram-based and tree-based algorithms. Tree-based algorithms also includes bagging and cyclic approaches
Federated Analytics
^^^^^^^^^^^^^^^^^^^
+
* `Federated Statistics for medical imaging (Github) <https://github.com/NVIDIA/NVFlare/tree/main/examples/federated_statistics/image_stats/README.md>`_ - Example of gathering local image histogram to compute the global dataset histograms.
* `Federated Statistics for tabular data with DataFrame (Github) <https://github.com/NVIDIA/NVFlare/tree/main/examples/federated_statistics/df_stats/README.md>`_ - Example of gathering local statistics summary from Pandas DataFrame to compute the global dataset statistics.
* `Federated Statistics with Monai Statistics integration for Spleen CT Image (Github) <https://github.com/NVIDIA/NVFlare/tree/main/integration/monai/examples/spleen_ct_segmentation/README.md>`_ - Example demonstrated Monai statistics integration and few other features in federated statistics
diff --git a/docs/examples/hello_cross_val.rst b/docs/examples/hello_cross_val.rst
index 0741177092..5085982d68 100644
--- a/docs/examples/hello_cross_val.rst
+++ b/docs/examples/hello_cross_val.rst
@@ -9,7 +9,7 @@ Before You Start
Before jumping into this guide, make sure you have an environment
with `NVIDIA FLARE <https://pypi.org/project/nvflare/>`_ installed.
-You can follow the :ref:`installation <installation>` guide on the general concept of setting up a
+You can follow :ref:`getting_started` on the general concept of setting up a
Python virtual environment (the recommended environment) and how to install NVIDIA FLARE.
Prerequisite
@@ -115,8 +115,8 @@ Application Configuration
Inside the config folder there are two files, ``config_fed_client.json`` and ``config_fed_server.json``.
-.. literalinclude:: ../../examples/hello-numpy-cross-val/app/config/config_fed_server.json
- :language: python
+.. literalinclude:: ../../examples/hello-world/hello-numpy-cross-val/app/config/config_fed_server.json
+ :language: json
:linenos:
:caption: config_fed_server.json
@@ -126,8 +126,8 @@ The components "model_locator" and "formatter" have been added to work with the
and the rest is the same as in :doc:`Hello Scatter and Gather <hello_scatter_and_gather>`.
-.. literalinclude:: ../../examples/hello-numpy-cross-val/app/config/config_fed_client.json
- :language: python
+.. literalinclude:: ../../examples/hello-world/hello-numpy-cross-val/app/config/config_fed_client.json
+ :language: json
:linenos:
:caption: config_fed_client.json
diff --git a/docs/examples/hello_pt.rst b/docs/examples/hello_pt.rst
index a7a21311c7..5a3a4de399 100644
--- a/docs/examples/hello_pt.rst
+++ b/docs/examples/hello_pt.rst
@@ -11,7 +11,7 @@ to learn more about the specifics of `NVIDIA FLARE <https://pypi.org/project/nvf
Make sure you have an environment with NVIDIA FLARE installed.
-You can follow the :ref:`installation <installation>` guide on the general concept of setting up a
+You can follow :ref:`getting_started` on the general concept of setting up a
Python virtual environment (the recommended environment) and how to install NVIDIA FLARE.
@@ -77,11 +77,8 @@ architecture are modified from
Let's see what an extremely simplified CIFAR10 training looks like:
-.. literalinclude:: ../../examples/hello-pt/app/custom/simple_network.py
+.. literalinclude:: ../../examples/hello-world/hello-pt/app/custom/simple_network.py
:language: python
- :lines: 15-
- :lineno-start: 15
- :linenos:
:caption: simple_network.py
This ``SimpleNetwork`` class is your convolutional neural network to train with the CIFAR10 dataset.
@@ -101,11 +98,8 @@ You can think of all of this code as part of your local training loop, as every
Since you will encapsulate every training-related step in the ``Cifar10Trainer`` class,
let's put this preparation stage into the ``__init__`` method:
-.. literalinclude:: ../../examples/hello-pt/app/custom/cifar10trainer.py
+.. literalinclude:: ../../examples/hello-world/hello-pt/app/custom/cifar10trainer.py
:language: python
- :lines: 37-82
- :lineno-start: 37
- :linenos:
Local Train
@@ -114,7 +108,7 @@ Local Train
Now that you have your network and dataset setup, in the ``Cifar10Trainer`` class.
Let's also implement a local training loop in a method called ``local_train``:
-.. literalinclude:: ../../examples/hello-pt/app/custom/cifar10trainer.py
+.. literalinclude:: ../../examples/hello-world/hello-pt/app/custom/cifar10trainer.py
:language: python
:pyobject: Cifar10Trainer.local_train
@@ -144,7 +138,7 @@ We can then call our local train inside the ``execute`` method.
Take a look at the following code:
-.. literalinclude:: ../../examples/hello-pt/app/custom/cifar10trainer.py
+.. literalinclude:: ../../examples/hello-world/hello-pt/app/custom/cifar10trainer.py
:language: python
:pyobject: Cifar10Trainer.execute
@@ -187,7 +181,7 @@ Application Configuration
Inside the config folder there are two files, ``config_fed_client.json`` and ``config_fed_server.json``.
-.. literalinclude:: ../../examples/hello-pt/app/config/config_fed_client.json
+.. literalinclude:: ../../examples/hello-world/hello-pt/app/config/config_fed_client.json
:language: json
:linenos:
:caption: config_fed_client.json
@@ -205,7 +199,7 @@ The "validate" task for ``Cifar10Validator`` and the "submit_model" task are use
so we will be addressing that in a later example.
-.. literalinclude:: ../../examples/hello-pt/app/config/config_fed_server.json
+.. literalinclude:: ../../examples/hello-world/hello-pt/app/config/config_fed_server.json
:language: json
:linenos:
:caption: config_fed_server.json
diff --git a/docs/examples/hello_pt_tb.rst b/docs/examples/hello_pt_tb.rst
index e9228b15bf..715e83729b 100644
--- a/docs/examples/hello_pt_tb.rst
+++ b/docs/examples/hello_pt_tb.rst
@@ -23,7 +23,7 @@ The setup of this exercise consists of one **server** and two **clients**.
Let's get started. Make sure you have an environment with NVIDIA FLARE installed as described in
-:doc:`quickstart <../quickstart>` guide. First clone the repo:
+:ref:`getting_started`. First clone the repo:
.. code-block:: shell
@@ -42,7 +42,7 @@ Adding TensorBoard Streaming to Configurations
Inside the config folder there are two files, ``config_fed_client.json`` and ``config_fed_server.json``.
-.. literalinclude:: ../../examples/hello-pt-tb/app/config/config_fed_client.json
+.. literalinclude:: ../../examples/hello-world/hello-pt-tb/app/config/config_fed_client.json
:language: json
:linenos:
:caption: config_fed_client.json
@@ -60,7 +60,7 @@ which converts local events to federated events.
This changes the event ``analytix_log_stats`` into a fed event ``fed.analytix_log_stats``,
which will then be streamed from the clients to the server.
-.. literalinclude:: ../../examples/hello-pt-tb/app/config/config_fed_server.json
+.. literalinclude:: ../../examples/hello-world/hello-pt-tb/app/config/config_fed_server.json
:language: json
:linenos:
:caption: config_fed_server.json
@@ -83,7 +83,7 @@ In this exercise, all of the TensorBoard code additions will be made in ``pt_lea
First we must initialize our TensorBoard writer to the ``AnalyticsSender`` we defined in the client config:
-.. literalinclude:: ../../examples/hello-pt-tb/app/custom/pt_learner.py
+.. literalinclude:: ../../examples/hello-world/hello-pt-tb/app/custom/pt_learner.py
:language: python
:lines: 61, 89-92
:lineno-start: 61
@@ -98,7 +98,7 @@ but we can also define it in the client config to be passed into the constructor
Now that our TensorBoard writer is set to ``AnalyticsSender``,
we can write and stream training metrics to the server in ``local_train()``:
-.. literalinclude:: ../../examples/hello-pt-tb/app/custom/pt_learner.py
+.. literalinclude:: ../../examples/hello-world/hello-pt-tb/app/custom/pt_learner.py
:language: python
:lines: 127-159
:lineno-start: 127
diff --git a/docs/examples/hello_scatter_and_gather.rst b/docs/examples/hello_scatter_and_gather.rst
index 3e0394a7b7..ef04dced79 100644
--- a/docs/examples/hello_scatter_and_gather.rst
+++ b/docs/examples/hello_scatter_and_gather.rst
@@ -9,7 +9,7 @@ Before You Start
Before jumping into this guide, make sure you have an environment with
`NVIDIA FLARE <https://pypi.org/project/nvflare/>`_ installed.
-You can follow the :ref:`installation <installation>` guide on the general concept of setting up a
+You can follow :ref:`getting_started` on the general concept of setting up a
Python virtual environment (the recommended environment) and how to install NVIDIA FLARE.
@@ -121,12 +121,12 @@ Inside the config folder there are two files, ``config_fed_client.json`` and ``c
For now, the default configurations are sufficient.
-.. literalinclude:: ../../examples/hello-numpy-sag/app/config/config_fed_server.json
+.. literalinclude:: ../../examples/hello-world/hello-numpy-sag/app/config/config_fed_server.json
:language: json
:linenos:
:caption: config_fed_server.json
-.. literalinclude:: ../../examples/hello-numpy-sag/app/config/config_fed_client.json
+.. literalinclude:: ../../examples/hello-world/hello-numpy-sag/app/config/config_fed_client.json
:language: json
:linenos:
:caption: config_fed_client.json
diff --git a/docs/examples/hello_tf2.rst b/docs/examples/hello_tf2.rst
index b877c8799b..3a1c8b118f 100644
--- a/docs/examples/hello_tf2.rst
+++ b/docs/examples/hello_tf2.rst
@@ -55,7 +55,7 @@ with two clients and one server.
Before you start, let's see what a simplified MNIST network looks like.
-.. literalinclude:: ../../examples/hello-tf2/app/custom/tf2_net.py
+.. literalinclude:: ../../examples/hello-world/hello-tf2/app/custom/tf2_net.py
:language: python
:lines: 15-
:lineno-start: 15
@@ -79,7 +79,7 @@ Additionally, you must setup the optimizer, loss function and transform to proce
Since every step will be encapsulated in the ``SimpleTrainer`` class,
let's put this preparation stage into one method ``setup``:
-.. literalinclude:: ../../examples/hello-tf2/app/custom/trainer.py
+.. literalinclude:: ../../examples/hello-world/hello-tf2/app/custom/trainer.py
:language: python
:lines: 41-71
:lineno-start: 41
@@ -94,7 +94,7 @@ NVIDIA FLARE enters or leaves a certain stage.
In this case, there is an ``Event`` called ``EventType.START_RUN`` which perfectly matches these requirements.
Because our trainer is a subclass of ``FLComponent``, you can implement the handler to handle the event and call the setup method:
-.. literalinclude:: ../../examples/hello-tf2/app/custom/trainer.py
+.. literalinclude:: ../../examples/hello-world/hello-tf2/app/custom/trainer.py
:language: python
:lines: 37-39
:lineno-start: 37
@@ -119,14 +119,14 @@ Link NVIDIA FLARE with Local Train
Take a look at the following code:
-.. literalinclude:: ../../examples/hello-tf2/app/custom/trainer.py
+.. literalinclude:: ../../examples/hello-world/hello-tf2/app/custom/trainer.py
:language: python
:pyobject: SimpleTrainer.execute
Every NVIDIA FLARE client receives the model weights from the server in the :ref:`shareable <shareable>`.
This application uses the ``exclude_var`` filter, so make sure to replace the missing layer with weights from the clients' previous training round:
-.. literalinclude:: ../../examples/hello-tf2/app/custom/trainer.py
+.. literalinclude:: ../../examples/hello-world/hello-tf2/app/custom/trainer.py
:language: python
:lines: 111-115
:lineno-start: 111
@@ -134,7 +134,7 @@ This application uses the ``exclude_var`` filter, so make sure to replace the mi
Now update the local model with those received weights:
-.. literalinclude:: ../../examples/hello-tf2/app/custom/trainer.py
+.. literalinclude:: ../../examples/hello-world/hello-tf2/app/custom/trainer.py
:language: python
:lines: 118
:lineno-start: 118
@@ -142,7 +142,7 @@ Now update the local model with those received weights:
Then perform a simple :code:`self.model.fit` so the client's model is trained with its own dataset:
-.. literalinclude:: ../../examples/hello-tf2/app/custom/trainer.py
+.. literalinclude:: ../../examples/hello-world/hello-tf2/app/custom/trainer.py
:language: python
:lines: 122-127
:lineno-start: 122
@@ -165,7 +165,7 @@ For this exercise, we use a basic ``exclude_var`` filter to exclude the variable
as it goes outbound from the client to the server. The excluded layer is replaced with all zeros of the same shape,
which reduces compression size and ensures that the clients' weights for this variable are not shared with the server.
-.. literalinclude:: ../../examples/hello-tf2/app/custom/filter.py
+.. literalinclude:: ../../examples/hello-world/hello-tf2/app/custom/filter.py
:language: python
:lines: 15-
:lineno-start: 15
@@ -192,7 +192,7 @@ Model Persistor
The model persistor is used to load and save models on the server.
-.. literalinclude:: ../../examples/hello-tf2/app/custom/tf2_model_persistor.py
+.. literalinclude:: ../../examples/hello-world/hello-tf2/app/custom/tf2_model_persistor.py
:language: python
:lines: 15-
:lineno-start: 15
@@ -212,7 +212,7 @@ Application Configuration
Finally, inside the config folder there are two files, ``config_fed_client.json`` and ``config_fed_server.json``.
-.. literalinclude:: ../../examples/hello-tf2/app/config/config_fed_server.json
+.. literalinclude:: ../../examples/hello-world/hello-tf2/app/config/config_fed_server.json
:language: json
:linenos:
:caption: config_fed_server.json
@@ -225,7 +225,7 @@ The ``persistor`` is configured to use ``TF2ModelPersistor`` in the custom direc
Python module paths.
-.. literalinclude:: ../../examples/hello-tf2/app/config/config_fed_client.json
+.. literalinclude:: ../../examples/hello-world/hello-tf2/app/config/config_fed_client.json
:language: json
:linenos:
:caption: config_fed_client.json
diff --git a/docs/getting_started.rst b/docs/getting_started.rst
index a2fc8695d1..4aeb780e45 100644
--- a/docs/getting_started.rst
+++ b/docs/getting_started.rst
@@ -1,4 +1,4 @@
-.. _quickstart:
+.. _getting_started:
###############
Getting Started
@@ -98,34 +98,34 @@ environment.
To get started with a containerized deployment, you will first need to install a supported
container runtime and the NVIDIA Container Toolkit to enable support for GPUs. System requirements
-and instructions for this can be found in the `NVIDIA Container Toolkit Install Guide <https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html>`.
+and instructions for this can be found in the `NVIDIA Container Toolkit Install Guide <https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html>`_.
A simple Dockerfile is used to capture the base requirements and dependencies. In
this case, we're building an environment that will support PyTorch-based workflows,
in particular the `Hello PyTorch with Tensorboard Streaming <https://github.com/NVIDIA/NVFlare/tree/main/examples/hello-pt-tb>`_
example. The base for this build is the NGC PyTorch container. On this base image,
we will install the necessary dependencies and clone the NVIDIA FLARE GitHub
-source code into the root workspace directory. To create a Dockerfile, create a file named ``Dockerfile``
-using any text editor and include the following:
+source code into the root workspace directory.
-.. code-block:: dockerfile
+Let's first create a folder called ``build`` and then create a file inside named ``Dockerfile``:
- ARG PYTORCH_IMAGE=nvcr.io/nvidia/pytorch:22.09-py3
- FROM ${PYTORCH_IMAGE}
+.. code-block:: shell
+
+ mkdir build
+ cd build
+ touch Dockerfile
- RUN python3 -m pip install -U pip
- RUN python3 -m pip install -U setuptools
- RUN python3 -m pip install torch torchvision tensorboard nvflare
+Using any text editor to edit the Dockerfile and paste the following:
- WORKDIR /workspace/
- RUN git clone https://github.com/NVIDIA/NVFlare.git
+.. literalinclude:: resources/Dockerfile.doc
+ :language: dockerfile
We can then build the new container by running docker build in the directory containing
this Dockerfile, for example tagging it nvflare-pt:
.. code-block:: shell
- docker build -t nvflare-pt .
+ docker build -t nvflare-pt . -f Dockerfile
This will result in a docker image, ``nvflare-pt:latest``. You can run this container with Docker,
in this example mounting a local ``my-workspace`` directory into the container for use as a persistent
diff --git a/docs/programming_guide/high_availability.rst b/docs/programming_guide/high_availability.rst
index e34c3504fe..2f14b5ccc3 100644
--- a/docs/programming_guide/high_availability.rst
+++ b/docs/programming_guide/high_availability.rst
@@ -187,8 +187,8 @@ occurs, the new SP will restore the FLContext, the Job workspace, and all the co
depending on when the state is persisted, there is potentially a portion of work that may still be lost when the state
is restored.
-FLCompoent
-==========
+FLComponent
+===========
Each FLComponent has its implementation to decide what kind of data it needs to persist and migrate, and then how
to restore from the persisted data.
diff --git a/docs/programming_guide/provisioning_system.rst b/docs/programming_guide/provisioning_system.rst
index 1928cfdbc0..803b49fdea 100644
--- a/docs/programming_guide/provisioning_system.rst
+++ b/docs/programming_guide/provisioning_system.rst
@@ -65,7 +65,9 @@ they can add API calls to Open Provision API to generate required outputs.
Provisioner
-----------
This is the container class that owns all instances of Project, Workspace, Provision Context, Builders and Participants,
-as shown in the above diagram. A typical usage of this class is like the following::
+as shown in the above diagram. A typical usage of this class is like the following:
+
+.. code-block:: python
provisioner = Provisioner(workspace_full_path, builders)
@@ -74,7 +76,9 @@ as shown in the above diagram. A typical usage of this class is like the follow
Project
-------
The Project class keeps information about participants. Therefore, information of any participant can be retrieved from
-the Project instance::
+the Project instance:
+
+.. code-block:: python
class Project(object):
def __init__(self, name: str, description: str, participants: List[Participant]):
@@ -102,7 +106,9 @@ Participant
-----------
Each participant is one entity that communicates with other participants inside the NVIDIA FLARE system during runtime.
Each participant has the following attributes: type, name, org and props. The attribute ``props`` is a dictionary and
-stores additional information::
+stores additional information:
+
+.. code-block:: python
class Participant(object):
def __init__(self, type: str, name: str, org: str, *args, **kwargs):
@@ -145,7 +151,9 @@ builders being called after it will not be able to access the wip folder.
.. note:: The collaboration among all builders is the responsibility of Open Provision API developers.
-Every builder has to subclass the Builder class and override one or more of these three methods::
+Every builder has to subclass the Builder class and override one or more of these three methods:
+
+.. code-block:: python
class Builder(ABC):
def initialize(self, ctx: dict):
@@ -187,14 +195,18 @@ is called before other builders' finalize methods and before other builders' bui
Case 1: generating additional files
-----------------------------------
The developers would like to add a configuration file about a database server to admin participants. The configuration
-is like this::
+is like this:
+
+.. code-block:: yaml
[database]
db_server = server name
db_port = port_number
user_name = admin's name
-As this requires adding one file to every admin participant, the developer can write a DBBuilder as follows::
+As this requires adding one file to every admin participant, the developer can write a DBBuilder as follows:
+
+.. code-block:: python
class DBConfigBuilder(Builder):
def __init__(self, db_server, db_port):
@@ -210,7 +222,9 @@ As this requires adding one file to every admin participant, the developer can w
f.write(f"db_port = {self.db_port}\n")
f.write(f"user_name = {admin.name}\n")
-And in project.yml, add an entry in the builders section::
+And in project.yml, add an entry in the builders section:
+
+.. code-block:: yaml
- path: byob.DBConfigBuilder
args:
@@ -221,7 +235,9 @@ Case 2: enhancing an existing builder
-------------------------------------
The developer would like to push zip files of each generated folder, to
a web server via a POST method. This can be done easily by implementing a new builder as
-follows (after pip install requests)::
+follows (after pip install requests):
+
+.. code-block:: python
class WebPostDistributionBuilder(Builder):
def __init__(self, url):
@@ -236,14 +252,18 @@ follows (after pip install requests)::
files = {"upload_file": open(dest_zip_file, "rb")}
r = requests.post(self.url, files=files)
-And just replace the existing one with the new builder under Builders in the project.yml::
+And just replace the existing one with the new builder under Builders in the project.yml:
+
+.. code-block:: yaml
- path: byob.WebPostDistributionBuilder
args:
url: https://example.com/nvflare/provision
For the above two cases, if developers opt to use Open Provision API directly instead of project.yml, they can do this
-(some code omitted for clarity)::
+(some code omitted for clarity):
+
+.. code-block:: python
from byob import WebPostDistributionBuilder
builders = list()
@@ -258,20 +278,26 @@ For the above two cases, if developers opt to use Open Provision API directly in
Case 3: adding both new builders and participants of new types
--------------------------------------------------------------
The developers would like to add participants of type = 'gateway.' In order to handle this type of participants, a new
-builder is needed to write gateway specific configuration. First, specify that in project.yml::
+builder is needed to write gateway specific configuration. First, specify that in project.yml:
+
+.. code-block:: yaml
- name: gateway1
type: gateway
org: nvidia
port: 8102
-or in API style::
+or in API style:
+
+.. code-block:: python
participants = list()
p = Participant(name="gateway1", type="gateway", org="nvidia", port=8102)
participants.append(p)
-A new builder to write 'gateway.conf' can be implemented as follows (for reference)::
+A new builder to write 'gateway.conf' can be implemented as follows (for reference):
+
+.. code-block:: python
class GWConfigBuilder(Builder):
def build(self, project, ctx):
@@ -288,7 +314,9 @@ A new builder to write 'gateway.conf' can be implemented as follows (for referen
Case 4: adding a builder for enabling the creation of zip archives for the startup kits
---------------------------------------------------------------------------------------
DistributionBuilder was included in NVIDIA FLARE before version 2.2.1 but has been removed from the
-default builders. You can make this builder available and add it as a builder in project.yml if you want to zip the startup kits::
+default builders. You can make this builder available and add it as a builder in project.yml if you want to zip the startup kits:
+
+.. code-block:: python
import os
import shutil
@@ -338,7 +366,9 @@ default builders. You can make this builder available and add it as a builder in
else:
shutil.make_archive(dest_zip_file, "zip", root_dir=os.path.join(wip_dir, dir), base_dir="startup")
-If the above code is made available at ``nvflare.lighter.impl.workspace.DistributionBuilder``, add the following to your project.yml at the bottom of the list of builders::
+If the above code is made available at ``nvflare.lighter.impl.workspace.DistributionBuilder``, add the following to your project.yml at the bottom of the list of builders:
+
+.. code-block:: yaml
path: nvflare.lighter.impl.workspace.DistributionBuilder
args:
@@ -418,7 +448,7 @@ This is the key file that describes the information which provisioning tool will
If there is no ``project.yml`` in your current working directory, simply run ``provision`` without any option. It
will ask you if you would like to have one sample copy of this file created.
-.. code-block:: shell
+.. code-block:: console
(nvflare-venv) ~/workspace$ provision
No project.yml found in current folder.
@@ -436,9 +466,11 @@ Edit the project.yml configuration file to meet your project requirements:
- "participants" describes the different parties in the FL system, distinguished by type. For all participants, "name"
should be unique, and "org" should be defined in AuthPolicyBuilder. The "name" of the Overseer and servers should
be in the format of fully qualified domain names. It is possible to use a unique hostname rather than FQDN, with
- the IP mapped to the hostname by having it added to ``/etc/hosts``.
+ the IP mapped to the hostname by having it added to ``/etc/hosts``:
+
- Type "overseer" describes the Overseer, with the "org", "name", "protocol", "api_root", and "port".
- - Type "server" describes the FL servers, with the "org", "name", "fed_learn_port", "admin_port", and "enable_byoc".
+ - Type "server" describes the FL servers, with the "org", "name", "fed_learn_port", "admin_port", and "enable_byoc":
+
- "fed_learn_port" is the port number for communication between the FL server and FL clients
- "admin_port" is the port number for communication between the FL server and FL administration client
- Type "client" describes the FL clients, with one "org" and "name" for each client as well as "enable_byoc" settings.
diff --git a/docs/quickstart.rst b/docs/quickstart.rst
index a9a7b819bb..d0c7e60a1c 100644
--- a/docs/quickstart.rst
+++ b/docs/quickstart.rst
@@ -2,4 +2,4 @@
Quickstart
##########
-See :ref:`quickstart`.
+See :ref:`getting_started`.
diff --git a/docs/real_world_fl/application.rst b/docs/real_world_fl/application.rst
index b3a651bde0..9b3f7fe305 100644
--- a/docs/real_world_fl/application.rst
+++ b/docs/real_world_fl/application.rst
@@ -107,7 +107,7 @@ the client config should have the following in order to configure it as an Execu
Configuration of Executor Tasks is ignored here.
-Please follow :ref:`quickstart` to learn more.
+Please follow :ref:`getting_started` to learn more.
.. _troubleshooting_byoc:
diff --git a/docs/real_world_fl/flare_api.rst b/docs/real_world_fl/flare_api.rst
index 85a6d4a0ea..8f1ca49aaa 100644
--- a/docs/real_world_fl/flare_api.rst
+++ b/docs/real_world_fl/flare_api.rst
@@ -13,7 +13,7 @@ Initialization and Usage
------------------------
Initialize the FLARE API with :func:`new_secure_session<nvflare.fuel.flare_api.flare_api.new_secure_session>` by providing
the username and the path to the startup kit folder of the provisioned user containing the startup folder with the admin client's
-certs and keys::
+certs and keys:
.. code:: python
diff --git a/docs/real_world_fl/migrating_to_flare_api.rst b/docs/real_world_fl/migrating_to_flare_api.rst
index 44c3e9f696..7b34bdcc81 100644
--- a/docs/real_world_fl/migrating_to_flare_api.rst
+++ b/docs/real_world_fl/migrating_to_flare_api.rst
@@ -44,7 +44,7 @@ Initializing the FLAdminAPIRunner, which initializes FLAdminAPI with the values
:ref:`flare_api_initialization` is similar to :class:`FLAdminAPIRunner<nvflare.fuel.hci.client.fl_admin_api_runner.FLAdminAPIRunner>`
with :func:`new_secure_session<nvflare.fuel.flare_api.flare_api.new_secure_session>` taking two required arguments of
the username and the path to the root admin directory containing the startup folder with the admin client's
-certs and keys::
+certs and keys:
.. code:: python
@@ -92,7 +92,7 @@ and the new way with FLARE API.
.. csv-table::
:header: Command for FLAdminAPI,Command for FLARE API,Differences
- :widths: 15, 15, 30
+ :widths: 15, 15, 30, 30
check_status(),get_system_info(),Simplified and reformatted output, see below for details
submit_job(),submit_job(),Simplified output, see below for details
diff --git a/docs/real_world_fl/overview.rst b/docs/real_world_fl/overview.rst
index 707bdc0893..8218c110dd 100644
--- a/docs/real_world_fl/overview.rst
+++ b/docs/real_world_fl/overview.rst
@@ -203,7 +203,7 @@ issue commands to operate the system so it can be run with a script.
For a complete list of admin commands, see :ref:`operating_nvflare`.
-For examples of using the commands to operate a FL system, see the examples in the :ref:`quickstart` section.
+For examples of using the commands to operate a FL system, see the examples in the :ref:`getting_started` section.
****************************************************
Internal folder and file structures for NVIDIA FLARE
diff --git a/docs/resources/Dockerfile.doc b/docs/resources/Dockerfile.doc
new file mode 100644
index 0000000000..44d148aa92
--- /dev/null
+++ b/docs/resources/Dockerfile.doc
@@ -0,0 +1,12 @@
+ARG PYTORCH_IMAGE=nvcr.io/nvidia/pytorch:22.12-py3
+FROM ${PYTORCH_IMAGE}
+
+ARG NVF_VERSION=2.2
+ENV NVF_BRANCH=${NVF_VERSION}
+
+RUN python3 -m pip install -U pip
+RUN python3 -m pip install -U setuptools
+RUN python3 -m pip install nvflare
+
+WORKDIR /workspace/
+RUN git clone https://github.com/NVIDIA/NVFlare.git --branch ${NVF_BRANCH} --single-branch NVFlare
diff --git a/docs/user_guide/dashboard_ui.rst b/docs/user_guide/dashboard_ui.rst
index fc28176349..6006797701 100644
--- a/docs/user_guide/dashboard_ui.rst
+++ b/docs/user_guide/dashboard_ui.rst
@@ -120,8 +120,6 @@ the table, then type the client site name in the input box in the table for the
outside of the input. Click on the value again to edit it. The fields for NUM GPU (number of GPUs) and MEMORY PER GPU (memory per GPU in GiBs) can
also be edited here. When done configuring client sites, click ``Next`` below to complete registration, and you will be linked to the User Dashboard.
-.. _dashboard_org_admin_user_reg:
-
Org Admin User Dashboard
------------------------
The User Dashboard for ``Org Admin`` users is the same as for ``Member`` and ``Lead`` users on the top, but below the user information is an
diff --git a/requirements-doc.txt b/requirements-doc.txt
index fa4e9cc46c..6e053ad73b 100644
--- a/requirements-doc.txt
+++ b/requirements-doc.txt
@@ -2,3 +2,4 @@
sphinx>=4.1.1
sphinx_rtd_theme
recommonmark
+sphinx-copybutton
|
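The merged patch above only touches the documentation, so the "command not found" symptom itself usually comes down to the `nvflare` console script not being on `PATH` (for example, a `pip install --user` placing it in `~/.local/bin`). As a generic diagnostic sketch — not NVFlare's official guidance — the following checks whether the package is importable and where pip would normally have put its entry-point script:

```python
"""Generic check for a pip-installed console script that is "not found".

Not part of NVFlare -- just a diagnostic sketch: it verifies that the package
imports and prints the directories where the `nvflare` entry-point script is
normally installed, which is usually the directory missing from PATH.
"""
import importlib.util
import os
import shutil
import site
import sysconfig

print("nvflare package importable:", importlib.util.find_spec("nvflare") is not None)
print("`nvflare` found on PATH at:", shutil.which("nvflare"))
print("interpreter scripts dir   :", sysconfig.get_path("scripts"))
# `pip install --user` places console scripts under the user base instead,
# e.g. ~/.local/bin on Ubuntu, which is not always on PATH.
print("user scripts dir          :", os.path.join(site.getuserbase(), "bin"))
```

If the package imports but `shutil.which` returns `None`, adding the reported scripts directory to `PATH` (or reinstalling inside an activated virtual environment) typically restores the command.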
numpy__numpy-3055 | 2to3 run `execfile` fixer
| [
{
"content": "#!/usr/bin/env python\n\"\"\"\nA setup.py script to use setuptools, which gives egg goodness, etc.\n\nThis is used to build installers for OS X through bdist_mpkg.\n\nNotes\n-----\nUsing ``python setupegg.py install`` directly results in file permissions being\nset wrong, with nose refusing to run any tests. To run the tests anyway, use::\n\n >>> np.test(extra_argv=['--exe'])\n\n\"\"\"\n\nimport sys\nfrom setuptools import setup\n\nif sys.version_info[0] >= 3:\n import imp\n setupfile = imp.load_source('setupfile', 'setup.py')\n setupfile.setup_package()\nelse:\n execfile('setup.py')\n",
"path": "setupegg.py"
}
] | [
{
"content": "#!/usr/bin/env python\n\"\"\"\nA setup.py script to use setuptools, which gives egg goodness, etc.\n\nThis is used to build installers for OS X through bdist_mpkg.\n\nNotes\n-----\nUsing ``python setupegg.py install`` directly results in file permissions being\nset wrong, with nose refusing to run any tests. To run the tests anyway, use::\n\n >>> np.test(extra_argv=['--exe'])\n\n\"\"\"\n\nimport sys\nfrom setuptools import setup\n\nif sys.version_info[0] >= 3:\n import imp\n setupfile = imp.load_source('setupfile', 'setup.py')\n setupfile.setup_package()\nelse:\n exec(compile(open('setup.py').read(), 'setup.py', 'exec'))\n",
"path": "setupegg.py"
}
] | diff --git a/setupegg.py b/setupegg.py
index 3ed1e0b0bc70..82b35fd696f6 100755
--- a/setupegg.py
+++ b/setupegg.py
@@ -21,4 +21,4 @@
setupfile = imp.load_source('setupfile', 'setup.py')
setupfile.setup_package()
else:
- execfile('setup.py')
+ exec(compile(open('setup.py').read(), 'setup.py', 'exec'))
|
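The one-line fix above is exactly what the 2to3 `execfile` fixer emits: read the file, `compile` it with its own filename so tracebacks keep pointing at the right source, and `exec` the resulting code object. A small helper capturing the same idiom — the `exec_file` name is mine, not NumPy's:

```python
def exec_file(path, globs=None, locs=None):
    """Python 3 replacement for the removed execfile() builtin.

    Compiling with the real filename keeps tracebacks pointing at `path`,
    which is why the fixer emits compile(...) rather than a bare exec(read()).
    """
    if globs is None:
        globs = {"__file__": path, "__name__": "__main__"}
    with open(path) as fh:
        code = compile(fh.read(), path, "exec")
    exec(code, globs, locs if locs is not None else globs)
    return globs


# e.g. exec_file("setup.py") runs the script in a fresh namespace and returns it.
```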
mozilla__bugbug-3921 | [model:regressor] AttributeError: 'IsotonicRegressionCalibrator' object has no attribute 'n_features_in_'
https://community-tc.services.mozilla.com/tasks/HncpjvKKRcSnxL_GJ8PV9A/runs/0/logs/public/logs/live.log
```
Traceback (most recent call last):
File "/usr/local/bin/bugbug-train", line 8, in <module>
sys.exit(main())
File "/usr/local/lib/python3.10/site-packages/scripts/trainer.py", line 141, in main
retriever.go(args)
File "/usr/local/lib/python3.10/site-packages/scripts/trainer.py", line 41, in go
metrics = model_obj.train(limit=args.limit)
File "/usr/local/lib/python3.10/site-packages/bugbug/model.py", line 418, in train
logger.info("Number of features: %d", self.clf.steps[-1][1].n_features_in_)
AttributeError: 'IsotonicRegressionCalibrator' object has no attribute 'n_features_in_'
```
| [
{
"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nfrom sklearn.base import BaseEstimator, ClassifierMixin\nfrom sklearn.calibration import CalibratedClassifierCV\nfrom sklearn.model_selection import train_test_split\n\n\nclass IsotonicRegressionCalibrator(BaseEstimator, ClassifierMixin):\n def __init__(self, base_clf):\n self.base_clf = base_clf\n self.calibrated_clf = CalibratedClassifierCV(\n base_clf, cv=\"prefit\", method=\"isotonic\"\n )\n\n def fit(self, X_train, y_train):\n X_train, X_val, y_train, y_val = train_test_split(\n X_train, y_train, test_size=0.2, random_state=42\n )\n self.base_clf.fit(X_train, y_train)\n self.calibrated_clf.fit(X_val, y_val)\n\n def predict(self, X):\n return self.calibrated_clf.predict(X)\n\n def predict_proba(self, X):\n return self.calibrated_clf.predict_proba(X)\n",
"path": "bugbug/model_calibration.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nfrom sklearn.base import BaseEstimator, ClassifierMixin\nfrom sklearn.calibration import CalibratedClassifierCV\nfrom sklearn.model_selection import train_test_split\n\n\nclass IsotonicRegressionCalibrator(BaseEstimator, ClassifierMixin):\n def __init__(self, base_clf):\n self.base_clf = base_clf\n self.calibrated_clf = CalibratedClassifierCV(\n base_clf, cv=\"prefit\", method=\"isotonic\"\n )\n\n def fit(self, X_train, y_train):\n X_train, X_val, y_train, y_val = train_test_split(\n X_train, y_train, test_size=0.2, random_state=42\n )\n self.base_clf.fit(X_train, y_train)\n self.calibrated_clf.fit(X_val, y_val)\n\n def predict(self, X):\n return self.calibrated_clf.predict(X)\n\n def predict_proba(self, X):\n return self.calibrated_clf.predict_proba(X)\n\n @property\n def n_features_in_(self):\n return self.base_clf.n_features_in_\n",
"path": "bugbug/model_calibration.py"
}
] | diff --git a/bugbug/model_calibration.py b/bugbug/model_calibration.py
index 2d2f2398d3..5643706d29 100644
--- a/bugbug/model_calibration.py
+++ b/bugbug/model_calibration.py
@@ -27,3 +27,7 @@ def predict(self, X):
def predict_proba(self, X):
return self.calibrated_clf.predict_proba(X)
+
+ @property
+ def n_features_in_(self):
+ return self.base_clf.n_features_in_
|
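The fix above works because the training code reads `n_features_in_` off the last pipeline step, and a wrapper estimator does not automatically expose attributes of the model it wraps; a read-only property forwarding to the fitted base estimator is the lightest way to restore that. A toy sketch of the same delegation pattern — class and variable names are illustrative, and the usage lines assume scikit-learn is installed:

```python
from sklearn.tree import DecisionTreeClassifier


class CalibratedWrapper:
    """Toy meta-estimator showing the attribute-delegation pattern in the fix."""

    def __init__(self, base_clf):
        self.base_clf = base_clf

    def fit(self, X, y):
        self.base_clf.fit(X, y)
        return self

    def predict(self, X):
        return self.base_clf.predict(X)

    @property
    def n_features_in_(self):
        # Forward the attribute set on the fitted base estimator, so code that
        # inspects the wrapper (e.g. a pipeline's final step) keeps working.
        return self.base_clf.n_features_in_


wrapped = CalibratedWrapper(DecisionTreeClassifier()).fit([[0, 1], [1, 0]], [0, 1])
print(wrapped.n_features_in_)  # -> 2, read from the fitted base estimator
```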
statsmodels__statsmodels-2279 | version number convention pep-440
https://github.com/scipy/scipy/pull/4307
It just changes a `-` to a `+`;
we need the same change.
Line 228 in setup.py: `FULLVERSION += '.dev-' + GIT_REVISION[:7]`
I have no idea whether we need anything else.
| [
{
"content": "\"\"\"\nMuch of the build system code was adapted from work done by the pandas\ndevelopers [1], which was in turn based on work done in pyzmq [2] and lxml [3].\n\n[1] http://pandas.pydata.org\n[2] http://zeromq.github.io/pyzmq/\n[3] http://lxml.de/\n\"\"\"\n\nimport os\nfrom os.path import relpath, join as pjoin\nimport sys\nimport subprocess\nimport re\nfrom distutils.version import StrictVersion\n\n\n# temporarily redirect config directory to prevent matplotlib importing\n# testing that for writeable directory which results in sandbox error in\n# certain easy_install versions\nos.environ[\"MPLCONFIGDIR\"] = \".\"\n\nno_frills = (len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or\n sys.argv[1] in ('--help-commands',\n 'egg_info', '--version',\n 'clean')))\n\n# try bootstrapping setuptools if it doesn't exist\ntry:\n import pkg_resources\n try:\n pkg_resources.require(\"setuptools>=0.6c5\")\n except pkg_resources.VersionConflict:\n from ez_setup import use_setuptools\n use_setuptools(version=\"0.6c5\")\n from setuptools import setup, Command, find_packages\n _have_setuptools = True\nexcept ImportError:\n # no setuptools installed\n from distutils.core import setup, Command\n _have_setuptools = False\n\nif _have_setuptools:\n setuptools_kwargs = {\"zip_safe\": False,\n \"test_suite\": \"nose.collector\"}\nelse:\n setuptools_kwargs = {}\n if sys.version_info[0] >= 3:\n sys.exit(\"Need setuptools to install statsmodels for Python 3.x\")\n\n\ncurdir = os.path.abspath(os.path.dirname(__file__))\nREADME = open(pjoin(curdir, \"README.rst\")).read()\n\nDISTNAME = 'statsmodels'\nDESCRIPTION = 'Statistical computations and models for use with SciPy'\nLONG_DESCRIPTION = README\nMAINTAINER = 'Skipper Seabold, Josef Perktold'\nMAINTAINER_EMAIL ='[email protected]'\nURL = 'http://statsmodels.sourceforge.net/'\nLICENSE = 'BSD License'\nDOWNLOAD_URL = ''\n\n# These imports need to be here; setuptools needs to be imported first.\nfrom distutils.extension import Extension\nfrom distutils.command.build import build\nfrom distutils.command.build_ext import build_ext as _build_ext\n\n\nclass build_ext(_build_ext):\n def build_extensions(self):\n numpy_incl = pkg_resources.resource_filename('numpy', 'core/include')\n\n for ext in self.extensions:\n if (hasattr(ext, 'include_dirs') and\n not numpy_incl in ext.include_dirs):\n ext.include_dirs.append(numpy_incl)\n _build_ext.build_extensions(self)\n\n\ndef generate_cython():\n cwd = os.path.abspath(os.path.dirname(__file__))\n print(\"Cythonizing sources\")\n p = subprocess.call([sys.executable,\n os.path.join(cwd, 'tools', 'cythonize.py'),\n 'statsmodels'],\n cwd=cwd)\n if p != 0:\n raise RuntimeError(\"Running cythonize failed!\")\n\n\ndef strip_rc(version):\n return re.sub(r\"rc\\d+$\", \"\", version)\n\n\ndef check_dependency_versions(min_versions):\n \"\"\"\n Don't let pip/setuptools do this all by itself. It's rude.\n\n For all dependencies, try to import them and check if the versions of\n installed dependencies match the minimum version requirements. If\n installed but version too low, raise an error. If not installed at all,\n return the correct ``setup_requires`` and ``install_requires`` arguments to\n be added to the setuptools kwargs. 
This prevents upgrading installed\n dependencies like numpy (that should be an explicit choice by the user and\n never happen automatically), but make things work when installing into an\n empty virtualenv for example.\n\n \"\"\"\n setup_requires = []\n install_requires = []\n\n try:\n from numpy.version import short_version as npversion\n except ImportError:\n setup_requires.append('numpy')\n install_requires.append('numpy')\n else:\n if not (StrictVersion(strip_rc(npversion)) >= min_versions['numpy']):\n raise ImportError(\"Numpy version is %s. Requires >= %s\" %\n (npversion, min_versions['numpy']))\n\n try:\n import scipy\n except ImportError:\n install_requires.append('scipy')\n else:\n try:\n from scipy.version import short_version as spversion\n except ImportError:\n from scipy.version import version as spversion # scipy 0.7.0\n if not (StrictVersion(strip_rc(spversion)) >= min_versions['scipy']):\n raise ImportError(\"Scipy version is %s. Requires >= %s\" %\n (spversion, min_versions['scipy']))\n\n try:\n from pandas.version import short_version as pversion\n except ImportError:\n install_requires.append('pandas')\n else:\n if not (StrictVersion(strip_rc(pversion)) >= min_versions['pandas']):\n ImportError(\"Pandas version is %s. Requires >= %s\" %\n (pversion, min_versions['pandas']))\n\n try:\n from patsy import __version__ as patsy_version\n except ImportError:\n install_requires.append('patsy')\n else:\n # patsy dev looks like 0.1.0+dev\n pversion = re.match(\"\\d*\\.\\d*\\.\\d*\", patsy_version).group()\n if not (StrictVersion(pversion) >= min_versions['patsy']):\n raise ImportError(\"Patsy version is %s. Requires >= %s\" %\n (pversion, min_versions[\"patsy\"]))\n\n return setup_requires, install_requires\n\n\nMAJ = 0\nMIN = 7\nREV = 0\nISRELEASED = False\nVERSION = '%d.%d.%d' % (MAJ,MIN,REV)\n\nclassifiers = [ 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.2',\n 'Operating System :: OS Independent',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Topic :: Scientific/Engineering']\n\n# Return the git revision as a string\ndef git_version():\n def _minimal_ext_cmd(cmd):\n # construct minimal environment\n env = {}\n for k in ['SYSTEMROOT', 'PATH']:\n v = os.environ.get(k)\n if v is not None:\n env[k] = v\n # LANGUAGE is used on win32\n env['LANGUAGE'] = 'C'\n env['LANG'] = 'C'\n env['LC_ALL'] = 'C'\n out = subprocess.Popen(\" \".join(cmd), stdout = subprocess.PIPE, env=env,\n shell=True).communicate()[0]\n return out\n\n try:\n out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])\n GIT_REVISION = out.strip().decode('ascii')\n except OSError:\n GIT_REVISION = \"Unknown\"\n\n return GIT_REVISION\n\ndef write_version_py(filename=pjoin(curdir, 'statsmodels/version.py')):\n cnt = \"\\n\".join([\"\",\n \"# THIS FILE IS GENERATED FROM SETUP.PY\",\n \"short_version = '%(version)s'\",\n \"version = '%(version)s'\",\n \"full_version = '%(full_version)s'\",\n \"git_revision = '%(git_revision)s'\",\n \"release = %(isrelease)s\", \"\",\n \"if not release:\",\n \" version = full_version\"])\n # Adding the git rev number needs to be done inside write_version_py(),\n # otherwise the import of numpy.version messes up the build under Python 3.\n FULLVERSION = VERSION\n dowrite = True\n if os.path.exists('.git'):\n GIT_REVISION = git_version()\n elif os.path.exists(filename):\n # 
must be a source distribution, use existing version file\n try:\n from statsmodels.version import git_revision as GIT_REVISION\n except ImportError:\n dowrite = False\n GIT_REVISION = \"Unknown\"\n else:\n GIT_REVISION = \"Unknown\"\n\n if not ISRELEASED:\n FULLVERSION += '.dev-' + GIT_REVISION[:7]\n\n\n if dowrite:\n try:\n a = open(filename, 'w')\n a.write(cnt % {'version': VERSION,\n 'full_version' : FULLVERSION,\n 'git_revision' : GIT_REVISION,\n 'isrelease': str(ISRELEASED)})\n finally:\n a.close()\n\n\nclass CleanCommand(Command):\n \"\"\"Custom distutils command to clean the .so and .pyc files.\"\"\"\n\n user_options = [(\"all\", \"a\", \"\")]\n\n def initialize_options(self):\n self.all = True\n self._clean_me = []\n self._clean_trees = []\n self._clean_exclude = [\"bspline_ext.c\",\n \"bspline_impl.c\"]\n\n for root, dirs, files in list(os.walk('statsmodels')):\n for f in files:\n if f in self._clean_exclude:\n continue\n if os.path.splitext(f)[-1] in ('.pyc', '.so', '.o',\n '.pyo',\n '.pyd', '.c', '.orig'):\n self._clean_me.append(pjoin(root, f))\n for d in dirs:\n if d == '__pycache__':\n self._clean_trees.append(pjoin(root, d))\n\n for d in ('build',):\n if os.path.exists(d):\n self._clean_trees.append(d)\n\n def finalize_options(self):\n pass\n\n def run(self):\n for clean_me in self._clean_me:\n try:\n os.unlink(clean_me)\n except Exception:\n pass\n for clean_tree in self._clean_trees:\n try:\n import shutil\n shutil.rmtree(clean_tree)\n except Exception:\n pass\n\n\nclass CheckingBuildExt(build_ext):\n \"\"\"Subclass build_ext to get clearer report if Cython is necessary.\"\"\"\n\n def check_cython_extensions(self, extensions):\n for ext in extensions:\n for src in ext.sources:\n if not os.path.exists(src):\n raise Exception(\"\"\"Cython-generated file '%s' not found.\n Cython is required to compile statsmodels from a development branch.\n Please install Cython or download a source release of statsmodels.\n \"\"\" % src)\n\n def build_extensions(self):\n self.check_cython_extensions(self.extensions)\n build_ext.build_extensions(self)\n\n\nclass DummyBuildSrc(Command):\n \"\"\" numpy's build_src command interferes with Cython's build_ext.\n \"\"\"\n user_options = []\n\n def initialize_options(self):\n self.py_modules_dict = {}\n\n def finalize_options(self):\n pass\n\n def run(self):\n pass\n\n\ncmdclass = {'clean': CleanCommand,\n 'build': build}\n\ncmdclass[\"build_src\"] = DummyBuildSrc\ncmdclass[\"build_ext\"] = CheckingBuildExt\n\n\n# some linux distros require it\n#NOTE: we are not currently using this but add it to Extension, if needed.\n# libraries = ['m'] if 'win32' not in sys.platform else []\n\nfrom numpy.distutils.misc_util import get_info\n\nnpymath_info = get_info(\"npymath\")\next_data = dict(\n kalman_loglike = {\"name\" : \"statsmodels/tsa/kalmanf/kalman_loglike.c\",\n \"depends\" : [\"statsmodels/src/capsule.h\"],\n \"include_dirs\": [\"statsmodels/src\"],\n \"sources\" : []},\n _statespace = {\"name\" : \"statsmodels/tsa/statespace/_statespace.c\",\n \"depends\" : [\"statsmodels/src/capsule.h\"],\n \"include_dirs\": [\"statsmodels/src\"] + npymath_info['include_dirs'],\n \"libraries\": npymath_info['libraries'],\n \"library_dirs\": npymath_info['library_dirs'],\n \"sources\" : []},\n linbin = {\"name\" : \"statsmodels/nonparametric/linbin.c\",\n \"depends\" : [],\n \"sources\" : []},\n _smoothers_lowess = {\"name\" : \"statsmodels/nonparametric/_smoothers_lowess.c\",\n \"depends\" : [],\n \"sources\" : []}\n )\n\nextensions = []\nfor name, data in 
ext_data.items():\n data['sources'] = data.get('sources', []) + [data['name']]\n\n destdir = \".\".join(os.path.dirname(data[\"name\"]).split(\"/\"))\n data.pop('name')\n\n obj = Extension('%s.%s' % (destdir, name), **data)\n\n extensions.append(obj)\n\n\ndef get_data_files():\n sep = os.path.sep\n # install the datasets\n data_files = {}\n root = pjoin(curdir, \"statsmodels\", \"datasets\")\n for i in os.listdir(root):\n if i is \"tests\":\n continue\n path = pjoin(root, i)\n if os.path.isdir(path):\n data_files.update({relpath(path, start=curdir).replace(sep, \".\") : [\"*.csv\",\n \"*.dta\"]})\n # add all the tests and results files\n for r, ds, fs in os.walk(pjoin(curdir, \"statsmodels\")):\n r_ = relpath(r, start=curdir)\n if r_.endswith('results'):\n data_files.update({r_.replace(sep, \".\") : [\"*.csv\",\n \"*.txt\"]})\n\n return data_files\n\n\nif __name__ == \"__main__\":\n if os.path.exists('MANIFEST'):\n os.unlink('MANIFEST')\n\n min_versions = {\n 'numpy' : '1.4.0',\n 'scipy' : '0.7.0',\n 'pandas' : '0.7.1',\n 'patsy' : '0.1.0',\n }\n if sys.version_info[0] == 3 and sys.version_info[1] >= 3:\n # 3.3 needs numpy 1.7+\n min_versions.update({\"numpy\" : \"1.7.0b2\"})\n\n (setup_requires,\n install_requires) = check_dependency_versions(min_versions)\n\n if _have_setuptools:\n setuptools_kwargs['setup_requires'] = setup_requires\n setuptools_kwargs['install_requires'] = install_requires\n\n write_version_py()\n\n # this adds *.csv and *.dta files in datasets folders\n # and *.csv and *.txt files in test/results folders\n package_data = get_data_files()\n packages = find_packages()\n packages.append(\"statsmodels.tsa.vector_ar.data\")\n\n package_data[\"statsmodels.datasets.tests\"].append(\"*.zip\")\n package_data[\"statsmodels.iolib.tests.results\"].append(\"*.dta\")\n package_data[\"statsmodels.stats.tests.results\"].append(\"*.json\")\n package_data[\"statsmodels.tsa.vector_ar.tests.results\"].append(\"*.npz\")\n # data files that don't follow the tests/results pattern. should fix.\n package_data.update({\"statsmodels.stats.tests\" : [\"*.txt\"]})\n\n package_data.update({\"statsmodels.stats.libqsturng\" :\n [\"*.r\", \"*.txt\", \"*.dat\"]})\n package_data.update({\"statsmodels.stats.libqsturng.tests\" :\n [\"*.csv\", \"*.dat\"]})\n package_data.update({\"statsmodels.tsa.vector_ar.data\" : [\"*.dat\"]})\n package_data.update({\"statsmodels.tsa.vector_ar.data\" : [\"*.dat\"]})\n # temporary, until moved:\n package_data.update({\"statsmodels.sandbox.regression.tests\" :\n [\"*.dta\", \"*.csv\"]})\n\n #TODO: deal with this. Not sure if it ever worked for bdists\n #('docs/build/htmlhelp/statsmodelsdoc.chm',\n # 'statsmodels/statsmodelsdoc.chm')\n\n cwd = os.path.abspath(os.path.dirname(__file__))\n if not os.path.exists(os.path.join(cwd, 'PKG-INFO')) and not no_frills:\n # Generate Cython sources, unless building from source release\n generate_cython()\n\n setup(name = DISTNAME,\n version = VERSION,\n maintainer = MAINTAINER,\n ext_modules = extensions,\n maintainer_email = MAINTAINER_EMAIL,\n description = DESCRIPTION,\n license = LICENSE,\n url = URL,\n download_url = DOWNLOAD_URL,\n long_description = LONG_DESCRIPTION,\n classifiers = classifiers,\n platforms = 'any',\n cmdclass = cmdclass,\n packages = packages,\n package_data = package_data,\n include_package_data=False, # True will install all files in repo\n **setuptools_kwargs)\n",
"path": "setup.py"
}
] | [
{
"content": "\"\"\"\nMuch of the build system code was adapted from work done by the pandas\ndevelopers [1], which was in turn based on work done in pyzmq [2] and lxml [3].\n\n[1] http://pandas.pydata.org\n[2] http://zeromq.github.io/pyzmq/\n[3] http://lxml.de/\n\"\"\"\n\nimport os\nfrom os.path import relpath, join as pjoin\nimport sys\nimport subprocess\nimport re\nfrom distutils.version import StrictVersion\n\n\n# temporarily redirect config directory to prevent matplotlib importing\n# testing that for writeable directory which results in sandbox error in\n# certain easy_install versions\nos.environ[\"MPLCONFIGDIR\"] = \".\"\n\nno_frills = (len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or\n sys.argv[1] in ('--help-commands',\n 'egg_info', '--version',\n 'clean')))\n\n# try bootstrapping setuptools if it doesn't exist\ntry:\n import pkg_resources\n try:\n pkg_resources.require(\"setuptools>=0.6c5\")\n except pkg_resources.VersionConflict:\n from ez_setup import use_setuptools\n use_setuptools(version=\"0.6c5\")\n from setuptools import setup, Command, find_packages\n _have_setuptools = True\nexcept ImportError:\n # no setuptools installed\n from distutils.core import setup, Command\n _have_setuptools = False\n\nif _have_setuptools:\n setuptools_kwargs = {\"zip_safe\": False,\n \"test_suite\": \"nose.collector\"}\nelse:\n setuptools_kwargs = {}\n if sys.version_info[0] >= 3:\n sys.exit(\"Need setuptools to install statsmodels for Python 3.x\")\n\n\ncurdir = os.path.abspath(os.path.dirname(__file__))\nREADME = open(pjoin(curdir, \"README.rst\")).read()\n\nDISTNAME = 'statsmodels'\nDESCRIPTION = 'Statistical computations and models for use with SciPy'\nLONG_DESCRIPTION = README\nMAINTAINER = 'Skipper Seabold, Josef Perktold'\nMAINTAINER_EMAIL ='[email protected]'\nURL = 'http://statsmodels.sourceforge.net/'\nLICENSE = 'BSD License'\nDOWNLOAD_URL = ''\n\n# These imports need to be here; setuptools needs to be imported first.\nfrom distutils.extension import Extension\nfrom distutils.command.build import build\nfrom distutils.command.build_ext import build_ext as _build_ext\n\n\nclass build_ext(_build_ext):\n def build_extensions(self):\n numpy_incl = pkg_resources.resource_filename('numpy', 'core/include')\n\n for ext in self.extensions:\n if (hasattr(ext, 'include_dirs') and\n not numpy_incl in ext.include_dirs):\n ext.include_dirs.append(numpy_incl)\n _build_ext.build_extensions(self)\n\n\ndef generate_cython():\n cwd = os.path.abspath(os.path.dirname(__file__))\n print(\"Cythonizing sources\")\n p = subprocess.call([sys.executable,\n os.path.join(cwd, 'tools', 'cythonize.py'),\n 'statsmodels'],\n cwd=cwd)\n if p != 0:\n raise RuntimeError(\"Running cythonize failed!\")\n\n\ndef strip_rc(version):\n return re.sub(r\"rc\\d+$\", \"\", version)\n\n\ndef check_dependency_versions(min_versions):\n \"\"\"\n Don't let pip/setuptools do this all by itself. It's rude.\n\n For all dependencies, try to import them and check if the versions of\n installed dependencies match the minimum version requirements. If\n installed but version too low, raise an error. If not installed at all,\n return the correct ``setup_requires`` and ``install_requires`` arguments to\n be added to the setuptools kwargs. 
This prevents upgrading installed\n dependencies like numpy (that should be an explicit choice by the user and\n never happen automatically), but make things work when installing into an\n empty virtualenv for example.\n\n \"\"\"\n setup_requires = []\n install_requires = []\n\n try:\n from numpy.version import short_version as npversion\n except ImportError:\n setup_requires.append('numpy')\n install_requires.append('numpy')\n else:\n if not (StrictVersion(strip_rc(npversion)) >= min_versions['numpy']):\n raise ImportError(\"Numpy version is %s. Requires >= %s\" %\n (npversion, min_versions['numpy']))\n\n try:\n import scipy\n except ImportError:\n install_requires.append('scipy')\n else:\n try:\n from scipy.version import short_version as spversion\n except ImportError:\n from scipy.version import version as spversion # scipy 0.7.0\n if not (StrictVersion(strip_rc(spversion)) >= min_versions['scipy']):\n raise ImportError(\"Scipy version is %s. Requires >= %s\" %\n (spversion, min_versions['scipy']))\n\n try:\n from pandas.version import short_version as pversion\n except ImportError:\n install_requires.append('pandas')\n else:\n if not (StrictVersion(strip_rc(pversion)) >= min_versions['pandas']):\n ImportError(\"Pandas version is %s. Requires >= %s\" %\n (pversion, min_versions['pandas']))\n\n try:\n from patsy import __version__ as patsy_version\n except ImportError:\n install_requires.append('patsy')\n else:\n # patsy dev looks like 0.1.0+dev\n pversion = re.match(\"\\d*\\.\\d*\\.\\d*\", patsy_version).group()\n if not (StrictVersion(pversion) >= min_versions['patsy']):\n raise ImportError(\"Patsy version is %s. Requires >= %s\" %\n (pversion, min_versions[\"patsy\"]))\n\n return setup_requires, install_requires\n\n\nMAJ = 0\nMIN = 7\nREV = 0\nISRELEASED = False\nVERSION = '%d.%d.%d' % (MAJ,MIN,REV)\n\nclassifiers = [ 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.2',\n 'Operating System :: OS Independent',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Topic :: Scientific/Engineering']\n\n# Return the git revision as a string\ndef git_version():\n def _minimal_ext_cmd(cmd):\n # construct minimal environment\n env = {}\n for k in ['SYSTEMROOT', 'PATH']:\n v = os.environ.get(k)\n if v is not None:\n env[k] = v\n # LANGUAGE is used on win32\n env['LANGUAGE'] = 'C'\n env['LANG'] = 'C'\n env['LC_ALL'] = 'C'\n out = subprocess.Popen(\" \".join(cmd), stdout = subprocess.PIPE, env=env,\n shell=True).communicate()[0]\n return out\n\n try:\n out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])\n GIT_REVISION = out.strip().decode('ascii')\n except OSError:\n GIT_REVISION = \"Unknown\"\n\n return GIT_REVISION\n\ndef write_version_py(filename=pjoin(curdir, 'statsmodels/version.py')):\n cnt = \"\\n\".join([\"\",\n \"# THIS FILE IS GENERATED FROM SETUP.PY\",\n \"short_version = '%(version)s'\",\n \"version = '%(version)s'\",\n \"full_version = '%(full_version)s'\",\n \"git_revision = '%(git_revision)s'\",\n \"release = %(isrelease)s\", \"\",\n \"if not release:\",\n \" version = full_version\"])\n # Adding the git rev number needs to be done inside write_version_py(),\n # otherwise the import of numpy.version messes up the build under Python 3.\n FULLVERSION = VERSION\n dowrite = True\n if os.path.exists('.git'):\n GIT_REVISION = git_version()\n elif os.path.exists(filename):\n # 
must be a source distribution, use existing version file\n try:\n from statsmodels.version import git_revision as GIT_REVISION\n except ImportError:\n dowrite = False\n GIT_REVISION = \"Unknown\"\n else:\n GIT_REVISION = \"Unknown\"\n\n if not ISRELEASED:\n FULLVERSION += '.dev0+' + GIT_REVISION[:7]\n\n\n if dowrite:\n try:\n a = open(filename, 'w')\n a.write(cnt % {'version': VERSION,\n 'full_version' : FULLVERSION,\n 'git_revision' : GIT_REVISION,\n 'isrelease': str(ISRELEASED)})\n finally:\n a.close()\n\n\nclass CleanCommand(Command):\n \"\"\"Custom distutils command to clean the .so and .pyc files.\"\"\"\n\n user_options = [(\"all\", \"a\", \"\")]\n\n def initialize_options(self):\n self.all = True\n self._clean_me = []\n self._clean_trees = []\n self._clean_exclude = [\"bspline_ext.c\",\n \"bspline_impl.c\"]\n\n for root, dirs, files in list(os.walk('statsmodels')):\n for f in files:\n if f in self._clean_exclude:\n continue\n if os.path.splitext(f)[-1] in ('.pyc', '.so', '.o',\n '.pyo',\n '.pyd', '.c', '.orig'):\n self._clean_me.append(pjoin(root, f))\n for d in dirs:\n if d == '__pycache__':\n self._clean_trees.append(pjoin(root, d))\n\n for d in ('build',):\n if os.path.exists(d):\n self._clean_trees.append(d)\n\n def finalize_options(self):\n pass\n\n def run(self):\n for clean_me in self._clean_me:\n try:\n os.unlink(clean_me)\n except Exception:\n pass\n for clean_tree in self._clean_trees:\n try:\n import shutil\n shutil.rmtree(clean_tree)\n except Exception:\n pass\n\n\nclass CheckingBuildExt(build_ext):\n \"\"\"Subclass build_ext to get clearer report if Cython is necessary.\"\"\"\n\n def check_cython_extensions(self, extensions):\n for ext in extensions:\n for src in ext.sources:\n if not os.path.exists(src):\n raise Exception(\"\"\"Cython-generated file '%s' not found.\n Cython is required to compile statsmodels from a development branch.\n Please install Cython or download a source release of statsmodels.\n \"\"\" % src)\n\n def build_extensions(self):\n self.check_cython_extensions(self.extensions)\n build_ext.build_extensions(self)\n\n\nclass DummyBuildSrc(Command):\n \"\"\" numpy's build_src command interferes with Cython's build_ext.\n \"\"\"\n user_options = []\n\n def initialize_options(self):\n self.py_modules_dict = {}\n\n def finalize_options(self):\n pass\n\n def run(self):\n pass\n\n\ncmdclass = {'clean': CleanCommand,\n 'build': build}\n\ncmdclass[\"build_src\"] = DummyBuildSrc\ncmdclass[\"build_ext\"] = CheckingBuildExt\n\n\n# some linux distros require it\n#NOTE: we are not currently using this but add it to Extension, if needed.\n# libraries = ['m'] if 'win32' not in sys.platform else []\n\nfrom numpy.distutils.misc_util import get_info\n\nnpymath_info = get_info(\"npymath\")\next_data = dict(\n kalman_loglike = {\"name\" : \"statsmodels/tsa/kalmanf/kalman_loglike.c\",\n \"depends\" : [\"statsmodels/src/capsule.h\"],\n \"include_dirs\": [\"statsmodels/src\"],\n \"sources\" : []},\n _statespace = {\"name\" : \"statsmodels/tsa/statespace/_statespace.c\",\n \"depends\" : [\"statsmodels/src/capsule.h\"],\n \"include_dirs\": [\"statsmodels/src\"] + npymath_info['include_dirs'],\n \"libraries\": npymath_info['libraries'],\n \"library_dirs\": npymath_info['library_dirs'],\n \"sources\" : []},\n linbin = {\"name\" : \"statsmodels/nonparametric/linbin.c\",\n \"depends\" : [],\n \"sources\" : []},\n _smoothers_lowess = {\"name\" : \"statsmodels/nonparametric/_smoothers_lowess.c\",\n \"depends\" : [],\n \"sources\" : []}\n )\n\nextensions = []\nfor name, data in 
ext_data.items():\n data['sources'] = data.get('sources', []) + [data['name']]\n\n destdir = \".\".join(os.path.dirname(data[\"name\"]).split(\"/\"))\n data.pop('name')\n\n obj = Extension('%s.%s' % (destdir, name), **data)\n\n extensions.append(obj)\n\n\ndef get_data_files():\n sep = os.path.sep\n # install the datasets\n data_files = {}\n root = pjoin(curdir, \"statsmodels\", \"datasets\")\n for i in os.listdir(root):\n if i is \"tests\":\n continue\n path = pjoin(root, i)\n if os.path.isdir(path):\n data_files.update({relpath(path, start=curdir).replace(sep, \".\") : [\"*.csv\",\n \"*.dta\"]})\n # add all the tests and results files\n for r, ds, fs in os.walk(pjoin(curdir, \"statsmodels\")):\n r_ = relpath(r, start=curdir)\n if r_.endswith('results'):\n data_files.update({r_.replace(sep, \".\") : [\"*.csv\",\n \"*.txt\"]})\n\n return data_files\n\n\nif __name__ == \"__main__\":\n if os.path.exists('MANIFEST'):\n os.unlink('MANIFEST')\n\n min_versions = {\n 'numpy' : '1.4.0',\n 'scipy' : '0.7.0',\n 'pandas' : '0.7.1',\n 'patsy' : '0.1.0',\n }\n if sys.version_info[0] == 3 and sys.version_info[1] >= 3:\n # 3.3 needs numpy 1.7+\n min_versions.update({\"numpy\" : \"1.7.0b2\"})\n\n (setup_requires,\n install_requires) = check_dependency_versions(min_versions)\n\n if _have_setuptools:\n setuptools_kwargs['setup_requires'] = setup_requires\n setuptools_kwargs['install_requires'] = install_requires\n\n write_version_py()\n\n # this adds *.csv and *.dta files in datasets folders\n # and *.csv and *.txt files in test/results folders\n package_data = get_data_files()\n packages = find_packages()\n packages.append(\"statsmodels.tsa.vector_ar.data\")\n\n package_data[\"statsmodels.datasets.tests\"].append(\"*.zip\")\n package_data[\"statsmodels.iolib.tests.results\"].append(\"*.dta\")\n package_data[\"statsmodels.stats.tests.results\"].append(\"*.json\")\n package_data[\"statsmodels.tsa.vector_ar.tests.results\"].append(\"*.npz\")\n # data files that don't follow the tests/results pattern. should fix.\n package_data.update({\"statsmodels.stats.tests\" : [\"*.txt\"]})\n\n package_data.update({\"statsmodels.stats.libqsturng\" :\n [\"*.r\", \"*.txt\", \"*.dat\"]})\n package_data.update({\"statsmodels.stats.libqsturng.tests\" :\n [\"*.csv\", \"*.dat\"]})\n package_data.update({\"statsmodels.tsa.vector_ar.data\" : [\"*.dat\"]})\n package_data.update({\"statsmodels.tsa.vector_ar.data\" : [\"*.dat\"]})\n # temporary, until moved:\n package_data.update({\"statsmodels.sandbox.regression.tests\" :\n [\"*.dta\", \"*.csv\"]})\n\n #TODO: deal with this. Not sure if it ever worked for bdists\n #('docs/build/htmlhelp/statsmodelsdoc.chm',\n # 'statsmodels/statsmodelsdoc.chm')\n\n cwd = os.path.abspath(os.path.dirname(__file__))\n if not os.path.exists(os.path.join(cwd, 'PKG-INFO')) and not no_frills:\n # Generate Cython sources, unless building from source release\n generate_cython()\n\n setup(name = DISTNAME,\n version = VERSION,\n maintainer = MAINTAINER,\n ext_modules = extensions,\n maintainer_email = MAINTAINER_EMAIL,\n description = DESCRIPTION,\n license = LICENSE,\n url = URL,\n download_url = DOWNLOAD_URL,\n long_description = LONG_DESCRIPTION,\n classifiers = classifiers,\n platforms = 'any',\n cmdclass = cmdclass,\n packages = packages,\n package_data = package_data,\n include_package_data=False, # True will install all files in repo\n **setuptools_kwargs)\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index 9ba11cdc2ec..07e9708cf7e 100644
--- a/setup.py
+++ b/setup.py
@@ -225,7 +225,7 @@ def write_version_py(filename=pjoin(curdir, 'statsmodels/version.py')):
GIT_REVISION = "Unknown"
if not ISRELEASED:
- FULLVERSION += '.dev-' + GIT_REVISION[:7]
+ FULLVERSION += '.dev0+' + GIT_REVISION[:7]
if dowrite:
|
pypa__pipenv-5495 | Include missing package data for Safety
### The issue
#5491
### The fix
Include the missing package data for Safety.
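For illustration only, non-Python data files generally end up in the built wheel only if they are declared; a minimal, hypothetical `setuptools` sketch (package and file names below are placeholders, not the actual Pipenv change):

``` python
from setuptools import setup, find_packages

setup(
    name="example-project",  # placeholder project, not Pipenv itself
    packages=find_packages(),
    package_data={
        # Non-.py files (templates, certificates, YAML, ...) must be listed
        # here, or they are silently left out of the wheel/sdist.
        "example_project.vendored.safety": ["VERSION", "*.yml"],
    },
)
```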
### The checklist
* [ ] Build wheels and test that they work correctly.
| [
{
"content": "#!/usr/bin/env python\nimport codecs\nimport os\nimport sys\n\nfrom setuptools import find_packages, setup\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith codecs.open(os.path.join(here, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = \"\\n\" + f.read()\n\nabout = {}\n\nwith open(os.path.join(here, \"pipenv\", \"__version__.py\")) as f:\n exec(f.read(), about)\n\nif sys.argv[-1] == \"publish\":\n os.system(\"python setup.py sdist bdist_wheel upload\")\n sys.exit()\n\nrequired = [\n \"certifi\",\n \"setuptools>=36.2.1\",\n \"virtualenv-clone>=0.2.5\",\n \"virtualenv\",\n]\nextras = {\n \"dev\": [\n \"towncrier\",\n \"bs4\",\n \"sphinx\",\n \"flake8>=3.3.0,<4.0\",\n \"black;python_version>='3.7'\",\n \"parver\",\n \"invoke\",\n ],\n \"tests\": [\"pytest>=5.0\", \"pytest-timeout\", \"pytest-xdist\", \"flaky\", \"mock\"],\n}\n\n\nsetup(\n name=\"pipenv\",\n version=about[\"__version__\"],\n description=\"Python Development Workflow for Humans.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Pipenv maintainer team\",\n author_email=\"[email protected]\",\n url=\"https://github.com/pypa/pipenv\",\n packages=find_packages(exclude=[\"tests\", \"tests.*\", \"tasks\", \"tasks.*\"]),\n entry_points={\n \"console_scripts\": [\n \"pipenv=pipenv:cli\",\n \"pipenv-resolver=pipenv.resolver:main\",\n ]\n },\n package_data={\n \"\": [\"LICENSE\", \"NOTICES\"],\n \"pipenv.patched.safety\": [\"VERSION\", \"safety-policy-template.yml\"],\n \"pipenv.patched.pip._vendor.certifi\": [\"*.pem\"],\n \"pipenv.patched.pip._vendor.requests\": [\"*.pem\"],\n \"pipenv.patched.pip._vendor.distlib._backport\": [\"sysconfig.cfg\"],\n \"pipenv.patched.pip._vendor.distlib\": [\n \"t32.exe\",\n \"t64.exe\",\n \"w32.exe\",\n \"w64.exe\",\n ],\n },\n python_requires=\">=3.7\",\n zip_safe=True,\n setup_requires=[],\n install_requires=required,\n extras_require=extras,\n include_package_data=True,\n license=\"MIT\",\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n ],\n)\n",
"path": "setup.py"
}
] | [
{
"content": "#!/usr/bin/env python\nimport codecs\nimport os\nimport sys\n\nfrom setuptools import find_packages, setup\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith codecs.open(os.path.join(here, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = \"\\n\" + f.read()\n\nabout = {}\n\nwith open(os.path.join(here, \"pipenv\", \"__version__.py\")) as f:\n exec(f.read(), about)\n\nif sys.argv[-1] == \"publish\":\n os.system(\"python setup.py sdist bdist_wheel upload\")\n sys.exit()\n\nrequired = [\n \"certifi\",\n \"setuptools>=36.2.1\",\n \"virtualenv-clone>=0.2.5\",\n \"virtualenv\",\n]\nextras = {\n \"dev\": [\n \"towncrier\",\n \"bs4\",\n \"sphinx\",\n \"flake8>=3.3.0,<4.0\",\n \"black;python_version>='3.7'\",\n \"parver\",\n \"invoke\",\n ],\n \"tests\": [\"pytest>=5.0\", \"pytest-timeout\", \"pytest-xdist\", \"flaky\", \"mock\"],\n}\n\n\nsetup(\n name=\"pipenv\",\n version=about[\"__version__\"],\n description=\"Python Development Workflow for Humans.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Pipenv maintainer team\",\n author_email=\"[email protected]\",\n url=\"https://github.com/pypa/pipenv\",\n packages=find_packages(exclude=[\"tests\", \"tests.*\", \"tasks\", \"tasks.*\"]),\n entry_points={\n \"console_scripts\": [\n \"pipenv=pipenv:cli\",\n \"pipenv-resolver=pipenv.resolver:main\",\n ]\n },\n package_data={\n \"\": [\"LICENSE\", \"NOTICES\"],\n \"pipenv.patched.safety\": [\"VERSION\", \"safety-policy-template.yml\"],\n \"pipenv.patched.pip._vendor.certifi\": [\"*.pem\"],\n \"pipenv.patched.pip._vendor.requests\": [\"*.pem\"],\n \"pipenv.patched.pip._vendor.distlib._backport\": [\"sysconfig.cfg\"],\n \"pipenv.patched.pip._vendor.distlib\": [\n \"t32.exe\",\n \"t64.exe\",\n \"w32.exe\",\n \"w64.exe\",\n ],\n \"pipenv.vendor.ruamel\": [\"yaml\"],\n },\n python_requires=\">=3.7\",\n zip_safe=True,\n setup_requires=[],\n install_requires=required,\n extras_require=extras,\n include_package_data=True,\n license=\"MIT\",\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n ],\n)\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index 73d52ff3d9..beb4818108 100644
--- a/setup.py
+++ b/setup.py
@@ -67,6 +67,7 @@
"w32.exe",
"w64.exe",
],
+ "pipenv.vendor.ruamel": ["yaml"],
},
python_requires=">=3.7",
zip_safe=True,
|
python-telegram-bot__python-telegram-bot-1485 | Use UTC dates
https://github.com/python-telegram-bot/python-telegram-bot/blob/439790375ed8ed493c43e464aa8e2b60a77939db/telegram/utils/helpers.py#L78-L90
Should probably be using `tz=timezone.utc`. Python's `datetime` isn't the best, and `fromtimestamp` by default attaches no `tz` information, so the result is interpreted in the machine's local time, which is generally a bad idea.
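For illustration, a minimal stand-alone sketch of the difference (the timestamp value below is just an arbitrary example):

``` python
from datetime import datetime, timezone

ts = 1500000000  # arbitrary example Unix timestamp

naive = datetime.fromtimestamp(ts)                   # no tzinfo: interpreted in local time
aware = datetime.fromtimestamp(ts, tz=timezone.utc)  # timezone-aware UTC datetime

print(naive.tzinfo)  # None, so the wall-clock value depends on the machine's time zone
print(aware.tzinfo)  # UTC, so the value is reproducible everywhere
```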
| [
{
"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2018\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains helper functions.\"\"\"\nfrom collections import defaultdict\n\ntry:\n import ujson as json\nexcept ImportError:\n import json\nfrom html import escape\n\nimport re\nimport signal\nfrom datetime import datetime\n\n# From https://stackoverflow.com/questions/2549939/get-signal-names-from-numbers-in-python\n_signames = {v: k\n for k, v in reversed(sorted(vars(signal).items()))\n if k.startswith('SIG') and not k.startswith('SIG_')}\n\n\ndef get_signal_name(signum):\n \"\"\"Returns the signal name of the given signal number.\"\"\"\n return _signames[signum]\n\n\n# Not using future.backports.datetime here as datetime value might be an input from the user,\n# making every isinstace() call more delicate. So we just use our own compat layer.\nif hasattr(datetime, 'timestamp'):\n # Python 3.3+\n def _timestamp(dt_obj):\n return dt_obj.timestamp()\nelse:\n # Python < 3.3 (incl 2.7)\n from time import mktime\n\n def _timestamp(dt_obj):\n return mktime(dt_obj.timetuple())\n\n\ndef escape_markdown(text):\n \"\"\"Helper function to escape telegram markup symbols.\"\"\"\n escape_chars = '\\*_`\\['\n return re.sub(r'([%s])' % escape_chars, r'\\\\\\1', text)\n\n\ndef to_timestamp(dt_obj):\n \"\"\"\n Args:\n dt_obj (:class:`datetime.datetime`):\n\n Returns:\n int:\n\n \"\"\"\n if not dt_obj:\n return None\n\n return int(_timestamp(dt_obj))\n\n\ndef from_timestamp(unixtime):\n \"\"\"\n Args:\n unixtime (int):\n\n Returns:\n datetime.datetime:\n\n \"\"\"\n if not unixtime:\n return None\n\n return datetime.fromtimestamp(unixtime)\n\n\ndef mention_html(user_id, name):\n \"\"\"\n Args:\n user_id (:obj:`int`) The user's id which you want to mention.\n name (:obj:`str`) The name the mention is showing.\n\n Returns:\n :obj:`str`: The inline mention for the user as html.\n \"\"\"\n if isinstance(user_id, int):\n return u'<a href=\"tg://user?id={}\">{}</a>'.format(user_id, escape(name))\n\n\ndef mention_markdown(user_id, name):\n \"\"\"\n Args:\n user_id (:obj:`int`) The user's id which you want to mention.\n name (:obj:`str`) The name the mention is showing.\n\n Returns:\n :obj:`str`: The inline mention for the user as markdown.\n \"\"\"\n if isinstance(user_id, int):\n return u'[{}](tg://user?id={})'.format(escape_markdown(name), user_id)\n\n\ndef effective_message_type(entity):\n \"\"\"\n Extracts the type of message as a string identifier from a :class:`telegram.Message` or a\n :class:`telegram.Update`.\n\n Args:\n entity (:obj:`Update` | :obj:`Message`) The ``update`` or ``message`` to extract from\n\n Returns:\n str: One of ``Message.MESSAGE_TYPES``\n\n \"\"\"\n\n # Importing on file-level yields cyclic Import Errors\n from telegram import Message\n from telegram import 
Update\n\n if isinstance(entity, Message):\n message = entity\n elif isinstance(entity, Update):\n message = entity.effective_message\n else:\n raise TypeError(\"entity is not Message or Update (got: {})\".format(type(entity)))\n\n for i in Message.MESSAGE_TYPES:\n if getattr(message, i, None):\n return i\n\n return None\n\n\ndef enocde_conversations_to_json(conversations):\n \"\"\"Helper method to encode a conversations dict (that uses tuples as keys) to a\n JSON-serializable way. Use :attr:`_decode_conversations_from_json` to decode.\n\n Args:\n conversations (:obj:`dict`): The conversations dict to transofrm to JSON.\n\n Returns:\n :obj:`str`: The JSON-serialized conversations dict\n \"\"\"\n tmp = {}\n for handler, states in conversations.items():\n tmp[handler] = {}\n for key, state in states.items():\n tmp[handler][json.dumps(key)] = state\n return json.dumps(tmp)\n\n\ndef decode_conversations_from_json(json_string):\n \"\"\"Helper method to decode a conversations dict (that uses tuples as keys) from a\n JSON-string created with :attr:`_encode_conversations_to_json`.\n\n Args:\n json_string (:obj:`str`): The conversations dict as JSON string.\n\n Returns:\n :obj:`dict`: The conversations dict after decoding\n \"\"\"\n tmp = json.loads(json_string)\n conversations = {}\n for handler, states in tmp.items():\n conversations[handler] = {}\n for key, state in states.items():\n conversations[handler][tuple(json.loads(key))] = state\n return conversations\n\n\ndef decode_user_chat_data_from_json(data):\n \"\"\"Helper method to decode chat or user data (that uses ints as keys) from a\n JSON-string.\n\n Args:\n data (:obj:`str`): The user/chat_data dict as JSON string.\n\n Returns:\n :obj:`dict`: The user/chat_data defaultdict after decoding\n \"\"\"\n\n tmp = defaultdict(dict)\n decoded_data = json.loads(data)\n for user, data in decoded_data.items():\n user = int(user)\n tmp[user] = {}\n for key, value in data.items():\n try:\n key = int(key)\n except ValueError:\n pass\n tmp[user][key] = value\n return tmp\n",
"path": "telegram/utils/helpers.py"
}
] | [
{
"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2018\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains helper functions.\"\"\"\nfrom collections import defaultdict\n\ntry:\n import ujson as json\nexcept ImportError:\n import json\nfrom html import escape\n\nimport re\nimport signal\nfrom datetime import datetime\n\n# From https://stackoverflow.com/questions/2549939/get-signal-names-from-numbers-in-python\n_signames = {v: k\n for k, v in reversed(sorted(vars(signal).items()))\n if k.startswith('SIG') and not k.startswith('SIG_')}\n\n\ndef get_signal_name(signum):\n \"\"\"Returns the signal name of the given signal number.\"\"\"\n return _signames[signum]\n\n\n# Not using future.backports.datetime here as datetime value might be an input from the user,\n# making every isinstace() call more delicate. So we just use our own compat layer.\nif hasattr(datetime, 'timestamp'):\n # Python 3.3+\n def _timestamp(dt_obj):\n return dt_obj.timestamp()\nelse:\n # Python < 3.3 (incl 2.7)\n from time import mktime\n\n def _timestamp(dt_obj):\n return mktime(dt_obj.timetuple())\n\n\ndef escape_markdown(text):\n \"\"\"Helper function to escape telegram markup symbols.\"\"\"\n escape_chars = '\\*_`\\['\n return re.sub(r'([%s])' % escape_chars, r'\\\\\\1', text)\n\n\ndef to_timestamp(dt_obj):\n \"\"\"\n Args:\n dt_obj (:class:`datetime.datetime`):\n\n Returns:\n int:\n\n \"\"\"\n if not dt_obj:\n return None\n\n return int(_timestamp(dt_obj))\n\n\ndef from_timestamp(unixtime):\n \"\"\"\n Args:\n unixtime (int):\n\n Returns:\n datetime.datetime:\n\n \"\"\"\n if not unixtime:\n return None\n\n return datetime.utcfromtimestamp(unixtime)\n\n\ndef mention_html(user_id, name):\n \"\"\"\n Args:\n user_id (:obj:`int`) The user's id which you want to mention.\n name (:obj:`str`) The name the mention is showing.\n\n Returns:\n :obj:`str`: The inline mention for the user as html.\n \"\"\"\n if isinstance(user_id, int):\n return u'<a href=\"tg://user?id={}\">{}</a>'.format(user_id, escape(name))\n\n\ndef mention_markdown(user_id, name):\n \"\"\"\n Args:\n user_id (:obj:`int`) The user's id which you want to mention.\n name (:obj:`str`) The name the mention is showing.\n\n Returns:\n :obj:`str`: The inline mention for the user as markdown.\n \"\"\"\n if isinstance(user_id, int):\n return u'[{}](tg://user?id={})'.format(escape_markdown(name), user_id)\n\n\ndef effective_message_type(entity):\n \"\"\"\n Extracts the type of message as a string identifier from a :class:`telegram.Message` or a\n :class:`telegram.Update`.\n\n Args:\n entity (:obj:`Update` | :obj:`Message`) The ``update`` or ``message`` to extract from\n\n Returns:\n str: One of ``Message.MESSAGE_TYPES``\n\n \"\"\"\n\n # Importing on file-level yields cyclic Import Errors\n from telegram import Message\n from telegram import 
Update\n\n if isinstance(entity, Message):\n message = entity\n elif isinstance(entity, Update):\n message = entity.effective_message\n else:\n raise TypeError(\"entity is not Message or Update (got: {})\".format(type(entity)))\n\n for i in Message.MESSAGE_TYPES:\n if getattr(message, i, None):\n return i\n\n return None\n\n\ndef enocde_conversations_to_json(conversations):\n \"\"\"Helper method to encode a conversations dict (that uses tuples as keys) to a\n JSON-serializable way. Use :attr:`_decode_conversations_from_json` to decode.\n\n Args:\n conversations (:obj:`dict`): The conversations dict to transofrm to JSON.\n\n Returns:\n :obj:`str`: The JSON-serialized conversations dict\n \"\"\"\n tmp = {}\n for handler, states in conversations.items():\n tmp[handler] = {}\n for key, state in states.items():\n tmp[handler][json.dumps(key)] = state\n return json.dumps(tmp)\n\n\ndef decode_conversations_from_json(json_string):\n \"\"\"Helper method to decode a conversations dict (that uses tuples as keys) from a\n JSON-string created with :attr:`_encode_conversations_to_json`.\n\n Args:\n json_string (:obj:`str`): The conversations dict as JSON string.\n\n Returns:\n :obj:`dict`: The conversations dict after decoding\n \"\"\"\n tmp = json.loads(json_string)\n conversations = {}\n for handler, states in tmp.items():\n conversations[handler] = {}\n for key, state in states.items():\n conversations[handler][tuple(json.loads(key))] = state\n return conversations\n\n\ndef decode_user_chat_data_from_json(data):\n \"\"\"Helper method to decode chat or user data (that uses ints as keys) from a\n JSON-string.\n\n Args:\n data (:obj:`str`): The user/chat_data dict as JSON string.\n\n Returns:\n :obj:`dict`: The user/chat_data defaultdict after decoding\n \"\"\"\n\n tmp = defaultdict(dict)\n decoded_data = json.loads(data)\n for user, data in decoded_data.items():\n user = int(user)\n tmp[user] = {}\n for key, value in data.items():\n try:\n key = int(key)\n except ValueError:\n pass\n tmp[user][key] = value\n return tmp\n",
"path": "telegram/utils/helpers.py"
}
] | diff --git a/telegram/utils/helpers.py b/telegram/utils/helpers.py
index 740c19ff1bd..c7697db4239 100644
--- a/telegram/utils/helpers.py
+++ b/telegram/utils/helpers.py
@@ -87,7 +87,7 @@ def from_timestamp(unixtime):
if not unixtime:
return None
- return datetime.fromtimestamp(unixtime)
+ return datetime.utcfromtimestamp(unixtime)
def mention_html(user_id, name):
|
rasterio__rasterio-1305 | Add `GetMetadataItem` like method
Ref: https://github.com/mapbox/rasterio/issues/1077,
I'm proposing to add a new method in https://github.com/mapbox/rasterio/blob/master/rasterio/_base.pyx to replicate GDAL GetMetadataItem
**Method Name:** `get_metadata_item` or `get_metadata`
**Why:** I need to be able to get TIFF metadata like `band.GetMetadataItem('IFD_OFFSET', 'TIFF')`
**Code:**
```cython
def get_metadata(self, bidx, ns, dm=None, ovr=None):
    """Returns metadata item

    Parameters
    ----------
    bidx: int
        Band index, starting with 1.
    name: str
        The key for the metadata item to fetch.
    domain: str
        The domain to fetch for.
    ovr: int
        Overview level

    Returns
    -------
    str
    """
    cdef GDALMajorObjectH b = NULL
    cdef GDALMajorObjectH obj = NULL
    cdef char *value = NULL
    cdef const char *name = NULL
    cdef const char *domain = NULL

    ns = ns.encode('utf-8')
    name = ns

    if dm:
        dm = dm.encode('utf-8')
        domain = dm

    b = self.band(bidx)
    if ovr:
        b = GDALGetOverview(b, ovr)
    obj = b

    value = GDALGetMetadataItem(obj, name, domain)
    if value == NULL:
        return None
    else:
        return value
```
@sgillies I'm happy to submit a PR for that :-)
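For context, a hypothetical call site for the proposed method, assuming it ends up exposed on an open dataset object (the file name is a placeholder, and the final name/signature may differ):

``` python
import rasterio

# Hypothetical usage of the proposed method; "example.tif" is a placeholder.
with rasterio.open("example.tif") as src:
    ifd_offset = src.get_metadata(1, "IFD_OFFSET", dm="TIFF")         # band 1, TIFF domain
    ovr_offset = src.get_metadata(1, "IFD_OFFSET", dm="TIFF", ovr=1)  # same tag on overview 1
    print(ifd_offset, ovr_offset)
```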
| [
{
"content": "\"\"\"Errors and Warnings.\"\"\"\n\nfrom click import FileError\n\n\nclass RasterioError(Exception):\n \"\"\"Root exception class\"\"\"\n\n\nclass WindowError(RasterioError):\n \"\"\"Raised when errors occur during window operations\"\"\"\n\n\nclass CRSError(ValueError):\n \"\"\"Raised when a CRS string or mapping is invalid or cannot serve\n to define a coordinate transformation.\"\"\"\n\n\nclass EnvError(RasterioError):\n \"\"\"Raised when the state of GDAL/AWS environment cannot be created\n or modified.\"\"\"\n\n\nclass DriverRegistrationError(ValueError):\n \"\"\"Raised when a format driver is requested but is not registered.\"\"\"\n\n\nclass FileOverwriteError(FileError):\n \"\"\"Raised when Rasterio's CLI refuses to clobber output files.\"\"\"\n\n def __init__(self, message):\n \"\"\"Raise FileOverwriteError with message as hint.\"\"\"\n super(FileOverwriteError, self).__init__('', hint=message)\n\n\nclass RasterioIOError(IOError):\n \"\"\"Raised when a dataset cannot be opened using one of the\n registered format drivers.\"\"\"\n\n\nclass NodataShadowWarning(UserWarning):\n \"\"\"Warn that a dataset's nodata attribute is shadowing its alpha band.\"\"\"\n\n def __str__(self):\n return (\"The dataset's nodata attribute is shadowing \"\n \"the alpha band. All masks will be determined \"\n \"by the nodata attribute\")\n\n\nclass NotGeoreferencedWarning(UserWarning):\n \"\"\"Warn that a dataset isn't georeferenced.\"\"\"\n\n\nclass GDALBehaviorChangeException(RuntimeError):\n \"\"\"Raised when GDAL's behavior differs from the given arguments. For\n example, antimeridian cutting is always on as of GDAL 2.2.0. Users\n expecting it to be off will be presented with a MultiPolygon when the\n rest of their code expects a Polygon.\n\n # Raises an exception on GDAL >= 2.2.0\n rasterio.warp.transform_geometry(\n src_crs, dst_crs, antimeridian_cutting=False)\n \"\"\"\n\n\nclass GDALOptionNotImplementedError(RasterioError):\n \"\"\"A dataset opening or dataset creation option can't be supported\n\n This will be raised from Rasterio's shim modules. For example, when\n a user passes arguments to open_dataset() that can't be evaluated\n by GDAL 1.x.\n \"\"\"\n\nclass GDALVersionError(RasterioError):\n \"\"\"Raised if the runtime version of GDAL does not meet the required\n version of GDAL.\"\"\"\n\n\nclass WindowEvaluationError(ValueError):\n \"\"\"Raised when window evaluation fails\"\"\"\n\n\nclass RasterioDeprecationWarning(UserWarning):\n \"\"\"Rasterio module deprecations\"\"\"\n\n\nclass RasterBlockError(RasterioError):\n \"\"\"Raised when raster block access fails\"\"\"\n",
"path": "rasterio/errors.py"
}
] | [
{
"content": "\"\"\"Errors and Warnings.\"\"\"\n\nfrom click import FileError\n\n\nclass RasterioError(Exception):\n \"\"\"Root exception class\"\"\"\n\n\nclass WindowError(RasterioError):\n \"\"\"Raised when errors occur during window operations\"\"\"\n\n\nclass CRSError(ValueError):\n \"\"\"Raised when a CRS string or mapping is invalid or cannot serve\n to define a coordinate transformation.\"\"\"\n\n\nclass EnvError(RasterioError):\n \"\"\"Raised when the state of GDAL/AWS environment cannot be created\n or modified.\"\"\"\n\n\nclass DriverRegistrationError(ValueError):\n \"\"\"Raised when a format driver is requested but is not registered.\"\"\"\n\n\nclass FileOverwriteError(FileError):\n \"\"\"Raised when Rasterio's CLI refuses to clobber output files.\"\"\"\n\n def __init__(self, message):\n \"\"\"Raise FileOverwriteError with message as hint.\"\"\"\n super(FileOverwriteError, self).__init__('', hint=message)\n\n\nclass RasterioIOError(IOError):\n \"\"\"Raised when a dataset cannot be opened using one of the\n registered format drivers.\"\"\"\n\n\nclass NodataShadowWarning(UserWarning):\n \"\"\"Warn that a dataset's nodata attribute is shadowing its alpha band.\"\"\"\n\n def __str__(self):\n return (\"The dataset's nodata attribute is shadowing \"\n \"the alpha band. All masks will be determined \"\n \"by the nodata attribute\")\n\n\nclass NotGeoreferencedWarning(UserWarning):\n \"\"\"Warn that a dataset isn't georeferenced.\"\"\"\n\n\nclass GDALBehaviorChangeException(RuntimeError):\n \"\"\"Raised when GDAL's behavior differs from the given arguments. For\n example, antimeridian cutting is always on as of GDAL 2.2.0. Users\n expecting it to be off will be presented with a MultiPolygon when the\n rest of their code expects a Polygon.\n\n # Raises an exception on GDAL >= 2.2.0\n rasterio.warp.transform_geometry(\n src_crs, dst_crs, antimeridian_cutting=False)\n \"\"\"\n\n\nclass GDALOptionNotImplementedError(RasterioError):\n \"\"\"A dataset opening or dataset creation option can't be supported\n\n This will be raised from Rasterio's shim modules. For example, when\n a user passes arguments to open_dataset() that can't be evaluated\n by GDAL 1.x.\n \"\"\"\n\nclass GDALVersionError(RasterioError):\n \"\"\"Raised if the runtime version of GDAL does not meet the required\n version of GDAL.\"\"\"\n\n\nclass WindowEvaluationError(ValueError):\n \"\"\"Raised when window evaluation fails\"\"\"\n\n\nclass RasterioDeprecationWarning(UserWarning):\n \"\"\"Rasterio module deprecations\"\"\"\n\n\nclass RasterBlockError(RasterioError):\n \"\"\"Raised when raster block access fails\"\"\"\n\n\nclass BandOverviewError(UserWarning):\n \"\"\"Raised when a band overview access fails.\"\"\"\n",
"path": "rasterio/errors.py"
}
] | diff --git a/rasterio/_base.pyx b/rasterio/_base.pyx
index 4677caf4c..dcc7d6393 100644
--- a/rasterio/_base.pyx
+++ b/rasterio/_base.pyx
@@ -25,8 +25,8 @@ from rasterio.enums import (
ColorInterp, Compression, Interleaving, MaskFlags, PhotometricInterp)
from rasterio.env import Env
from rasterio.errors import (
- RasterioIOError, CRSError, DriverRegistrationError,
- NotGeoreferencedWarning, RasterBlockError)
+ RasterioIOError, CRSError, DriverRegistrationError, NotGeoreferencedWarning,
+ RasterBlockError, BandOverviewError)
from rasterio.profiles import Profile
from rasterio.transform import Affine, guard_transform, tastes_like_gdal
from rasterio.vfs import parse_path, vsi_path
@@ -782,17 +782,72 @@ cdef class DatasetBase(object):
num_items = CSLCount(metadata)
return dict(metadata[i].split('=', 1) for i in range(num_items))
+
+ def get_tag_item(self, ns, dm=None, bidx=0, ovr=None):
+ """Returns tag item value
+
+ Parameters
+ ----------
+ ns: str
+ The key for the metadata item to fetch.
+ dm: str
+ The domain to fetch for.
+ bidx: int
+ Band index, starting with 1.
+ ovr: int
+ Overview level
+
+ Returns
+ -------
+ str
+ """
+ cdef GDALMajorObjectH band = NULL
+ cdef GDALMajorObjectH obj = NULL
+ cdef char *value = NULL
+ cdef const char *name = NULL
+ cdef const char *domain = NULL
+
+ ns = ns.encode('utf-8')
+ name = ns
+
+ if dm:
+ dm = dm.encode('utf-8')
+ domain = dm
+
+ if not bidx > 0 and ovr:
+ raise Exception("Band index (bidx) option needed for overview level")
+
+ if bidx > 0:
+ band = self.band(bidx)
+ else:
+ band = self._hds
+
+ if ovr:
+ obj = GDALGetOverview(band, ovr)
+ if obj == NULL:
+ raise BandOverviewError(
+ "Failed to retrieve overview {}".format(ovr))
+ else:
+ obj = band
+
+ value = GDALGetMetadataItem(obj, name, domain)
+ if value == NULL:
+ return None
+ else:
+ return value
+
+
property colorinterp:
"""Returns a sequence of ``ColorInterp.<enum>`` representing
color interpretation in band order.
-
+
To set color interpretation, provide a sequence of
``ColorInterp.<enum>``:
-
+
import rasterio
from rasterio.enums import ColorInterp
-
+
with rasterio.open('rgba.tif', 'r+') as src:
src.colorinterp = (
ColorInterp.red,
diff --git a/rasterio/errors.py b/rasterio/errors.py
index d2a1c2532..f42cfda18 100644
--- a/rasterio/errors.py
+++ b/rasterio/errors.py
@@ -86,3 +86,7 @@ class RasterioDeprecationWarning(UserWarning):
class RasterBlockError(RasterioError):
"""Raised when raster block access fails"""
+
+
+class BandOverviewError(UserWarning):
+ """Raised when a band overview access fails."""
diff --git a/tests/data/cogeo.tif b/tests/data/cogeo.tif
new file mode 100644
index 000000000..7d40bfb64
Binary files /dev/null and b/tests/data/cogeo.tif differ
diff --git a/tests/test_tag_item.py b/tests/test_tag_item.py
new file mode 100644
index 000000000..d44ea9236
--- /dev/null
+++ b/tests/test_tag_item.py
@@ -0,0 +1,34 @@
+#-*- coding: utf-8 -*-
+import logging
+import sys
+
+import pytest
+
+from .conftest import requires_gdal22
+
+import rasterio
+from rasterio.errors import BandOverviewError
+
+logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
+
+
+def test_get_tag_item():
+ with rasterio.open('tests/data/cogeo.tif') as src:
+ assert src.get_tag_item('INTERLEAVE', 'IMAGE_STRUCTURE') == 'PIXEL'
+
+
+@requires_gdal22(reason="Access to IFD tags requires GDAL 2.2+")
+def test_get_tag_item_Tiff():
+ with rasterio.open('tests/data/cogeo.tif') as src:
+ assert src.get_tag_item('IFD_OFFSET', 'TIFF', bidx=1) == '8'
+ assert src.get_tag_item('IFD_OFFSET', 'TIFF', bidx=1, ovr=1) == '1504'
+ assert not src.get_tag_item('IF', 'TIFF', bidx=1)
+ with pytest.raises(Exception):
+ src.get_tag_item('IFD_OFFSET', 'TIFF', ovr=1)
+
+
+@requires_gdal22(reason="Access to IFD tags requires GDAL 2.2+")
+def test_get_tag_item_noOverview():
+ with rasterio.open('tests/data/rgb3.tif') as src:
+ with pytest.raises(BandOverviewError):
+ src.get_tag_item('IFD_OFFSET', 'TIFF', bidx=1, ovr=1)
|
adamchainz__django-perf-rec-320 | Doesn't capture cache.get_or_set
I would expect `cache.get_or_set` to be recorded either as a `get` and (optionally) a `set`, or perhaps as its own entry. Instead, it seems to be ignored completely:
``` python
class TestCache(TestCase):
    def test_get(self):
        with django_perf_rec.record():
            django_cache.get('foo')

    def test_set(self):
        with django_perf_rec.record():
            django_cache.set('foo', 42)

    def test_get_or_set(self):
        with django_perf_rec.record():
            django_cache.get_or_set('foo', 42)
```
This results in
``` yaml
TestCache.test_get:
- cache|get: foo
TestCache.test_get_or_set: []
TestCache.test_set:
- cache|set: foo
```
It looks like this is a result of some code which aims to identify "internal" usages of the cache, though I'm not really sure.
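For reference, the check in question appears to be the frame inspection inside `CacheRecorder` (its source is attached below); a condensed sketch of that logic, with the patching machinery stripped away (not the exact library code):

``` python
import inspect
from functools import wraps


def wrap(func, callback):
    # Condensed from CacheRecorder.__enter__; simplified for illustration.
    @wraps(func)
    def inner(self, *args, **kwargs):
        frame = inspect.currentframe()
        try:
            # When get_or_set() calls self.get()/self.add(), the caller's
            # `self` is the cache instance itself, so the nested operation
            # is treated as "internal" and never reaches the callback.
            is_internal_call = frame.f_back.f_locals.get("self", None) is self
        finally:
            del frame
        if not is_internal_call:
            callback(func.__name__, args[0])
        return func(*args, **kwargs)
    return inner
```

On top of that, `get_or_set` is not listed in `cache_methods`, so the outer call is never wrapped at all, which is why nothing shows up in the recording; the accompanying diff addresses this by adding `get_or_set` to that tuple.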
| [
{
"content": "import inspect\nimport re\nimport traceback\nfrom collections.abc import Mapping, Sequence\nfrom functools import wraps\nfrom types import MethodType\n\nfrom django.core.cache import DEFAULT_CACHE_ALIAS, caches\n\nfrom django_perf_rec.operation import AllSourceRecorder, Operation\n\n\nclass CacheOp(Operation):\n def __init__(self, alias, operation, key_or_keys, traceback):\n self.alias = alias\n self.operation = operation\n if isinstance(key_or_keys, str):\n cleaned_key_or_keys = self.clean_key(key_or_keys)\n elif isinstance(key_or_keys, (Mapping, Sequence)):\n cleaned_key_or_keys = sorted(self.clean_key(k) for k in key_or_keys)\n else:\n raise ValueError(\"key_or_keys must be a string, mapping, or sequence\")\n\n super().__init__(alias, cleaned_key_or_keys, traceback)\n\n @classmethod\n def clean_key(cls, key):\n \"\"\"\n Replace things that look like variables with a '#' so tests aren't\n affected by random variables\n \"\"\"\n for var_re in cls.VARIABLE_RES:\n key = var_re.sub(\"#\", key)\n return key\n\n VARIABLE_RES = (\n # Django session keys for 'cache' backend\n re.compile(r\"(?<=django\\.contrib\\.sessions\\.cache)[0-9a-z]{32}\\b\"),\n # Django session keys for 'cached_db' backend\n re.compile(r\"(?<=django\\.contrib\\.sessions\\.cached_db)[0-9a-z]{32}\\b\"),\n # Long random hashes\n re.compile(r\"\\b[0-9a-f]{32}\\b\"),\n # UUIDs\n re.compile(r\"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\"),\n # Integers\n re.compile(r\"\\d+\"),\n )\n\n def __eq__(self, other):\n return super().__eq__(other) and self.operation == other.operation\n\n @property\n def name(self):\n name_parts = [\"cache\"]\n if self.alias != DEFAULT_CACHE_ALIAS:\n name_parts.append(self.alias)\n name_parts.append(self.operation)\n return \"|\".join(name_parts)\n\n\nclass CacheRecorder:\n \"\"\"\n Monkey patches a cache class to call 'callback' on every operation it calls\n \"\"\"\n\n def __init__(self, alias, callback):\n self.alias = alias\n self.callback = callback\n\n def __enter__(self):\n cache = caches[self.alias]\n\n def call_callback(func):\n alias = self.alias\n callback = self.callback\n\n @wraps(func)\n def inner(self, *args, **kwargs):\n # Ignore operations from the cache class calling itself\n\n # Get the self of the parent via stack inspection\n frame = inspect.currentframe()\n try:\n frame = frame.f_back\n is_internal_call = frame.f_locals.get(\"self\", None) is self\n finally:\n # Always delete frame references to help garbage collector\n del frame\n\n if not is_internal_call:\n key_or_keys = args[0]\n callback(\n CacheOp(\n alias=alias,\n operation=str(func.__name__),\n key_or_keys=key_or_keys,\n traceback=traceback.extract_stack(),\n )\n )\n\n return func(*args, **kwargs)\n\n return inner\n\n self.orig_methods = {name: getattr(cache, name) for name in self.cache_methods}\n for name in self.cache_methods:\n orig_method = self.orig_methods[name]\n setattr(cache, name, MethodType(call_callback(orig_method), cache))\n\n def __exit__(self, _, __, ___):\n cache = caches[self.alias]\n for name in self.cache_methods:\n setattr(cache, name, self.orig_methods[name])\n del self.orig_methods\n\n cache_methods = (\n \"add\",\n \"decr\",\n \"delete\",\n \"delete_many\",\n \"get\",\n \"get_many\",\n \"incr\",\n \"set\",\n \"set_many\",\n )\n\n\nclass AllCacheRecorder(AllSourceRecorder):\n \"\"\"\n Launches CacheRecorders on all the active caches\n \"\"\"\n\n sources_setting = \"CACHES\"\n recorder_class = CacheRecorder\n",
"path": "src/django_perf_rec/cache.py"
}
] | [
{
"content": "import inspect\nimport re\nimport traceback\nfrom collections.abc import Mapping, Sequence\nfrom functools import wraps\nfrom types import MethodType\n\nfrom django.core.cache import DEFAULT_CACHE_ALIAS, caches\n\nfrom django_perf_rec.operation import AllSourceRecorder, Operation\n\n\nclass CacheOp(Operation):\n def __init__(self, alias, operation, key_or_keys, traceback):\n self.alias = alias\n self.operation = operation\n if isinstance(key_or_keys, str):\n cleaned_key_or_keys = self.clean_key(key_or_keys)\n elif isinstance(key_or_keys, (Mapping, Sequence)):\n cleaned_key_or_keys = sorted(self.clean_key(k) for k in key_or_keys)\n else:\n raise ValueError(\"key_or_keys must be a string, mapping, or sequence\")\n\n super().__init__(alias, cleaned_key_or_keys, traceback)\n\n @classmethod\n def clean_key(cls, key):\n \"\"\"\n Replace things that look like variables with a '#' so tests aren't\n affected by random variables\n \"\"\"\n for var_re in cls.VARIABLE_RES:\n key = var_re.sub(\"#\", key)\n return key\n\n VARIABLE_RES = (\n # Django session keys for 'cache' backend\n re.compile(r\"(?<=django\\.contrib\\.sessions\\.cache)[0-9a-z]{32}\\b\"),\n # Django session keys for 'cached_db' backend\n re.compile(r\"(?<=django\\.contrib\\.sessions\\.cached_db)[0-9a-z]{32}\\b\"),\n # Long random hashes\n re.compile(r\"\\b[0-9a-f]{32}\\b\"),\n # UUIDs\n re.compile(r\"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\"),\n # Integers\n re.compile(r\"\\d+\"),\n )\n\n def __eq__(self, other):\n return super().__eq__(other) and self.operation == other.operation\n\n @property\n def name(self):\n name_parts = [\"cache\"]\n if self.alias != DEFAULT_CACHE_ALIAS:\n name_parts.append(self.alias)\n name_parts.append(self.operation)\n return \"|\".join(name_parts)\n\n\nclass CacheRecorder:\n \"\"\"\n Monkey patches a cache class to call 'callback' on every operation it calls\n \"\"\"\n\n def __init__(self, alias, callback):\n self.alias = alias\n self.callback = callback\n\n def __enter__(self):\n cache = caches[self.alias]\n\n def call_callback(func):\n alias = self.alias\n callback = self.callback\n\n @wraps(func)\n def inner(self, *args, **kwargs):\n # Ignore operations from the cache class calling itself\n\n # Get the self of the parent via stack inspection\n frame = inspect.currentframe()\n try:\n frame = frame.f_back\n is_internal_call = frame.f_locals.get(\"self\", None) is self\n finally:\n # Always delete frame references to help garbage collector\n del frame\n\n if not is_internal_call:\n key_or_keys = args[0]\n callback(\n CacheOp(\n alias=alias,\n operation=str(func.__name__),\n key_or_keys=key_or_keys,\n traceback=traceback.extract_stack(),\n )\n )\n\n return func(*args, **kwargs)\n\n return inner\n\n self.orig_methods = {name: getattr(cache, name) for name in self.cache_methods}\n for name in self.cache_methods:\n orig_method = self.orig_methods[name]\n setattr(cache, name, MethodType(call_callback(orig_method), cache))\n\n def __exit__(self, _, __, ___):\n cache = caches[self.alias]\n for name in self.cache_methods:\n setattr(cache, name, self.orig_methods[name])\n del self.orig_methods\n\n cache_methods = (\n \"add\",\n \"decr\",\n \"delete\",\n \"delete_many\",\n \"get\",\n \"get_many\",\n \"get_or_set\",\n \"incr\",\n \"set\",\n \"set_many\",\n )\n\n\nclass AllCacheRecorder(AllSourceRecorder):\n \"\"\"\n Launches CacheRecorders on all the active caches\n \"\"\"\n\n sources_setting = \"CACHES\"\n recorder_class = CacheRecorder\n",
"path": "src/django_perf_rec/cache.py"
}
] | diff --git a/HISTORY.rst b/HISTORY.rst
index 479d3bb9..d15de7d0 100644
--- a/HISTORY.rst
+++ b/HISTORY.rst
@@ -2,6 +2,11 @@
History
=======
+* Correctly record calls to ``cache.get_or_set()``.
+
+ Thanks to Peter Law for the report in
+ `Issue #319 <https://github.com/adamchainz/django-perf-rec/issues/319>`__.
+
4.9.0 (2020-11-04)
------------------
diff --git a/src/django_perf_rec/cache.py b/src/django_perf_rec/cache.py
index 22257a73..4ab8a0b2 100644
--- a/src/django_perf_rec/cache.py
+++ b/src/django_perf_rec/cache.py
@@ -120,6 +120,7 @@ def __exit__(self, _, __, ___):
"delete_many",
"get",
"get_many",
+ "get_or_set",
"incr",
"set",
"set_many",
diff --git a/tests/test_api.perf.yml b/tests/test_api.perf.yml
index 85282e1c..d64b5f85 100644
--- a/tests/test_api.perf.yml
+++ b/tests/test_api.perf.yml
@@ -1,8 +1,10 @@
RecordTests.test_delete_on_cascade_called_twice:
-- db: 'DELETE FROM "testapp_book" WHERE "testapp_book"."author_id" IN (#)'
-- db: 'DELETE FROM "testapp_award" WHERE "testapp_award"."author_id" IN (#)'
-- db: 'DELETE FROM "testapp_contract_author" WHERE "testapp_contract_author"."author_id" IN (#)'
-- db: 'DELETE FROM "testapp_author" WHERE "testapp_author"."id" IN (#)'
+- db: DELETE FROM "testapp_book" WHERE "testapp_book"."author_id" IN (#)
+- db: DELETE FROM "testapp_award" WHERE "testapp_award"."author_id" IN (#)
+- db: DELETE FROM "testapp_contract_author" WHERE "testapp_contract_author"."author_id" IN (#)
+- db: DELETE FROM "testapp_author" WHERE "testapp_author"."id" IN (#)
+RecordTests.test_get_or_set:
+- cache|get_or_set: foo
RecordTests.test_multiple_cache_ops:
- cache|set: foo
- cache|second|get_many:
diff --git a/tests/test_api.py b/tests/test_api.py
index c0975b3e..232a8147 100644
--- a/tests/test_api.py
+++ b/tests/test_api.py
@@ -69,6 +69,10 @@ def test_single_cache_op(self):
with record():
caches["default"].get("foo")
+ def test_get_or_set(self):
+ with record():
+ caches["default"].get_or_set("foo", 42)
+
def test_single_cache_op_with_traceback(self):
with pretend_not_under_pytest():
with pytest.raises(AssertionError) as excinfo:
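Reading the patched `CacheRecorder` above, `get_or_set()` previously produced no entry at all: only the names listed in `cache_methods` are wrapped, and the internal `get()`/`add()` calls that `get_or_set()` makes on itself are discarded by the `is_internal_call` frame inspection. Below is a minimal sketch of the post-fix behaviour, assuming a project already configured for django-perf-rec (a `CACHES` setting plus pytest-django); the test name, key and value are illustrative and not taken from the PR.

```python
from django.core.cache import caches
from django_perf_rec import record


def test_get_or_set_now_recorded():
    # get_or_set is now one of the patched cache_methods, so this call is
    # captured as a single operation while recording.
    with record():
        caches["default"].get_or_set("greeting", "hello")
    # The generated .perf.yml entry for this test becomes:
    #   - cache|get_or_set: greeting
    # Before the fix, the recording for this block was simply empty.
```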
|
jazzband__django-oauth-toolkit-1145 | Release of version 2.0.0
Hello everyone
I would like to know whether there is any estimate for when version 2.0.0 will be released. It still appears as unreleased in the docs, and the versions below it carry two vulnerability alerts in [safety](https://pyup.io/). Thank you!
| [
{
"content": "import django\n\n\n__version__ = \"1.7.1\"\n\nif django.VERSION < (3, 2):\n default_app_config = \"oauth2_provider.apps.DOTConfig\"\n",
"path": "oauth2_provider/__init__.py"
}
] | [
{
"content": "import django\n\n\n__version__ = \"2.0.0\"\n\nif django.VERSION < (3, 2):\n default_app_config = \"oauth2_provider.apps.DOTConfig\"\n",
"path": "oauth2_provider/__init__.py"
}
] | diff --git a/CHANGELOG.md b/CHANGELOG.md
index baae70de8..7819fe616 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -16,7 +16,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [unreleased]
-## [2.0.0] unreleased
+## [2.0.0] 2022-04-24
+
+This is a major release with **BREAKING** changes. Please make sure to review these changes before upgrading:
### Added
* #1106 OIDC: Add "scopes_supported" to the [ConnectDiscoveryInfoView](https://django-oauth-toolkit.readthedocs.io/en/latest/oidc.html#connectdiscoveryinfoview).
@@ -28,8 +30,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
* #1129 (**Breaking**) Changed default value of PKCE_REQUIRED to True. This is a **breaking change**. Clients without
PKCE enabled will fail to authenticate. This breaks with [section 5 of RFC7636](https://datatracker.ietf.org/doc/html/rfc7636)
in favor of the [OAuth2 Security Best Practices for Authorization Code Grants](https://datatracker.ietf.org/doc/html/draft-ietf-oauth-security-topics#section-2.1).
- If you want to retain the pre-2.x behavior, set `PKCE_REQUIRED = False ` in your settings.py
-
+ If you want to retain the pre-2.x behavior, set `PKCE_REQUIRED = False` in your settings.py
* #1093 (**Breaking**) Changed to implement [hashed](https://docs.djangoproject.com/en/stable/topics/auth/passwords/)
client_secret values. This is a **breaking change** that will migrate all your existing
cleartext `application.client_secret` values to be hashed with Django's default password hashing algorithm
@@ -43,7 +44,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Fixed
* #1108 OIDC: Fix `validate_bearer_token()` to properly set `request.scopes` to the list of granted scopes.
-* #1132: Fixed help text for `--skip-authorization` argument of the `createapplication` management command
+* #1132: Fixed help text for `--skip-authorization` argument of the `createapplication` management command.
### Removed
* #1124 (**Breaking**, **Security**) Removes support for insecure `urn:ietf:wg:oauth:2.0:oob` and `urn:ietf:wg:oauth:2.0:oob:auto` which are replaced
diff --git a/oauth2_provider/__init__.py b/oauth2_provider/__init__.py
index 9024b6f63..49a4433da 100644
--- a/oauth2_provider/__init__.py
+++ b/oauth2_provider/__init__.py
@@ -1,7 +1,7 @@
import django
-__version__ = "1.7.1"
+__version__ = "2.0.0"
if django.VERSION < (3, 2):
default_app_config = "oauth2_provider.apps.DOTConfig"
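The changelog hunk above flags two breaking changes worth handling before upgrading: `PKCE_REQUIRED` now defaults to `True`, and `client_secret` values are stored hashed, so cleartext secrets are only visible at creation time. Below is a minimal sketch of opting out of the new PKCE default, assuming the usual `OAUTH2_PROVIDER` settings namespace; whether to opt out at all is the security trade-off the changelog warns about.

```python
# settings.py (sketch) -- retain the pre-2.x behaviour only for clients that
# cannot perform PKCE; new deployments should keep the 2.0.0 default of True.
OAUTH2_PROVIDER = {
    "PKCE_REQUIRED": False,
}
```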
|
frappe__frappe-17020 | Remove Share doesn't disappear
## Description of the issue
When the read permission of a Share is removed by de-selecting the checkbox, the corresponding DocShare is removed in the backend, but the checkbox is automatically re-selected in the frontend. Only after a refresh does the share entry disappear.
## Context information (for bug reports)
**Output of `bench version`**
```
frappe 14.x.x-develop
```
## Steps to reproduce the issue
1. Open an arbitrary Document
2. Add a share with read permissions
3. Remove the read permission by clicking the checkbox
### Observed result
The checkbox is automatically re-selected
### Expected result
The share entry disappears
## Additional information
| [
{
"content": "# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# License: MIT. See LICENSE\n\nimport frappe\nfrom frappe import _\nfrom frappe.desk.doctype.notification_log.notification_log import (\n\tenqueue_create_notification,\n\tget_title,\n\tget_title_html,\n)\nfrom frappe.desk.form.document_follow import follow_document\nfrom frappe.utils import cint\n\n\[email protected]()\ndef add(\n\tdoctype, name, user=None, read=1, write=0, submit=0, share=0, everyone=0, flags=None, notify=0\n):\n\t\"\"\"Share the given document with a user.\"\"\"\n\tif not user:\n\t\tuser = frappe.session.user\n\n\tif not (flags or {}).get(\"ignore_share_permission\"):\n\t\tcheck_share_permission(doctype, name)\n\n\tshare_name = get_share_name(doctype, name, user, everyone)\n\n\tif share_name:\n\t\tdoc = frappe.get_doc(\"DocShare\", share_name)\n\telse:\n\t\tdoc = frappe.new_doc(\"DocShare\")\n\t\tdoc.update(\n\t\t\t{\"user\": user, \"share_doctype\": doctype, \"share_name\": name, \"everyone\": cint(everyone)}\n\t\t)\n\n\tif flags:\n\t\tdoc.flags.update(flags)\n\n\tdoc.update(\n\t\t{\n\t\t\t# always add read, since you are adding!\n\t\t\t\"read\": 1,\n\t\t\t\"write\": cint(write),\n\t\t\t\"submit\": cint(submit),\n\t\t\t\"share\": cint(share),\n\t\t}\n\t)\n\n\tdoc.save(ignore_permissions=True)\n\tnotify_assignment(user, doctype, name, everyone, notify=notify)\n\n\tif frappe.get_cached_value(\"User\", user, \"follow_shared_documents\"):\n\t\tfollow_document(doctype, name, user)\n\n\treturn doc\n\n\ndef remove(doctype, name, user, flags=None):\n\tshare_name = frappe.db.get_value(\n\t\t\"DocShare\", {\"user\": user, \"share_name\": name, \"share_doctype\": doctype}\n\t)\n\n\tif share_name:\n\t\tfrappe.delete_doc(\"DocShare\", share_name, flags=flags)\n\n\[email protected]()\ndef set_permission(doctype, name, user, permission_to, value=1, everyone=0):\n\t\"\"\"Set share permission.\"\"\"\n\tcheck_share_permission(doctype, name)\n\n\tshare_name = get_share_name(doctype, name, user, everyone)\n\tvalue = int(value)\n\n\tif not share_name:\n\t\tif value:\n\t\t\tshare = add(doctype, name, user, everyone=everyone, **{permission_to: 1})\n\t\telse:\n\t\t\t# no share found, nothing to remove\n\t\t\tshare = {}\n\t\t\tpass\n\telse:\n\t\tshare = frappe.get_doc(\"DocShare\", share_name)\n\t\tshare.flags.ignore_permissions = True\n\t\tshare.set(permission_to, value)\n\n\t\tif not value:\n\t\t\t# un-set higher-order permissions too\n\t\t\tif permission_to == \"read\":\n\t\t\t\tshare.read = share.write = share.submit = share.share = 0\n\n\t\tshare.save()\n\n\t\tif not (share.read or share.write or share.submit or share.share):\n\t\t\tshare.delete()\n\t\t\tshare = {}\n\n\treturn share\n\n\[email protected]()\ndef get_users(doctype, name):\n\t\"\"\"Get list of users with which this document is shared\"\"\"\n\treturn frappe.db.get_all(\n\t\t\"DocShare\",\n\t\tfields=[\n\t\t\t\"`name`\",\n\t\t\t\"`user`\",\n\t\t\t\"`read`\",\n\t\t\t\"`write`\",\n\t\t\t\"`submit`\",\n\t\t\t\"`share`\",\n\t\t\t\"everyone\",\n\t\t\t\"owner\",\n\t\t\t\"creation\",\n\t\t],\n\t\tfilters=dict(share_doctype=doctype, share_name=name),\n\t)\n\n\ndef get_shared(doctype, user=None, rights=None):\n\t\"\"\"Get list of shared document names for given user and DocType.\n\n\t:param doctype: DocType of which shared names are queried.\n\t:param user: User for which shared names are queried.\n\t:param rights: List of rights for which the document is shared. 
List of `read`, `write`, `share`\"\"\"\n\n\tif not user:\n\t\tuser = frappe.session.user\n\n\tif not rights:\n\t\trights = [\"read\"]\n\n\tfilters = [[right, \"=\", 1] for right in rights]\n\tfilters += [[\"share_doctype\", \"=\", doctype]]\n\tor_filters = [[\"user\", \"=\", user]]\n\tif user != \"Guest\":\n\t\tor_filters += [[\"everyone\", \"=\", 1]]\n\n\tshared_docs = frappe.db.get_all(\n\t\t\"DocShare\", fields=[\"share_name\"], filters=filters, or_filters=or_filters\n\t)\n\n\treturn [doc.share_name for doc in shared_docs]\n\n\ndef get_shared_doctypes(user=None):\n\t\"\"\"Return list of doctypes in which documents are shared for the given user.\"\"\"\n\tif not user:\n\t\tuser = frappe.session.user\n\ttable = frappe.qb.DocType(\"DocShare\")\n\tquery = (\n\t\tfrappe.qb.from_(table)\n\t\t.where((table.user == user) | (table.everyone == 1))\n\t\t.select(table.share_doctype)\n\t\t.distinct()\n\t)\n\treturn query.run(pluck=True)\n\n\ndef get_share_name(doctype, name, user, everyone):\n\tif cint(everyone):\n\t\tshare_name = frappe.db.get_value(\n\t\t\t\"DocShare\", {\"everyone\": 1, \"share_name\": name, \"share_doctype\": doctype}\n\t\t)\n\telse:\n\t\tshare_name = frappe.db.get_value(\n\t\t\t\"DocShare\", {\"user\": user, \"share_name\": name, \"share_doctype\": doctype}\n\t\t)\n\n\treturn share_name\n\n\ndef check_share_permission(doctype, name):\n\t\"\"\"Check if the user can share with other users\"\"\"\n\tif not frappe.has_permission(doctype, ptype=\"share\", doc=name):\n\t\tfrappe.throw(\n\t\t\t_(\"No permission to {0} {1} {2}\").format(\"share\", doctype, name), frappe.PermissionError\n\t\t)\n\n\ndef notify_assignment(shared_by, doctype, doc_name, everyone, notify=0):\n\n\tif not (shared_by and doctype and doc_name) or everyone or not notify:\n\t\treturn\n\n\tfrom frappe.utils import get_fullname\n\n\ttitle = get_title(doctype, doc_name)\n\n\treference_user = get_fullname(frappe.session.user)\n\tnotification_message = _(\"{0} shared a document {1} {2} with you\").format(\n\t\tfrappe.bold(reference_user), frappe.bold(doctype), get_title_html(title)\n\t)\n\n\tnotification_doc = {\n\t\t\"type\": \"Share\",\n\t\t\"document_type\": doctype,\n\t\t\"subject\": notification_message,\n\t\t\"document_name\": doc_name,\n\t\t\"from_user\": frappe.session.user,\n\t}\n\n\tenqueue_create_notification(shared_by, notification_doc)\n",
"path": "frappe/share.py"
}
] | [
{
"content": "# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# License: MIT. See LICENSE\n\nimport frappe\nfrom frappe import _\nfrom frappe.desk.doctype.notification_log.notification_log import (\n\tenqueue_create_notification,\n\tget_title,\n\tget_title_html,\n)\nfrom frappe.desk.form.document_follow import follow_document\nfrom frappe.utils import cint\n\n\[email protected]()\ndef add(\n\tdoctype, name, user=None, read=1, write=0, submit=0, share=0, everyone=0, flags=None, notify=0\n):\n\t\"\"\"Share the given document with a user.\"\"\"\n\tif not user:\n\t\tuser = frappe.session.user\n\n\tif not (flags or {}).get(\"ignore_share_permission\"):\n\t\tcheck_share_permission(doctype, name)\n\n\tshare_name = get_share_name(doctype, name, user, everyone)\n\n\tif share_name:\n\t\tdoc = frappe.get_doc(\"DocShare\", share_name)\n\telse:\n\t\tdoc = frappe.new_doc(\"DocShare\")\n\t\tdoc.update(\n\t\t\t{\"user\": user, \"share_doctype\": doctype, \"share_name\": name, \"everyone\": cint(everyone)}\n\t\t)\n\n\tif flags:\n\t\tdoc.flags.update(flags)\n\n\tdoc.update(\n\t\t{\n\t\t\t# always add read, since you are adding!\n\t\t\t\"read\": 1,\n\t\t\t\"write\": cint(write),\n\t\t\t\"submit\": cint(submit),\n\t\t\t\"share\": cint(share),\n\t\t}\n\t)\n\n\tdoc.save(ignore_permissions=True)\n\tnotify_assignment(user, doctype, name, everyone, notify=notify)\n\n\tif frappe.get_cached_value(\"User\", user, \"follow_shared_documents\"):\n\t\tfollow_document(doctype, name, user)\n\n\treturn doc\n\n\ndef remove(doctype, name, user, flags=None):\n\tshare_name = frappe.db.get_value(\n\t\t\"DocShare\", {\"user\": user, \"share_name\": name, \"share_doctype\": doctype}\n\t)\n\n\tif share_name:\n\t\tfrappe.delete_doc(\"DocShare\", share_name, flags=flags)\n\n\[email protected]()\ndef set_permission(doctype, name, user, permission_to, value=1, everyone=0):\n\t\"\"\"Set share permission.\"\"\"\n\tcheck_share_permission(doctype, name)\n\n\tshare_name = get_share_name(doctype, name, user, everyone)\n\tvalue = int(value)\n\n\tif not share_name:\n\t\tif value:\n\t\t\tshare = add(doctype, name, user, everyone=everyone, **{permission_to: 1})\n\t\telse:\n\t\t\t# no share found, nothing to remove\n\t\t\tshare = {}\n\t\t\tpass\n\telse:\n\t\tshare = frappe.get_doc(\"DocShare\", share_name)\n\t\tshare.flags.ignore_permissions = True\n\t\tshare.set(permission_to, value)\n\n\t\tif not value:\n\t\t\t# un-set higher-order permissions too\n\t\t\tif permission_to == \"read\":\n\t\t\t\tshare.read = share.write = share.submit = share.share = 0\n\n\t\tshare.save()\n\n\t\tif not (share.read or share.write or share.submit or share.share):\n\t\t\tshare.delete()\n\t\t\tshare = None\n\n\treturn share\n\n\[email protected]()\ndef get_users(doctype, name):\n\t\"\"\"Get list of users with which this document is shared\"\"\"\n\treturn frappe.db.get_all(\n\t\t\"DocShare\",\n\t\tfields=[\n\t\t\t\"`name`\",\n\t\t\t\"`user`\",\n\t\t\t\"`read`\",\n\t\t\t\"`write`\",\n\t\t\t\"`submit`\",\n\t\t\t\"`share`\",\n\t\t\t\"everyone\",\n\t\t\t\"owner\",\n\t\t\t\"creation\",\n\t\t],\n\t\tfilters=dict(share_doctype=doctype, share_name=name),\n\t)\n\n\ndef get_shared(doctype, user=None, rights=None):\n\t\"\"\"Get list of shared document names for given user and DocType.\n\n\t:param doctype: DocType of which shared names are queried.\n\t:param user: User for which shared names are queried.\n\t:param rights: List of rights for which the document is shared. 
List of `read`, `write`, `share`\"\"\"\n\n\tif not user:\n\t\tuser = frappe.session.user\n\n\tif not rights:\n\t\trights = [\"read\"]\n\n\tfilters = [[right, \"=\", 1] for right in rights]\n\tfilters += [[\"share_doctype\", \"=\", doctype]]\n\tor_filters = [[\"user\", \"=\", user]]\n\tif user != \"Guest\":\n\t\tor_filters += [[\"everyone\", \"=\", 1]]\n\n\tshared_docs = frappe.db.get_all(\n\t\t\"DocShare\", fields=[\"share_name\"], filters=filters, or_filters=or_filters\n\t)\n\n\treturn [doc.share_name for doc in shared_docs]\n\n\ndef get_shared_doctypes(user=None):\n\t\"\"\"Return list of doctypes in which documents are shared for the given user.\"\"\"\n\tif not user:\n\t\tuser = frappe.session.user\n\ttable = frappe.qb.DocType(\"DocShare\")\n\tquery = (\n\t\tfrappe.qb.from_(table)\n\t\t.where((table.user == user) | (table.everyone == 1))\n\t\t.select(table.share_doctype)\n\t\t.distinct()\n\t)\n\treturn query.run(pluck=True)\n\n\ndef get_share_name(doctype, name, user, everyone):\n\tif cint(everyone):\n\t\tshare_name = frappe.db.get_value(\n\t\t\t\"DocShare\", {\"everyone\": 1, \"share_name\": name, \"share_doctype\": doctype}\n\t\t)\n\telse:\n\t\tshare_name = frappe.db.get_value(\n\t\t\t\"DocShare\", {\"user\": user, \"share_name\": name, \"share_doctype\": doctype}\n\t\t)\n\n\treturn share_name\n\n\ndef check_share_permission(doctype, name):\n\t\"\"\"Check if the user can share with other users\"\"\"\n\tif not frappe.has_permission(doctype, ptype=\"share\", doc=name):\n\t\tfrappe.throw(\n\t\t\t_(\"No permission to {0} {1} {2}\").format(\"share\", doctype, name), frappe.PermissionError\n\t\t)\n\n\ndef notify_assignment(shared_by, doctype, doc_name, everyone, notify=0):\n\n\tif not (shared_by and doctype and doc_name) or everyone or not notify:\n\t\treturn\n\n\tfrom frappe.utils import get_fullname\n\n\ttitle = get_title(doctype, doc_name)\n\n\treference_user = get_fullname(frappe.session.user)\n\tnotification_message = _(\"{0} shared a document {1} {2} with you\").format(\n\t\tfrappe.bold(reference_user), frappe.bold(doctype), get_title_html(title)\n\t)\n\n\tnotification_doc = {\n\t\t\"type\": \"Share\",\n\t\t\"document_type\": doctype,\n\t\t\"subject\": notification_message,\n\t\t\"document_name\": doc_name,\n\t\t\"from_user\": frappe.session.user,\n\t}\n\n\tenqueue_create_notification(shared_by, notification_doc)\n",
"path": "frappe/share.py"
}
] | diff --git a/frappe/share.py b/frappe/share.py
index 01d1412b8d8f..3edcb1be38c8 100644
--- a/frappe/share.py
+++ b/frappe/share.py
@@ -93,7 +93,7 @@ def set_permission(doctype, name, user, permission_to, value=1, everyone=0):
if not (share.read or share.write or share.submit or share.share):
share.delete()
- share = {}
+ share = None
return share
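The one-line fix above matters because `set_permission` is a whitelisted method whose return value is serialised back to the desk client: an empty dict arrives as `{}`, which is truthy in JavaScript, so the UI presumably treated the deleted share as still present and re-ticked the checkbox, whereas `None` arrives as `null`. Below is a minimal sketch of the fixed server-side behaviour, assuming it runs inside an initialised site context (e.g. a bench console or test) where the caller has share permission; the doctype, document name and user are illustrative.

```python
import frappe
from frappe.share import add, set_permission

# Share a document with read access, then revoke it the way the checkbox does.
add("ToDo", "TODO-0001", user="[email protected]", read=1)
result = set_permission("ToDo", "TODO-0001", "[email protected]", "read", value=0)

# With no rights left, the DocShare row is deleted and, after the fix, the
# method returns None (null on the client) instead of a truthy empty object.
assert result is None
assert not frappe.db.exists(
    "DocShare",
    {"share_doctype": "ToDo", "share_name": "TODO-0001", "user": "[email protected]"},
)
```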
|