code | docstring | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|---|
def _get_project_subdomain(self, project):
"""Determine canonical project domain as subdomain."""
subdomain_slug = project.slug.replace("_", "-")
return "{}.{}".format(subdomain_slug, settings.PUBLIC_DOMAIN) | Determine canonical project domain as subdomain. | _get_project_subdomain | python | readthedocs/readthedocs.org | readthedocs/core/resolver.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/core/resolver.py | MIT |
def _fix_filename(self, filename):
"""
Force filenames that might be HTML file paths into proper URLs.
This basically means stripping the leading /.
"""
filename = filename.lstrip("/")
return filename | Force filenames that might be HTML file paths into proper URLs.
This basically means stripping the leading /. | _fix_filename | python | readthedocs/readthedocs.org | readthedocs/core/resolver.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/core/resolver.py | MIT |
def _use_cname(self, project):
"""Test if to allow direct serving for project on CNAME."""
return bool(get_feature(project, feature_type=TYPE_CNAME)) | Test if to allow direct serving for project on CNAME. | _use_cname | python | readthedocs/readthedocs.org | readthedocs/core/resolver.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/core/resolver.py | MIT |
def test_delete_user_deletes_projects(self):
"""When deleting a user, delete projects where it's the only owner."""
user = get(User)
another_user = get(User)
project_one = get(Project, slug="one", users=[user])
project_two = get(Project, slug="two", users=[user])
project_three = get(Project, slug="three", users=[another_user])
project_four = get(Project, slug="four", users=[user, another_user])
project_five = get(
Project,
slug="five",
users=[],
)
assert Project.objects.all().count() == 5
assert Version.objects.all().count() == 5
user.delete()
assert {project_three, project_four, project_five} == set(Project.objects.all())
assert Version.objects.all().count() == 3 | When deleting a user, delete projects where it's the only owner. | test_delete_user_deletes_projects | python | readthedocs/readthedocs.org | readthedocs/core/tests/test_signals.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/core/tests/test_signals.py | MIT |
def test_delete_user_deletes_organizations(self):
"""When deleting a user, delete organizations where it's the only owner."""
user = get(User)
member = get(User)
another_user = get(User)
project_one = get(Project, slug="one")
project_two = get(Project, slug="two")
project_three = get(Project, slug="three")
org_one = get(Organization, slug="one", owners=[user], projects=[project_one])
org_two = get(
Organization,
slug="two",
owners=[user, another_user],
projects=[project_two],
)
org_three = get(
Organization, slug="three", owners=[another_user], projects=[project_three]
)
team_one = get(
Team, organization=org_one, members=[member, user], projects=[project_one]
)
team_two = get(
Team,
organization=org_three,
members=[another_user],
projects=[project_three],
)
assert Organization.objects.all().count() == 3
assert Project.objects.all().count() == 3
assert Version.objects.all().count() == 3
assert Team.objects.all().count() == 2
assert TeamMember.objects.all().count() == 3
assert User.objects.all().count() == 3
user.delete()
assert {org_two, org_three} == set(Organization.objects.all())
assert {project_two, project_three} == set(Project.objects.all())
assert Version.objects.all().count() == 2
assert {team_two} == set(Team.objects.all())
assert TeamMember.objects.all().count() == 1
assert User.objects.all().count() == 2 | When deleting a user, delete organizations where it's the only owner. | test_delete_user_deletes_organizations | python | readthedocs/readthedocs.org | readthedocs/core/tests/test_signals.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/core/tests/test_signals.py | MIT |
def make_document_url(project, version=None, page="", path=""):
"""
Create a URL for a Project, Version and page (and/or path).
:param page: is the name of the document as Sphinx calls it (e.g.
/config-file/v1) (note that the extension is not present)
:param path: is the full path of the page (e.g. /section/configuration.html)
:returns: URL to the page (e.g. https://docs.domain.com/en/latest/section/configuration.html)
"""
if not project:
return ""
filename = path or page
return Resolver().resolve(project=project, version_slug=version, filename=filename) | Create a URL for a Project, Version and page (and/or path).
:param page: is the name of the document as Sphinx calls it (e.g.
/config-file/v1) (note that the extension is not present)
:param path: is the full path of the page (e.g. /section/configuration.html)
:returns: URL to the page (e.g. https://docs.domain.com/en/latest/section/configuration.html) | make_document_url | python | readthedocs/readthedocs.org | readthedocs/core/templatetags/core_tags.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/core/templatetags/core_tags.py | MIT |
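
As an illustration, a hedged usage sketch: it assumes a configured Django environment and an existing project with slug "docs" (both hypothetical), and the exact URL depends on the resolver and the project's domain.

```python
# Hypothetical usage; assumes a configured Django environment and an
# existing project with slug "docs". The exact URL depends on the resolver.
from readthedocs.core.templatetags.core_tags import make_document_url
from readthedocs.projects.models import Project

project = Project.objects.get(slug="docs")

# ``path`` takes precedence over ``page`` (filename = path or page):
make_document_url(project, version="latest", path="section/configuration.html")
# -> e.g. "https://docs.readthedocs.io/en/latest/section/configuration.html"

make_document_url(None)  # -> "" (no project, no URL)
```
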
def escapejson(data, indent=None):
"""
Escape JSON correctly for inclusion in Django templates.
This code was mostly taken from Django's implementation
https://docs.djangoproject.com/en/2.2/ref/templates/builtins/#json-script
https://github.com/django/django/blob/2.2.2/django/utils/html.py#L74-L92
After upgrading to Django 2.1+, we could replace this with Django's implementation
although the inputs and outputs are a bit different.
Example:
var jsvar = {{ dictionary_value | escapejson }}
"""
if indent:
indent = int(indent)
_json_script_escapes = {
ord(">"): "\\u003E",
ord("<"): "\\u003C",
ord("&"): "\\u0026",
}
return mark_safe(
json.dumps(
data,
cls=DjangoJSONEncoder,
indent=indent,
).translate(_json_script_escapes)
) | Escape JSON correctly for inclusion in Django templates.
This code was mostly taken from Django's implementation
https://docs.djangoproject.com/en/2.2/ref/templates/builtins/#json-script
https://github.com/django/django/blob/2.2.2/django/utils/html.py#L74-L92
After upgrading to Django 2.1+, we could replace this with Django's implementation
although the inputs and outputs are a bit different.
Example:
var jsvar = {{ dictionary_value | escapejson }} | escapejson | python | readthedocs/readthedocs.org | readthedocs/core/templatetags/core_tags.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/core/templatetags/core_tags.py | MIT |
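
A quick sketch of what the filter produces. It assumes a configured Django environment, since the function relies on `mark_safe` and `DjangoJSONEncoder`.

```python
# The three characters that could break out of an inline <script> block
# are replaced with \uXXXX escapes, and the result is marked safe.
from readthedocs.core.templatetags.core_tags import escapejson

print(escapejson({"title": "<b>Docs</b> & more"}))
# {"title": "\u003Cb\u003EDocs\u003C/b\u003E \u0026 more"}
```
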
def can_be_disconnected(account):
"""
Check if a social account can be disconnected.
This is used to disable the disconnect button for certain social accounts.
"""
adapter = get_adapter()
try:
adapter.validate_disconnect(account=account, accounts=[])
return True
except forms.ValidationError:
return False | Check if a social account can be disconnected.
This is used to disable the disconnect button for certain social accounts. | can_be_disconnected | python | readthedocs/readthedocs.org | readthedocs/core/templatetags/readthedocs/socialaccounts.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/core/templatetags/readthedocs/socialaccounts.py | MIT |
def _build_version(project, slug, already_built=()):
"""
Where we actually trigger builds for a project and slug.
All webhook logic should route here to call ``trigger_build``.
"""
if not project.has_valid_webhook:
project.has_valid_webhook = True
project.save()
# Previously we were building the latest version (inactive or active)
# when building the default version; some users may have relied on
# this to update the version list. See #4450.
version = project.versions.filter(active=True, slug=slug).first()
if version and slug not in already_built:
log.info(
"Building.",
project_slug=project.slug,
version_slug=version.slug,
)
trigger_build(project=project, version=version)
return slug
log.info("Not building.", version_slug=slug)
return None | Where we actually trigger builds for a project and slug.
All webhook logic should route here to call ``trigger_build``. | _build_version | python | readthedocs/readthedocs.org | readthedocs/core/views/hooks.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/core/views/hooks.py | MIT |
def build_branches(project, branch_list):
"""
Build the branches for a specific project.
Returns:
to_build - a set of version slugs that were triggered to build
not_building - a set of version slugs that we won't build
"""
to_build = set()
not_building = set()
for branch in branch_list:
versions = project.versions_from_branch_name(branch)
for version in versions:
log.debug(
"Processing.",
project_slug=project.slug,
version_slug=version.slug,
)
ret = _build_version(project, version.slug, already_built=to_build)
if ret:
to_build.add(ret)
else:
not_building.add(version.slug)
return (to_build, not_building) | Build the branches for a specific project.
Returns:
to_build - a set of version slugs that were triggered to build
not_building - a set of version slugs that we won't build | build_branches | python | readthedocs/readthedocs.org | readthedocs/core/views/hooks.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/core/views/hooks.py | MIT |
def trigger_sync_versions(project):
"""
Sync the versions of a repo using its latest version.
This doesn't register a new build,
but clones the repo and syncs the versions.
Because `sync_repository_task` is bound to a version,
we always pass the default version.
:returns: The version slug that was used to trigger the clone.
:rtype: str or ``None`` if failed
"""
if not Project.objects.is_active(project):
log.warning(
"Sync not triggered because project is not active.",
project_slug=project.slug,
)
return None
try:
version_identifier = project.get_default_branch()
version = project.versions.filter(
identifier=version_identifier,
).first()
if not version:
log.info("Unable to sync from version.", version_identifier=version_identifier)
return None
if project.has_feature(Feature.SKIP_SYNC_VERSIONS):
log.info("Skipping sync versions for project.", project_slug=project.slug)
return None
_, build_api_key = BuildAPIKey.objects.create_key(project=project)
log.debug(
"Triggering sync repository.",
project_slug=version.project.slug,
version_slug=version.slug,
)
sync_repository_task.apply_async(
args=[version.pk],
kwargs={"build_api_key": build_api_key},
)
return version.slug
except Exception:
log.exception("Unknown sync versions exception")
return None | Sync the versions of a repo using its latest version.
This doesn't register a new build,
but clones the repo and syncs the versions.
Because `sync_repository_task` is bound to a version,
we always pass the default version.
:returns: The version slug that was used to trigger the clone.
:rtype: str or ``None`` if failed | trigger_sync_versions | python | readthedocs/readthedocs.org | readthedocs/core/views/hooks.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/core/views/hooks.py | MIT |
def get_or_create_external_version(project, version_data):
"""
Get or create version using the ``commit`` as identifier, and PR id as ``verbose_name``.
If the external version does not exist, it is created.
:param project: Project instance
:param version_data: A :py:class:`readthedocs.api.v2.views.integrations.ExternalVersionData`
instance.
:returns: External version.
:rtype: Version
"""
external_version, created = project.versions.get_or_create(
verbose_name=version_data.id,
type=EXTERNAL,
defaults={
"identifier": version_data.commit,
"active": True,
"state": EXTERNAL_VERSION_STATE_OPEN,
},
)
if created:
log.info(
"External version created.",
project_slug=project.slug,
version_slug=external_version.slug,
)
else:
# Identifier will change if there is a new commit to the Pull/Merge Request.
external_version.identifier = version_data.commit
# If the PR was previously closed, the version was marked as closed; reopen it.
external_version.state = EXTERNAL_VERSION_STATE_OPEN
external_version.save()
log.info(
"External version updated.",
project_slug=project.slug,
version_slug=external_version.slug,
)
return external_version | Get or create version using the ``commit`` as identifier, and PR id as ``verbose_name``.
If the external version does not exist, it is created.
:param project: Project instance
:param version_data: A :py:class:`readthedocs.api.v2.views.integrations.ExternalVersionData`
instance.
:returns: External version.
:rtype: Version | get_or_create_external_version | python | readthedocs/readthedocs.org | readthedocs/core/views/hooks.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/core/views/hooks.py | MIT |
def close_external_version(project, version_data):
"""
Close external versions using `identifier` and `verbose_name`.
We mark the version's state as `closed` so another celery task will remove
it after some days. If the external version does not exist, returns `None`.
:param project: Project instance
:param version_data: A :py:class:`readthedocs.api.v2.views.integrations.ExternalVersionData`
instance.
:rtype: str
"""
external_version = (
project.versions(manager=EXTERNAL)
.filter(
verbose_name=version_data.id,
identifier=version_data.commit,
)
.first()
)
if external_version:
external_version.state = EXTERNAL_VERSION_STATE_CLOSED
external_version.save()
log.info(
"External version marked as closed.",
project_slug=project.slug,
version_slug=external_version.slug,
)
return external_version.verbose_name
return None | Close external versions using `identifier` and `verbose_name`.
We mark the version's state as `closed` so another celery task will remove
it after some days. If the external version does not exist, returns `None`.
:param project: Project instance
:param version_data: A :py:class:`readthedocs.api.v2.views.integrations.ExternalVersionData`
instance.
:rtype: str | close_external_version | python | readthedocs/readthedocs.org | readthedocs/core/views/hooks.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/core/views/hooks.py | MIT |
def build_external_version(project, version):
"""
Where we actually trigger builds for external versions.
All pull/merge request webhook logic should route here to call ``trigger_build``.
"""
if not project.has_valid_webhook:
project.has_valid_webhook = True
project.save()
# Build External version
log.info(
"Building external version",
project_slug=project.slug,
version_slug=version.slug,
)
trigger_build(project=project, version=version, commit=version.identifier)
return version.verbose_name | Where we actually trigger builds for external versions.
All pull/merge request webhook logic should route here to call ``trigger_build``. | build_external_version | python | readthedocs/readthedocs.org | readthedocs/core/views/hooks.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/core/views/hooks.py | MIT |
def get_context_data(self, **kwargs):
"""Pass along endpoint for support form."""
context = super().get_context_data(**kwargs)
context["SUPPORT_FORM_ENDPOINT"] = settings.SUPPORT_FORM_ENDPOINT
if settings.RTD_EXT_THEME_ENABLED:
context["form"] = self.form_class(self.request.user)
return context | Pass along endpoint for support form. | get_context_data | python | readthedocs/readthedocs.org | readthedocs/core/views/__init__.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/core/views/__init__.py | MIT |
def unsafe_join_url_path(base, *args):
"""
Joins a base URL path with one or more path components.
This does a simple join of the base path with the path components,
inserting a slash between each component.
The resulting path will always start with a slash.
.. warning::
This does not offer protection against directory traversal attacks;
it simply joins the path components together. This shouldn't be used
to serve files; use ``readthedocs.storage.utils.safe_join`` for that.
"""
base = "/" + base.lstrip("/")
for path in args:
base = base.rstrip("/") + "/" + path.lstrip("/")
return base | Joins a base URL path with one or more path components.
This does a simple join of the base path with the path components,
inserting a slash between each component.
The resulting path will always start with a slash.
.. warning::
This does not offer protection against directory traversal attacks;
it simply joins the path components together. This shouldn't be used
to serve files; use ``readthedocs.storage.utils.safe_join`` for that. | unsafe_join_url_path | python | readthedocs/readthedocs.org | readthedocs/core/utils/url.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/core/utils/url.py | MIT |
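
The join is plain string manipulation, so its behavior (including the lack of traversal protection the warning describes) is easy to demonstrate:

```python
from readthedocs.core.utils.url import unsafe_join_url_path

unsafe_join_url_path("en", "latest", "index.html")  # -> "/en/latest/index.html"
unsafe_join_url_path("/en/", "/latest/", "page/")   # -> "/en/latest/page/"

# No traversal protection: ".." segments pass straight through.
unsafe_join_url_path("en", "../secret")             # -> "/en/../secret"
```
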
def get_dotted_attribute(obj, attribute, default=_DEFAULT):
"""
Allow to get nested attributes from an object using a dot notation.
This behaves similarly to getattr, but allows getting nested attributes.
Similarly, if a default value is provided, it will be returned if the
attribute is not found; otherwise an AttributeError is raised.
"""
for attr in attribute.split("."):
if hasattr(obj, attr):
obj = getattr(obj, attr)
elif default is not _DEFAULT:
return default
else:
raise AttributeError(f"Object {obj} has no attribute {attr}")
return obj | Allow to get nested attributes from an object using a dot notation.
This behaves similarly to getattr, but allows getting nested attributes.
Similarly, if a default value is provided, it will be returned if the
attribute is not found; otherwise an AttributeError is raised. | get_dotted_attribute | python | readthedocs/readthedocs.org | readthedocs/core/utils/objects.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/core/utils/objects.py | MIT |
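
A minimal demonstration with a throwaway object graph (the import path follows the `path` column of this row):

```python
from types import SimpleNamespace

from readthedocs.core.utils.objects import get_dotted_attribute

build = SimpleNamespace(version=SimpleNamespace(project=SimpleNamespace(slug="docs")))

get_dotted_attribute(build, "version.project.slug")        # -> "docs"
get_dotted_attribute(build, "version.project.name", None)  # -> None (default)
get_dotted_attribute(build, "version.owner")               # raises AttributeError
```
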
def get_override_class(proxy_class, default_class=None):
"""
Determine which class to use in an override class.
The `proxy_class` is the main class that is used, and `default_class` is the
default class that this proxy class will instantiate. If `default_class` is
not defined, this will be inferred from the `proxy_class`, as is defined in
:py:class:`SettingsOverrideObject`.
"""
if default_class is None:
default_class = getattr(proxy_class, "_default_class")
class_id = ".".join(
[
inspect.getmodule(proxy_class).__name__,
proxy_class.__name__,
]
)
class_path = settings.CLASS_OVERRIDES.get(class_id)
if class_path is None and proxy_class._override_setting is not None:
class_path = getattr(settings, proxy_class._override_setting, None)
if class_path is not None:
default_class = import_string(class_path)
return default_class | Determine which class to use in an override class.
The `proxy_class` is the main class that is used, and `default_class` is the
default class that this proxy class will instantiate. If `default_class` is
not defined, this will be inferred from the `proxy_class`, as is defined in
:py:class:`SettingsOverrideObject`. | get_override_class | python | readthedocs/readthedocs.org | readthedocs/core/utils/extend.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/core/utils/extend.py | MIT |
def __new__(cls, *args, **kwargs):
"""
Set up wrapped object.
Create an instance of the underlying target class and return it
instead of this class.
"""
return get_override_class(cls, cls._default_class)(*args, **kwargs) | Set up wrapped object.
Create an instance of the underlying target class and return it
instead of this class. | __new__ | python | readthedocs/readthedocs.org | readthedocs/core/utils/extend.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/core/utils/extend.py | MIT |
def is_suspicious_request(request) -> bool:
"""
Returns True if the request is suspicious.
This function is used to detect bots and spammers;
we rely on Cloudflare to detect them.
"""
# This header is set from Cloudflare,
# it goes from 0 to 100, 0 being low risk,
# and values above 10 are bots/spammers.
# https://developers.cloudflare.com/ruleset-engine/rules-language/fields/#dynamic-fields.
threat_score = int(request.headers.get("X-Cloudflare-Threat-Score", 0))
if threat_score > 10:
log.info(
"Suspicious threat score",
threat_score=threat_score,
)
return True
return False | Returns True if the request is suspicious.
This function is used to detect bots and spammers;
we rely on Cloudflare to detect them. | is_suspicious_request | python | readthedocs/readthedocs.org | readthedocs/core/utils/requests.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/core/utils/requests.py | MIT |
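
A sketch of the check using Django's test `RequestFactory` (assumes a configured settings module; the `HTTP_X_CLOUDFLARE_THREAT_SCORE` META key maps to the `X-Cloudflare-Threat-Score` header):

```python
from django.test import RequestFactory

from readthedocs.core.utils.requests import is_suspicious_request

factory = RequestFactory()

request = factory.get("/", HTTP_X_CLOUDFLARE_THREAT_SCORE="42")
is_suspicious_request(request)  # -> True (score above 10)

request = factory.get("/")
is_suspicious_request(request)  # -> False (header absent, defaults to 0)
```
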
def contact_users(
users,
email_subject=None,
email_content=None,
from_email=None,
context_function=None,
dryrun=True,
):
"""
Send an email to a list of users.
:param users: Queryset of Users.
:param string email_subject: Email subject
:param string email_content: Email content (markdown)
:param string from_email: Email to send from (Test Support <[email protected]>)
:param context_function: A callable that will receive a user
and return a dict of additional context to be used in the email content
:param bool dryrun: If `True`, don't send the email, just log the content
The `email_content` contents will be rendered using a template with the following context::
{
'user': <user object>,
'production_uri': https://readthedocs.org,
}
:returns: A dictionary with a list of sent/failed emails.
"""
from_email = from_email or settings.DEFAULT_FROM_EMAIL
context_function = context_function or (lambda user: {})
sent_emails = set()
failed_emails = set()
engine = Engine.get_default()
email_template = engine.from_string(email_content or "")
email_txt_template = engine.get_template("core/email/common.txt")
email_html_template = engine.get_template("core/email/common.html")
total = users.count()
for count, user in enumerate(users.iterator(), start=1):
context = {
"user": user,
"production_uri": f"https://{settings.PRODUCTION_DOMAIN}",
}
context.update(context_function(user))
if email_subject:
emails = list(
user.emailaddress_set.filter(verified=True)
.exclude(email=user.email)
.values_list("email", flat=True)
)
emails.append(user.email)
# First render the markdown content.
email_txt_content = email_template.render(Context(context))
email_html_content = markdown.markdown(email_txt_content)
# Now render it using the base email templates.
email_txt_rendered = email_txt_template.render(Context({"content": email_txt_content}))
email_html_rendered = email_html_template.render(
Context({"content": email_html_content})
)
try:
kwargs = {
"subject": email_subject,
"message": email_txt_rendered,
"html_message": email_html_rendered,
"from_email": from_email,
"recipient_list": emails,
}
if not dryrun:
send_mail(**kwargs)
except Exception:
log.exception("Email failed to send")
failed_emails.update(emails)
else:
log.info("Email sent.", emails=emails, count=count, total=total)
sent_emails.update(emails)
return {
"email": {
"sent": sent_emails,
"failed": failed_emails,
},
} | Send an email to a list of users.
:param users: Queryset of Users.
:param string email_subject: Email subject
:param string email_content: Email content (markdown)
:param string from_email: Email to send from (Test Support <[email protected]>)
:param context_function: A callable that will receive a user
and return a dict of additional context to be used in the email content
:param bool dryrun: If `True`, don't send the email, just log the content
The `email_content` contents will be rendered using a template with the following context::
{
'user': <user object>,
'production_uri': https://readthedocs.org,
}
:returns: A dictionary with a list of sent/failed emails. | contact_users | python | readthedocs/readthedocs.org | readthedocs/core/utils/contact.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/core/utils/contact.py | MIT |
def pretty_json_field(instance, field):
"""
Display a pretty version of a JSON field in the admin.
Thanks to PyDanny: https://www.pydanny.com/pretty-formatting-json-django-admin.html
"""
# Convert the data to sorted, indented JSON
response = json.dumps(getattr(instance, field), sort_keys=True, indent=2)
# Get the Pygments formatter
formatter = HtmlFormatter()
# Highlight the data
response = highlight(response, JsonLexer(), formatter)
# Get the stylesheet
style = "<style>" + formatter.get_style_defs() + "</style><br>"
# Mark the output as safe
return mark_safe(style + response) | Display a pretty version of a JSON field in the admin.
Thanks to PyDanny: https://www.pydanny.com/pretty-formatting-json-django-admin.html | pretty_json_field | python | readthedocs/readthedocs.org | readthedocs/core/utils/admin.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/core/utils/admin.py | MIT |
def prepare_build(
project,
version=None,
commit=None,
immutable=True,
):
"""
Prepare a build in a Celery task for project and version.
If project has a ``build_queue``, execute the task on this build queue. If
project has ``skip=True``, the build is not triggered.
:param project: project's documentation to be built
:param version: version of the project to be built. Default: ``project.get_default_version()``
:param commit: commit sha of the version required for sending build status reports
:param immutable: whether or not to create an immutable Celery signature
:returns: Celery signature of update_docs_task and Build instance
:rtype: tuple
"""
# Avoid circular import
from readthedocs.api.v2.models import BuildAPIKey
from readthedocs.builds.models import Build
from readthedocs.builds.tasks import send_build_notifications
from readthedocs.projects.models import Project
from readthedocs.projects.models import WebHookEvent
from readthedocs.projects.tasks.builds import update_docs_task
from readthedocs.projects.tasks.utils import send_external_build_status
log.bind(project_slug=project.slug)
if not Project.objects.is_active(project):
log.warning(
"Build not triggered because project is not active.",
)
return (None, None)
if not version:
default_version = project.get_default_version()
version = project.versions.get(slug=default_version)
build = Build.objects.create(
project=project,
version=version,
type="html",
state=BUILD_STATE_TRIGGERED,
success=True,
commit=commit,
)
log.bind(
build_id=build.id,
version_slug=version.slug,
)
options = {}
if project.build_queue:
options["queue"] = project.build_queue
# Set per-task time limit
# TODO remove the use of Docker limits or replace the logic here. This
# was pulling the Docker limits that were set on each stack, but we moved
# to dynamic setting of the Docker limits. This sets a failsafe higher
# limit, but if no builds hit this limit, it should be safe to remove and
# rely on Docker to terminate things on time.
# time_limit = DOCKER_LIMITS['time']
time_limit = 7200
try:
if project.container_time_limit:
time_limit = int(project.container_time_limit)
except ValueError:
log.warning("Invalid time_limit for project.")
# Add 20% overhead to task, to ensure the build can timeout and the task
# will cleanly finish.
options["soft_time_limit"] = time_limit
options["time_limit"] = int(time_limit * 1.2)
if commit:
log.bind(commit=commit)
# Send pending Build Status using Git Status API for External Builds.
send_external_build_status(
version_type=version.type,
build_pk=build.id,
commit=commit,
status=BUILD_STATUS_PENDING,
)
if version.type != EXTERNAL:
# Send notifications for build triggered.
send_build_notifications.delay(
version_pk=version.pk,
build_pk=build.pk,
event=WebHookEvent.BUILD_TRIGGERED,
)
# Reduce overhead when doing multiple pushes on the same version.
running_builds = (
Build.objects.filter(
project=project,
version=version,
)
.exclude(
state__in=BUILD_FINAL_STATES,
)
.exclude(
pk=build.pk,
)
)
if running_builds.count() > 0:
log.warning(
"Canceling running builds automatically due a new one arrived.",
running_builds=running_builds.count(),
)
# If there are builds triggered/running for this particular project and version,
# we cancel all of them and trigger a new one for the latest commit received.
for running_build in running_builds:
cancel_build(running_build)
# Start the build in X minutes and mark it as limited
limit_reached, _, max_concurrent_builds = Build.objects.concurrent(project)
if limit_reached:
log.warning(
"Delaying tasks at trigger step due to concurrency limit.",
)
# Delay the start of the build for the build retry delay.
# We're still triggering the task, but it won't run immediately,
# and the user will be alerted in the UI from the Error below.
options["countdown"] = settings.RTD_BUILDS_RETRY_DELAY
options["max_retries"] = settings.RTD_BUILDS_MAX_RETRIES
Notification.objects.add(
message_id=BuildMaxConcurrencyError.LIMIT_REACHED,
attached_to=build,
dismissable=False,
format_values={"limit": max_concurrent_builds},
)
_, build_api_key = BuildAPIKey.objects.create_key(project=project)
return (
update_docs_task.signature(
args=(
version.pk,
build.pk,
),
kwargs={
"build_commit": commit,
"build_api_key": build_api_key,
},
options=options,
immutable=True,
),
build,
) | Prepare a build in a Celery task for project and version.
If project has a ``build_queue``, execute the task on this build queue. If
project has ``skip=True``, the build is not triggered.
:param project: project's documentation to be built
:param version: version of the project to be built. Default: ``project.get_default_version()``
:param commit: commit sha of the version required for sending build status reports
:param immutable: whether or not to create an immutable Celery signature
:returns: Celery signature of update_docs_task and Build instance
:rtype: tuple | prepare_build | python | readthedocs/readthedocs.org | readthedocs/core/utils/__init__.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/core/utils/__init__.py | MIT |
def trigger_build(project, version=None, commit=None):
"""
Trigger a Build.
Helper that calls ``prepare_build`` and just effectively triggers the Celery
task to be executed by a worker.
:param project: project's documentation to be built
:param version: version of the project to be built. Default: ``latest``
:param commit: commit sha of the version required for sending build status reports
:returns: Celery AsyncResult promise and Build instance
:rtype: tuple
"""
log.bind(
project_slug=project.slug,
version_slug=version.slug if version else None,
commit=commit,
)
log.info("Triggering build.")
update_docs_task, build = prepare_build(
project=project,
version=version,
commit=commit,
immutable=True,
)
if (update_docs_task, build) == (None, None):
# Build was skipped
return (None, None)
task = update_docs_task.apply_async()
# FIXME: I'm using `isinstance` here because I wasn't able to mock this
# properly when running tests and it fails when trying to save a
# `mock.Mock` object in the database.
#
# Store the task_id in the build object to be able to cancel it later.
if isinstance(task.id, (str, int)):
build.task_id = task.id
build.save()
return task, build | Trigger a Build.
Helper that calls ``prepare_build`` and just effectively triggers the Celery
task to be executed by a worker.
:param project: project's documentation to be built
:param version: version of the project to be built. Default: ``latest``
:param commit: commit sha of the version required for sending build status reports
:returns: Celery AsyncResult promise and Build instance
:rtype: tuple | trigger_build | python | readthedocs/readthedocs.org | readthedocs/core/utils/__init__.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/core/utils/__init__.py | MIT |
def cancel_build(build):
"""
Cancel a triggered/running build.
Depending on the current state of the build, it takes one approach or the other:
- Triggered:
Updates the build status and tells Celery to revoke this task.
Workers will know about this and will discard it.
- Running:
Tells Celery to force the termination of the current build
and rely on the worker to update the build's status.
"""
# NOTE: `terminate=True` is required for the child to attend our call
# immediately when it's running the build. Otherwise, it finishes the
# task. However, to revoke a task that has not started yet, we don't
# need it.
if build.state == BUILD_STATE_TRIGGERED:
# Since the task won't be executed at all, we need to update the
# Build object here.
terminate = False
build.state = BUILD_STATE_CANCELLED
build.success = False
# Add a notification for this build
Notification.objects.add(
message_id=BuildCancelled.CANCELLED_BY_USER,
attached_to=build,
dismissable=False,
)
build.length = 0
build.save()
else:
# In this case, we left the update of the Build object to the task
# itself to be executed in the `on_failure` handler.
terminate = True
log.warning(
"Canceling build.",
project_slug=build.project.slug,
version_slug=build.version.slug,
build_id=build.pk,
build_task_id=build.task_id,
terminate=terminate,
)
app.control.revoke(build.task_id, signal=signal.SIGINT, terminate=terminate) | Cancel a triggered/running build.
Depending on the current state of the build, it takes one approach or the other:
- Triggered:
Updates the build status and tells Celery to revoke this task.
Workers will know about this and will discard it.
- Running:
Tells Celery to force the termination of the current build
and rely on the worker to update the build's status. | cancel_build | python | readthedocs/readthedocs.org | readthedocs/core/utils/__init__.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/core/utils/__init__.py | MIT |
def send_email_from_object(email: EmailMultiAlternatives | EmailMessage):
"""Given an email object, send it using our send_email_task task."""
from readthedocs.core.tasks import send_email_task
html_content = None
if isinstance(email, EmailMultiAlternatives):
for content, mimetype in email.alternatives:
if mimetype == "text/html":
html_content = content
break
send_email_task.delay(
recipient=email.to[0],
subject=email.subject,
content=email.body,
content_html=html_content,
from_email=email.from_email,
) | Given an email object, send it using our send_email_task task. | send_email_from_object | python | readthedocs/readthedocs.org | readthedocs/core/utils/__init__.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/core/utils/__init__.py | MIT |
def send_email(
recipient,
subject,
template,
template_html,
context=None,
request=None,
from_email=None,
**kwargs,
):
"""
Alter context passed in and call email send task.
.. seealso::
Task :py:func:`readthedocs.core.tasks.send_email_task`
Task that handles templating and sending email message
"""
from ..tasks import send_email_task
if context is None:
context = {}
context["uri"] = "{scheme}://{host}".format(
scheme="https",
host=settings.PRODUCTION_DOMAIN,
)
content = render_to_string(template, context)
content_html = None
if template_html:
content_html = render_to_string(template_html, context)
send_email_task.delay(
recipient=recipient,
subject=subject,
content=content,
content_html=content_html,
from_email=from_email,
**kwargs,
) | Alter context passed in and call email send task.
.. seealso::
Task :py:func:`readthedocs.core.tasks.send_email_task`
Task that handles templating and sending email message | send_email | python | readthedocs/readthedocs.org | readthedocs/core/utils/__init__.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/core/utils/__init__.py | MIT |
def slugify(value, *args, **kwargs):
"""
Add a DNS safe option to slugify.
:param bool dns_safe: Replace special chars like underscores with ``-``,
and remove leading/trailing ``-``.
"""
dns_safe = kwargs.pop("dns_safe", True)
value = slugify_base(value, *args, **kwargs)
if dns_safe:
value = re.sub("[-_]+", "-", value)
# DNS doesn't allow - at the beginning or end of subdomains
value = mark_safe(value.strip("-"))
return value | Add a DNS safe option to slugify.
:param bool dns_safe: Replace special chars like underscores with ``-``,
and remove leading/trailing ``-``. | slugify | python | readthedocs/readthedocs.org | readthedocs/core/utils/__init__.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/core/utils/__init__.py | MIT |
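
Assuming a configured environment and that `slugify_base` is Django's standard `slugify`, the DNS-safe option changes the output like this:

```python
from readthedocs.core.utils import slugify

slugify("my_project")                  # -> "my-project" (underscores become -)
slugify("my_project", dns_safe=False)  # -> "my_project" (plain Django slugify)
slugify("--project--")                 # -> "project" (no leading/trailing -)
```
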
def get_cache_tag(*args):
"""
Generate a cache tag from the given args.
The final tag is composed of several parts
that form a unique tag (like project and version slug).
All parts are separated using a character that isn't
allowed in slugs to avoid collisions.
"""
return ":".join(args) | Generate a cache tag from the given args.
The final tag is composed of several parts
that form a unique tag (like project and version slug).
All parts are separated using a character that isn't
allowed in slugs to avoid collisions. | get_cache_tag | python | readthedocs/readthedocs.org | readthedocs/core/utils/__init__.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/core/utils/__init__.py | MIT |
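
The function is trivial by design; because `:` can't appear in a slug, tag parts can't collide:

```python
from readthedocs.core.utils import get_cache_tag

get_cache_tag("project", "latest")         # -> "project:latest"
get_cache_tag("project", "latest", "404")  # -> "project:latest:404"
```
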
def extract_valid_attributes_for_model(model, attributes):
"""
Extract the valid attributes for a model from a dictionary of attributes.
:param model: Model class to extract the attributes for.
:param attributes: Dictionary of attributes to extract.
:returns: Tuple with the valid attributes and the invalid attributes if any.
"""
attributes = attributes.copy()
valid_field_names = {field.name for field in model._meta.get_fields()}
valid_attributes = {}
# We can't change a dictionary while iterating over its keys,
# so we make a copy of its keys.
keys = list(attributes.keys())
for key in keys:
if key in valid_field_names:
valid_attributes[key] = attributes.pop(key)
return valid_attributes, attributes | Extract the valid attributes for a model from a dictionary of attributes.
:param model: Model class to extract the attributes for.
:param attributes: Dictionary of attributes to extract.
:returns: Tuple with the valid attributes and the invalid attributes if any. | extract_valid_attributes_for_model | python | readthedocs/readthedocs.org | readthedocs/core/utils/__init__.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/core/utils/__init__.py | MIT |
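
Hypothetical usage inside a configured Django project; the example assumes `Project` has `slug` and `language` fields but no `flavor` field:

```python
from readthedocs.core.utils import extract_valid_attributes_for_model
from readthedocs.projects.models import Project

attributes = {"slug": "docs", "language": "en", "flavor": "spicy"}
valid, invalid = extract_valid_attributes_for_model(Project, attributes)
# valid   == {"slug": "docs", "language": "en"}
# invalid == {"flavor": "spicy"}
```
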
def assert_path_is_inside_docroot(path):
"""
Assert that the given path is inside the DOCROOT directory.
Symlinks are resolved before checking; a SuspiciousFileOperation exception
will be raised if the path is outside the DOCROOT.
.. warning::
This operation isn't safe against TOCTOU (time-of-check to time-of-use) attacks.
Users shouldn't be able to change files while this operation is done.
"""
resolved_path = path.absolute().resolve()
docroot = Path(settings.DOCROOT).absolute()
if not resolved_path.is_relative_to(docroot):
log.error(
"Suspicious operation outside the docroot directory.",
path_resolved=str(resolved_path),
docroot=settings.DOCROOT,
)
raise SuspiciousFileOperation(path) | Assert that the given path is inside the DOCROOT directory.
Symlinks are resolved before checking; a SuspiciousFileOperation exception
will be raised if the path is outside the DOCROOT.
.. warning::
This operation isn't safe against TOCTOU (time-of-check to time-of-use) attacks.
Users shouldn't be able to change files while this operation is done. | assert_path_is_inside_docroot | python | readthedocs/readthedocs.org | readthedocs/core/utils/filesystem.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/core/utils/filesystem.py | MIT |
def safe_open(
path, *args, allow_symlinks=False, base_path=None, max_size_bytes=MAX_FILE_SIZE_BYTES, **kwargs
):
"""
Wrapper around path.open() to check for symlinks.
- Checks for symlinks to avoid symlink following vulnerabilities
like GHSA-368m-86q9-m99w.
- Checks that files aren't out of the DOCROOT directory.
- Checks that files aren't too large.
:param allow_symlinks: If `False` and the path is a symlink, raise `UnsupportedSymlinkFileError`
This prevents reading the contents of other files users shouldn't have access to.
:param base_path: If given, check that the path isn't located outside the base path
(usually the directory where the project was cloned).
It must be given if `allow_symlinks` is set to `True`.
This prevents path traversal attacks (even when using symlinks).
:param max_size_bytes: Maximum file size allowed in bytes when reading a file.
The extra *args and **kwargs will be passed to the open() method.
.. warning::
This operation isn't safe against TOCTOU (time-of-check to time-of-use) attacks.
Users shouldn't be able to change files while this operation is done.
"""
if allow_symlinks and not base_path:
raise ValueError("base_path must be given if symlinks are allowed.")
path = Path(path).absolute()
log.bind(
path_resolved=str(path.absolute().resolve()),
)
if path.exists() and not path.is_file():
raise FileIsNotRegularFile(FileIsNotRegularFile.SYMLINK_USED)
if not allow_symlinks and path.is_symlink():
log.info("Skipping file because it's a symlink.")
raise UnsupportedSymlinkFileError(UnsupportedSymlinkFileError.SYMLINK_USED)
# Expand symlinks.
resolved_path = path.resolve()
if resolved_path.exists():
file_size = resolved_path.stat().st_size
if file_size > max_size_bytes:
log.info("File is too large.", size_bytes=file_size)
raise BuildUserError(BuildUserError.FILE_TOO_LARGE)
if allow_symlinks and base_path:
base_path = Path(base_path).absolute()
if not resolved_path.is_relative_to(base_path):
# Trying to path traversal via a symlink, sneaky!
log.info("Path traversal via symlink.")
raise SymlinkOutsideBasePath(SymlinkOutsideBasePath.SYMLINK_USED)
assert_path_is_inside_docroot(resolved_path)
# The encoding is valid only if the file opened is a text file,
# this function is used to read both types of files (text and binary),
# so we can't specify the encoding here.
# pylint: disable=unspecified-encoding
return resolved_path.open(*args, **kwargs) | Wrapper around path.open() to check for symlinks.
- Checks for symlinks to avoid symlink following vulnerabilities
like GHSA-368m-86q9-m99w.
- Checks that files aren't out of the DOCROOT directory.
- Checks that files aren't too large.
:param allow_symlinks: If `False` and the path is a symlink, raise `UnsupportedSymlinkFileError`
This prevents reading the contents of other files users shouldn't have access to.
:param base_path: If given, check that the path isn't located outside the base path
(usually the directory where the project was cloned).
It must be given if `allow_symlinks` is set to `True`.
This prevents path traversal attacks (even when using symlinks).
:param max_size_bytes: Maximum file size allowed in bytes when reading a file.
The extra *args and **kwargs will be passed to the open() method.
.. warning::
This operation isn't safe against TOCTOU (time-of-check to time-of-use) attacks.
Users shouldn't be able to change files while this operation is done. | safe_open | python | readthedocs/readthedocs.org | readthedocs/core/utils/filesystem.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/core/utils/filesystem.py | MIT |
def safe_copytree(from_dir, to_dir, *args, **kwargs):
"""
Wrapper around shutil.copytree to check for symlinks.
If any of the directories point to symlinks, cancel the operation.
We don't want to copy contents outside of those directories.
The extra *args and **kwargs will be passed to the copytree() function.
.. warning::
This operation isn't safe against TOCTOU (time-of-check to time-of-use) attacks.
Users shouldn't be able to change files while this operation is done.
"""
from_dir = Path(from_dir)
to_dir = Path(to_dir)
if from_dir.is_symlink() or to_dir.is_symlink():
log.info(
"Not copying directory, one of the paths is a symlink.",
from_dir=from_dir,
from_dir_resolved=from_dir.resolve(),
to_dir=to_dir,
to_dir_resolved=to_dir.resolve(),
)
return False
assert_path_is_inside_docroot(from_dir)
assert_path_is_inside_docroot(to_dir)
return shutil.copytree(
from_dir,
to_dir,
*args,
# Copy symlinks as is, instead of its contents.
symlinks=True,
**kwargs,
) | Wrapper around shutil.copytree to check for symlinks.
If any of the directories point to symlinks, cancel the operation.
We don't want to copy contents outside of those directories.
The extra *args and **kwargs will be passed to the copytree() function.
.. warning::
This operation isn't safe against TOCTOU (time-of-check to time-of-use) attacks.
Users shouldn't be able to change files while this operation is done. | safe_copytree | python | readthedocs/readthedocs.org | readthedocs/core/utils/filesystem.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/core/utils/filesystem.py | MIT |
def safe_rmtree(path, *args, **kwargs):
"""
Wrapper around shutil.rmtree to check for symlinks.
shutil.rmtree doesn't follow symlinks by default;
this function just logs in case users are trying to use symlinks.
https://docs.python.org/3/library/shutil.html#shutil.rmtree
The extra *args and **kwargs will be passed to the rmtree() function.
"""
path = Path(path)
if path.is_symlink():
log.info(
"Not deleting directory because it's a symlink.",
path=str(path),
resolved_path=path.resolve(),
)
return None
assert_path_is_inside_docroot(path)
return shutil.rmtree(path, *args, **kwargs) | Wrapper around shutil.rmtree to check for symlinks.
shutil.rmtree doesn't follow symlinks by default;
this function just logs in case users are trying to use symlinks.
https://docs.python.org/3/library/shutil.html#shutil.rmtree
The extra *args and **kwargs will be passed to the rmtree() function. | safe_rmtree | python | readthedocs/readthedocs.org | readthedocs/core/utils/filesystem.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/core/utils/filesystem.py | MIT |
def get_task_data(self):
"""Return tuple with state to be set next and results task."""
state = states.STARTED
info = {
"task_name": self.name,
"context": self.request.get("permission_context", {}),
"public_data": self.request.get("public_data", {}),
}
return state, info | Return a tuple with the state to be set next and the task results. | get_task_data | python | readthedocs/readthedocs.org | readthedocs/core/utils/tasks/public.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/core/utils/tasks/public.py | MIT |
def set_permission_context(self, context):
"""
Set data that can be used by ``check_permission`` to authorize a
request for this task. By default it will be the ``kwargs`` passed
into the task.
"""
self.request.update(permission_context=context)
self.update_progress_data() | Set data that can be used by ``check_permission`` to authorize a
request for this task. By default it will be the ``kwargs`` passed
into the task. | set_permission_context | python | readthedocs/readthedocs.org | readthedocs/core/utils/tasks/public.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/core/utils/tasks/public.py | MIT |
def set_public_data(self, data):
"""
Set data that can be displayed in the frontend to authorized users.
This might include progress data about the task.
"""
self.request.update(public_data=data)
self.update_progress_data() | Set data that can be displayed in the frontend to authorized users.
This might include progress data about the task. | set_public_data | python | readthedocs/readthedocs.org | readthedocs/core/utils/tasks/public.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/core/utils/tasks/public.py | MIT |
def permission_check(check):
"""
Decorator for tasks that have PublicTask as base.
.. note::
The decorator should be on top of the task decorator.
Example usage with permission checks::
@PublicTask.permission_check(user_id_matches)
@celery.task(base=PublicTask)
def my_public_task(user_id):
pass
"""
def decorator(func):
func.check_permission = check
return func
return decorator | Decorator for tasks that have PublicTask as base.
.. note::
The decorator should be on top of the task decorator.
Example usage with permission checks::
@PublicTask.permission_check(user_id_matches)
@celery.task(base=PublicTask)
def my_public_task(user_id):
pass | permission_check | python | readthedocs/readthedocs.org | readthedocs/core/utils/tasks/public.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/core/utils/tasks/public.py | MIT |
def validate_list(value):
"""Check if ``value`` is an iterable."""
if isinstance(value, (dict, str)):
raise ConfigValidationError(
message_id=ConfigValidationError.INVALID_LIST,
format_values={
"value": value,
},
)
if not hasattr(value, "__iter__"):
raise ConfigValidationError(
message_id=ConfigValidationError.INVALID_LIST,
format_values={
"value": value,
},
)
return list(value) | Check if ``value`` is an iterable. | validate_list | python | readthedocs/readthedocs.org | readthedocs/config/validation.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/validation.py | MIT |
def validate_dict(value):
"""Check if ``value`` is a dictionary."""
if not isinstance(value, dict):
raise ConfigValidationError(
message_id=ConfigValidationError.INVALID_DICT,
format_values={
"value": value,
},
) | Check if ``value`` is a dictionary. | validate_dict | python | readthedocs/readthedocs.org | readthedocs/config/validation.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/validation.py | MIT |
def validate_choice(value, choices):
"""Check that ``value`` is in ``choices``."""
choices = validate_list(choices)
if value not in choices:
raise ConfigValidationError(
message_id=ConfigValidationError.INVALID_CHOICE,
format_values={
"value": value,
"choices": ", ".join(map(str, choices)),
},
)
return value | Check that ``value`` is in ``choices``. | validate_choice | python | readthedocs/readthedocs.org | readthedocs/config/validation.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/validation.py | MIT |
def validate_bool(value):
"""Check that ``value`` is an boolean value."""
if value not in (0, 1, False, True):
raise ConfigValidationError(
message_id=ConfigValidationError.INVALID_BOOL,
format_values={
"value": value,
},
)
return bool(value) | Check that ``value`` is a boolean value. | validate_bool | python | readthedocs/readthedocs.org | readthedocs/config/validation.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/validation.py | MIT |
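
Quick checks of the validators above; invalid values raise `ConfigValidationError` (the exception's import path is assumed here):

```python
from readthedocs.config.validation import validate_bool, validate_choice

validate_bool(1)                         # -> True
validate_bool(False)                     # -> False
validate_choice("pdf", ["pdf", "epub"])  # -> "pdf"

validate_bool("yes")                     # raises ConfigValidationError
```
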
def validate_path(value, base_path):
"""Check that ``value`` is a valid path name and normamlize it."""
string_value = validate_string(value)
if not string_value:
raise ConfigValidationError(
message_id=ConfigValidationError.INVALID_PATH,
format_values={
"value": value,
},
)
full_path = os.path.join(base_path, string_value)
rel_path = os.path.relpath(full_path, base_path)
return rel_path | Check that ``value`` is a valid path name and normalize it. | validate_path | python | readthedocs/readthedocs.org | readthedocs/config/validation.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/validation.py | MIT |
def validate_path_pattern(value):
"""
Normalizes and validates a path pattern.
- Normalizes the path stripping multiple ``/``.
- Expands relative paths.
- Checks the final path is relative to the root of the site ``/``.
"""
path = validate_string(value)
# Start the path with ``/`` to interpret the path as absolute to ``/``.
path = "/" + path.lstrip("/")
path = os.path.normpath(path)
if not os.path.isabs(path):
raise ConfigValidationError(
message_id=ConfigValidationError.INVALID_PATH_PATTERN,
format_values={
"value": value,
},
)
# Remove the leading ``/`` from the path once it is validated.
path = path.lstrip("/")
if not path:
raise ConfigValidationError(
message_id=ConfigValidationError.INVALID_PATH_PATTERN,
format_values={
"value": value,
},
)
return path | Normalizes and validates a path pattern.
- Normalizes the path stripping multiple ``/``.
- Expands relative paths.
- Checks the final path is relative to the root of the site ``/``. | validate_path_pattern | python | readthedocs/readthedocs.org | readthedocs/config/validation.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/validation.py | MIT |
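
The normalization is easiest to see with a few examples (a sketch; error details come from `ConfigValidationError`):

```python
from readthedocs.config.validation import validate_path_pattern

validate_path_pattern("docs//build/")  # -> "docs/build"
validate_path_pattern("./docs/../en")  # -> "en"
validate_path_pattern("/")             # raises: empty after normalization
```
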
def validate_string(value):
"""Check that ``value`` is a string type."""
if not isinstance(value, str):
raise ConfigValidationError(
message_id=ConfigValidationError.INVALID_STRING,
format_values={
"value": value,
},
)
return str(value) | Check that ``value`` is a string type. | validate_string | python | readthedocs/readthedocs.org | readthedocs/config/validation.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/validation.py | MIT |
def list_to_dict(list_):
"""Transform a list to a dictionary with its indices as keys."""
dict_ = {str(i): element for i, element in enumerate(list_)}
return dict_ | Transform a list to a dictionary with its indices as keys. | list_to_dict | python | readthedocs/readthedocs.org | readthedocs/config/utils.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/utils.py | MIT |
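
For example:

```python
from readthedocs.config.utils import list_to_dict

list_to_dict(["pip", "setuptools"])
# -> {"0": "pip", "1": "setuptools"}
```

The string indices presumably let list entries be addressed with the same dotted-key syntax that `pop_config` uses for dictionaries.
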
def catch_validation_error(self, key):
"""Catch a ``ConfigValidationError`` and raises a ``ConfigError`` error."""
# NOTE: I don't much like this pattern of re-raising an exception via a context manager.
# I think we should raise the exception where it happens, instead of encapsulating all of them.
# The only small limitation that I found is the requirement of passing ``key`` down to where
# the exception happens.
# I'm keeping this pattern for now until we decide to refactor it.
try:
yield
except ConfigValidationError as error:
# Expand the format values defined when the exception is raised
# with extra ones we have here
format_values = getattr(error, "format_values", {})
format_values.update(
{
"key": key,
"value": error.format_values.get("value"),
"source_file": os.path.relpath(self.source_file, self.base_path),
}
)
raise ConfigError(
message_id=error.message_id,
format_values=format_values,
) from error | Catch a ``ConfigValidationError`` and raise a ``ConfigError``. | catch_validation_error | python | readthedocs/readthedocs.org | readthedocs/config/config.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/config.py | MIT |
def pop(self, name, container, default, raise_ex):
"""
Search and pop a key inside a dict.
This will pop the keys recursively if the container is empty.
:param name: the key name in a list form (``['key', 'inner']``)
:param container: a dictionary that contains the key
:param default: default value to return if the key doesn't exist
:param raise_ex: if True, raises an exception when a key is not found
"""
key = name[0]
validate_dict(container)
if key in container:
if len(name) > 1:
value = self.pop(name[1:], container[key], default, raise_ex)
if not container[key]:
container.pop(key)
else:
value = container.pop(key)
return value
if raise_ex:
raise ConfigValidationError(
message_id=ConfigValidationError.VALUE_NOT_FOUND,
format_values={
"value": key,
},
)
return default | Search and pop a key inside a dict.
This will pop the keys recursively if the container is empty.
:param name: the key name in a list form (``['key', 'inner']``)
:param container: a dictionary that contains the key
:param default: default value to return if the key doesn't exist
:param raise_ex: if True, raises an exception when a key is not found | pop | python | readthedocs/readthedocs.org | readthedocs/config/config.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/config.py | MIT |
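
The recursion is easiest to see on a plain dict. Below is a standalone sketch of the same logic (`self` only carries the recursion; the dict validation and the real `ConfigValidationError` are omitted):

```python
def pop(name, container, default, raise_ex):
    key = name[0]
    if key in container:
        if len(name) > 1:
            value = pop(name[1:], container[key], default, raise_ex)
            if not container[key]:  # drop containers emptied by the pop
                container.pop(key)
        else:
            value = container.pop(key)
        return value
    if raise_ex:
        raise KeyError(key)  # the real code raises ConfigValidationError
    return default

raw = {"python": {"install": [], "version": "3"}}
pop(["python", "version"], raw, None, False)  # -> "3"
# raw is now {"python": {"install": []}}; "python" survives because
# it still holds the "install" key.
pop(["python", "install"], raw, None, False)  # -> []
# raw is now {}: popping "install" emptied "python", which was
# then popped as well.
```
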
def pop_config(self, key, default=None, raise_ex=False):
"""
Search and pop a key (recursively) from `self._raw_config`.
:param key: the key name in a dotted form (``key.innerkey``)
:param default: Optionally, it can receive a default value
:param raise_ex: If True, raises an exception when the key is not found
"""
return self.pop(key.split("."), self._raw_config, default, raise_ex) | Search and pop a key (recursively) from `self._raw_config`.
:param key: the key name in a dotted form (``key.innerkey``)
:param default: Optionally, it can receive a default value
:param raise_ex: If True, raises an exception when the key is not found | pop_config | python | readthedocs/readthedocs.org | readthedocs/config/config.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/config.py | MIT |
def is_using_setup_py_install(self):
"""Check if this project is using `setup.py install` as installation method."""
for install in self.python.install:
if isinstance(install, PythonInstall) and install.method == SETUPTOOLS:
return True
return False | Check if this project is using `setup.py install` as installation method. | is_using_setup_py_install | python | readthedocs/readthedocs.org | readthedocs/config/config.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/config.py | MIT |
def __getattr__(self, name):
"""Raise an error for unknown attributes."""
raise ConfigError(
message_id=ConfigError.KEY_NOT_SUPPORTED_IN_VERSION,
format_values={"key": name},
) | Raise an error for unknown attributes. | __getattr__ | python | readthedocs/readthedocs.org | readthedocs/config/config.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/config.py | MIT |
def validate(self):
"""Validates and process ``raw_config``."""
self._config["formats"] = self.validate_formats()
# This should be called before ``validate_python`` and ``validate_conda``
self._config["build"] = self.validate_build()
self._config["conda"] = self.validate_conda()
self._config["python"] = self.validate_python()
# Call this before validate sphinx and mkdocs
self.validate_doc_types()
self._config["mkdocs"] = self.validate_mkdocs()
self._config["sphinx"] = self.validate_sphinx()
self._config["submodules"] = self.validate_submodules()
self._config["search"] = self.validate_search()
if self.deprecate_implicit_keys:
self.validate_deprecated_implicit_keys()
        self.validate_keys() | Validates and processes ``raw_config``. | validate | python | readthedocs/readthedocs.org | readthedocs/config/config.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/config.py | MIT |
def validate_formats(self):
"""
Validates that formats contains only valid formats.
The ``ALL`` keyword can be used to indicate that all formats are used.
We ignore the default values here.
"""
formats = self.pop_config("formats", [])
if formats == ALL:
return self.valid_formats
with self.catch_validation_error("formats"):
validate_list(formats)
for format_ in formats:
validate_choice(format_, self.valid_formats)
return formats | Validates that formats contains only valid formats.
The ``ALL`` keyword can be used to indicate that all formats are used.
We ignore the default values here. | validate_formats | python | readthedocs/readthedocs.org | readthedocs/config/config.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/config.py | MIT |
def validate_conda(self):
"""Validates the conda key."""
raw_conda = self._raw_config.get("conda")
if raw_conda is None:
if self.is_using_conda and not self.is_using_build_commands:
raise ConfigError(
message_id=ConfigError.CONDA_KEY_REQUIRED,
format_values={"key": "conda"},
)
return None
with self.catch_validation_error("conda"):
validate_dict(raw_conda)
conda = {}
with self.catch_validation_error("conda.environment"):
environment = self.pop_config("conda.environment", raise_ex=True)
conda["environment"] = validate_path(environment, self.base_path)
return conda | Validates the conda key. | validate_conda | python | readthedocs/readthedocs.org | readthedocs/config/config.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/config.py | MIT |
def validate_build_config_with_os(self):
"""
Validates the build object (new format).
At least one element must be provided in ``build.tools``.
"""
build = {}
with self.catch_validation_error("build.os"):
build_os = self.pop_config("build.os", raise_ex=True)
build["os"] = validate_choice(build_os, self.settings["os"].keys())
tools = {}
with self.catch_validation_error("build.tools"):
tools = self.pop_config("build.tools")
if tools:
validate_dict(tools)
for tool in tools.keys():
validate_choice(tool, self.settings["tools"].keys())
jobs = {}
with self.catch_validation_error("build.jobs"):
            # FIXME: should we use `default={}` or keep the `None` here and
            # short-circuit the rest of the logic?
jobs = self.pop_config("build.jobs", default={})
validate_dict(jobs)
# NOTE: besides validating that each key is one of the expected
# ones, we could validate the value of each of them is a list of
# commands. However, I don't think we should validate the "command"
# looks like a real command.
valid_jobs = list(BuildJobs.model_fields.keys())
for job in jobs.keys():
validate_choice(job, valid_jobs)
commands = []
with self.catch_validation_error("build.commands"):
commands = self.pop_config("build.commands", default=[])
validate_list(commands)
if not (tools or commands):
raise ConfigError(
message_id=ConfigError.NOT_BUILD_TOOLS_OR_COMMANDS,
format_values={
"key": "build",
},
)
if commands and jobs:
raise ConfigError(
message_id=ConfigError.BUILD_JOBS_AND_COMMANDS,
format_values={
"key": "build",
},
)
build["jobs"] = {}
with self.catch_validation_error("build.jobs.build"):
build["jobs"]["build"] = self.validate_build_jobs_build(jobs)
# Remove the build.jobs.build key from the build.jobs dict,
# since it's the only key that should be a dictionary,
# it was already validated above.
jobs.pop("build", None)
for job, job_commands in jobs.items():
with self.catch_validation_error(f"build.jobs.{job}"):
build["jobs"][job] = [
validate_string(job_command) for job_command in validate_list(job_commands)
]
build["commands"] = []
for command in commands:
with self.catch_validation_error("build.commands"):
build["commands"].append(validate_string(command))
build["tools"] = {}
if tools:
for tool, version in tools.items():
with self.catch_validation_error(f"build.tools.{tool}"):
build["tools"][tool] = validate_choice(
version,
self.settings["tools"][tool].keys(),
)
build["apt_packages"] = self.validate_apt_packages()
return build | Validates the build object (new format).
At least one element must be provided in ``build.tools``. | validate_build_config_with_os | python | readthedocs/readthedocs.org | readthedocs/config/config.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/config.py | MIT |
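The mutual-exclusion rules above are easier to see against concrete raw configs; a sketch of inputs that pass and fail (values are illustrative):

# Passes: build.os plus build.tools, optionally with build.jobs.
ok = {"build": {"os": "ubuntu-22.04", "tools": {"python": "3"},
                "jobs": {"pre_build": ["echo before build"]}}}
# Raises BUILD_JOBS_AND_COMMANDS: commands and jobs are mutually exclusive.
bad = {"build": {"os": "ubuntu-22.04", "commands": ["make html"],
                 "jobs": {"pre_build": ["echo before build"]}}}
# Raises NOT_BUILD_TOOLS_OR_COMMANDS: neither tools nor commands given.
empty = {"build": {"os": "ubuntu-22.04"}}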
def validate_apt_package(self, index):
"""
Validate the package name to avoid injections of extra options.
We validate that they aren't interpreted as an option or file.
See https://manpages.ubuntu.com/manpages/xenial/man8/apt-get.8.html
and https://www.debian.org/doc/manuals/debian-reference/ch02.en.html#_debian_package_file_names # noqa
        for allowed chars in package names.
"""
key = f"build.apt_packages.{index}"
package = self.pop_config(key)
with self.catch_validation_error(key):
validate_string(package)
package = package.strip()
invalid_starts = [
# Don't allow extra options.
"-",
# Don't allow to install from a path.
"/",
".",
]
for start in invalid_starts:
if package.startswith(start):
raise ConfigError(
message_id=ConfigError.APT_INVALID_PACKAGE_NAME_PREFIX,
format_values={
"prefix": start,
"package": package,
"key": key,
},
)
            # List of valid chars in package names.
pattern = re.compile(r"^[a-zA-Z0-9][a-zA-Z0-9.+-]*$")
if not pattern.match(package):
raise ConfigError(
message_id=ConfigError.APT_INVALID_PACKAGE_NAME,
format_values={
"package": package,
"key": key,
},
)
return package | Validate the package name to avoid injections of extra options.
We validate that they aren't interpreted as an option or file.
See https://manpages.ubuntu.com/manpages/xenial/man8/apt-get.8.html
and https://www.debian.org/doc/manuals/debian-reference/ch02.en.html#_debian_package_file_names # noqa
for allowed chars in package names. | validate_apt_package | python | readthedocs/readthedocs.org | readthedocs/config/config.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/config.py | MIT |
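A quick sketch of what the pattern above accepts and rejects (the package names are examples):

import re

pattern = re.compile(r"^[a-zA-Z0-9][a-zA-Z0-9.+-]*$")
assert pattern.match("libjpeg-dev")       # plain Debian-style name
assert pattern.match("g++")               # '+' allowed after the first char
assert not pattern.match("-o=Dpkg::Foo")  # option injection rejected
assert not pattern.match("./local.deb")   # paths rejected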
def validate_python(self):
"""
Validates the python key.
        validate_build should be called before this, since it initializes the
build.image attribute.
.. note::
- ``version`` can be a string or number type.
- ``extra_requirements`` needs to be used with ``install: 'pip'``.
"""
raw_python = self._raw_config.get("python", {})
with self.catch_validation_error("python"):
validate_dict(raw_python)
python = {}
with self.catch_validation_error("python.install"):
raw_install = self._raw_config.get("python", {}).get("install", [])
validate_list(raw_install)
if raw_install:
# Transform to a dict, so it's easy to validate extra keys.
self._raw_config.setdefault("python", {})["install"] = list_to_dict(raw_install)
else:
self.pop_config("python.install")
raw_install = self._raw_config.get("python", {}).get("install", [])
python["install"] = [
self.validate_python_install(index) for index in range(len(raw_install))
]
return python | Validates the python key.
validate_build should be called before this, since it initializes the
build.image attribute.
.. note::
- ``version`` can be a string or number type.
- ``extra_requirements`` needs to be used with ``install: 'pip'``. | validate_python | python | readthedocs/readthedocs.org | readthedocs/config/config.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/config.py | MIT |
def validate_python_install(self, index):
"""Validates the python.install.{index} key."""
python_install = {}
key = "python.install.{}".format(index)
raw_install = self._raw_config["python"]["install"][str(index)]
with self.catch_validation_error(key):
validate_dict(raw_install)
if "requirements" in raw_install:
requirements_key = key + ".requirements"
with self.catch_validation_error(requirements_key):
requirements = validate_path(
self.pop_config(requirements_key),
self.base_path,
)
python_install["requirements"] = requirements
elif "path" in raw_install:
path_key = key + ".path"
with self.catch_validation_error(path_key):
path = validate_path(
self.pop_config(path_key),
self.base_path,
)
python_install["path"] = path
method_key = key + ".method"
with self.catch_validation_error(method_key):
method = validate_choice(
self.pop_config(method_key, PIP),
self.valid_install_method,
)
python_install["method"] = method
extra_req_key = key + ".extra_requirements"
with self.catch_validation_error(extra_req_key):
extra_requirements = validate_list(
self.pop_config(extra_req_key, []),
)
if extra_requirements and python_install["method"] != PIP:
raise ConfigError(
message_id=ConfigError.USE_PIP_FOR_EXTRA_REQUIREMENTS,
)
python_install["extra_requirements"] = extra_requirements
else:
raise ConfigError(
message_id=ConfigError.PIP_PATH_OR_REQUIREMENT_REQUIRED,
format_values={
"key": key,
},
)
return python_install | Validates the python.install.{index} key. | validate_python_install | python | readthedocs/readthedocs.org | readthedocs/config/config.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/config.py | MIT |
def validate_doc_types(self):
"""
        Validates that the user has only one type of documentation.
        This should be called before validating ``sphinx`` or ``mkdocs`` to
        avoid unnecessary validations.
"""
with self.catch_validation_error("."):
if "sphinx" in self._raw_config and "mkdocs" in self._raw_config:
raise ConfigError(
message_id=ConfigError.SPHINX_MKDOCS_CONFIG_TOGETHER,
                ) | Validates that the user has only one type of documentation.
This should be called before validating ``sphinx`` or ``mkdocs`` to
avoid unnecessary validations. | validate_doc_types | python | readthedocs/readthedocs.org | readthedocs/config/config.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/config.py | MIT |
def validate_mkdocs(self):
"""
Validates the mkdocs key.
It makes sure we are using an existing configuration file.
"""
raw_mkdocs = self._raw_config.get("mkdocs")
if raw_mkdocs is None:
return None
with self.catch_validation_error("mkdocs"):
validate_dict(raw_mkdocs)
mkdocs = {}
with self.catch_validation_error("mkdocs.configuration"):
configuration = self.pop_config("mkdocs.configuration", None)
if configuration is not None:
configuration = validate_path(configuration, self.base_path)
mkdocs["configuration"] = configuration
with self.catch_validation_error("mkdocs.fail_on_warning"):
fail_on_warning = self.pop_config("mkdocs.fail_on_warning", False)
mkdocs["fail_on_warning"] = validate_bool(fail_on_warning)
return mkdocs | Validates the mkdocs key.
It makes sure we are using an existing configuration file. | validate_mkdocs | python | readthedocs/readthedocs.org | readthedocs/config/config.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/config.py | MIT |
def validate_sphinx(self):
"""
Validates the sphinx key.
It makes sure we are using an existing configuration file.
.. note::
It should be called after ``validate_mkdocs``. That way
we can default to sphinx if ``mkdocs`` is not given.
"""
raw_sphinx = self._raw_config.get("sphinx")
if raw_sphinx is None:
if self.mkdocs is None:
raw_sphinx = {}
else:
return None
with self.catch_validation_error("sphinx"):
validate_dict(raw_sphinx)
sphinx = {}
with self.catch_validation_error("sphinx.builder"):
builder = validate_choice(
self.pop_config("sphinx.builder", "html"),
self.valid_sphinx_builders.keys(),
)
sphinx["builder"] = self.valid_sphinx_builders[builder]
with self.catch_validation_error("sphinx.configuration"):
configuration = self.pop_config(
"sphinx.configuration",
)
if configuration is not None:
configuration = validate_path(configuration, self.base_path)
sphinx["configuration"] = configuration
with self.catch_validation_error("sphinx.fail_on_warning"):
fail_on_warning = self.pop_config("sphinx.fail_on_warning", False)
sphinx["fail_on_warning"] = validate_bool(fail_on_warning)
return sphinx | Validates the sphinx key.
It makes sure we are using an existing configuration file.
.. note::
It should be called after ``validate_mkdocs``. That way
we can default to sphinx if ``mkdocs`` is not given. | validate_sphinx | python | readthedocs/readthedocs.org | readthedocs/config/config.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/config.py | MIT |
def validate_submodules(self):
"""
Validates the submodules key.
- We can use the ``ALL`` keyword in include or exclude.
- We can't exclude and include submodules at the same time.
"""
raw_submodules = self._raw_config.get("submodules", {})
with self.catch_validation_error("submodules"):
validate_dict(raw_submodules)
submodules = {}
with self.catch_validation_error("submodules.include"):
include = self.pop_config("submodules.include", [])
if include != ALL:
include = [validate_string(submodule) for submodule in validate_list(include)]
submodules["include"] = include
with self.catch_validation_error("submodules.exclude"):
default = [] if submodules["include"] else ALL
exclude = self.pop_config("submodules.exclude", default)
if exclude != ALL:
exclude = [validate_string(submodule) for submodule in validate_list(exclude)]
submodules["exclude"] = exclude
with self.catch_validation_error("submodules"):
is_including = bool(submodules["include"])
is_excluding = submodules["exclude"] == ALL or bool(submodules["exclude"])
if is_including and is_excluding:
raise ConfigError(
message_id=ConfigError.SUBMODULES_INCLUDE_EXCLUDE_TOGETHER,
)
with self.catch_validation_error("submodules.recursive"):
recursive = self.pop_config("submodules.recursive", False)
submodules["recursive"] = validate_bool(recursive)
return submodules | Validates the submodules key.
- We can use the ``ALL`` keyword in include or exclude.
- We can't exclude and include submodules at the same time. | validate_submodules | python | readthedocs/readthedocs.org | readthedocs/config/config.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/config.py | MIT |
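A sketch of raw ``submodules`` values and how the defaults interact under the logic above (the module paths are placeholders):

# Including specific submodules: exclude defaults to [] because include is set.
include_cfg = {"submodules": {"include": ["docs/theme"], "recursive": True}}
# Empty mapping: include defaults to [] and exclude therefore defaults to ALL.
default_cfg = {"submodules": {}}
# Invalid: include and exclude cannot be combined (SUBMODULES_INCLUDE_EXCLUDE_TOGETHER).
invalid_cfg = {"submodules": {"include": ["docs/theme"], "exclude": ["vendor/lib"]}}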
def validate_search(self):
"""
Validates the search key.
- ``ranking`` is a map of path patterns to a rank.
- ``ignore`` is a list of patterns.
- The path pattern supports basic globs (*, ?, [seq]).
        - The rank can be an integer between -10 and 10.
"""
raw_search = self._raw_config.get("search", {})
with self.catch_validation_error("search"):
validate_dict(raw_search)
search = {}
with self.catch_validation_error("search.ranking"):
ranking = self.pop_config("search.ranking", {})
validate_dict(ranking)
valid_rank_range = list(range(-10, 10 + 1))
final_ranking = {}
for pattern, rank in ranking.items():
pattern = validate_path_pattern(pattern)
validate_choice(rank, valid_rank_range)
final_ranking[pattern] = rank
search["ranking"] = final_ranking
with self.catch_validation_error("search.ignore"):
ignore_default = [
"search.html",
"search/index.html",
"404.html",
"404/index.html",
]
search_ignore = self.pop_config("search.ignore", ignore_default)
validate_list(search_ignore)
final_ignore = [validate_path_pattern(pattern) for pattern in search_ignore]
search["ignore"] = final_ignore
return search | Validates the search key.
- ``ranking`` is a map of path patterns to a rank.
- ``ignore`` is a list of patterns.
- The path pattern supports basic globs (*, ?, [seq]).
- The rank can be an integer between -10 and 10. | validate_search | python | readthedocs/readthedocs.org | readthedocs/config/config.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/config.py | MIT |
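A sketch of a raw ``search`` mapping that passes the validation above (the patterns and ranks are illustrative):

search_cfg = {
    "search": {
        # Path glob -> rank; each rank must be an integer in [-10, 10].
        "ranking": {"api/v1/*": -5, "tutorials/*": 3},
        # Overrides the default ignore list (search/404 pages).
        "ignore": ["changelog.html", "search.html"],
    }
}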
def validate_deprecated_implicit_keys(self):
"""
Check for deprecated usages and raise an exception if found.
- If the user is using build.commands, we don't need the sphinx or mkdocs keys.
- If the sphinx key is used, a path to the configuration file is required.
- If the mkdocs key is used, a path to the configuration file is required.
- If none of the sphinx or mkdocs keys are used,
and the user isn't overriding the new build jobs,
the sphinx key is explicitly required.
"""
if self.is_using_build_commands:
return
has_sphinx_key = "sphinx" in self.source_config
has_mkdocs_key = "mkdocs" in self.source_config
if has_sphinx_key and not self.sphinx.configuration:
raise ConfigError(
message_id=ConfigError.SPHINX_CONFIG_MISSING,
)
if has_mkdocs_key and not self.mkdocs.configuration:
raise ConfigError(
message_id=ConfigError.MKDOCS_CONFIG_MISSING,
)
if not self.new_jobs_overriden and not has_sphinx_key and not has_mkdocs_key:
raise ConfigError(
message_id=ConfigError.SPHINX_CONFIG_MISSING,
) | Check for deprecated usages and raise an exception if found.
- If the user is using build.commands, we don't need the sphinx or mkdocs keys.
- If the sphinx key is used, a path to the configuration file is required.
- If the mkdocs key is used, a path to the configuration file is required.
- If none of the sphinx or mkdocs keys are used,
and the user isn't overriding the new build jobs,
the sphinx key is explicitly required. | validate_deprecated_implicit_keys | python | readthedocs/readthedocs.org | readthedocs/config/config.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/config.py | MIT |
def new_jobs_overriden(self):
"""Check if any of the new (undocumented) build jobs are overridden."""
build_jobs = self.build.jobs
new_jobs = (
build_jobs.create_environment,
build_jobs.install,
build_jobs.build.html,
build_jobs.build.pdf,
build_jobs.build.epub,
build_jobs.build.htmlzip,
)
for job in new_jobs:
if job is not None:
return True
return False | Check if any of the new (undocumented) build jobs are overridden. | new_jobs_overriden | python | readthedocs/readthedocs.org | readthedocs/config/config.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/config.py | MIT |
def validate_keys(self):
"""
Checks that we don't have extra keys (invalid ones).
This should be called after all the validations are done and all keys
are popped from `self._raw_config`.
"""
# The version key isn't popped, but it's
# validated in `load`.
self.pop_config("version", None)
wrong_key = ".".join(self._get_extra_key(self._raw_config))
if wrong_key:
raise ConfigError(
message_id=ConfigError.INVALID_KEY_NAME,
format_values={
"key": wrong_key,
},
) | Checks that we don't have extra keys (invalid ones).
This should be called after all the validations are done and all keys
are popped from `self._raw_config`. | validate_keys | python | readthedocs/readthedocs.org | readthedocs/config/config.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/config.py | MIT |
def _get_extra_key(self, value):
"""
Get the extra keyname (list form) of a dict object.
If there is more than one extra key, the first one is returned.
Example::
{
'key': {
'name': 'inner',
}
}
Will return `['key', 'name']`.
"""
if isinstance(value, dict) and value:
key_name = next(iter(value))
return [key_name] + self._get_extra_key(value[key_name])
return [] | Get the extra keyname (list form) of a dict object.
If there is more than one extra key, the first one is returned.
Example::
{
'key': {
'name': 'inner',
}
}
Will return `['key', 'name']`. | _get_extra_key | python | readthedocs/readthedocs.org | readthedocs/config/config.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/config.py | MIT |
def load(path, readthedocs_yaml_path=None):
"""
Load a project configuration and the top-most build config for a given path.
That is usually the root of the project, but will look deeper.
"""
# Custom non-default config file location
if readthedocs_yaml_path:
filename = os.path.join(path, readthedocs_yaml_path)
if not os.path.exists(filename):
raise ConfigError(
message_id=ConfigError.CONFIG_PATH_NOT_FOUND,
format_values={"directory": os.path.relpath(filename, path)},
)
# Default behavior
else:
filename = find_one(path, CONFIG_FILENAME_REGEX)
if not filename:
raise ConfigError(ConfigError.DEFAULT_PATH_NOT_FOUND)
# Allow symlinks, but only the ones that resolve inside the base directory.
with safe_open(filename, "r", allow_symlinks=True, base_path=path) as configuration_file:
try:
config = parse(configuration_file.read())
except ParseError as error:
raise ConfigError(
message_id=ConfigError.SYNTAX_INVALID,
format_values={
"filename": os.path.relpath(filename, path),
"error_message": str(error),
},
) from error
version = config.get("version", 2)
if version not in (2, "2"):
raise ConfigError(message_id=ConfigError.INVALID_VERSION)
build_config = BuildConfigV2(
config,
source_file=filename,
)
build_config.validate()
return build_config | Load a project configuration and the top-most build config for a given path.
That is usually the root of the project, but will look deeper. | load | python | readthedocs/readthedocs.org | readthedocs/config/config.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/config.py | MIT |
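A usage sketch for ``load`` (the checkout path is hypothetical):

from readthedocs.config.config import load

# Default: find .readthedocs.yaml (or a variant) at the checkout root.
config = load("/tmp/checkout")
# Custom path: the file must exist, otherwise CONFIG_PATH_NOT_FOUND is raised.
config = load("/tmp/checkout", readthedocs_yaml_path="docs/.readthedocs.yaml")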
def find_one(path, filename_regex):
"""Find the first file in ``path`` that match ``filename_regex`` regex."""
_path = os.path.abspath(path)
for filename in os.listdir(_path):
if re.match(filename_regex, filename):
return os.path.join(_path, filename)
return "" | Find the first file in ``path`` that match ``filename_regex`` regex. | find_one | python | readthedocs/readthedocs.org | readthedocs/config/find.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/find.py | MIT |
def parse(stream):
"""
    Take a file-like object and return a project configuration.
    The file needs to be valid YAML and contain only a mapping as its document.
Everything else raises a ``ParseError``.
"""
try:
config = yaml.safe_load(stream)
except yaml.YAMLError as error:
raise ParseError("YAML: {message}".format(message=error))
if not isinstance(config, dict):
raise ParseError("Expected mapping")
if not config:
raise ParseError("Empty config")
    return config | Take a file-like object and return a project configuration.
The file needs to be valid YAML and contain only a mapping as its document.
Everything else raises a ``ParseError``. | parse | python | readthedocs/readthedocs.org | readthedocs/config/parser.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/parser.py | MIT |
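A short sketch of the three outcomes of ``parse``:

import io

from readthedocs.config.parser import parse

parse(io.StringIO("version: 2"))   # -> {"version": 2}
parse(io.StringIO("- a\n- list"))  # raises ParseError("Expected mapping")
parse(io.StringIO("{}"))           # raises ParseError("Empty config")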
def apply_fs(tmpdir, contents):
"""
Create the directory structure specified in ``contents``.
It's a dict of filenames as keys and the file contents as values. If the
value is another dict, it's a subdirectory.
"""
for filename, content in contents.items():
if hasattr(content, "items"):
apply_fs(tmpdir.mkdir(filename), content)
else:
file = tmpdir.join(filename)
file.write(content)
return tmpdir | Create the directory structure specified in ``contents``.
It's a dict of filenames as keys and the file contents as values. If the
value is another dict, it's a subdirectory. | apply_fs | python | readthedocs/readthedocs.org | readthedocs/config/tests/utils.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/tests/utils.py | MIT |
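A sketch of ``apply_fs`` inside a pytest ``tmpdir`` test (the file names and contents are made up):

from readthedocs.config.tests.utils import apply_fs

def test_fixture_layout(tmpdir):
    apply_fs(tmpdir, {
        "readthedocs.yml": "version: 2\n",
        "docs": {"index.rst": "Hello\n"},  # nested dict -> docs/ subdirectory
    })
    assert tmpdir.join("docs", "index.rst").read() == "Hello\n"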
def test_load_version2(tmpdir):
apply_fs(
tmpdir,
{
"readthedocs.yml": textwrap.dedent(
"""
version: 2
build:
os: "ubuntu-22.04"
tools:
python: "3"
"""
),
},
)
base = str(tmpdir)
with override_settings(DOCROOT=tmpdir):
build = load(base, {})
assert isinstance(build, BuildConfigV2) | version: 2
build:
os: "ubuntu-22.04"
tools:
python: "3" | test_load_version2 | python | readthedocs/readthedocs.org | readthedocs/config/tests/test_config.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/tests/test_config.py | MIT |
def test_load_unknow_version(tmpdir):
apply_fs(
tmpdir,
{
"readthedocs.yml": textwrap.dedent(
"""
version: 9
"""
),
},
)
base = str(tmpdir)
with raises(ConfigError) as excinfo:
with override_settings(DOCROOT=tmpdir):
load(base, {})
assert excinfo.value.message_id == ConfigError.INVALID_VERSION | version: 9 | test_load_unknow_version | python | readthedocs/readthedocs.org | readthedocs/config/tests/test_config.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/tests/test_config.py | MIT |
def test_load_raise_exception_invalid_syntax(tmpdir):
apply_fs(
tmpdir,
{
"readthedocs.yml": textwrap.dedent(
"""
version: 2
python:
install:
- method: pip
path: .
# bad indentation here
extra_requirements:
- build
"""
),
},
)
base = str(tmpdir)
with raises(ConfigError) as excinfo:
with override_settings(DOCROOT=tmpdir):
load(base, {})
assert excinfo.value.message_id == ConfigError.SYNTAX_INVALID | version: 2
python:
install:
- method: pip
path: .
# bad indentation here
extra_requirements:
- build | test_load_raise_exception_invalid_syntax | python | readthedocs/readthedocs.org | readthedocs/config/tests/test_config.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/tests/test_config.py | MIT |
def test_load_non_default_filename(tmpdir):
"""
    Load a config file with a non-default name.
Verifies that we can load a custom config path and that an existing default config file is
correctly ignored.
Note: Our CharField validator for readthedocs_yaml_path currently ONLY allows a file to be
called .readthedocs.yaml.
This test just verifies that the loader doesn't care since we support different file names
in the backend.
"""
non_default_filename = "myconfig.yaml"
apply_fs(
tmpdir,
{
non_default_filename: textwrap.dedent(
"""
version: 2
build:
os: "ubuntu-22.04"
tools:
python: "3"
"""
),
".readthedocs.yaml": "illegal syntax but should not load",
},
)
base = str(tmpdir)
with override_settings(DOCROOT=tmpdir):
build = load(base, readthedocs_yaml_path="myconfig.yaml")
assert isinstance(build, BuildConfigV2)
    assert build.source_file == os.path.join(base, non_default_filename) | Load a config file with a non-default name.
Verifies that we can load a custom config path and that an existing default config file is
correctly ignored.
Note: Our CharField validator for readthedocs_yaml_path currently ONLY allows a file to be
called .readthedocs.yaml.
This test just verifies that the loader doesn't care since we support different file names
in the backend. | test_load_non_default_filename | python | readthedocs/readthedocs.org | readthedocs/config/tests/test_config.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/tests/test_config.py | MIT |
def test_load_non_yaml_extension(tmpdir):
"""
    Load a config file from a non-default path.
In this version, we verify that we can handle non-yaml extensions
because we allow the user to do that.
See docstring of test_load_non_default_filename.
"""
non_default_filename = ".readthedocs.skrammel"
apply_fs(
tmpdir,
{
"subdir": {
non_default_filename: textwrap.dedent(
"""
version: 2
build:
os: "ubuntu-22.04"
tools:
python: "3"
"""
),
},
".readthedocs.yaml": "illegal syntax but should not load",
},
)
base = str(tmpdir)
with override_settings(DOCROOT=tmpdir):
build = load(base, readthedocs_yaml_path="subdir/.readthedocs.skrammel")
assert isinstance(build, BuildConfigV2)
        assert build.source_file == os.path.join(base, "subdir/.readthedocs.skrammel") | Load a config file from a non-default path.
In this version, we verify that we can handle non-yaml extensions
because we allow the user to do that.
See docstring of test_load_non_default_filename. | test_load_non_yaml_extension | python | readthedocs/readthedocs.org | readthedocs/config/tests/test_config.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/tests/test_config.py | MIT |
def test_commands_build_config_tools_and_commands_valid(self):
"""
Test that build.tools and build.commands are valid together.
"""
build = get_build_config(
{
"build": {
"os": "ubuntu-20.04",
"tools": {"python": "3.8"},
"commands": ["pip install pelican", "pelican content"],
},
},
)
build.validate()
assert isinstance(build.build, BuildWithOs)
assert build.build.commands == ["pip install pelican", "pelican content"] | Test that build.tools and build.commands are valid together. | test_commands_build_config_tools_and_commands_valid | python | readthedocs/readthedocs.org | readthedocs/config/tests/test_config.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/tests/test_config.py | MIT |
def test_build_jobs_without_build_os_is_invalid(self):
"""
build.jobs can't be used without build.os
"""
build = get_build_config(
{
"build": {
"tools": {"python": "3.8"},
"jobs": {
"pre_checkout": ["echo pre_checkout"],
},
},
},
)
with raises(ConfigError) as excinfo:
build.validate()
assert excinfo.value.message_id == ConfigValidationError.VALUE_NOT_FOUND
assert excinfo.value.format_values.get("key") == "build.os" | build.jobs can't be used without build.os | test_build_jobs_without_build_os_is_invalid | python | readthedocs/readthedocs.org | readthedocs/config/tests/test_config.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/tests/test_config.py | MIT |
def test_commands_build_config_valid(self):
"""It's valid to build with just build.os and build.commands."""
build = get_build_config(
{
"build": {
"os": "ubuntu-22.04",
"commands": ["echo 'hello world' > _readthedocs/html/index.html"],
},
},
)
build.validate()
assert isinstance(build.build, BuildWithOs)
assert build.build.commands == [
"echo 'hello world' > _readthedocs/html/index.html"
] | It's valid to build with just build.os and build.commands. | test_commands_build_config_valid | python | readthedocs/readthedocs.org | readthedocs/config/tests/test_config.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/tests/test_config.py | MIT |
def test_do_not_parse_multiple_configs_in_one_file():
buf = StringIO(
"""
base: path
---
base: other_path
name: second
nested:
works: true
"""
)
with raises(ParseError):
parse(buf) | base: path
---
base: other_path
name: second
nested:
works: true | test_do_not_parse_multiple_configs_in_one_file | python | readthedocs/readthedocs.org | readthedocs/config/tests/test_parser.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/config/tests/test_parser.py | MIT |
def SOCIALACCOUNT_PROVIDERS(self):
"""Allow settings social account settigs from the host system."""
providers = self._SOCIALACCOUNT_PROVIDERS
for provider in providers.keys():
try:
for setting in ["client_id", "secret"]:
value = os.environ.get(
f"RTD_SOCIALACCOUNT_PROVIDERS_{provider.upper()}_{setting.upper()}"
)
if value is not None:
                        providers[provider]["APPS"][0][setting] = value
except KeyError:
pass
        return providers | Allow setting social account settings from the host system. | SOCIALACCOUNT_PROVIDERS | python | readthedocs/readthedocs.org | readthedocs/settings/docker_compose.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/settings/docker_compose.py | MIT |
def SHOW_DEBUG_TOOLBAR(self):
"""
Show django-debug-toolbar on DEBUG or if it's forced by RTD_FORCE_SHOW_DEBUG_TOOLBAR.
This will show the debug toolbar on:
- Docker local instance
- web-extra production instance
"""
return self.DEBUG or self.RTD_FORCE_SHOW_DEBUG_TOOLBAR | Show django-debug-toolbar on DEBUG or if it's forced by RTD_FORCE_SHOW_DEBUG_TOOLBAR.
This will show the debug toolbar on:
- Docker local instance
- web-extra production instance | SHOW_DEBUG_TOOLBAR | python | readthedocs/readthedocs.org | readthedocs/settings/base.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/settings/base.py | MIT |
def DOCKER_LIMITS(self):
"""
Set docker limits dynamically, if in production, based on system memory.
We do this to avoid having separate build images. This assumes 1 build
process per server, which will be allowed to consume all available
memory.
We subtract 750MiB for overhead of processes and base system, and set
the build time as proportional to the memory limit.
"""
# Our normal default
limits = {
"memory": "2g",
"time": 900,
}
# Only run on our servers
if self.RTD_IS_PRODUCTION:
total_memory, memory_limit = self._get_docker_memory_limit()
if memory_limit:
limits = {
"memory": f"{memory_limit}m",
"time": max(
limits["time"],
round(total_memory * self.DOCKER_TIME_LIMIT_COEFF, -2),
),
}
log.info(
"Using dynamic docker limits.",
hostname=socket.gethostname(),
memory=limits["memory"],
time=limits["time"],
)
return limits | Set docker limits dynamically, if in production, based on system memory.
We do this to avoid having separate build images. This assumes 1 build
process per server, which will be allowed to consume all available
memory.
We subtract 750MiB for overhead of processes and base system, and set
the build time as proportional to the memory limit. | DOCKER_LIMITS | python | readthedocs/readthedocs.org | readthedocs/settings/base.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/settings/base.py | MIT |
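Worked numbers for the dynamic limits described above; this assumes ``_get_docker_memory_limit`` subtracts the ~750 MiB overhead and that ``DOCKER_TIME_LIMIT_COEFF`` is 0.2 (both values are assumptions for illustration):

total_memory = 8192                # MiB on a hypothetical 8 GiB server
memory_limit = total_memory - 750  # assumed overhead subtraction -> 7442 MiB
time_limit = max(900, round(total_memory * 0.2, -2))  # -> 1600 seconds
limits = {"memory": f"{memory_limit}m", "time": time_limit}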
def SOCIALACCOUNT_PROVIDERS(self):
"""
This is useful to override in a subclass, mainly to add the secrets when deploying.
Our ops repos have a complex (shared) inheritance structure, so it's easier this way.
"""
return self._SOCIALACCOUNT_PROVIDERS | This is useful to override in a subclass, mainly to add the secrets when deploying.
Our ops repos have a complex (shared) inheritance structure, so it's easier this way. | SOCIALACCOUNT_PROVIDERS | python | readthedocs/readthedocs.org | readthedocs/settings/base.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/settings/base.py | MIT |
def delete_customer(customer_id):
"""Delete customer from Stripe, cancelling subscriptions."""
try:
log.info(
"Deleting stripe customer.",
stripe_customer=customer_id,
)
customer = stripe.Customer.retrieve(customer_id)
return customer.delete()
except stripe.error.InvalidRequestError:
log.exception(
"Customer not deleted. Customer not found on Stripe.",
stripe_customer=customer_id,
) | Delete customer from Stripe, cancelling subscriptions. | delete_customer | python | readthedocs/readthedocs.org | readthedocs/payments/utils.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/payments/utils.py | MIT |
def cancel_subscription(subscription_id):
"""Cancel Stripe subscription, if it exists."""
try:
log.info(
"Canceling stripe subscription.",
stripe_subscription=subscription_id,
)
return stripe.Subscription.delete(subscription_id)
except stripe.error.StripeError:
log.exception(
"Subscription not cancelled. Subscription not found on Stripe. ",
stripe_subscription=subscription_id,
) | Cancel Stripe subscription, if it exists. | cancel_subscription | python | readthedocs/readthedocs.org | readthedocs/payments/utils.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/payments/utils.py | MIT |
def delete_customer(sender, instance, **__):
"""On Gold subscription deletion, remove the customer from Stripe."""
if sender == GoldUser and instance.stripe_id is not None:
# TODO: I don't think we have to delete the customer here,
# but cancel the subscription instead
utils.delete_customer(instance.stripe_id) | On Gold subscription deletion, remove the customer from Stripe. | delete_customer | python | readthedocs/readthedocs.org | readthedocs/gold/signals.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/gold/signals.py | MIT |
def test_csp_headers(self):
"""
Test CSP headers aren't altered.
This view originally altered the CSP directives based on whether we were
using the new dashboard. We weren't using inline scripts in this view
however, so this was reverted. The tests remain for now, but aren't
super useful and will break when we change `script-src` in base settings.
"""
self.client.force_login(self.user)
csp_header = "Content-Security-Policy"
script_src_regex = re.compile(r".*\s+script-src [^;]*'unsafe-inline'")
url = reverse("gold_detail")
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIsNone(script_src_regex.match(resp[csp_header])) | Test CSP headers aren't altered.
This view originally altered the CSP directives based on whether we were
using the new dashboard. We weren't using inline scripts in this view
however, so this was reverted. The tests remain for now, but aren't
super useful and will break when we change `script-src` in base settings. | test_csp_headers | python | readthedocs/readthedocs.org | readthedocs/gold/tests/test_views.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/gold/tests/test_views.py | MIT |
def send(self):
"""
Send templated notification emails through our standard email backend.
The content body is first rendered from an on-disk template, then passed
into the standard email templates as a string.
"""
template = self.get_template_names(source_format=constants.TEXT)
template_html = self.get_template_names(source_format=constants.HTML)
send_email(
recipient=self.user.email,
subject=self.get_subject(),
template=template,
template_html=template_html,
context=self.get_context_data(),
) | Send templated notification emails through our standard email backend.
The content body is first rendered from an on-disk template, then passed
into the standard email templates as a string. | send | python | readthedocs/readthedocs.org | readthedocs/notifications/email.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/notifications/email.py | MIT |
def project_skip_builds(instance, *args, **kwargs):
"""Check if the project is ``skip`` and add/cancel the notification."""
if instance.skip:
Notification.objects.add(
message_id=MESSAGE_PROJECT_SKIP_BUILDS,
attached_to=instance,
dismissable=False,
)
else:
Notification.objects.cancel(
message_id=MESSAGE_PROJECT_SKIP_BUILDS,
attached_to=instance,
) | Check if the project is ``skip`` and add/cancel the notification. | project_skip_builds | python | readthedocs/readthedocs.org | readthedocs/notifications/signals.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/notifications/signals.py | MIT |
def organization_disabled(instance, *args, **kwargs):
"""Check if the organization is ``disabled`` and add/cancel the notification."""
if instance.disabled:
Notification.objects.add(
message_id=MESSAGE_ORGANIZATION_DISABLED,
attached_to=instance,
dismissable=False,
)
else:
Notification.objects.cancel(
message_id=MESSAGE_ORGANIZATION_DISABLED,
attached_to=instance,
) | Check if the organization is ``disabled`` and add/cancel the notification. | organization_disabled | python | readthedocs/readthedocs.org | readthedocs/notifications/signals.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/notifications/signals.py | MIT |
def user_email_verified(instance, *args, **kwargs):
"""Check if the primary email is validated and cancel the notification."""
if instance.primary:
if instance.verified:
Notification.objects.cancel(
attached_to=instance.user,
message_id=MESSAGE_EMAIL_VALIDATION_PENDING,
)
else:
Notification.objects.add(
attached_to=instance.user,
message_id=MESSAGE_EMAIL_VALIDATION_PENDING,
dismissable=True,
) | Check if the primary email is validated and cancel the notification. | user_email_verified | python | readthedocs/readthedocs.org | readthedocs/notifications/signals.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/notifications/signals.py | MIT |
def _prepend_template_prefix(self, template):
"""
Prepend Django {% load %} template tag.
This is required to render the notifications with custom filters/tags.
"""
prefix = "{% load notifications_filters %}"
return prefix + template | Prepend Django {% load %} template tag.
This is required to render the notifications with custom filters/tags. | _prepend_template_prefix | python | readthedocs/readthedocs.org | readthedocs/notifications/messages.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/notifications/messages.py | MIT |
def add(self, *args, **kwargs):
"""
Create a notification without duplicating it.
If a notification with the same ``message_id`` is already attached to the object,
        its ``modified_at`` timestamp and ``state`` are updated.
Otherwise, a new notification object is created for this object.
"""
message_id = kwargs.get("message_id")
attached_to = kwargs.pop("attached_to")
content_type = ContentType.objects.get_for_model(attached_to)
notification = self.filter(
attached_to_content_type=content_type,
attached_to_id=attached_to.id,
message_id=message_id,
# Update only ``READ`` and ``UNREAD`` notifications because we want
# to keep track of ``DISMISSED`` and ``CANCELLED`` ones.
state__in=(UNREAD, READ),
).first()
if notification:
# Remove the fields we are overriding.
# Avoids passing these fields twice to ``.update()`` which
# raises an exception in that case.
kwargs.pop("state", None)
kwargs.pop("modified", None)
self.filter(pk=notification.pk).update(
*args,
modified=timezone.now(),
state=UNREAD,
**kwargs,
)
notification.refresh_from_db()
return notification
return super().create(*args, attached_to=attached_to, **kwargs) | Create a notification without duplicating it.
If a notification with the same ``message_id`` is already attached to the object,
its ``modified_at`` timestamp and ``state`` are updated.
Otherwise, a new notification object is created for this object. | add | python | readthedocs/readthedocs.org | readthedocs/notifications/querysets.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/notifications/querysets.py | MIT |
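A usage sketch of the de-duplicating ``add`` (the message id is a placeholder, and the ``Notification`` model is assumed to live in ``readthedocs.notifications.models``):

from readthedocs.notifications.models import Notification

# First call creates the notification; calling again with the same message_id
# on the same object only bumps `modified` and resets the state to UNREAD.
notification = Notification.objects.add(
    message_id="project:example-message",  # hypothetical id
    attached_to=project,
    dismissable=True,
)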
def cancel(self, message_id, attached_to):
"""
Cancel an on-going notification because the underlying state has changed.
When a notification is not valid anymore because the user has made the
required action (e.g. paid an unpaid subscription) we use this method
to mark those notifications as ``CANCELLED``.
It only cancels notifications that are ``UNREAD`` or ``READ``.
"""
content_type = ContentType.objects.get_for_model(attached_to)
self.filter(
attached_to_content_type=content_type,
attached_to_id=attached_to.id,
message_id=message_id,
state__in=(UNREAD, READ),
).update(
state=CANCELLED,
modified=timezone.now(),
) | Cancel an on-going notification because the underlying state has changed.
When a notification is not valid anymore because the user has made the
required action (e.g. paid an unpaid subscription) we use this method
to mark those notifications as ``CANCELLED``.
It only cancels notifications that are ``UNREAD`` or ``READ``. | cancel | python | readthedocs/readthedocs.org | readthedocs/notifications/querysets.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/notifications/querysets.py | MIT |
def for_user(self, user, resource):
"""
Retrieve notifications related to resource for a particular user.
Given a user, returns all the notifications for the specified ``resource``
considering permissions (e.g. does not return any notification if the ``user``
doesn't have admin permissions on the ``resource``).
If ``resource="all"``, it returns the following notifications:
- are attached to an ``Organization`` where the user is owner
- are attached to a ``Project`` where the user is admin
        - are attached to the ``User`` themselves
It only returns notifications that are ``READ`` or ``UNREAD``.
"""
# Need to be here due to circular imports
from readthedocs.organizations.models import Organization
from readthedocs.projects.models import Project
if resource == "all":
# http://chibisov.github.io/drf-extensions/docs/#usage-with-generic-relations
user_notifications = self.filter(
attached_to_content_type=ContentType.objects.get_for_model(User),
attached_to_id=user.pk,
)
project_notifications = self.filter(
attached_to_content_type=ContentType.objects.get_for_model(Project),
attached_to_id__in=AdminPermission.projects(
user,
admin=True,
member=False,
).values("id"),
)
organization_notifications = self.filter(
attached_to_content_type=ContentType.objects.get_for_model(Organization),
attached_to_id__in=AdminPermission.organizations(
user,
owner=True,
member=False,
).values("id"),
)
# Return all the notifications related to this user attached to:
# User, Project and Organization models where the user is admin.
return (user_notifications | project_notifications | organization_notifications).filter(
state__in=(UNREAD, READ)
)
if isinstance(resource, User):
if user == resource:
return self.filter(
attached_to_content_type=ContentType.objects.get_for_model(resource),
attached_to_id=resource.pk,
state__in=(UNREAD, READ),
)
if isinstance(resource, Project):
if resource in AdminPermission.projects(user, admin=True, member=False):
return self.filter(
attached_to_content_type=ContentType.objects.get_for_model(resource),
attached_to_id=resource.pk,
state__in=(UNREAD, READ),
)
if isinstance(resource, Organization):
if resource in AdminPermission.organizations(
user,
owner=True,
member=False,
):
return self.filter(
attached_to_content_type=ContentType.objects.get_for_model(resource),
attached_to_id=resource.pk,
state__in=(UNREAD, READ),
)
return self.none() | Retrieve notifications related to resource for a particular user.
Given a user, returns all the notifications for the specified ``resource``
considering permissions (e.g. does not return any notification if the ``user``
doesn't have admin permissions on the ``resource``).
If ``resource="all"``, it returns the following notifications:
- are attached to an ``Organization`` where the user is owner
- are attached to a ``Project`` where the user is admin
- are attacehd to the ``User`` themselves
It only returns notifications that are ``READ`` or ``UNREAD``. | for_user | python | readthedocs/readthedocs.org | readthedocs/notifications/querysets.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/notifications/querysets.py | MIT |
def to_class_name(value):
"""Output the name of the class for the given object."""
return value.__class__.__name__ | Output the name of the class for the given object. | to_class_name | python | readthedocs/readthedocs.org | readthedocs/notifications/templatetags/notifications_filters.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/notifications/templatetags/notifications_filters.py | MIT |
def recurse_while_none(element):
"""Recursively find the leaf node with the ``href`` attribute."""
if element.text is None and element.getchildren():
return recurse_while_none(element.getchildren()[0])
href = element.attrib.get("href")
if not href:
href = element.attrib.get("id")
return {element.text: href} | Recursively find the leaf node with the ``href`` attribute. | recurse_while_none | python | readthedocs/readthedocs.org | readthedocs/embed/utils.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/embed/utils.py | MIT |
def clean_references(obj, url, html_raw_response=False):
"""
Rewrite (internal) links (href) and images (src) to make them absolute.
1. external links/images are not changed
2. prepend URL to links that are just fragments (e.g. #section)
3. prepend URL (without filename) to internal relative links/images
"""
# TODO: do not depend on PyQuery
obj = PQ(obj)
if url is None:
return obj
for tag in obj.find("a") + obj.find("img"):
base_url = urlparse(url)
attribute = "href" if tag.tag == "a" else "src"
value = tag.attrib[attribute]
# We need to make all internal links/images, to be absolute
parsed_href = urlparse(value)
if parsed_href.scheme or parsed_href.path.startswith("/"):
# don't change external links/images
continue
if tag.tag == "a" and not parsed_href.path and parsed_href.fragment:
# It's a link pointing to a specific section inside the target ``href="#section-link"``
cleaned_value = base_url.geturl() + value
tag.attrib[attribute] = cleaned_value
continue
if not base_url.path.endswith("/"):
# internal relative link/image
# href="../../another.html" and ``base_url`` is not HTMLDir
# (e.g. /en/latest/deep/internal/section/page.html)
# we want to remove the trailing filename (page.html) and use the rest as base URL
# The resulting absolute link should be
# https://slug.readthedocs.io/en/latest/deep/internal/section/../../another.html
# remove the filename (page.html) from the original document URL (base_url) and,
path, _ = base_url.path.rsplit("/", 1)
# append the value of href/src (../../another.html) to the base URL.
base_url = base_url._replace(path=path + "/")
cleaned_value = base_url.geturl() + value
tag.attrib[attribute] = cleaned_value
if html_raw_response:
return obj.outerHtml()
return obj | Rewrite (internal) links (href) and images (src) to make them absolute.
1. external links/images are not changed
2. prepend URL to links that are just fragments (e.g. #section)
3. prepend URL (without filename) to internal relative links/images | clean_references | python | readthedocs/readthedocs.org | readthedocs/embed/utils.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/embed/utils.py | MIT |
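A comment-style sketch of the three rewrite rules applied by ``clean_references`` (the URLs are illustrative):

url = "https://slug.readthedocs.io/en/latest/section/page.html"
# 1. External links are untouched:
#    <a href="https://example.com">  -> unchanged
# 2. Fragment-only links get the page URL prepended:
#    <a href="#install">             -> href=".../section/page.html#install"
# 3. Relative links resolve against the page's directory:
#    <img src="../img/logo.png">     -> src=".../section/../img/logo.png"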
def remove_sphinx_build_output():
"""Remove _build/ folder, if exist."""
for path in (srcdir,):
build_path = os.path.join(path, "_build")
if os.path.exists(build_path):
            shutil.rmtree(build_path) | Remove the _build/ folder, if it exists. | remove_sphinx_build_output | python | readthedocs/readthedocs.org | readthedocs/embed/v3/tests/conftest.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/embed/v3/tests/conftest.py | MIT |