code | docstring | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|---|
def test_default_main_section(self, storage_exists, storage_open, app, client):
app.build()
path = app.outdir / "index.html"
assert path.exists() is True
content = open(path).read()
storage_exists.return_value = True
storage_open.side_effect = self._mock_open(content)
params = {
"url": "https://project.readthedocs.io/en/latest/",
}
response = client.get(self.api_url, params)
assert response.status_code == 200
title = get_anchor_link_title("heading")
# Note the difference between `<section>` and `<div class="section">`
if Version(docutils.__version__) >= Version("0.17"):
content = f"""
<div class="body" role="main">
<section id="title">
<h1>Title<a class="headerlink" href="https://project.readthedocs.io/en/latest/#title" title="{title}">¶</a></h1>
<p>This is an example page used to test EmbedAPI parsing features.</p>
<section id="sub-title">
<h2>Sub-title<a class="headerlink" href="https://project.readthedocs.io/en/latest/#sub-title" title="{title}">¶</a></h2>
<p>This is a reference to <a class="reference internal" href="https://project.readthedocs.io/en/latest/#sub-title"><span class="std std-ref">Sub-title</span></a>.</p>
</section>
<section id="manual-reference-section">
<span id="manual-reference"></span><h2>Manual Reference Section<a class="headerlink" href="https://project.readthedocs.io/en/latest/#manual-reference-section" title="{title}">¶</a></h2>
<p>This is a reference to a manual reference <a class="reference internal" href="https://project.readthedocs.io/en/latest/#manual-reference"><span class="std std-ref">Manual Reference Section</span></a>.</p>
</section>
</section>
<div class="clearer"></div>
</div>
"""
else:
content = """
<div class="body" role="main">
<div class="section" id="title">
<h1>Title<a class="headerlink" href="https://project.readthedocs.io/en/latest/#title" title="{title}">¶</a></h1>
<p>This is an example page used to test EmbedAPI parsing features.</p>
<div class="section" id="sub-title">
<h2>Sub-title<a class="headerlink" href="https://project.readthedocs.io/en/latest/#sub-title" title="{title}">¶</a></h2>
<p>This is a reference to <a class="reference internal" href="https://project.readthedocs.io/en/latest/#sub-title"><span class="std std-ref">Sub-title</span></a>.</p>
</div>
<div class="section" id="manual-reference-section">
<span id="manual-reference"></span><h2>Manual Reference Section<a class="headerlink" href="https://project.readthedocs.io/en/latest/#manual-reference-section" title="{title}">¶</a></h2>
<p>This is a reference to a manual reference <a class="reference internal" href="https://project.readthedocs.io/en/latest/#manual-reference"><span class="std std-ref">Manual Reference Section</span></a>.</p>
</div>
</div>
</div>
"""
json_response = response.json()
assert json_response == {
"url": "https://project.readthedocs.io/en/latest/",
"fragment": None,
"content": mock.ANY,
"external": False,
}
compare_content_without_blank_lines(json_response["content"], content) | | test_default_main_section | python | readthedocs/readthedocs.org | readthedocs/embed/v3/tests/test_internal_pages.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/embed/v3/tests/test_internal_pages.py | MIT |
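The assertion above relies on a `compare_content_without_blank_lines` helper defined elsewhere in the test suite. A minimal sketch of what such a helper plausibly does (an assumption for illustration, not the actual implementation):

```python
def compare_content_without_blank_lines(content, expected):
    # Hypothetical reimplementation: drop blank lines and per-line indentation
    # before comparing, so the indented f-string templates above stay readable.
    def normalize(text):
        return [line.strip() for line in text.splitlines() if line.strip()]

    assert normalize(content) == normalize(expected)
```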
def test_default_main_section(self, app, client, requests_mock):
app.build()
path = app.outdir / "index.html"
assert path.exists() is True
content = open(path).read()
requests_mock.get("https://docs.project.com", text=content)
params = {
"url": "https://docs.project.com",
}
response = client.get(self.api_url, params)
assert response.status_code == 200
title = get_anchor_link_title("heading")
# The output is different because docutils is outputting this,
# and we're not sanitizing it, but just passing it through.
if Version(docutils.__version__) >= Version("0.17"):
content = f"""
<div class="body" role="main">
<section id="title">
<h1>Title<a class="headerlink" href="https://docs.project.com#title" title="{title}">¶</a></h1>
<p>This is an example page used to test EmbedAPI parsing features.</p>
<section id="sub-title">
<h2>Sub-title<a class="headerlink" href="https://docs.project.com#sub-title" title="{title}">¶</a></h2>
<p>This is a reference to <a class="reference internal" href="https://docs.project.com#sub-title"><span class="std std-ref">Sub-title</span></a>.</p>
</section>
<section id="manual-reference-section">
<span id="manual-reference"></span><h2>Manual Reference Section<a class="headerlink" href="https://docs.project.com#manual-reference-section" title="{title}">¶</a></h2>
<p>This is a reference to a manual reference <a class="reference internal" href="https://docs.project.com#manual-reference"><span class="std std-ref">Manual Reference Section</span></a>.</p>
</section>
</section>
<div class="clearer"></div>
</div>
"""
else:
content = """
<div class="body" role="main">
<div class="section" id="title">
<h1>Title<a class="headerlink" href="https://docs.project.com#title" title="{title}">¶</a></h1>
<p>This is an example page used to test EmbedAPI parsing features.</p>
<div class="section" id="sub-title">
<h2>Sub-title<a class="headerlink" href="https://docs.project.com#sub-title" title="{title}">¶</a></h2>
<p>This is a reference to <a class="reference internal" href="https://docs.project.com#sub-title"><span class="std std-ref">Sub-title</span></a>.</p>
</div>
<div class="section" id="manual-reference-section">
<span id="manual-reference"></span><h2>Manual Reference Section<a class="headerlink" href="https://docs.project.com#manual-reference-section" title="{title}">¶</a></h2>
<p>This is a reference to a manual reference <a class="reference internal" href="https://docs.project.com#manual-reference"><span class="std std-ref">Manual Reference Section</span></a>.</p>
</div>
</div>
</div>
"""
json_response = response.json()
assert json_response == {
"url": "https://docs.project.com",
"fragment": None,
"content": mock.ANY,
"external": True,
}
compare_content_without_blank_lines(json_response["content"], content) | | test_default_main_section | python | readthedocs/readthedocs.org | readthedocs/embed/v3/tests/test_external_pages.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/embed/v3/tests/test_external_pages.py | MIT |
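This external-page variant replaces the storage mocks with the `requests-mock` pytest fixture. The same stubbing works standalone with the package's public `Mocker` API; a small self-contained example:

```python
import requests
import requests_mock  # pip install requests-mock

with requests_mock.Mocker() as m:
    m.get("https://docs.project.com", text="<html>stub</html>")
    assert requests.get("https://docs.project.com").text == "<html>stub</html>"
```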
def get(self, *args, **kwargs):
"""Wrapper around ``client.get`` to be overridden in the proxied api tests."""
return self.client.get(*args, **kwargs) | Wrapper around ``client.get`` to be overridden in the proxied api tests. | get | python | readthedocs/readthedocs.org | readthedocs/embed/v3/tests/test_access.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/embed/v3/tests/test_access.py | MIT |
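The docstring says the proxied-API tests override this wrapper; a hypothetical override (class names and host are invented for illustration) might simply force the request onto the docs domain:

```python
class ProxiedAPITests(BaseAPITests):  # hypothetical class names
    def get(self, *args, **kwargs):
        # Same signature as the base wrapper, but target the proxied host.
        kwargs.setdefault("HTTP_HOST", "project.readthedocs.io")
        return self.client.get(*args, **kwargs)
```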
def test_get_content_private_version_logged_in_user(self):
"""This test is skipped, since the proxied API on .org doesn't log in users.""" | This test is skipped, since the proxied API on .org doesn't log in users. | test_get_content_private_version_logged_in_user | python | readthedocs/readthedocs.org | readthedocs/embed/v3/tests/test_access.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/embed/v3/tests/test_access.py | MIT |
def test_two_links():
"""
First link does not affect the second one.
We are using ``._replace`` for the firsturl case, and that should not affect
the second link.
"""
firsturl = URLData(
"https://t.readthedocs.io/en/latest/internal/deep/page/section.html",
"../../page.html#to-a-section",
"https://t.readthedocs.io/en/latest/internal/deep/page/../../page.html#to-a-section",
)
secondurl = URLData(
"", # docurl comes from firsturl
"#to-a-section",
"https://t.readthedocs.io/en/latest/internal/deep/page/section.html#to-a-section",
)
pq = PyQuery(
f'<body><a href="{firsturl.ref}">Click here</a><a href="{secondurl.ref}">Click here</a></body>'
)
response = clean_references(pq, firsturl.docurl)
firstlink, secondlink = response.find("a")
assert (firstlink.attrib["href"], secondlink.attrib["href"]) == (
firsturl.expected,
secondurl.expected,
) | First link does not affect the second one.
We are using ``._replace`` for the firsturl case, and that should not affect
the second link. | test_two_links | python | readthedocs/readthedocs.org | readthedocs/embed/tests/test_links.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/embed/tests/test_links.py | MIT |
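`URLData` is built positionally with `(docurl, ref, expected)` and read back as attributes, and the docstring mentions `._replace`, so it behaves like a namedtuple; a definition consistent with this test (assumed, the real one may differ):

```python
from collections import namedtuple

# Fields inferred from the call sites above.
URLData = namedtuple("URLData", ["docurl", "ref", "expected"])
```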
def get(self, client, *args, **kwargs):
"""Wrapper around ``client.get`` to be overridden in the proxied api tests."""
return client.get(*args, **kwargs) | Wrapper around ``client.get`` to be overridden in the proxied api tests. | get | python | readthedocs/readthedocs.org | readthedocs/embed/tests/test_api.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/embed/tests/test_api.py | MIT |
def post(self, request, *args, **kwargs):
"""
Accept or decline an invitation.
To accept an invitation, the ``accept`` parameter must be
in the request, otherwise we decline the invitation.
"""
invitation = self.get_object()
url = reverse("homepage")
if request.POST.get("accept"):
if invitation.expired:
return HttpResponseBadRequest("Invitation has expired.")
# If the invitation is attached to an email,
# and the current user isn't logged-in we
# redeem the invitation after the user has signed-up.
if not request.user.is_authenticated and invitation.to_email:
return self.redeem_at_sign_up(invitation)
invitation.redeem(request.user, request=request)
url = invitation.get_success_url()
else:
log.info(
"Invitation declined",
invitation_pk=invitation.pk,
object_type=invitation.object_type,
object_name=invitation.object_name,
object_pk=invitation.object.pk,
)
invitation.create_audit_log(
action=AuditLog.INVITATION_DECLINED,
request=request,
user=invitation.to_user,
)
invitation.delete()
return HttpResponseRedirect(url) | Accept or decline an invitation.
To accept an invitation, the ``accept`` parameter must be
in the request, otherwise we decline the invitation. | post | python | readthedocs/readthedocs.org | readthedocs/invitations/views.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/invitations/views.py | MIT |
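Exercising both branches of this view from a test client looks roughly like the sketch below (the URL name is an assumption; check the app's `urls.py` for the real route):

```python
from django.test import Client
from django.urls import reverse

client = Client()
url = reverse("invitations_redeem", args=["some-invitation-pk"])  # hypothetical route

client.post(url, data={"accept": "true"})  # accept: redeem and redirect to success URL
client.post(url)                           # no "accept" key: decline and delete
```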
def redeem_at_sign_up(self, invitation):
"""
Mark the invitation to be redeemed after the user has signed-up.
We redirect the user to the sign-up page,
the invitation will be automatically redeemed
after the user has signed-up (readthedocs.core.adapters.AccountAdapter.save_user).
"""
self.request.session.update(
{
"invitation:pk": invitation.pk,
# Auto-verify EmailAddress via django-allauth.
"account_verified_email": invitation.to_email,
}
)
url = reverse("account_signup")
obj = invitation.object
organization = None
if isinstance(obj, Team):
organization = obj.organization
elif isinstance(obj, Organization):
organization = obj
if organization and AdminPermission.has_sso_enabled(organization):
url += f"?organization={organization.slug}"
return HttpResponseRedirect(url) | Mark the invitation to be redeemed after the user has signed-up.
We redirect the user to the sign-up page,
the invitation will be automatically redeemed
after the user has signed-up (readthedocs.core.adapters.AccountAdapter.save_user). | redeem_at_sign_up | python | readthedocs/readthedocs.org | readthedocs/invitations/views.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/invitations/views.py | MIT |
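The docstring points at `readthedocs.core.adapters.AccountAdapter.save_user` as the place where the stashed pk is consumed after sign-up. A hedged sketch of that hand-off (not the actual adapter code):

```python
from readthedocs.invitations.models import Invitation

def redeem_pending_invitation(request, user):
    # Illustration only: pop the pk stored by redeem_at_sign_up and redeem it
    # for the freshly created user.
    invitation_pk = request.session.pop("invitation:pk", None)
    if invitation_pk:
        invitation = Invitation.objects.filter(pk=invitation_pk).first()
        if invitation:
            invitation.redeem(user, request=request)
```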
def delete_related_invitations(sender, instance, **kwargs):
"""
Delete related invitations of an object.
Generic foreign keys don't have a way to cascade delete,
so we need to do it manually.
"""
invitations = Invitation.objects.for_object(instance)
log.info(
"Deleting related invitations.",
object_type=sender.__name__.lower(),
object_id=instance.pk,
count=invitations.count(),
)
invitations.delete() | Delete related invitations of an object.
Generic foreign keys don't have a way to cascade delete,
so we need to do it manually. | delete_related_invitations | python | readthedocs/readthedocs.org | readthedocs/invitations/signals.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/invitations/signals.py | MIT |
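Because generic foreign keys don't cascade, this receiver presumably has to be connected for every model that can carry invitations; a sketch of that wiring (the signal choice and sender list are assumptions):

```python
from django.db.models.signals import pre_delete

# Assumed wiring: one connection per invitable model.
for model in (Organization, Project, Team):
    pre_delete.connect(delete_related_invitations, sender=model)
```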
def send_invitation(self):
"""Send an email with the invitation to join the object."""
email = self.invitation.to_email
if not email:
email = self.invitation.to_user.email
from_user = self.invitation.from_user
from_name = from_user.get_full_name() or from_user.username
object_description = self._get_email_object_description()
expiration_date = self.invitation.expiration_date
log.info(
"Emailing invitation",
email=email,
invitation_pk=self.invitation.pk,
object_type=self.invitation.object_type,
object_name=self.invitation.object_name,
object_pk=self.invitation.object.pk,
)
send_email(
recipient=email,
subject=f"{from_name} has invited you to join the {object_description}",
template="invitations/email/invitation.txt",
template_html="invitations/email/invitation.html",
context={
"from_name": from_name,
"object_description": object_description,
"invite_url": self.invitation.get_absolute_url(),
"valid_until": timesince.timeuntil(expiration_date),
"expiration_date": formats.date_format(expiration_date),
},
) | Send an email with the invitation to join the object. | send_invitation | python | readthedocs/readthedocs.org | readthedocs/invitations/backends.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/invitations/backends.py | MIT |
def get_backend(invitation):
"""Get the proper backend for the invitation."""
backends = [
OrganizationBackend,
ProjectBackend,
TeamBackend,
]
for backend in backends:
if isinstance(invitation.object, backend.klass):
return backend(invitation)
raise ValueError(f"Backend not found for object of class {object.__class__}") | Get the proper backend for the invitation. | get_backend | python | readthedocs/readthedocs.org | readthedocs/invitations/backends.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/invitations/backends.py | MIT |
def invite(self, from_user, obj, to_user=None, to_email=None, request=None):
"""
Create and send an invitation for `to_user` or `to_email` to join `object`.
If the invitation already exists, we don't send the invitation again.
:param request: If given, a log entry will be created.
"""
if not to_user and not to_email:
raise ValueError("A user or email must be provided")
fields = {
"content_type": ContentType.objects.get_for_model(obj),
"object_id": obj.pk,
}
if to_user:
fields["to_user"] = to_user
else:
fields["to_email"] = to_email
invitation, created = self.get_or_create(
**fields,
defaults={
"from_user": from_user,
},
)
if created:
if request:
invitation.create_audit_log(
action=AuditLog.INVITATION_SENT,
request=request,
user=request.user,
)
invitation.send()
return invitation, created | Create and send an invitation for `to_user` or `to_email` to join `object`.
If the invitation already exists, we don't send the invitation again.
:param request: If given, a log entry will be created. | invite | python | readthedocs/readthedocs.org | readthedocs/invitations/models.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/invitations/models.py | MIT |
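A sketch of calling the manager method from a view (the ``project`` object and email are placeholders):

```python
def invite_by_email(request, project):
    # Sends the email only when the invitation didn't already exist.
    invitation, created = Invitation.objects.invite(
        from_user=request.user,
        obj=project,  # an Organization, Project, or Team instance
        to_email="new-member@example.com",
        request=request,  # optional: also records an AuditLog entry
    )
    return invitation, created
```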
def redeem(self, user=None, request=None):
"""
Redeem invitation.
`user` will be used only if the invitation is attached
to an email, otherwise `to_user` user will be used.
:param request: If given, a log entry will be created.
"""
if self.expired:
return False
if self.to_user:
user = self.to_user
log.info(
"Redeeming invitation",
invitation_pk=self.pk,
for_user=user.username,
object_type=self.object_type,
object_name=self.object_name,
object_pk=self.object.pk,
)
if request:
self.create_audit_log(
action=AuditLog.INVITATION_ACCEPTED,
request=request,
user=user,
)
return self.backend.redeem(user=user) | Redeem invitation.
`user` will be used only if the invitation is attached
to an email, otherwise `to_user` user will be used.
:param request: If given, a log entry will be created. | redeem | python | readthedocs/readthedocs.org | readthedocs/invitations/models.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/invitations/models.py | MIT |
def get_success_url(self):
"""URL to redirect after the invitation has been redeemed."""
return self.backend.get_success_url() | URL to redirect after the invitation has been redeemed. | get_success_url | python | readthedocs/readthedocs.org | readthedocs/invitations/models.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/invitations/models.py | MIT |
def get_origin_url(self):
"""URL from where the invitations for the object are created."""
return self.backend.get_origin_url() | URL from where the invitations for the object are created. | get_origin_url | python | readthedocs/readthedocs.org | readthedocs/invitations/models.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/invitations/models.py | MIT |
def can_revoke_invitation(self, user):
"""
Check whether the user can revoke the invitation.
A user can revoke an invitation if it's the owner
of the object attached to it.
"""
return self.backend.owns_object(user) | Check whether the user can revoke the invitation.
A user can revoke an invitation if it's the owner
of the object attached to it. | can_revoke_invitation | python | readthedocs/readthedocs.org | readthedocs/invitations/models.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/invitations/models.py | MIT |
def create_audit_log(self, action, request, user=None):
"""Create an audit log entry for this invitation."""
from readthedocs.audit.serializers import InvitationSerializer
# Attach the proper project and organization to the log.
kwargs = {}
object_type = self.object_type
if object_type == "organization":
kwargs["organization"] = self.object
elif object_type == "project":
kwargs["project"] = self.object
elif object_type == "team":
kwargs["organization"] = self.object.organization
AuditLog.objects.new(
action=action,
request=request,
data=InvitationSerializer(self).data,
user=user,
**kwargs,
) | Create an audit log entry for this invitation. | create_audit_log | python | readthedocs/readthedocs.org | readthedocs/invitations/models.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/invitations/models.py | MIT |
def __init__(self, data):
"""
Initializer.
:param data: object with all the data grabbed by Celery task in
``before_start`` and used as a way to share data with this class
bidirectionally.
:type data: readthedocs.projects.tasks.builds.TaskData
"""
self.data = data | Initializer.
:param data: object with all the data grabbed by Celery task in
``before_start`` and used as a way to share data with this class
bidirectionally.
:type data: readthedocs.projects.tasks.builds.TaskData | __init__ | python | readthedocs/readthedocs.org | readthedocs/doc_builder/director.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/director.py | MIT |
def setup_vcs(self):
"""
Perform all VCS related steps.
1. clone the repository
2. checkout specific commit/identifier
3. load the config file
4. checkout submodules
"""
# Create the directory to clone the repository under, if it does not exist
if not os.path.exists(self.data.project.doc_path):
os.makedirs(self.data.project.doc_path)
if not self.data.project.vcs_class():
raise RepositoryError(RepositoryError.UNSUPPORTED_VCS)
before_vcs.send(
sender=self.data.version,
environment=self.vcs_environment,
)
# Create the VCS repository where all the commands are going to be
# executed for a particular VCS type
self.vcs_repository = self.data.project.vcs_repo(
version=self.data.version.slug,
environment=self.vcs_environment,
verbose_name=self.data.version.verbose_name,
version_type=self.data.version.type,
version_identifier=self.data.version.identifier,
version_machine=self.data.version.machine,
)
# We can't do too much on ``pre_checkout`` because we haven't
# cloned the repository yet and we don't know what the user wrote
# in the `.readthedocs.yaml` yet.
#
# We could implement something different in the future if we download
# the `.readthedocs.yaml` file without cloning.
# See https://github.com/readthedocs/readthedocs.org/issues/8935
#
# self.run_build_job("pre_checkout")
self.checkout()
self.run_build_job("post_checkout")
commit = self.data.build_commit or self.vcs_repository.commit
if commit:
self.data.build["commit"] = commit | Perform all VCS related steps.
1. clone the repository
2. checkout specific commit/identifier
3. load the config file
4. checkout submodules | setup_vcs | python | readthedocs/readthedocs.org | readthedocs/doc_builder/director.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/director.py | MIT |
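Pieced together from the methods in this class, the director's happy path runs roughly in this order (a sketch of the calling sequence as read from this file; the class name is assumed, and the Celery task itself is not shown):

```python
director = BuildDirector(data)  # "data" is the shared TaskData object

director.setup_vcs()          # clone, checkout, load .readthedocs.yaml, submodules
director.setup_environment()  # apt packages, build tools, virtualenv/conda, installs
director.build()              # pre_build jobs, HTML/htmlzip/PDF/ePub, post_build jobs
```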
def setup_environment(self):
"""
Create the environment and install required dependencies.
1. install OS dependencies (apt)
2. create language (e.g. Python) environment
3. install dependencies into the environment
"""
# Environment used for building code, usually with Docker
language_environment_cls = Virtualenv
if self.data.config.is_using_conda:
language_environment_cls = Conda
self.language_environment = language_environment_cls(
version=self.data.version,
build_env=self.build_environment,
config=self.data.config,
)
# TODO: check if `before_build` and `after_build` are still useful
# (maybe in commercial?)
#
# I couldn't find them used anywhere; we should probably remove them
before_build.send(
sender=self.data.version,
environment=self.build_environment,
)
self.run_build_job("pre_system_dependencies")
self.system_dependencies()
self.run_build_job("post_system_dependencies")
# Install all ``build.tools`` specified by the user
self.install_build_tools()
self.run_build_job("pre_create_environment")
self.create_environment()
self.run_build_job("post_create_environment")
self.run_build_job("pre_install")
self.install()
self.run_build_job("post_install") | Create the environment and install required dependencies.
1. install OS dependencies (apt)
2. create language (e.g. Python) environment
3. install dependencies into the environment | setup_environment | python | readthedocs/readthedocs.org | readthedocs/doc_builder/director.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/director.py | MIT |
def build(self):
"""
Build all the formats specified by the user.
1. build HTML
2. build HTMLzip
3. build PDF
4. build ePub
"""
self.run_build_job("pre_build")
# Build all formats
self.build_html()
self.build_htmlzip()
self.build_pdf()
self.build_epub()
self.run_build_job("post_build")
self.store_readthedocs_build_yaml()
after_build.send(
sender=self.data.version,
) | Build all the formats specified by the user.
1. build HTML
2. build HTMLzip
3. build PDF
4. build ePub | build | python | readthedocs/readthedocs.org | readthedocs/doc_builder/director.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/director.py | MIT |
def checkout(self):
"""Checkout Git repo and load build config file."""
log.info("Cloning and fetching.")
self.vcs_repository.update()
identifier = self.data.build_commit or self.data.version.identifier
log.info("Checking out.", identifier=identifier)
self.vcs_repository.checkout(identifier)
# The director is responsible for understanding which config file to use for a build.
# In order to reproduce a build 1:1, we may use readthedocs_yaml_path defined by the build
# instead of per-version or per-project.
# Use the below line to fetch the readthedocs_yaml_path defined per-build.
# custom_config_file = self.data.build.get("readthedocs_yaml_path", None)
custom_config_file = None
# This logic can be extended with version-specific config files
if not custom_config_file and self.data.version.project.readthedocs_yaml_path:
custom_config_file = self.data.version.project.readthedocs_yaml_path
if custom_config_file:
log.info("Using a custom .readthedocs.yaml file.", path=custom_config_file)
checkout_path = self.data.project.checkout_path(self.data.version.slug)
default_config_file = find_one(checkout_path, CONFIG_FILENAME_REGEX)
final_config_file = custom_config_file or default_config_file
# Output the path for the config file used.
# This works as confirmation for us & the user about which file is used,
# as well as the fact that *any* config file is used.
if final_config_file:
self.vcs_environment.run(
"cat",
# Show user the relative path to the config file
# TODO: Have our standard path replacement code catch this.
# https://github.com/readthedocs/readthedocs.org/pull/10413#discussion_r1230765843
final_config_file.replace(checkout_path + "/", ""),
cwd=checkout_path,
)
self.data.config = load_yaml_config(
version=self.data.version,
readthedocs_yaml_path=custom_config_file,
)
self.data.build["config"] = self.data.config.as_dict()
self.data.build["readthedocs_yaml_path"] = custom_config_file
# Raise a build error if the project is not using a config file or using v1
if self.data.config.version not in ("2", 2):
raise BuildUserError(BuildUserError.NO_CONFIG_FILE_DEPRECATED)
# Raise a build error if the project is using "build.image" on their config file
build_config_key = self.data.config.source_config.get("build", {})
if "image" in build_config_key:
raise BuildUserError(BuildUserError.BUILD_IMAGE_CONFIG_KEY_DEPRECATED)
# TODO: move this validation to the Config object once we are settled here
if "image" not in build_config_key and "os" not in build_config_key:
raise BuildUserError(BuildUserError.BUILD_OS_REQUIRED)
self.vcs_repository.update_submodules(self.data.config) | Checkout Git repo and load build config file. | checkout | python | readthedocs/readthedocs.org | readthedocs/doc_builder/director.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/director.py | MIT |
def system_dependencies(self):
"""
Install apt packages from the config file.
We don't allow passing custom options or installing from a path.
The package names are already validated when reading the config file.
.. note::
``--quiet`` won't suppress the output,
it would just remove the progress bar.
"""
packages = self.data.config.build.apt_packages
if packages:
self.build_environment.run(
"apt-get",
"update",
"--assume-yes",
"--quiet",
user=settings.RTD_DOCKER_SUPER_USER,
)
# Add ``--`` to end option parsing, so package names can't be read as flags.
self.build_environment.run(
"apt-get",
"install",
"--assume-yes",
"--quiet",
"--",
*packages,
user=settings.RTD_DOCKER_SUPER_USER,
) | Install apt packages from the config file.
We don't allow passing custom options or installing from a path.
The package names are already validated when reading the config file.
.. note::
``--quiet`` won't suppress the output,
it would just remove the progress bar. | system_dependencies | python | readthedocs/readthedocs.org | readthedocs/doc_builder/director.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/director.py | MIT |
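Taken together, ``checkout()`` rejects configs that are missing ``version: 2`` or ``build.os`` (and any that still use ``build.image``), while ``system_dependencies()`` consumes ``build.apt_packages``. A minimal ``.readthedocs.yaml`` consistent with those checks (illustrative values):

```yaml
version: 2
build:
  os: ubuntu-22.04
  tools:
    python: "3.11"
  apt_packages:
    - graphviz
sphinx:
  configuration: docs/conf.py
```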
def run_build_job(self, job):
"""
Run a command specified by the user under `build.jobs.` config key.
It uses the "VCS environment" for pre_/post_ checkout jobs and "build
environment" for the rest of them.
Note that user's commands:
- are not escaped
- are run under the path where the repository was cloned
- are run as the RTD_DOCKER_USER user
- users can't run commands as the `root` user
- all the user's commands receive the same environment variables as regular commands
Example:
build:
jobs:
pre_install:
- echo `date`
- python path/to/myscript.py
pre_build:
- sed -i **/*.rst -e "s|{version}|v3.5.1|g"
build:
html:
- make html
pdf:
- make pdf
In this case, `self.data.config.build.jobs.pre_build` will contain the
`sed` command.
"""
commands = get_dotted_attribute(self.data.config, f"build.jobs.{job}", None)
if not commands:
return
cwd = self.data.project.checkout_path(self.data.version.slug)
environment = self.vcs_environment
if job not in ("pre_checkout", "post_checkout"):
environment = self.build_environment
for command in commands:
environment.run(command, escape_command=False, cwd=cwd) | Run a command specified by the user under `build.jobs.` config key.
It uses the "VCS environment" for pre_/post_ checkout jobs and "build
environment" for the rest of them.
Note that user's commands:
- are not escaped
- are run under the path where the repository was cloned
- are run as the RTD_DOCKER_USER user
- users can't run commands as the `root` user
- all the user's commands receive the same environment variables as regular commands
Example:
build:
jobs:
pre_install:
- echo `date`
- python path/to/myscript.py
pre_build:
- sed -i **/*.rst -e "s|{version}|v3.5.1|g"
build:
html:
- make html
pdf:
- make pdf
In this case, `self.data.config.build.jobs.pre_build` will contain the
`sed` command. | run_build_job | python | readthedocs/readthedocs.org | readthedocs/doc_builder/director.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/director.py | MIT |
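`get_dotted_attribute` isn't included in this dump; given the call above, it presumably resolves a dotted path attribute by attribute with a default. A sketch (assumed behavior):

```python
def get_dotted_attribute(obj, dotted_path, default=None):
    # Illustrative reimplementation: walk "build.jobs.pre_build" one
    # attribute at a time, returning the default on the first miss.
    for attribute in dotted_path.split("."):
        if not hasattr(obj, attribute):
            return default
        obj = getattr(obj, attribute)
    return obj
```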
def check_old_output_directory(self):
"""
Check if the directory '_build/html' exists and fail the build if so.
Read the Docs used to build artifacts into '_build/html' and there are
some projects with this path hardcoded in their files. Those builds
exhibit unexpected behavior since we are not using that path anymore.
If we detect they keep using that path, we fail the build
explaining this.
"""
command = self.build_environment.run(
"test",
"-x",
"_build/html",
cwd=self.data.project.checkout_path(self.data.version.slug),
record=False,
)
if command.exit_code == 0:
log.warning("Directory '_build/html' exists. This may lead to unexpected behavior.")
raise BuildUserError(BuildUserError.BUILD_OUTPUT_OLD_DIRECTORY_USED) | Check if the directory '_build/html' exists and fail the build if so.
Read the Docs used to build artifacts into '_build/html' and there are
some projects with this path hardcoded in their files. Those builds
exhibit unexpected behavior since we are not using that path anymore.
If we detect they keep using that path, we fail the build
explaining this. | check_old_output_directory | python | readthedocs/readthedocs.org | readthedocs/doc_builder/director.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/director.py | MIT |
def run_build_commands(self):
"""Runs each build command in the build environment."""
python_reshim_commands = (
{"pip", "install"},
{"conda", "create"},
{"conda", "install"},
{"mamba", "create"},
{"mamba", "install"},
{"poetry", "install"},
)
rust_reshim_commands = ({"cargo", "install"},)
cwd = self.data.project.checkout_path(self.data.version.slug)
environment = self.build_environment
for command in self.data.config.build.commands:
environment.run(command, escape_command=False, cwd=cwd)
# Execute ``asdf reshim python`` if the user is installing a
# package since the package may contain an executable
# See https://github.com/readthedocs/readthedocs.org/pull/9150#discussion_r882849790
for python_reshim_command in python_reshim_commands:
# Convert tuple/list into set to check reshim command is a
# subset of the command itself. This is to find ``pip install``
# but also ``pip -v install`` and ``python -m pip install``
if python_reshim_command.issubset(command.split()):
environment.run(
*["asdf", "reshim", "python"],
escape_command=False,
cwd=cwd,
record=False,
)
# Do same for Rust
for rust_reshim_command in rust_reshim_commands:
if rust_reshim_command.issubset(command.split()):
environment.run(
*["asdf", "reshim", "rust"],
escape_command=False,
cwd=cwd,
record=False,
)
html_output_path = os.path.join(cwd, BUILD_COMMANDS_OUTPUT_PATH_HTML)
if not os.path.exists(html_output_path):
raise BuildUserError(BuildUserError.BUILD_COMMANDS_WITHOUT_OUTPUT)
# Update the `Version.documentation_type` to match the doctype defined
# by the config file. When using `build.commands` it will be `GENERIC`
self.data.version.documentation_type = self.data.config.doctype
self.store_readthedocs_build_yaml() | Runs each build command in the build environment. | run_build_commands | python | readthedocs/readthedocs.org | readthedocs/doc_builder/director.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/director.py | MIT |
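The reshim triggers are sets checked with ``issubset`` against the tokenized command, which is what makes flag and module-invocation variants match too:

```python
trigger = {"pip", "install"}

print(trigger.issubset("pip install sphinx".split()))            # True
print(trigger.issubset("python -m pip install sphinx".split()))  # True
print(trigger.issubset("pip -v install sphinx".split()))         # True
print(trigger.issubset("pip download sphinx".split()))           # False
```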
def install_build_tools(self):
"""
Install all ``build.tools`` defined by the user in the config file.
It uses ``asdf`` behind the scenes to manage all the tools and versions
of them. These tools/versions are stored in the Cloud cache and are
downloaded on each build (~50 - ~100Mb).
If the requested tool/version is not present in the cache, it's
installed via ``asdf`` on the fly.
"""
if settings.RTD_DOCKER_COMPOSE:
# Create a symlink for ``root`` user to use the same ``.asdf``
# installation as the ``docs`` user. Required for local building
# since everything is run as ``root`` when using a Local Development
# instance
cmd = [
"ln",
"-s",
os.path.join(settings.RTD_DOCKER_WORKDIR, ".asdf"),
"/root/.asdf",
]
self.build_environment.run(
*cmd,
record=False,
)
for tool, version in self.data.config.build.tools.items():
full_version = version.full_version # e.g. 3.9 -> 3.9.7
# TODO: generate the correct path for the Python version
# see https://github.com/readthedocs/readthedocs.org/pull/8447#issuecomment-911562267
# tool_path = f'{self.config.build.os}/{tool}/2021-08-30/{full_version}.tar.gz'
build_os = self.data.config.build.os
if build_os == "ubuntu-lts-latest":
_, build_os = settings.RTD_DOCKER_BUILD_SETTINGS["os"]["ubuntu-lts-latest"].split(
":"
)
tool_path = f"{build_os}-{tool}-{full_version}.tar.gz"
tool_version_cached = build_tools_storage.exists(tool_path)
if tool_version_cached:
remote_fd = build_tools_storage.open(tool_path, mode="rb")
with tarfile.open(fileobj=remote_fd) as tar:
# Extract it on the shared path between host and Docker container
extract_path = os.path.join(self.data.project.doc_path, "tools")
tar.extractall(extract_path)
# Move the extracted content to the ``asdf`` installation
cmd = [
"mv",
f"{extract_path}/{full_version}",
os.path.join(
settings.RTD_DOCKER_WORKDIR,
f".asdf/installs/{tool}/{full_version}",
),
]
self.build_environment.run(
*cmd,
record=False,
)
else:
log.debug(
"Cached version for tool not found.",
os=self.data.config.build.os,
tool=tool,
full_version=full_version,
tool_path=tool_path,
)
# If the tool version selected is not available from the
# cache we compile it at build time
cmd = [
# TODO: make ``PYTHON_CONFIGURE_OPTS="--enable-shared"``
# environment variable to work here. Note that
# ``self.build_environment.run`` does not support passing
# environment for a particular command:
# https://github.com/readthedocs/readthedocs.org/blob/9d2d1a2/readthedocs/doc_builder/environments.py#L430-L431
"asdf",
"install",
tool,
full_version,
]
self.build_environment.run(
*cmd,
)
# Make the tool version chosen by the user the default one
cmd = [
"asdf",
"global",
tool,
full_version,
]
self.build_environment.run(
*cmd,
)
# Recreate shims for this tool to make the new version
# installed available
# https://asdf-vm.com/learn-more/faq.html#newly-installed-exectable-not-running
cmd = [
"asdf",
"reshim",
tool,
]
self.build_environment.run(
*cmd,
record=False,
)
if all(
[
tool == "python",
# Do not install them if the tool version was cached
# because these dependencies are already installed when
# created with our script and uploaded to the cache's
# bucket
not tool_version_cached,
# Do not install them on conda/mamba since they are not
# needed because the environment is managed by conda/mamba
# itself
self.data.config.python_interpreter not in ("conda", "mamba"),
]
):
# We cap setuptools to avoid breakage of projects
# relying on setup.py invocations,
# see https://github.com/readthedocs/readthedocs.org/issues/8659
setuptools_version = (
"setuptools<58.3.0"
if self.data.config.is_using_setup_py_install
else "setuptools"
)
# Install our own requirements if the version is compiled
cmd = [
"python",
"-mpip",
"install",
"-U",
"virtualenv",
setuptools_version,
]
self.build_environment.run(
*cmd,
) | Install all ``build.tools`` defined by the user in the config file.
It uses ``asdf`` behind the scenes to manage all the tools and versions
of them. These tools/versions are stored in the Cloud cache and are
downloaded on each build (~50 - ~100Mb).
If the requested tool/version is not present in the cache, it's
installed via ``asdf`` on the fly. | install_build_tools | python | readthedocs/readthedocs.org | readthedocs/doc_builder/director.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/director.py | MIT |
def build_docs_class(self, builder_class):
"""
Build docs with additional doc backends.
These steps are not necessarily required for the build to halt, so we
only raise a warning exception here. A hard error will halt the build
process.
"""
# If the builder is generic, we have nothing to do here,
# as the commands are provided by the user.
if builder_class == GENERIC:
return
builder = get_builder_class(builder_class)(
build_env=self.build_environment,
python_env=self.language_environment,
)
if builder_class == self.data.config.doctype:
builder.show_conf()
self.data.version.documentation_type = builder.get_final_doctype()
success = builder.build()
return success | Build docs with additional doc backends.
These steps are not necessarily required for the build to halt, so we
only raise a warning exception here. A hard error will halt the build
process. | build_docs_class | python | readthedocs/readthedocs.org | readthedocs/doc_builder/director.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/director.py | MIT |
def get_vcs_env_vars(self):
"""Get environment variables to be included in the VCS setup step."""
env = self.get_rtd_env_vars()
# Don't prompt for username, this requires Git 2.3+
env["GIT_TERMINAL_PROMPT"] = "0"
return env | Get environment variables to be included in the VCS setup step. | get_vcs_env_vars | python | readthedocs/readthedocs.org | readthedocs/doc_builder/director.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/director.py | MIT |
def get_rtd_env_vars(self):
"""Get bash environment variables specific to Read the Docs."""
env = {
"READTHEDOCS": "True",
"READTHEDOCS_VERSION": self.data.version.slug,
"READTHEDOCS_VERSION_TYPE": self.data.version.type,
"READTHEDOCS_VERSION_NAME": self.data.version.verbose_name,
"READTHEDOCS_PROJECT": self.data.project.slug,
"READTHEDOCS_LANGUAGE": self.data.project.language,
"READTHEDOCS_REPOSITORY_PATH": self.data.project.checkout_path(self.data.version.slug),
"READTHEDOCS_OUTPUT": os.path.join(
self.data.project.checkout_path(self.data.version.slug), "_readthedocs/"
),
"READTHEDOCS_GIT_CLONE_URL": self.data.project.repo,
# TODO: we don't have access to the database from the builder.
# We need to find a way to expose HTML_URL here as well.
# "READTHEDOCS_GIT_HTML_URL": self.data.project.remote_repository.html_url,
"READTHEDOCS_GIT_IDENTIFIER": self.data.version.git_identifier,
"READTHEDOCS_GIT_COMMIT_HASH": self.data.build["commit"],
"READTHEDOCS_PRODUCTION_DOMAIN": settings.PRODUCTION_DOMAIN,
}
return env | Get bash environment variables specific to Read the Docs. | get_rtd_env_vars | python | readthedocs/readthedocs.org | readthedocs/doc_builder/director.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/director.py | MIT |
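On the project side, these variables are what a ``conf.py`` (or any build script) can use to detect a Read the Docs build:

```python
import os

# READTHEDOCS is set to the string "True" by get_rtd_env_vars() above.
on_rtd = os.environ.get("READTHEDOCS") == "True"
if on_rtd:
    version = os.environ.get("READTHEDOCS_VERSION", "latest")
```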
def get_build_env_vars(self):
"""Get bash environment variables used for all builder commands."""
env = self.get_rtd_env_vars()
# https://no-color.org/
env["NO_COLOR"] = "1"
if self.data.config.conda is not None:
env.update(
{
# NOTE: should these be prefixed with "READTHEDOCS_"?
"CONDA_ENVS_PATH": os.path.join(self.data.project.doc_path, "conda"),
"CONDA_DEFAULT_ENV": self.data.version.slug,
"BIN_PATH": os.path.join(
self.data.project.doc_path,
"conda",
self.data.version.slug,
"bin",
),
}
)
else:
env.update(
{
"BIN_PATH": os.path.join(
self.data.project.doc_path,
"envs",
self.data.version.slug,
"bin",
),
"READTHEDOCS_VIRTUALENV_PATH": os.path.join(
self.data.project.doc_path, "envs", self.data.version.slug
),
}
)
env.update(
{
"READTHEDOCS_CANONICAL_URL": self.data.version.canonical_url,
}
)
# Update environment from Project's specific environment variables,
# to avoid exposing private environment variables
# if the version is external (i.e. a PR build).
env.update(
self.data.project.environment_variables(public_only=self.data.version.is_external)
)
return env | Get bash environment variables used for all builder commands. | get_build_env_vars | python | readthedocs/readthedocs.org | readthedocs/doc_builder/director.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/director.py | MIT |
def is_type_sphinx(self):
"""Is documentation type Sphinx."""
return "sphinx" in self.data.config.doctype | Is documentation type Sphinx. | is_type_sphinx | python | readthedocs/readthedocs.org | readthedocs/doc_builder/director.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/director.py | MIT |
def attach_notification(
self,
message_id,
format_values=None,
state="unread",
dismissable=False,
news=False,
):
"""Attach a notification to build in progress using the APIv2."""
format_values = format_values or {}
# NOTE: we are using APIv2 here because it uses BuildAPIKey authentication,
# which is not currently supported by APIv3.
self.data.api_client.notifications.post(
{
"attached_to": f"build/{self.data.build['id']}",
"message_id": message_id,
"state": state, # Optional
"dismissable": dismissable,
"news": news,
"format_values": format_values,
}
) | Attach a notification to build in progress using the APIv2. | attach_notification | python | readthedocs/readthedocs.org | readthedocs/doc_builder/director.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/director.py | MIT |
def install_requirements(self):
"""Install all requirements from the config object."""
for install in self.config.python.install:
if isinstance(install, PythonInstallRequirements):
self.install_requirements_file(install)
if isinstance(install, PythonInstall):
self.install_package(install) | Install all requirements from the config object. | install_requirements | python | readthedocs/readthedocs.org | readthedocs/doc_builder/python_environments.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/python_environments.py | MIT |
def install_package(self, install):
"""
Install the package using pip or setuptools.
:param install: An install object from the config module.
:type install: readthedocs.config.models.PythonInstall
"""
# NOTE: `venv_bin` requires `prefixes`.
# However, it's overwritten in the subclasses and
# it forces passing the `prefixes=` attribute.
# I'm not sure how to solve this, so I'm skipping this check for now.
# pylint: disable=no-value-for-parameter
if install.method == PIP:
# Prefix ./ so pip installs from a local path rather than pypi
local_path = os.path.join(".", install.path) if install.path != "." else install.path
extra_req_param = ""
if install.extra_requirements:
extra_req_param = "[{}]".format(",".join(install.extra_requirements))
self.build_env.run(
self.venv_bin(filename="python"),
"-m",
"pip",
"install",
"--upgrade",
"--upgrade-strategy",
"only-if-needed",
"--no-cache-dir",
"{path}{extra_requirements}".format(
path=local_path,
extra_requirements=extra_req_param,
),
cwd=self.checkout_path,
bin_path=self.venv_bin(),
)
elif install.method == SETUPTOOLS:
self.build_env.run(
self.venv_bin(filename="python"),
os.path.join(install.path, "setup.py"),
"install",
"--force",
cwd=self.checkout_path,
bin_path=self.venv_bin(),
) | Install the package using pip or setuptools.
:param install: An install object from the config module.
:type install: readthedocs.config.models.PythonInstall | install_package | python | readthedocs/readthedocs.org | readthedocs/doc_builder/python_environments.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/python_environments.py | MIT |
def venv_bin(self, prefixes, filename=None):
"""
Return path to the virtualenv bin path, or a specific binary.
:param filename: If specified, add this filename to the returned path
:param prefixes: List of path prefixes to include in the resulting path
:returns: Path to virtualenv bin or filename in virtualenv bin
"""
if filename is not None:
prefixes.append(filename)
return os.path.join(*prefixes) | Return path to the virtualenv bin path, or a specific binary.
:param filename: If specified, add this filename to the returned path
:param prefixes: List of path prefixes to include in the resulting path
:returns: Path to virtualenv bin or filename in virtualenv bin | venv_bin | python | readthedocs/readthedocs.org | readthedocs/doc_builder/python_environments.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/python_environments.py | MIT |
def setup_base(self):
"""
Create a virtualenv, invoking ``python -mvirtualenv``.
.. note::
``--no-download`` was removed because of the pip breakage,
it was sometimes installing pip 20.0 which broke everything
https://github.com/readthedocs/readthedocs.org/issues/6585
Important not to add empty string arguments, see:
https://github.com/readthedocs/readthedocs.org/issues/7322
"""
cli_args = [
"-mvirtualenv",
# Append the positional destination argument
"$READTHEDOCS_VIRTUALENV_PATH",
]
self.build_env.run(
self.config.python_interpreter,
*cli_args,
# Don't use virtualenv bin that doesn't exist yet
bin_path=None,
# Don't use the project's root, some config files can interfere
cwd=None,
) | Create a virtualenv, invoking ``python -mvirtualenv``.
.. note::
``--no-download`` was removed because of the pip breakage,
it was sometimes installing pip 20.0 which broke everything
https://github.com/readthedocs/readthedocs.org/issues/6585
Important not to add empty string arguments, see:
https://github.com/readthedocs/readthedocs.org/issues/7322 | setup_base | python | readthedocs/readthedocs.org | readthedocs/doc_builder/python_environments.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/python_environments.py | MIT |
def install_core_requirements(self):
"""Install basic Read the Docs requirements into the virtualenv."""
pip_install_cmd = [
self.venv_bin(filename="python"),
"-m",
"pip",
"install",
"--upgrade",
"--no-cache-dir",
]
self._install_latest_requirements(pip_install_cmd) | Install basic Read the Docs requirements into the virtualenv. | install_core_requirements | python | readthedocs/readthedocs.org | readthedocs/doc_builder/python_environments.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/python_environments.py | MIT |
def _install_latest_requirements(self, pip_install_cmd):
"""Install all the latest core requirements."""
# First, upgrade pip and setuptools to their latest versions
cmd = pip_install_cmd + ["pip", "setuptools"]
self.build_env.run(
*cmd,
bin_path=self.venv_bin(),
cwd=self.checkout_path,
)
# Nothing else to install for generic projects.
if self.config.doctype == GENERIC:
return
# Second, install all the latest core requirements
requirements = []
if self.config.doctype == "mkdocs":
requirements.append("mkdocs")
else:
requirements.append("sphinx")
cmd = copy.copy(pip_install_cmd)
cmd.extend(requirements)
self.build_env.run(
*cmd,
bin_path=self.venv_bin(),
cwd=self.checkout_path,
) | Install all the latest core requirements. | _install_latest_requirements | python | readthedocs/readthedocs.org | readthedocs/doc_builder/python_environments.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/python_environments.py | MIT |
def install_requirements_file(self, install):
"""
Install a requirements file using pip.
:param install: A install object from the config module.
:type install: readthedocs.config.models.PythonInstallRequirements
"""
requirements_file_path = install.requirements
if requirements_file_path:
args = [
self.venv_bin(filename="python"),
"-m",
"pip",
"install",
]
if self.project.has_feature(Feature.PIP_ALWAYS_UPGRADE):
args += ["--upgrade"]
args += [
"--exists-action=w",
"--no-cache-dir",
"-r",
requirements_file_path,
]
self.build_env.run(
*args,
cwd=self.checkout_path,
bin_path=self.venv_bin(),
) | Install a requirements file using pip.
:param install: An install object from the config module.
:type install: readthedocs.config.models.PythonInstallRequirements | install_requirements_file | python | readthedocs/readthedocs.org | readthedocs/doc_builder/python_environments.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/python_environments.py | MIT |
def conda_bin_name(self):
"""
Decide whether to use ``mamba`` or ``conda`` to create the environment.
``mamba`` is really fast to solve dependencies and download channel
metadata on startup.
See https://github.com/QuantStack/mamba
"""
return self.config.python_interpreter | Decide whether to use ``mamba`` or ``conda`` to create the environment.
``mamba`` is really fast to solve dependencies and download channel
metadata on startup.
See https://github.com/QuantStack/mamba | conda_bin_name | python | readthedocs/readthedocs.org | readthedocs/doc_builder/python_environments.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/python_environments.py | MIT |
def _show_environment_yaml(self):
"""Show ``environment.yml`` file in the Build output."""
self.build_env.run(
"cat",
self.config.conda.environment,
cwd=self.checkout_path,
) | Show ``environment.yml`` file in the Build output. | _show_environment_yaml | python | readthedocs/readthedocs.org | readthedocs/doc_builder/python_environments.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/python_environments.py | MIT |
def _append_core_requirements(self):
"""
Append Read the Docs dependencies to Conda environment file.
This helps users pin their dependencies properly without us upgrading
them in the second ``conda install`` run.
See https://github.com/readthedocs/readthedocs.org/pull/5631
"""
try:
# Allow symlinks, but only the ones that resolve inside the base directory.
inputfile = safe_open(
os.path.join(
self.checkout_path,
self.config.conda.environment,
),
"r",
allow_symlinks=True,
base_path=self.checkout_path,
)
if not inputfile:
raise UserFileNotFound(
message_id=UserFileNotFound.FILE_NOT_FOUND,
format_values={
"filename": self.config.conda.environment,
},
)
environment = parse_yaml(inputfile)
except IOError:
log.warning(
"There was an error while reading Conda environment file.",
)
except ParseError:
log.warning(
"There was an error while parsing Conda environment file.",
)
else:
# Append conda dependencies directly to ``dependencies`` and pip
# dependencies to ``dependencies.pip``
pip_requirements, conda_requirements = self._get_core_requirements()
dependencies = environment.get("dependencies", [])
pip_dependencies = {"pip": pip_requirements}
for item in dependencies:
if isinstance(item, dict) and "pip" in item:
# NOTE: pip can be ``None``
pip_requirements.extend(item.get("pip") or [])
dependencies.remove(item)
break
dependencies.append(pip_dependencies)
dependencies.extend(conda_requirements)
environment.update({"dependencies": dependencies})
try:
# Allow symlinks, but only the ones that resolve inside the base directory.
outputfile = safe_open(
os.path.join(
self.checkout_path,
self.config.conda.environment,
),
"w",
allow_symlinks=True,
base_path=self.checkout_path,
)
if not outputfile:
raise UserFileNotFound(
message_id=UserFileNotFound.FILE_NOT_FOUND,
format_values={
"filename": self.config.conda.environment,
},
)
yaml.safe_dump(environment, outputfile)
except IOError:
log.warning(
"There was an error while writing the new Conda environment file.",
) | Append Read the Docs dependencies to Conda environment file.
This helps users pin their dependencies properly without us upgrading
them in the second ``conda install`` run.
See https://github.com/readthedocs/readthedocs.org/pull/5631 | _append_core_requirements | python | readthedocs/readthedocs.org | readthedocs/doc_builder/python_environments.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/python_environments.py | MIT |
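The merge in the ``else`` branch can be rehearsed in isolation: existing pip extras are folded into a single ``pip`` entry and conda requirements are appended at the top level (requirement names below are invented stand-ins for ``_get_core_requirements()``):

```python
environment = {
    "name": "docs",
    "dependencies": ["python=3.10", {"pip": ["sphinx-autoapi"]}],
}
pip_requirements = ["mock", "pillow"]  # stand-in core pip requirements
conda_requirements = ["sphinx"]        # stand-in core conda requirements

dependencies = environment.get("dependencies", [])
pip_dependencies = {"pip": pip_requirements}
for item in dependencies:
    if isinstance(item, dict) and "pip" in item:
        pip_requirements.extend(item.get("pip") or [])
        dependencies.remove(item)
        break
dependencies.append(pip_dependencies)
dependencies.extend(conda_requirements)
environment.update({"dependencies": dependencies})

# environment["dependencies"] is now:
# ["python=3.10", {"pip": ["mock", "pillow", "sphinx-autoapi"]}, "sphinx"]
```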
def install_core_requirements(self):
"""
Skip installing requirements.
Skip installing core requirements, since they were already appended to
the user's ``environment.yml`` and installed at ``conda env create`` step.
""" | Skip installing requirements.
Skip installing core requirements, since they were already appended to
the user's ``environment.yml`` and installed at ``conda env create`` step. | install_core_requirements | python | readthedocs/readthedocs.org | readthedocs/doc_builder/python_environments.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/python_environments.py | MIT |
def load_yaml_config(version, readthedocs_yaml_path=None):
"""
Load a build configuration file (`.readthedocs.yaml`).
This uses the configuration logic from `readthedocs-build`, which will keep
parsing consistent between projects.
:param readthedocs_yaml_path: Optionally, we are told which readthedocs_yaml_path to
load instead of using defaults.
"""
checkout_path = version.project.checkout_path(version.slug)
# TODO: review this function since we are removing all the defaults for BuildConfigV2 as well.
# NOTE: all the configuration done on the UI will have no effect at all from now on.
# Get build image to set up the python version validation. Pass in the
# build image python limitations to the loaded config so that the versions
# can be rejected at validation
config = load_config(
path=checkout_path,
readthedocs_yaml_path=readthedocs_yaml_path,
)
return config | Load a build configuration file (`.readthedocs.yaml`).
This uses the configuration logic from `readthedocs-build`, which will keep
parsing consistent between projects.
:param readthedocs_yaml_path: Optionally, we are told which readthedocs_yaml_path to
load instead of using defaults. | load_yaml_config | python | readthedocs/readthedocs.org | readthedocs/doc_builder/config.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/config.py | MIT |
def get_final_doctype(self):
"""Some builders may have a different doctype at build time."""
return self.config.doctype | Some builders may have a different doctype at build time. | get_final_doctype | python | readthedocs/readthedocs.org | readthedocs/doc_builder/base.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/base.py | MIT |
def show_conf(self):
"""Show the configuration used for this builder.""" | Show the configuration used for this builder. | show_conf | python | readthedocs/readthedocs.org | readthedocs/doc_builder/base.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/base.py | MIT |
def build(self):
"""Do the actual building of the documentation."""
raise NotImplementedError | Do the actual building of the documentation. | build | python | readthedocs/readthedocs.org | readthedocs/doc_builder/base.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/base.py | MIT |
def _post_build(self):
"""Execute extra steps (e.g. create ZIP, rename PDF, etc) after building if required.""" | Execute extra steps (e.g. create ZIP, rename PDF, etc) after building if required. | _post_build | python | readthedocs/readthedocs.org | readthedocs/doc_builder/base.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/base.py | MIT |
def docs_dir(self):
"""Handle creating a custom docs_dir if it doesn't exist."""
for doc_dir_name in ["docs", "doc", "Doc", "book"]:
possible_path = os.path.join(self.project_path, doc_dir_name)
if os.path.exists(possible_path):
return possible_path
return self.project_path | Handle creating a custom docs_dir if it doesn't exist. | docs_dir | python | readthedocs/readthedocs.org | readthedocs/doc_builder/base.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/base.py | MIT |
def run(self, *args, **kwargs):
"""Proxy run to build environment."""
return self.build_env.run(*args, **kwargs) | Proxy run to build environment. | run | python | readthedocs/readthedocs.org | readthedocs/doc_builder/base.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/base.py | MIT |
def run(self):
"""Set up subprocess and execute command."""
self.start_time = datetime.utcnow()
environment = self._environment.copy()
if "DJANGO_SETTINGS_MODULE" in environment:
del environment["DJANGO_SETTINGS_MODULE"]
if "PYTHONPATH" in environment:
del environment["PYTHONPATH"]
# Always copy the PATH from the host into the environment
env_paths = os.environ.get("PATH", "").split(":")
if self.bin_path is not None:
env_paths.insert(0, self.bin_path)
environment["PATH"] = ":".join(env_paths)
log.info(
"Running build command.",
command=self.get_command(),
cwd=self.cwd,
environment=environment,
)
try:
# When using ``shell=True`` the command should be flattened
command = self.command
if self.shell:
command = self.get_command()
stderr = subprocess.PIPE if self.demux else subprocess.STDOUT
proc = subprocess.Popen( # pylint: disable=consider-using-with
command,
shell=self.shell,
cwd=self.cwd,
stdin=None,
stdout=subprocess.PIPE,
stderr=stderr,
env=environment,
)
cmd_stdout, cmd_stderr = proc.communicate()
self.output = self.decode_output(cmd_stdout)
self.error = self.decode_output(cmd_stderr)
self.exit_code = proc.returncode
except OSError:
log.exception("Operating system error.")
self.exit_code = -1
finally:
self.end_time = datetime.utcnow() | Set up subprocess and execute command. | run | python | readthedocs/readthedocs.org | readthedocs/doc_builder/environments.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/environments.py | MIT |
def decode_output(self, output: bytes) -> str:
"""Decode bytes output to a UTF-8 string."""
decoded = ""
try:
decoded = output.decode("utf-8", "replace")
except (TypeError, AttributeError):
pass
return decoded | Decode bytes output to a UTF-8 string. | decode_output | python | readthedocs/readthedocs.org | readthedocs/doc_builder/environments.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/environments.py | MIT |
def get_command(self):
"""Flatten command."""
if hasattr(self.command, "__iter__") and not isinstance(self.command, str):
return " ".join(self.command)
return self.command | Flatten command. | get_command | python | readthedocs/readthedocs.org | readthedocs/doc_builder/environments.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/environments.py | MIT |
def save(self, api_client):
"""Save this command and result via the API."""
# Force recording this command as success to avoid the Build reporting
# errors on commands that exist only for checking purposes and do not
# interfere with the Build
if self.record_as_success:
log.warning("Recording command exit_code as success")
self.exit_code = 0
data = {
"build": self.build_env.build.get("id"),
"command": self.get_command(),
"output": self.sanitize_output(self.output),
"exit_code": self.exit_code,
"start_time": self.start_time,
"end_time": self.end_time,
}
if self.build_env.project.has_feature(Feature.API_LARGE_DATA):
# Don't use slumber directly here. Slumber tries to enforce a string,
# which will break our multipart encoding here.
encoder = MultipartEncoder({key: str(value) for key, value in data.items()})
resource = api_client.command
resp = resource._store["session"].post(
resource._store["base_url"] + "/",
data=encoder,
headers={
"Content-Type": encoder.content_type,
},
)
log.debug("Post response via multipart form.", response=resp)
else:
resp = api_client.command.post(data)
log.debug("Post response via JSON encoded data.", response=resp) | Save this command and result via the API. | save | python | readthedocs/readthedocs.org | readthedocs/doc_builder/environments.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/environments.py | MIT |
def __init__(self, *args, escape_command=True, **kwargs):
"""
Override default to extend behavior.
:param escape_command: whether to escape special chars in the command before
executing it in the container. This should only be disabled for
trusted or internal commands.
:type escape_command: bool
"""
self.escape_command = escape_command
super().__init__(*args, **kwargs) | Override default to extend behavior.
:param escape_command: whether to escape special chars in the command before
executing it in the container. This should only be disabled for
trusted or internal commands.
:type escape_command: bool | __init__ | python | readthedocs/readthedocs.org | readthedocs/doc_builder/environments.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/environments.py | MIT |
def run(self):
"""Execute command in existing Docker container."""
log.info(
"Running build command in container.",
container_id=self.build_env.container_id,
command=self.get_command(),
cwd=self.cwd,
)
self.start_time = datetime.utcnow()
client = self.build_env.get_client()
try:
exec_cmd = client.exec_create(
container=self.build_env.container_id,
cmd=self.get_wrapped_command(),
environment=self._environment,
user=self.user,
workdir=self.cwd,
stdout=True,
stderr=True,
)
out = client.exec_start(exec_id=exec_cmd["Id"], stream=False, demux=self.demux)
cmd_stdout = ""
cmd_stderr = ""
if self.demux:
cmd_stdout, cmd_stderr = out
else:
cmd_stdout = out
self.output = self.decode_output(cmd_stdout)
self.error = self.decode_output(cmd_stderr)
cmd_ret = client.exec_inspect(exec_id=exec_cmd["Id"])
self.exit_code = cmd_ret["ExitCode"]
# Docker will exit with a special exit code to signify the command
# was killed due to memory usage. We try to make the error code
# nicer here. However, sometimes the kernel kills the command and
# Docker does not use the specific exit code, so we check if the
# word `Killed` is in the last 15 lines of the command's output.
#
# NOTE: the word `Killed` could appear in the output because the
# command was killed by OOM or by timeout, so we put a generic message here.
killed_in_output = "Killed" in "\n".join(
self.output.splitlines()[-15:],
)
if self.exit_code == DOCKER_OOM_EXIT_CODE or (self.exit_code == 1 and killed_in_output):
self.output += str(
_(
"\n\nCommand killed due to timeout or excessive memory consumption\n",
),
)
except DockerAPIError:
self.exit_code = -1
if not self.output:
self.output = _("Command exited abnormally")
finally:
self.end_time = datetime.utcnow() | Execute command in existing Docker container. | run | python | readthedocs/readthedocs.org | readthedocs/doc_builder/environments.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/environments.py | MIT |
def get_wrapped_command(self):
"""
Wrap command in a shell and optionally escape special bash characters.
In order to set the current working path inside a docker container, we
need to wrap the command in a shell call manually.
Some characters will be interpreted as shell characters without
escaping, such as: ``pip install requests<0.8``. When passing
``escape_command=True`` in the init method this escapes a good majority
of those characters.
"""
prefix = ""
if self.bin_path:
bin_path = self._escape_command(self.bin_path)
prefix += f"PATH={bin_path}:$PATH "
command = " ".join(
self._escape_command(part) if self.escape_command else part for part in self.command
)
if prefix:
# Using `;` or `\n` to separate the `prefix` where we define the
# variables with the `command` itself, have the same effect.
# However, using `;` is more explicit.
# See https://github.com/readthedocs/readthedocs.org/pull/10334
return f"/bin/sh -c '{prefix}; {command}'"
return f"/bin/sh -c '{command}'" | Wrap command in a shell and optionally escape special bash characters.
In order to set the current working path inside a docker container, we
need to wrap the command in a shell call manually.
Some characters will be interpreted as shell characters without
escaping, such as: ``pip install requests<0.8``. When passing
``escape_command=True`` in the init method this escapes a good majority
of those characters. | get_wrapped_command | python | readthedocs/readthedocs.org | readthedocs/doc_builder/environments.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/environments.py | MIT |
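To illustrate the wrapping described above, a minimal sketch of the string this method produces. The exact escaped form depends on ``_escape_command``, which is not shown in this excerpt, so the escaping below is an assumption:

# Hypothetical illustration; constructing a real DockerBuildCommand needs a
# build environment, so this only shows the expected shape of the output.
cmd = DockerBuildCommand(["pip", "install", "requests<0.8"], bin_path="/venv/bin")
print(cmd.get_wrapped_command())
# Roughly: /bin/sh -c 'PATH=/venv/bin:$PATH ; pip install requests\<0.8'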
def run(self, *cmd, **kwargs):
"""Shortcut to run command from environment."""
return self.run_command_class(cls=self.command_class, cmd=cmd, **kwargs) | Shortcut to run command from environment. | run | python | readthedocs/readthedocs.org | readthedocs/doc_builder/environments.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/environments.py | MIT |
def run_command_class(
self, cls, cmd, warn_only=False, record=True, record_as_success=False, **kwargs
):
"""
Run command from this environment.
:param cls: command class to instantiate a command
:param cmd: command (as a list) to execute in this environment
:param record: whether or not to record this particular command
(``False`` implies ``warn_only=True``)
:param warn_only: don't raise an exception on command failure
:param record_as_success: force command ``exit_code`` to be saved as
``0`` (``True`` implies ``warn_only=True`` and ``record=True``)
"""
if not record:
warn_only = True
if record_as_success:
record = True
warn_only = True
# ``record_as_success`` is needed to instantiate the BuildCommand
kwargs.update({"record_as_success": record_as_success})
# Remove PATH from env, and set it to bin_path if it isn't passed in
environment = self._environment.copy()
env_path = environment.pop("BIN_PATH", None)
if "bin_path" not in kwargs and env_path:
kwargs["bin_path"] = env_path
if "environment" in kwargs:
raise BuildAppError(
BuildAppError.GENERIC_WITH_BUILD_ID,
exception_message="environment can't be passed in via commands.",
)
kwargs["environment"] = environment
kwargs["build_env"] = self
build_cmd = cls(cmd, **kwargs)
build_cmd.run()
if record:
# TODO: I don't like how this entry point is handled here, since
# this class should know nothing about a BuildCommand (which are the
# only ones that can be saved/recorded)
self.record_command(build_cmd)
# We want to append this command to the list of commands only if it has
# to be recorded in the database (to keep consistency). Also, it
# has to be added after ``self.record_command`` since its
# ``exit_code`` can be altered because of ``record_as_success``
self.commands.append(build_cmd)
if build_cmd.failed:
if warn_only:
msg = "Command failed"
build_output = ""
if build_cmd.output:
build_output += "\n".join(build_cmd.output.split("\n")[:10])
build_output += "\n ..Output Truncated.. \n"
build_output += "\n".join(build_cmd.output.split("\n")[-10:])
log.warning(
msg,
command=build_cmd.get_command(),
output=build_output,
exit_code=build_cmd.exit_code,
project_slug=self.project.slug if self.project else "",
version_slug=self.version.slug if self.version else "",
)
elif build_cmd.exit_code == RTD_SKIP_BUILD_EXIT_CODE:
raise BuildCancelled(BuildCancelled.SKIPPED_EXIT_CODE_183)
else:
# TODO: for now, this still outputs a generic error message
# that is the same across all commands. We could improve this
# with more granular error messages that vary by the command
# being run.
raise BuildUserError(BuildUserError.GENERIC)
return build_cmd | Run command from this environment.
:param cls: command class to instantiate a command
:param cmd: command (as a list) to execute in this environment
:param record: whether or not to record this particular command
(``False`` implies ``warn_only=True``)
:param warn_only: don't raise an exception on command failure
:param record_as_success: force command ``exit_code`` to be saved as
``0`` (``True`` implies ``warn_only=True`` and ``record=True``) | run_command_class | python | readthedocs/readthedocs.org | readthedocs/doc_builder/environments.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/environments.py | MIT |
def __enter__(self):
"""Start of environment context."""
try:
# Test for an existing container. If there is a collision, we remove
# any stale containers that are no longer running; if the container is
# still running, we raise an exception
state = self.container_state()
if state is not None:
if state.get("Running") is True:
raise BuildAppError(
BuildAppError.GENERIC_WITH_BUILD_ID,
exception_message=_(
"A build environment is currently running for this version",
),
)
log.warning(
"Removing stale container.",
container_id=self.container_id,
)
client = self.get_client()
client.remove_container(self.container_id)
except (DockerAPIError, ConnectionError) as exc:
raise BuildAppError(
BuildAppError.GENERIC_WITH_BUILD_ID, exception_message=exc.explanation
) from exc
# Create the checkout path if it doesn't exist to avoid Docker creation
if not os.path.exists(self.project.doc_path):
os.makedirs(self.project.doc_path)
try:
self.create_container()
except: # noqa
self.__exit__(*sys.exc_info())
raise
return self | Start of environment context. | __enter__ | python | readthedocs/readthedocs.org | readthedocs/doc_builder/environments.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/environments.py | MIT |
def __exit__(self, exc_type, exc_value, tb):
"""End of environment context."""
client = self.get_client()
try:
client.kill(self.container_id)
except DockerNotFoundError:
log.info(
"Container does not exists, nothing to kill.",
container_id=self.container_id,
)
except DockerAPIError:
# Logging this as a warning because it usually happens due to a memory
# limit or build timeout. In those cases, the container is not
# running and can't be killed
log.warning(
"Unable to kill container.",
container_id=self.container_id,
)
# Save the container's state before removing it to know what exception
# to raise in the next step (`update_build_from_container_state`)
state = self.container_state()
try:
log.info("Removing container.", container_id=self.container_id)
client.remove_container(self.container_id)
except DockerNotFoundError:
log.info(
"Container does not exists, nothing to remove.",
container_id=self.container_id,
)
# Catch direct failures from Docker API or with an HTTP request.
# These errors should not surface to the user.
except (DockerAPIError, ConnectionError, ReadTimeout):
log.exception("Couldn't remove container")
self.raise_container_error(state) | End of environment context. | __exit__ | python | readthedocs/readthedocs.org | readthedocs/doc_builder/environments.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/environments.py | MIT |
def get_client(self):
"""Create Docker client connection."""
try:
if self.client is None:
self.client = APIClient(
base_url=DOCKER_SOCKET,
version=DOCKER_VERSION,
)
return self.client
except DockerException as exc:
raise BuildAppError(
BuildAppError.GENERIC_WITH_BUILD_ID, exception_message=exc.explanation
) from exc | Create Docker client connection. | get_client | python | readthedocs/readthedocs.org | readthedocs/doc_builder/environments.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/environments.py | MIT |
def _get_binds(self):
"""
Return proper Docker Binds depending on settings.
It uses a Docker volume if running under docker-compose. Otherwise, it
returns just a regular mountpoint path.
"""
if getattr(settings, "RTD_DOCKER_COMPOSE", False):
from pathlib import Path
binds = {
settings.RTD_DOCKER_COMPOSE_VOLUME: {
"bind": str(Path(settings.DOCROOT).parent),
"mode": "rw",
},
}
else:
binds = {
self.project.doc_path: {
"bind": self.project.doc_path,
"mode": "rw",
},
}
binds.update(settings.RTD_DOCKER_ADDITIONAL_BINDS)
return binds | Return proper Docker Binds depending on settings.
It uses a Docker volume if running under docker-compose. Otherwise, it
returns just a regular mountpoint path. | _get_binds | python | readthedocs/readthedocs.org | readthedocs/doc_builder/environments.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/environments.py | MIT |
def get_container_host_config(self):
"""
Create the ``host_config`` settings for the container.
It mainly generates the proper path bindings between the Docker
container and the Host by mounting them with the proper permissions.
The object returned is passed to Docker function
``client.create_container``.
"""
return self.get_client().create_host_config(
binds=self._get_binds(),
mem_limit=self.container_mem_limit,
) | Create the ``host_config`` settings for the container.
It mainly generates the proper path bindings between the Docker
container and the Host by mounting them with the proper permissions.
The object returned is passed to Docker function
``client.create_container``. | get_container_host_config | python | readthedocs/readthedocs.org | readthedocs/doc_builder/environments.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/environments.py | MIT |
def container_id(self):
"""Return id of container if it is valid."""
if self.container_name:
return self.container_name
if self.container:
return self.container.get("Id") | Return id of container if it is valid. | container_id | python | readthedocs/readthedocs.org | readthedocs/doc_builder/environments.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/environments.py | MIT |
def container_state(self):
"""Get container state."""
client = self.get_client()
try:
info = client.inspect_container(self.container_id)
return info.get("State", {})
except DockerAPIError:
return None | Get container state. | container_state | python | readthedocs/readthedocs.org | readthedocs/doc_builder/environments.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/environments.py | MIT |
def raise_container_error(self, state):
"""
Raise an exception based on the container's state.
In the case of the parent command exiting before the exec commands
finish, or in the case of OOM on the container, raise a
`BuildUserError` with an error message explaining the failure.
Otherwise, raise a `BuildAppError`.
"""
if state is not None and state.get("Running") is False:
if state.get("ExitCode") == DOCKER_TIMEOUT_EXIT_CODE:
raise BuildUserError(message_id=BuildUserError.BUILD_TIME_OUT)
if state.get("OOMKilled", False):
raise BuildUserError(message_id=BuildUserError.BUILD_EXCESSIVE_MEMORY)
if state.get("Error"):
raise BuildAppError(
message_id=BuildAppError.BUILD_DOCKER_UNKNOWN_ERROR,
format_values={
"message": state.get("Error"),
},
) | Raise an exception based on the container's state.
In the case of the parent command exiting before the exec commands
finish, or in the case of OOM on the container, raise a
`BuildUserError` with an error message explaining the failure.
Otherwise, raise a `BuildAppError`. | raise_container_error | python | readthedocs/readthedocs.org | readthedocs/doc_builder/environments.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/environments.py | MIT |
def create_container(self):
"""Create docker container."""
client = self.get_client()
try:
log.info(
"Creating Docker container.",
container_image=self.container_image,
container_id=self.container_id,
)
self.container = client.create_container(
image=self.container_image,
command=(
'/bin/sh -c "sleep {time}; exit {exit}"'.format(
time=self.container_time_limit,
exit=DOCKER_TIMEOUT_EXIT_CODE,
)
),
name=self.container_id,
hostname=self.container_id,
host_config=self.get_container_host_config(),
detach=True,
user=settings.RTD_DOCKER_USER,
runtime="runsc", # gVisor runtime
)
client.start(container=self.container_id)
except (DockerAPIError, ConnectionError) as exc:
raise BuildAppError(
BuildAppError.GENERIC_WITH_BUILD_ID, exception_message=exc.explanation
) from exc | Create docker container. | create_container | python | readthedocs/readthedocs.org | readthedocs/doc_builder/environments.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/environments.py | MIT |
def get_language(self, project):
"""Get a Sphinx compatible language code."""
language = project.language
return OLD_LANGUAGES_CODE_MAPPING.get(language, language) | Get a Sphinx compatible language code. | get_language | python | readthedocs/readthedocs.org | readthedocs/doc_builder/backends/sphinx.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/backends/sphinx.py | MIT |
def show_conf(self):
"""Show the current ``conf.py`` being used."""
if self.config_file is None:
raise ProjectConfigurationError(ProjectConfigurationError.NOT_FOUND)
self.config_file = self.config_file or self.project.conf_file(self.version.slug)
if not os.path.exists(self.config_file):
raise UserFileNotFound(
message_id=UserFileNotFound.FILE_NOT_FOUND,
format_values={
"filename": os.path.relpath(self.config_file, self.project_path),
},
)
# Print the contents of conf.py in order to make the rendered
# configfile visible in the build logs
self.run(
"cat",
os.path.relpath(
self.config_file,
self.project_path,
),
cwd=self.project_path,
) | Show the current ``conf.py`` being used. | show_conf | python | readthedocs/readthedocs.org | readthedocs/doc_builder/backends/sphinx.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/backends/sphinx.py | MIT |
def _post_build(self):
"""Internal post build to create the ZIP file from the HTML output."""
target_file = os.path.join(
self.absolute_container_output_dir,
# TODO: shouldn't this name include the name of the version as well?
# It seems we were using the project's slug previously.
# So, keeping it like that for now until we decide to make that adjustment.
f"{self.project.slug}.zip",
)
# **SECURITY CRITICAL: Advisory GHSA-hqwg-gjqw-h5wg**
# Move the directory into a temporal directory,
# so we can rename the directory for zip to use
# that prefix when zipping the files (arcname).
mktemp = self.run("mktemp", "--directory", record=False)
tmp_dir = Path(mktemp.output.strip())
dirname = f"{self.project.slug}-{self.version.slug}"
self.run(
"mv",
self.absolute_container_output_dir,
str(tmp_dir / dirname),
cwd=self.project_path,
record=False,
)
self.run(
"mkdir",
"--parents",
self.absolute_container_output_dir,
cwd=self.project_path,
record=False,
)
self.run(
"zip",
"--recurse-paths", # Include all files and directories.
"--symlinks", # Don't resolve symlinks.
target_file,
dirname,
cwd=str(tmp_dir),
record=False,
) | Internal post build to create the ZIP file from the HTML output. | _post_build | python | readthedocs/readthedocs.org | readthedocs/doc_builder/backends/sphinx.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/backends/sphinx.py | MIT |
def _post_build(self):
"""Internal post build to cleanup EPUB output directory and leave only one .epub file."""
temp_epub_file = f"/tmp/{self.project.slug}-{self.version.slug}.epub"
target_file = os.path.join(
self.absolute_container_output_dir,
f"{self.project.slug}.epub",
)
epub_sphinx_filepaths = glob(os.path.join(self.absolute_host_output_dir, "*.epub"))
if epub_sphinx_filepaths:
# NOTE: we currently support only one .epub per version
epub_filepath = epub_sphinx_filepaths[0]
self.run("mv", epub_filepath, temp_epub_file, cwd=self.project_path, record=False)
self.run(
"rm",
"--recursive",
self.absolute_container_output_dir,
cwd=self.project_path,
record=False,
)
self.run(
"mkdir",
"--parents",
self.absolute_container_output_dir,
cwd=self.project_path,
record=False,
)
self.run("mv", temp_epub_file, target_file, cwd=self.project_path, record=False) | Internal post build to cleanup EPUB output directory and leave only one .epub file. | _post_build | python | readthedocs/readthedocs.org | readthedocs/doc_builder/backends/sphinx.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/backends/sphinx.py | MIT |
def _post_build(self):
"""Internal post build to cleanup PDF output directory and leave only one .pdf file."""
if not self.pdf_file_name:
raise BuildUserError(BuildUserError.PDF_NOT_FOUND)
# TODO: merge this with ePUB since it's pretty much the same
temp_pdf_file = f"/tmp/{self.project.slug}-{self.version.slug}.pdf"
target_file = os.path.join(
self.absolute_container_output_dir,
self.pdf_file_name,
)
# NOTE: we currently support only one .pdf per version
pdf_sphinx_filepath = os.path.join(self.absolute_container_output_dir, self.pdf_file_name)
pdf_sphinx_filepath_host = os.path.join(
self.absolute_host_output_dir,
self.pdf_file_name,
)
if os.path.exists(pdf_sphinx_filepath_host):
self.run(
"mv",
pdf_sphinx_filepath,
temp_pdf_file,
cwd=self.project_path,
record=False,
)
self.run(
"rm",
"-r",
self.absolute_container_output_dir,
cwd=self.project_path,
record=False,
)
self.run(
"mkdir",
"-p",
self.absolute_container_output_dir,
cwd=self.project_path,
record=False,
)
self.run("mv", temp_pdf_file, target_file, cwd=self.project_path, record=False) | Internal post build to cleanup PDF output directory and leave only one .pdf file. | _post_build | python | readthedocs/readthedocs.org | readthedocs/doc_builder/backends/sphinx.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/backends/sphinx.py | MIT |
def get_absolute_static_url():
"""
Get the fully qualified static URL from settings.
Mkdocs needs a full domain because it tries to link to local files.
"""
static_url = settings.STATIC_URL
if not static_url.startswith("http"):
domain = settings.PRODUCTION_DOMAIN
static_url = "http://{}{}".format(domain, static_url)
return static_url | Get the fully qualified static URL from settings.
Mkdocs needs a full domain because it tries to link to local files. | get_absolute_static_url | python | readthedocs/readthedocs.org | readthedocs/doc_builder/backends/mkdocs.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/backends/mkdocs.py | MIT |
def get_final_doctype(self):
"""
Select a doctype based on the ``use_directory_urls`` setting.
https://www.mkdocs.org/user-guide/configuration/#use_directory_urls
"""
# TODO: we should eventually remove this method completely and stop
# relying on "loading the `mkdocs.yml` file in a safe way just to know
# if it's a MKDOCS or MKDOCS_HTML documentation type".
# Allow symlinks, but only the ones that resolve inside the base directory.
with safe_open(
self.yaml_file,
"r",
allow_symlinks=True,
base_path=self.project_path,
) as fh:
config = yaml_load_safely(fh)
use_directory_urls = config.get("use_directory_urls", True)
return MKDOCS if use_directory_urls else MKDOCS_HTML | Select a doctype based on the ``use_directory_urls`` setting.
https://www.mkdocs.org/user-guide/configuration/#use_directory_urls | get_final_doctype | python | readthedocs/readthedocs.org | readthedocs/doc_builder/backends/mkdocs.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/backends/mkdocs.py | MIT |
def get_yaml_config(self):
"""Find the ``mkdocs.yml`` file in the project root."""
mkdocs_path = self.config.mkdocs.configuration
if not mkdocs_path:
mkdocs_path = "mkdocs.yml"
return os.path.join(
self.project_path,
mkdocs_path,
) | Find the ``mkdocs.yml`` file in the project root. | get_yaml_config | python | readthedocs/readthedocs.org | readthedocs/doc_builder/backends/mkdocs.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/backends/mkdocs.py | MIT |
def show_conf(self):
"""Show the current ``mkdocs.yaml`` being used."""
# Write the mkdocs.yml to the build logs
self.run(
"cat",
os.path.relpath(self.yaml_file, self.project_path),
cwd=self.project_path,
) | Show the current ``mkdocs.yaml`` being used. | show_conf | python | readthedocs/readthedocs.org | readthedocs/doc_builder/backends/mkdocs.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/backends/mkdocs.py | MIT |
def yaml_load_safely(content):
"""
Uses a ``SafeLoader`` that skips unknown tags.
When a YAML contains ``!!python/name:int`` it will store the ``int``
suffix temporarily to be able to re-dump it later. We need this to avoid
executing random code, but still support these YAML files without
information loss.
"""
return yaml.load(content, Loader=SafeLoader) | Uses a ``SafeLoader`` that skips unknown tags.
When a YAML contains ``!!python/name:int`` it will store the ``int``
suffix temporarily to be able to re-dump it later. We need this to avoid
executing random code, but still support these YAML files without
information loss. | yaml_load_safely | python | readthedocs/readthedocs.org | readthedocs/doc_builder/backends/mkdocs.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/backends/mkdocs.py | MIT |
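The ``SafeLoader`` imported here is defined elsewhere in the codebase; as a hedged sketch, a catch-all loader of this kind can be built with plain PyYAML as below. The real loader additionally remembers the unknown tag suffix so files can be re-dumped without information loss; this sketch simply keeps the node as plain data:

import yaml

class SkippingSafeLoader(yaml.SafeLoader):
    """SafeLoader that keeps unknown-tagged nodes as plain data."""

def construct_unknown(loader, tag_suffix, node):
    # Build the node with the default constructor for its kind,
    # dropping the unknown tag instead of executing anything.
    if isinstance(node, yaml.ScalarNode):
        return loader.construct_scalar(node)
    if isinstance(node, yaml.SequenceNode):
        return loader.construct_sequence(node)
    return loader.construct_mapping(node)

# An empty prefix matches any tag without a dedicated constructor;
# exact constructors for standard tags still take precedence.
SkippingSafeLoader.add_multi_constructor("", construct_unknown)

# Usage: yaml.load(content, Loader=SkippingSafeLoader)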
def sortpath(filename):
"""
Function to use as `key=` argument for `sorted`.
It sorts the file names so that files in shallower directories come first.
However, it doesn't group the results by directory.
Ideally, this should sort file names by hierarchy (shallower directories
first), grouping them by directory and alphabetically. We should update
this function to achieve that goal if we find a simple way to do it.
"""
parts = filename.split("/")
return len(parts), parts | Function to use as `key=` argument for `sorted`.
It sorts the file names so that files in shallower directories come first.
However, it doesn't group the results by directory.
Ideally, this should sort file names by hierarchy (shallower directories
first), grouping them by directory and alphabetically. We should update
this function to achieve that goal if we find a simple way to do it. | get_diff.sortpath | python | readthedocs/readthedocs.org | readthedocs/filetreediff/__init__.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/filetreediff/__init__.py | MIT |
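For example, the key above orders paths by depth first and lexicographically second:

paths = ["guide/api/auth.html", "index.html", "guide/install.html"]
print(sorted(paths, key=sortpath))
# ['index.html', 'guide/install.html', 'guide/api/auth.html']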
def get_diff(version_a: Version, version_b: Version) -> FileTreeDiff | None:
"""
Get the file tree diff between two versions.
If any of the versions don't have a manifest, return None.
If the latest build of any of the versions is different from the manifest build,
the diff is marked as outdated. The client is responsible for deciding
how to handle this case.
Set operations are used to calculate the added, deleted, and modified files.
To get the modified files, we compare the main content hash of each common file.
If there are no changes between the versions, all lists will be empty.
"""
outdated = False
manifests: list[FileTreeDiffManifest] = []
for version in (version_a, version_b):
manifest = get_manifest(version)
if not manifest:
return None
latest_build = version.latest_successful_build
if not latest_build:
return None
if latest_build.id != manifest.build.id:
outdated = True
manifests.append(manifest)
# pylint: disable=unbalanced-tuple-unpacking
version_a_manifest, version_b_manifest = manifests
files_a = set(version_a_manifest.files.keys())
files_b = set(version_b_manifest.files.keys())
files_added = list(files_a - files_b)
files_deleted = list(files_b - files_a)
files_modified = []
for file_path in files_a & files_b:
file_a = version_a_manifest.files[file_path]
file_b = version_b_manifest.files[file_path]
if file_a.main_content_hash != file_b.main_content_hash:
files_modified.append(file_path)
def sortpath(filename):
"""
Function to use as `key=` argument for `sorted`.
It sorts the file names so that files in shallower directories come first.
However, it doesn't group the results by directory.
Ideally, this should sort file names by hierarchy (shallower directories
first), grouping them by directory and alphabetically. We should update
this function to achieve that goal if we find a simple way to do it.
"""
parts = filename.split("/")
return len(parts), parts
return FileTreeDiff(
added=sorted(files_added, key=sortpath),
deleted=sorted(files_deleted, key=sortpath),
modified=sorted(files_modified, key=sortpath),
outdated=outdated,
) | Get the file tree diff between two versions.
If any of the versions don't have a manifest, return None.
If the latest build of any of the versions is different from the manifest build,
the diff is marked as outdated. The client is responsible for deciding
how to handle this case.
Set operations are used to calculate the added, deleted, and modified files.
To get the modified files, we compare the main content hash of each common file.
If there are no changes between the versions, all lists will be empty. | get_diff | python | readthedocs/readthedocs.org | readthedocs/filetreediff/__init__.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/filetreediff/__init__.py | MIT |
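A hedged usage sketch of the function above; how the two ``Version`` objects are looked up (here via a hypothetical ``project.versions`` queryset) is an assumption:

version_a = project.versions.get(slug="latest")  # hypothetical lookup
version_b = project.versions.get(slug="stable")  # hypothetical lookup
diff = get_diff(version_a, version_b)
if diff is None:
    print("A manifest or successful build is missing for one version")
else:
    print("Added:", diff.added)
    print("Deleted:", diff.deleted)
    print("Modified:", diff.modified)
    print("Outdated:", diff.outdated)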
def get_manifest(version: Version) -> FileTreeDiffManifest | None:
"""
Get the file manifest for a version.
If the manifest file does not exist, return None.
"""
storage_path = version.project.get_storage_path(
type_=MEDIA_TYPE_DIFF,
version_slug=version.slug,
include_file=False,
version_type=version.type,
)
manifest_path = build_media_storage.join(storage_path, MANIFEST_FILE_NAME)
try:
with build_media_storage.open(manifest_path) as manifest_file:
manifest = json.load(manifest_file)
except FileNotFoundError:
return None
return FileTreeDiffManifest.from_dict(manifest) | Get the file manifest for a version.
If the manifest file does not exist, return None. | get_manifest | python | readthedocs/readthedocs.org | readthedocs/filetreediff/__init__.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/filetreediff/__init__.py | MIT |
def from_dict(cls, data: dict) -> "FileTreeDiffManifest":
"""
Create a FileTreeManifest from a dictionary.
The dictionary should follow the same structure as the one returned by
converting the object to a dictionary using the `as_dict` method.
"""
build_id = data["build"]["id"]
files = [
FileTreeDiffFile(path=path, main_content_hash=file["main_content_hash"])
for path, file in data["files"].items()
]
return cls(build_id, files) | Create a FileTreeManifest from a dictionary.
The dictionary should follow the same structure as the one returned by
converting the object to a dictionary using the `as_dict` method. | from_dict | python | readthedocs/readthedocs.org | readthedocs/filetreediff/dataclasses.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/filetreediff/dataclasses.py | MIT |
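The expected dictionary shape can be read off the code above; for example (the hash values are made up):

manifest = FileTreeDiffManifest.from_dict(
    {
        "build": {"id": 1234},
        "files": {
            "index.html": {"main_content_hash": "d3b0738..."},
            "guide/install.html": {"main_content_hash": "c157a79..."},
        },
    }
)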
def as_dict(self) -> dict:
"""Convert the object to a dictionary."""
return asdict(self) | Convert the object to a dictionary. | as_dict | python | readthedocs/readthedocs.org | readthedocs/filetreediff/dataclasses.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/filetreediff/dataclasses.py | MIT |
def normalize_request_payload(request):
"""
Normalize the request body, hopefully to JSON.
This will attempt to return a JSON body, backing down to a string body next.
:param request: HTTP request object
:type request: django.http.HttpRequest
:returns: The normalized request payload
:rtype: dict or str
"""
request_payload = getattr(request, "data", {})
if request.content_type != "application/json":
# Here, request_payload can be a dict or a MergeDict. Probably best to
# normalize everything first
try:
request_payload = dict(list(request_payload.items()))
except AttributeError:
pass
return request_payload | Normalize the request body, hopefully to JSON.
This will attempt to return a JSON body, backing down to a string body next.
:param request: HTTP request object
:type request: django.http.HttpRequest
:returns: The normalized request payload
:rtype: dict or str | normalize_request_payload | python | readthedocs/readthedocs.org | readthedocs/integrations/utils.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/integrations/utils.py | MIT |
def exchanges(self, obj):
"""
Manually make an inline-ish block.
JSONField doesn't do well with fieldsets for whatever reason. This is
just to link to the exchanges.
"""
url = urls.reverse(
"admin:{}_{}_changelist".format(
HttpExchange._meta.app_label,
HttpExchange._meta.model_name,
),
)
return format_html(
'<a href="{}?{}={}">{} HTTP transactions</a>',
url,
"integrations__pk",
obj.pk,
obj.exchanges.count(),
) | Manually make an inline-ish block.
JSONField doesn't do well with fieldsets for whatever reason. This is
just to link to the exchanges. | exchanges | python | readthedocs/readthedocs.org | readthedocs/integrations/admin.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/integrations/admin.py | MIT |
def from_exchange(self, req, resp, related_object, payload=None):
"""
Create object from Django request and response objects.
If an explicit Request ``payload`` is not specified, the payload will be
determined directly from the Request object. This makes a good effort to
normalize the data; however, we don't enforce that the payload is JSON
:param req: Request object to store
:type req: HttpRequest
:param resp: Response object to store
:type resp: HttpResponse
:param related_object: Object to use for generic relation
:param payload: Alternate payload object to store
:type payload: dict
"""
request_payload = payload
if request_payload is None:
request_payload = normalize_request_payload(req)
try:
request_body = json.dumps(request_payload, sort_keys=True)
except TypeError:
request_body = str(request_payload)
# This is the rawest form of request header we have, the WSGI
# headers. HTTP headers are prefixed with `HTTP_`, which we remove,
# and because the keys are all uppercase, we'll normalize them to
# title-cased, hyphen-separated values.
request_headers = {
key[5:].title().replace("_", "-"): str(val)
for (key, val) in list(req.META.items())
if key.startswith("HTTP_")
} # yapf: disable
request_headers["Content-Type"] = req.content_type
# Remove unwanted headers
for filter_rule in self.REQ_FILTER_RULES:
for key in list(request_headers.keys()):
if filter_rule.match(key):
del request_headers[key]
response_payload = resp.data if hasattr(resp, "data") else resp.content
try:
response_body = json.dumps(response_payload, sort_keys=True)
except TypeError:
response_body = str(response_payload)
response_headers = dict(list(resp.items()))
fields = {
"status_code": resp.status_code,
"request_headers": request_headers,
"request_body": request_body,
"response_body": response_body,
"response_headers": response_headers,
}
fields["related_object"] = related_object
obj = self.create(**fields)
self.delete_limit(related_object)
return obj | Create object from Django request and response objects.
If an explicit Request ``payload`` is not specified, the payload will be
determined directly from the Request object. This makes a good effort to
normalize the data; however, we don't enforce that the payload is JSON
:param req: Request object to store
:type req: HttpRequest
:param resp: Response object to store
:type resp: HttpResponse
:param related_object: Object to use for generic relation
:param payload: Alternate payload object to store
:type payload: dict | from_exchange | python | readthedocs/readthedocs.org | readthedocs/integrations/models.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/integrations/models.py | MIT |
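The WSGI header normalization in the method above works as follows; a standalone sketch of the same dict comprehension with hypothetical header values:

wsgi_headers = {
    "HTTP_X_GITHUB_EVENT": "push",
    "HTTP_USER_AGENT": "GitHub-Hookshot/abc123",
    "CONTENT_LENGTH": "42",  # no HTTP_ prefix, so it is skipped
}
request_headers = {
    key[5:].title().replace("_", "-"): str(val)
    for (key, val) in wsgi_headers.items()
    if key.startswith("HTTP_")
}
print(request_headers)
# {'X-Github-Event': 'push', 'User-Agent': 'GitHub-Hookshot/abc123'}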
def from_requests_exchange(self, response, related_object):
"""
Create an exchange object from a ``requests`` response.
:param response: The result from calling request.post() or similar.
:param related_object: Object to use for generic relationship.
"""
request = response.request
# NOTE: we need to cast ``request.headers`` and ``response.headers``
# because it's a ``requests.structures.CaseInsensitiveDict`` which is
# not JSON serializable.
obj = self.create(
related_object=related_object,
request_headers=dict(request.headers) or {},
request_body=request.body or "",
status_code=response.status_code,
response_headers=dict(response.headers),
response_body=response.text,
)
self.delete_limit(related_object)
return obj | Create an exchange object from a ``requests`` response.
:param response: The result from calling request.post() or similar.
:param related_object: Object to use for generic relationship. | from_requests_exchange | python | readthedocs/readthedocs.org | readthedocs/integrations/models.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/integrations/models.py | MIT |
def formatted_json(self, field):
"""Try to return pretty printed and Pygment highlighted code."""
value = getattr(self, field) or ""
try:
if not isinstance(value, dict):
value = json.loads(value)
json_value = json.dumps(value, sort_keys=True, indent=2)
formatter = HtmlFormatter()
html = highlight(json_value, JsonLexer(), formatter)
return mark_safe(html)
except (ValueError, TypeError):
return value | Try to return pretty-printed and Pygments-highlighted code. | formatted_json | python | readthedocs/readthedocs.org | readthedocs/integrations/models.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/integrations/models.py | MIT |
def _get_subclass_replacement(self, original):
"""
Replace model instance on Integration subclasses.
This is based on the ``integration_type`` field, and is used to provide
specific functionality to and integration via a proxy subclass of the
Integration model.
"""
cls_replace = self._get_subclass(original.integration_type)
if cls_replace is None:
return original
new = cls_replace()
for k, v in list(original.__dict__.items()):
new.__dict__[k] = v
return new | Replace model instance on Integration subclasses.
This is based on the ``integration_type`` field, and is used to provide
specific functionality to an integration via a proxy subclass of the
Integration model. | _get_subclass_replacement | python | readthedocs/readthedocs.org | readthedocs/integrations/models.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/integrations/models.py | MIT |
def create(self, **kwargs):
"""
Override of create method to use subclass instance instead.
Instead of using the underlying Integration model to create this
instance, we get the correct subclass to use instead. This allows for
overrides to ``save`` and other model functions on object creation.
"""
model_cls = self._get_subclass(kwargs.get("integration_type"))
if model_cls is None:
model_cls = self.model
obj = model_cls(**kwargs)
self._for_write = True
obj.save(force_insert=True, using=self.db)
return obj | Override of create method to use subclass instance instead.
Instead of using the underlying Integration model to create this
instance, we get the correct subclass to use instead. This allows for
overrides to ``save`` and other model functions on object creation. | create | python | readthedocs/readthedocs.org | readthedocs/integrations/models.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/integrations/models.py | MIT |
def save(self, *args, **kwargs):
"""Ensure model has token data before saving."""
try:
token = self.provider_data.get("token")
except (AttributeError, TypeError):
token = None
finally:
if token is None:
token = default_token()
self.provider_data = {"token": token}
super().save(*args, **kwargs) | Ensure model has token data before saving. | save | python | readthedocs/readthedocs.org | readthedocs/integrations/models.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/integrations/models.py | MIT |
def token(self):
"""Get or generate a secret token for authentication."""
return self.provider_data.get("token") | Get or generate a secret token for authentication. | token | python | readthedocs/readthedocs.org | readthedocs/integrations/models.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/integrations/models.py | MIT |
def forwards_func(apps, schema_editor):
"""Old models with provider_data='' are being fetched as str instead of json."""
Integration = apps.get_model("integrations", "Integration")
Integration.objects.filter(
provider_data="",
).update(
provider_data={},
) | Old models with provider_data='' are being fetched as str instead of json. | forwards_func | python | readthedocs/readthedocs.org | readthedocs/integrations/migrations/0007_update-provider-data.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/integrations/migrations/0007_update-provider-data.py | MIT |
def get_readthedocs_app_path():
"""Return the absolute path of the ``readthedocs`` app."""
try:
import readthedocs
path = readthedocs.__path__[0]
except (IndexError, ImportError):
raise Exception('Unable to find "readthedocs" path module')
return path | Return the absolute path of the ``readthedocs`` app. | get_readthedocs_app_path | python | readthedocs/readthedocs.org | readthedocs/rtd_tests/utils.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/rtd_tests/utils.py | MIT |
def add_git_submodule_without_cloning(directory, submodule, url):
"""
Add a submodule without cloning it.
We write directly to the git index, more details in:
https://stackoverflow.com/a/37378302/2187091
:param directory: The directory where the git repo is
:type directory: str
:param submodule: The name of the submodule to be created
:type submodule: str
:param url: The url where the submodule points to
:type url: str
"""
env = environ.copy()
env["GIT_DIR"] = pjoin(directory, ".git")
chdir(directory)
mkdir(pjoin(directory, submodule))
gitmodules_path = pjoin(directory, ".gitmodules")
with open(gitmodules_path, "w+") as fh:
content = textwrap.dedent(
"""
[submodule "{submodule}"]
path = {submodule}
url = {url}
"""
)
fh.write(content.format(submodule=submodule, url=url))
check_output(
[
"git",
"update-index",
"--add",
"--cacheinfo",
"160000",
"233febf4846d7a0aeb95b6c28962e06e21d13688",
submodule,
],
env=env,
) | Add a submodule without cloning it.
We write directly to the git index, more details in:
https://stackoverflow.com/a/37378302/2187091
:param directory: The directory where the git repo is
:type directory: str
:param submodule: The name of the submodule to be created
:type submodule: str
:param url: The url where the submodule points to
:type url: str | add_git_submodule_without_cloning | python | readthedocs/readthedocs.org | readthedocs/rtd_tests/utils.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/rtd_tests/utils.py | MIT |
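A hedged usage sketch; ``repo_dir`` is assumed to be an existing git checkout created earlier in a test:

add_git_submodule_without_cloning(
    directory=repo_dir,  # hypothetical pre-existing checkout
    submodule="foo",
    url="https://github.com/example/foo.git",
)
# Afterwards, the index records `foo` as a gitlink even though the
# submodule's contents were never cloned.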
def request(self, method, *args, **kwargs):
"""
Perform request from factory.
:param method: Request method as string
:returns: Request instance
Several additional keyword arguments can be passed in:
user
User instance to use for the request, will default to an
:py:class:`AnonymousUser` instance otherwise.
session
Dictionary to instantiate the session handler with.
Other keyword arguments are passed into the request method
"""
factory = RequestFactory()
fn = getattr(factory, method)
request = fn(*args, **kwargs)
# Mock user, session, and messages
request.user = kwargs.pop("user", AnonymousUser())
session = kwargs.pop("session", {})
middleware = SessionMiddleware(lambda request: HttpResponse())
middleware.process_request(request)
request.session.update(session)
request.session.save()
messages = FallbackStorage(request)
setattr(request, "_messages", messages)
return request | Perform request from factory.
:param method: Request method as string
:returns: Request instance
Several additional keyword arguments can be passed in:
user
User instance to use for the request, will default to an
:py:class:`AnonymousUser` instance otherwise.
session
Dictionary to instantiate the session handler with.
Other keyword arguments are passed into the request method | request | python | readthedocs/readthedocs.org | readthedocs/rtd_tests/base.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/rtd_tests/base.py | MIT |
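For example, inside a test case that mixes in this helper (the URL is hypothetical):

from django.contrib.auth.models import User

user = User.objects.create_user(username="tester")
request = self.request(
    "get",
    "/dashboard/",  # hypothetical URL
    user=user,
    session={"some-key": "some-value"},
)
assert request.user == user
assert request.session["some-key"] == "some-value"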
def post_step(self, step, **kwargs):
"""
Post step form data to `url`, using supplementary `kwargs`.
Use data from `self.step_data` to build the dict passed into the form.
"""
if not self.url:
raise Exception("Missing wizard URL")
# Default to an empty payload in case there is no data for this step
data = {}
try:
data = {
"{}-{}".format(step, k): v
for (k, v) in list(self.step_data[step].items())
}
except KeyError:
pass
# Update with prefixed step data
data["{}-current_step".format(self.wizard_class_slug)] = step
view = self.wizard_class.as_view()
req = self.request("post", self.url, data=data, **kwargs)
resp = view(req)
self.assertIsNotNone(resp)
return resp | Post step form data to `url`, using supplementary `kwargs`.
Use data from `self.step_data` to build the dict passed into the form. | post_step | python | readthedocs/readthedocs.org | readthedocs/rtd_tests/base.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/rtd_tests/base.py | MIT |
def assertWizardResponse(self, response, step=None): # noqa
"""Assert successful wizard response."""
# This is the last form
if step is None:
try:
wizard = response.context_data["wizard"]
self.assertEqual(wizard["form"].errors, {})
except (TypeError, KeyError):
pass
self.assertEqual(response.status_code, 302)
else:
self.assertIn("wizard", response.context_data)
wizard = response.context_data["wizard"]
try:
self.assertEqual(wizard["form"].errors, {})
except AssertionError:
self.assertIsNone(wizard["form"].errors)
self.assertEqual(response.status_code, 200)
self.assertIsNotNone(response.context_data["wizard"])
self.assertEqual(wizard["steps"].current, step)
response.render()
self.assertContains(
response,
'name="{}-current_step"'.format(self.wizard_class_slug),
) | Assert successful wizard response. | assertWizardResponse | python | readthedocs/readthedocs.org | readthedocs/rtd_tests/base.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/rtd_tests/base.py | MIT |
def assertWizardFailure(self, response, field, match=None): # noqa
"""
Assert field threw a validation error.
response
Client response object
field
Field name to test for validation error
match
Regex match for field validation error
"""
self.assertEqual(response.status_code, 200)
self.assertIn("wizard", response.context_data)
self.assertIn("form", response.context_data["wizard"])
self.assertIn(field, response.context_data["wizard"]["form"].errors)
if match is not None:
error = response.context_data["wizard"]["form"].errors[field]
self.assertRegex(str(error), match) # noqa | Assert field threw a validation error.
response
Client response object
field
Field name to test for validation error
match
Regex match for field validation error | assertWizardFailure | python | readthedocs/readthedocs.org | readthedocs/rtd_tests/base.py | https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/rtd_tests/base.py | MIT |