code
stringlengths
26
870k
docstring
stringlengths
1
65.6k
func_name
stringlengths
1
194
language
stringclasses
1 value
repo
stringlengths
8
68
path
stringlengths
5
194
url
stringlengths
46
254
license
stringclasses
4 values
def email_pending_custom_domains(number_of_emails=3):
    """
    Send a total of `number_of_emails` to a user about a pending custom domain.

    The emails are sent exponentially until the last day; for 30 days this
    would be: 7, 15, 30.
    """
    now = timezone.now().date()
    validation_period = settings.RTD_CUSTOM_DOMAINS_VALIDATION_PERIOD
    # Start dates that are validation_period // 2**n days old — these are the
    # domains whose validation window is 1/2, 1/4, ... of the way through.
    dates = [
        now - timezone.timedelta(days=validation_period // (2**n))
        for n in range(number_of_emails)
    ]
    queryset = Domain.objects.pending(include_recently_expired=True).filter(
        validation_process_start__date__in=dates
    )
    for domain in queryset:
        # NOTE: this site notification was attached to every single user.
        # The new behavior is to attach it to the project.
        #
        # We send an email notification to all the project's admins.
        Notification.objects.add(
            message_id=MESSAGE_DOMAIN_VALIDATION_PENDING,
            attached_to=domain.project,
            dismissable=True,
            format_values={
                "domain": domain.domain,
                "domain_url": reverse(
                    "projects_domains_edit", args=[domain.project.slug, domain.pk]
                ),
            },
        )
        for user in AdminPermission.admins(domain.project):
            notification = PendingCustomDomainValidation(
                context_object=domain,
                user=user,
            )
            notification.send()
Send a total of `number_of_emails` to a user about a pending custom domain. The emails are sent exponentially until the last day; for 30 days this would be: 7, 15, 30.
email_pending_custom_domains
python
readthedocs/readthedocs.org
readthedocs/domains/tasks.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/domains/tasks.py
MIT
def log_logged_in(sender, request, user, **kwargs):
    """Record a successful login in the audit log."""
    # pylint: disable=unused-argument
    AuditLog.objects.new(action=AuditLog.AUTHN, user=user, request=request)
Log when a user has logged in.
log_logged_in
python
readthedocs/readthedocs.org
readthedocs/audit/signals.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/audit/signals.py
MIT
def log_logged_out(sender, request, user, **kwargs):
    """Record a logout in the audit log."""
    # pylint: disable=unused-argument
    # Django can fire this signal without a user (anonymous session); skip those.
    if user:
        AuditLog.objects.new(action=AuditLog.LOGOUT, user=user, request=request)
Log when a user has logged out.
log_logged_out
python
readthedocs/readthedocs.org
readthedocs/audit/signals.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/audit/signals.py
MIT
def log_login_failed(sender, credentials, request, **kwargs):
    """Record a failed login attempt in the audit log."""
    # pylint: disable=unused-argument
    username = credentials.get("username")
    # The submitted identifier may be either a username or an email address;
    # try to resolve it to a real user so the log entry can be linked to one.
    matching_user = User.objects.filter(Q(username=username) | Q(email=username)).first()
    AuditLog.objects.new(
        action=AuditLog.AUTHN_FAILURE,
        user=matching_user,
        log_user_username=username,
        request=request,
    )
Log when a user has failed to log in.
log_login_failed
python
readthedocs/readthedocs.org
readthedocs/audit/signals.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/audit/signals.py
MIT
def delete_old_personal_audit_logs(days=None):
    """
    Delete personal security logs older than `days`.

    If `days` isn't given, default to ``RTD_AUDITLOGS_DEFAULT_RETENTION_DAYS``.

    We delete logs that aren't related to an organization; there are tasks
    in .com to delete those according to their plan.
    """
    retention_days = days if days else settings.RTD_AUDITLOGS_DEFAULT_RETENTION_DAYS
    cutoff = timezone.now() - timezone.timedelta(days=retention_days)
    stale_logs = AuditLog.objects.filter(
        log_organization_id__isnull=True,
        created__lt=cutoff,
    )
    log.info("Deleting old audit logs.", days=retention_days, count=stale_logs.count())
    stale_logs.delete()
Delete personal security logs older than `days`. If `days` isn't given, default to ``RTD_AUDITLOGS_DEFAULT_RETENTION_DAYS``. We delete logs that aren't related to an organization, there are tasks in .com to delete those according to their plan.
delete_old_personal_audit_logs
python
readthedocs/readthedocs.org
readthedocs/audit/tasks.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/audit/tasks.py
MIT
def new(self, action, user=None, request=None, **kwargs):
    """
    Create an audit log for `action`.

    If user or request are given, other fields will be auto-populated from
    that information.

    :param action: one of the ``AuditLog`` action constants.
    :param user: user performing the action; anonymous users are stored as None.
    :param request: request that triggered the action; used to fill ip,
        browser, resource, auth backend and (possibly) the project.
    :param kwargs: extra model fields; explicit values here win over the
        request-derived ones filled with ``setdefault``.
    :raises TypeError: if the action requires a user/request/project and it
        wasn't provided.
    """
    # These actions are tied to a concrete authenticated interaction,
    # so both a user and a request must be present.
    actions_requiring_user = (
        AuditLog.PAGEVIEW,
        AuditLog.DOWNLOAD,
        AuditLog.AUTHN,
        AuditLog.LOGOUT,
        AuditLog.INVITATION_SENT,
        AuditLog.INVITATION_ACCEPTED,
        AuditLog.INVITATION_REVOKED,
    )
    if action in actions_requiring_user and (not user or not request):
        raise TypeError(f"A user and a request are required for the {action} action.")
    if action in (AuditLog.PAGEVIEW, AuditLog.DOWNLOAD) and "project" not in kwargs:
        raise TypeError(f"A project is required for the {action} action.")

    # Don't save anonymous users.
    if user and user.is_anonymous:
        user = None

    if request:
        kwargs["ip"] = get_client_ip(request)
        kwargs["browser"] = request.headers.get("User-Agent")
        # setdefault: keep values the caller passed explicitly.
        kwargs.setdefault("resource", request.path_info)
        kwargs.setdefault("auth_backend", get_auth_backend(request))

        # Fill the project from the request if available.
        # This is frequently on actions generated from a subdomain.
        unresolved_domain = getattr(request, "unresolved_domain", None)
        if "project" not in kwargs and unresolved_domain:
            kwargs["project"] = unresolved_domain.project

    return self.create(
        user=user,
        action=action,
        **kwargs,
    )
Create an audit log for `action`. If user or request are given, other fields will be auto-populated from that information.
new
python
readthedocs/readthedocs.org
readthedocs/audit/models.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/audit/models.py
MIT
def auth_backend_display(self):
    """
    Get a string representation for backends that aren't part of the normal login.

    .. note::

       The backends listed here are implemented on .com only.
    """
    backend_path = self.auth_backend or ""
    special_backends = {
        "TemporaryAccessTokenBackend": _("shared link"),
        "TemporaryAccessPasswordBackend": _("shared password"),
    }
    # Match by substring: the stored value is the backend's full dotted path.
    return next(
        (label for name, label in special_backends.items() if name in backend_path),
        "",
    )
Get a string representation for backends that aren't part of the normal login. .. note:: The backends listed here are implemented on .com only.
auth_backend_display
python
readthedocs/readthedocs.org
readthedocs/audit/models.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/audit/models.py
MIT
def forwards_func(apps, schema_editor):
    """
    Post migration ip field from GenericIPAddressField to CharField.

    GenericIPAddressField saves the IP with ``{ip}/{range}`` format. We don't
    need to show the range to users, so keep only the address part.
    """
    AuditLog = apps.get_model("audit", "AuditLog")
    for auditlog in AuditLog.objects.all().iterator():
        ip = auditlog.ip
        if not ip:
            # Nothing to normalize; skip the row instead of issuing a
            # useless UPDATE with the same (empty) value.
            continue
        auditlog.ip = ip.split("/", maxsplit=1)[0]
        auditlog.save()
Post migration ip field from GenericIPAddressField to CharField. GenericIPAddressField saves the IP with ``{ip}/{range}`` format. We don't need to show the range to users.
forwards_func
python
readthedocs/readthedocs.org
readthedocs/audit/migrations/0005_migrate_ip_field_values.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/audit/migrations/0005_migrate_ip_field_values.py
MIT
def redirect_to_https(request, project):
    """
    Shortcut to get a https redirect.

    :param request: Request object.
    :param project: The current project being served
    """
    return canonical_redirect(
        request=request,
        project=project,
        redirect_type=RedirectType.http_to_https,
    )
Shortcut to get a https redirect. :param request: Request object. :param project: The current project being served
redirect_to_https
python
readthedocs/readthedocs.org
readthedocs/proxito/redirects.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/redirects.py
MIT
def canonical_redirect(request, project, redirect_type, external_version_slug=None):
    """
    Return a canonical redirect response.

    All redirects are cached, since the final URL will be checked for authorization.

    The following cases are covered:

    - Redirect a custom domain from http to https
      http://project.rtd.io/ -> https://project.rtd.io/
    - Redirect a domain to a canonical domain (http or https).
      http://project.rtd.io/ -> https://docs.test.com/
      http://project.rtd.io/foo/bar/ -> https://docs.test.com/foo/bar/
    - Redirect from a subproject domain to the main domain
      https://subproject.rtd.io/en/latest/foo -> https://main.rtd.io/projects/subproject/en/latest/foo  # noqa
      https://subproject.rtd.io/en/latest/foo -> https://docs.test.com/projects/subproject/en/latest/foo  # noqa

    Raises ``InfiniteRedirectException`` if the redirect is the same as the current URL.

    :param request: Request object.
    :param project: The current project being served
    :param redirect_type: The type of canonical redirect (https, canonical-cname, subproject-main-domain)
    :param external_version_slug: The version slug if the request is from a pull request preview.
    """
    from_url = request.build_absolute_uri()
    parsed_from = urlparse(from_url)

    if redirect_type == RedirectType.http_to_https:
        # We only need to change the protocol.
        to = parsed_from._replace(scheme="https").geturl()
    elif redirect_type == RedirectType.to_canonical_domain:
        # We need to change the domain and protocol.
        canonical_domain = project.get_canonical_custom_domain()
        protocol = "https" if canonical_domain.https else "http"
        to = parsed_from._replace(scheme=protocol, netloc=canonical_domain.domain).geturl()
    elif redirect_type == RedirectType.subproject_to_main_domain:
        # We need to get the subproject root in the domain of the main
        # project, and append the current path.
        project_doc_prefix = Resolver().get_subproject_url_prefix(
            project=project,
            external_version_slug=external_version_slug,
        )
        parsed_doc_prefix = urlparse(project_doc_prefix)
        to = parsed_doc_prefix._replace(
            path=unsafe_join_url_path(parsed_doc_prefix.path, parsed_from.path),
            query=parsed_from.query,
        ).geturl()
    else:
        raise NotImplementedError

    if from_url == to:
        # check that we do have a response and avoid infinite redirect
        log.debug(
            "Infinite Redirect: FROM URL is the same than TO URL.",
            url=to,
        )
        raise InfiniteRedirectException()

    log.debug("Canonical Redirect.", host=request.get_host(), from_url=from_url, to_url=to)
    resp = HttpResponseRedirect(to)
    # Expose the redirect type so tests and operators can tell why it fired.
    resp["X-RTD-Redirect"] = redirect_type.name
    cache_response(resp, cache_tags=[project.slug])
    return resp
Return a canonical redirect response. All redirects are cached, since the final URL will be checked for authorization. The following cases are covered: - Redirect a custom domain from http to https http://project.rtd.io/ -> https://project.rtd.io/ - Redirect a domain to a canonical domain (http or https). http://project.rtd.io/ -> https://docs.test.com/ http://project.rtd.io/foo/bar/ -> https://docs.test.com/foo/bar/ - Redirect from a subproject domain to the main domain https://subproject.rtd.io/en/latest/foo -> https://main.rtd.io/projects/subproject/en/latest/foo # noqa https://subproject.rtd.io/en/latest/foo -> https://docs.test.com/projects/subproject/en/latest/foo # noqa Raises ``InfiniteRedirectException`` if the redirect is the same as the current URL. :param request: Request object. :param project: The current project being served :param redirect_type: The type of canonical redirect (https, canonical-cname, subproject-main-domain) :param external_version_slug: The version slug if the request is from a pull request preview.
canonical_redirect
python
readthedocs/readthedocs.org
readthedocs/proxito/redirects.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/redirects.py
MIT
def __init__(self, http_status=404, path_not_found=None, **kwargs):
    """
    Constructor that all subclasses should call.

    :param kwargs: all kwargs are added as page context for rendering the 404 template
    :param http_status: 404 view should respect this and set the HTTP status.
    :param path_not_found: Inform the template and 404 view about a different path from request.path
    """
    # Keep everything the 404 view/template needs on the exception instance.
    self.kwargs = kwargs
    self.path_not_found = path_not_found
    self.http_status = http_status
Constructor that all subclasses should call. :param kwargs: all kwargs are added as page context for rendering the 404 template :param http_status: 404 view should respect this and set the HTTP status. :param path_not_found: Inform the template and 404 view about a different path from request.path
__init__
python
readthedocs/readthedocs.org
readthedocs/proxito/exceptions.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/exceptions.py
MIT
def __init__(self, domain, **kwargs):
    """
    Raised when DNS for a custom domain is bad.

    :param domain: The domain for which DNS is misconfigured.
    :param kwargs: Context dictionary of the rendered template
    """
    # Inject the domain into the template context, overriding any duplicate key.
    super().__init__(**{**kwargs, "domain": domain})
Raised when DNS for a custom domain is bad. :param domain: The domain for which DNS is misconfigured. :param kwargs:
__init__
python
readthedocs/readthedocs.org
readthedocs/proxito/exceptions.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/exceptions.py
MIT
def __init__(self, domain, **kwargs):
    """
    Raised when a project wasn't found for a given domain.

    :param domain: The domain (custom and hosted) that was not found.
    :param kwargs: Context dictionary of the rendered template
    """
    # Inject the domain into the template context, overriding any duplicate key.
    super().__init__(**{**kwargs, "domain": domain})
Raised when a project wasn't found for a given domain. :param domain: The domain (custom and hosted) that was not found. :param kwargs:
__init__
python
readthedocs/readthedocs.org
readthedocs/proxito/exceptions.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/exceptions.py
MIT
def __init__(self, project, **kwargs):
    """
    Raised if a subproject was not found.

    :param project: The project in which the subproject could not be found
    :param kwargs: Context dictionary of the rendered template
    """
    # Inject the project into the template context, overriding any duplicate key.
    super().__init__(**{**kwargs, "project": project})
Raised if a subproject was not found. :param project: The project in which the subproject could not be found :param kwargs: Context dictionary of the rendered template
__init__
python
readthedocs/readthedocs.org
readthedocs/proxito/exceptions.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/exceptions.py
MIT
def __init__(self, project, **kwargs):
    """
    Raised if a page inside an existing project was not found.

    :param project: The project in which the file could not be found
    :param kwargs: Context dictionary of the rendered template
    """
    # Inject the project into the template context, overriding any duplicate key.
    super().__init__(**{**kwargs, "project": project})
Raised if a page inside an existing project was not found. :param project: The project in which the file could not be found :param kwargs: Context dictionary of the rendered template
__init__
python
readthedocs/readthedocs.org
readthedocs/proxito/exceptions.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/exceptions.py
MIT
def __init__(self, project, **kwargs):
    """
    Raised if a translation of a project was not found.

    :param project: The project in which the translation could not be found
    :param kwargs: Context dictionary of the rendered template
    """
    # Inject the project into the template context, overriding any duplicate key.
    super().__init__(**{**kwargs, "project": project})
Raised if a translation of a project was not found. :param project: The project in which the translation could not be found :param kwargs: Context dictionary of the rendered template
__init__
python
readthedocs/readthedocs.org
readthedocs/proxito/exceptions.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/exceptions.py
MIT
def __init__(self, project, **kwargs):
    """
    Raised if a version was not found.

    :param project: The project in which the version could not be found
    :param kwargs: Context dictionary of the rendered template
    """
    # Inject the project into the template context, overriding any duplicate key.
    super().__init__(**{**kwargs, "project": project})
Raised if a version was not found. :param project: The project in which the version could not be found :param kwargs: Context dictionary of the rendered template
__init__
python
readthedocs/readthedocs.org
readthedocs/proxito/exceptions.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/exceptions.py
MIT
def add_cache_tags(response, cache_tags):
    """
    Add cache tags to the response.

    New cache tags will be appended to the existing ones.

    :param response: The response to add cache tags to.
    :param cache_tags: A list of cache tags to add to the response.
    """
    existing_tag = response.headers.get(CACHE_TAG_HEADER)
    # Work on a copy so the caller's list isn't mutated; any previously set
    # tag value is preserved at the end of the header.
    all_tags = list(cache_tags)
    if existing_tag:
        all_tags.append(existing_tag)
    response.headers[CACHE_TAG_HEADER] = ",".join(all_tags)
Add cache tags to the response. New cache tags will be appended to the existing ones. :param response: The response to add cache tags to. :param cache_tags: A list of cache tags to add to the response.
add_cache_tags
python
readthedocs/readthedocs.org
readthedocs/proxito/cache.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/cache.py
MIT
def cache_response(response, cache_tags=None, force=True):
    """
    Cache the response at the CDN level.

    We add the ``Cache-Tag`` header to the response, to be able to purge the
    cache by a given tag. And we set the ``CDN-Cache-Control: public`` header
    to cache the response at the CDN level only. This doesn't affect caching
    at the browser level (``Cache-Control``).

    See:

    - https://developers.cloudflare.com/cache/how-to/purge-cache/#cache-tags-enterprise-only
    - https://developers.cloudflare.com/cache/about/cdn-cache-control.

    :param response: The response to cache.
    :param cache_tags: A list of cache tags to add to the response.
    :param force: If ``True``, the header will be set to public even if it
     was already set to private.
    """
    if cache_tags:
        add_cache_tags(response, cache_tags)
    # Respect an already-present CDN cache header unless the caller forces it.
    if not force and CDN_CACHE_CONTROL_HEADER in response.headers:
        return
    response.headers[CDN_CACHE_CONTROL_HEADER] = "public"
Cache the response at the CDN level. We add the ``Cache-Tag`` header to the response, to be able to purge the cache by a given tag. And we set the ``CDN-Cache-Control: public`` header to cache the response at the CDN level only. This doesn't affect caching at the browser level (``Cache-Control``). See: - https://developers.cloudflare.com/cache/how-to/purge-cache/#cache-tags-enterprise-only - https://developers.cloudflare.com/cache/about/cdn-cache-control. :param response: The response to cache. :param cache_tags: A list of cache tags to add to the response. :param force: If ``True``, the header will be set to public even if it was already set to private.
cache_response
python
readthedocs/readthedocs.org
readthedocs/proxito/cache.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/cache.py
MIT
def private_response(response, force=True):
    """
    Prevent the response from being cached at the CDN level.

    We do this by explicitly setting the ``CDN-Cache-Control`` header to private.

    :param response: The response to mark as private.
    :param force: If ``True``, the header will be set to private even if it
     was already set to public.
    """
    # Respect an already-present CDN cache header unless the caller forces it.
    if not force and CDN_CACHE_CONTROL_HEADER in response.headers:
        return
    response.headers[CDN_CACHE_CONTROL_HEADER] = "private"
Prevent the response from being cached at the CDN level. We do this by explicitly setting the ``CDN-Cache-Control`` header to private. :param response: The response to mark as private. :param force: If ``True``, the header will be set to private even if it was already set to public.
private_response
python
readthedocs/readthedocs.org
readthedocs/proxito/cache.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/cache.py
MIT
def add_proxito_headers(self, request, response):
    """Add debugging and cache headers to proxito responses."""
    project_slug = getattr(request, "path_project_slug", "")
    version_slug = getattr(request, "path_version_slug", "")
    proxito_path = getattr(response, "proxito_path", "")

    response["X-RTD-Domain"] = request.get_host()
    response["X-RTD-Project"] = project_slug
    if version_slug:
        response["X-RTD-Version"] = version_slug
    if proxito_path:
        response["X-RTD-Path"] = proxito_path

    # Include the project & project-version so we can do larger purges if needed
    cache_tags = []
    if project_slug:
        cache_tags.append(project_slug)
    if version_slug:
        cache_tags.append(get_cache_tag(project_slug, version_slug))
    if cache_tags:
        add_cache_tags(response, cache_tags)

    unresolved_domain = request.unresolved_domain
    if unresolved_domain:
        response["X-RTD-Project-Method"] = unresolved_domain.source.name
        response["X-RTD-Version-Method"] = (
            "domain" if unresolved_domain.is_from_external_domain else "path"
        )
Add debugging and cache headers to proxito responses.
add_proxito_headers
python
readthedocs/readthedocs.org
readthedocs/proxito/middleware.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/middleware.py
MIT
def add_user_headers(self, request, response):
    """
    Set specific HTTP headers requested by the user.

    The headers added come from ``projects.models.HTTPHeader`` associated
    with the ``Domain`` object.
    """
    unresolved_domain = request.unresolved_domain
    if unresolved_domain and unresolved_domain.is_from_custom_domain:
        response_headers = [header.lower() for header in response.headers.keys()]
        domain = unresolved_domain.domain
        for http_header in domain.http_headers.all():
            # Check the secure-only condition first: previously this ran after
            # logging, so headers that were about to be skipped were still
            # logged as "Adding"/"Overriding".
            if http_header.only_if_secure_request and not request.is_secure():
                continue
            if http_header.name.lower() in response_headers:
                log.error(
                    "Overriding an existing response HTTP header.",
                    http_header=http_header.name,
                    domain=domain.domain,
                )
            log.debug(
                "Adding custom response HTTP header.",
                http_header=http_header.name,
                domain=domain.domain,
            )
            # HTTP headers here are limited to
            # ``HTTPHeader.HEADERS_CHOICES`` since adding arbitrary HTTP
            # headers is potentially dangerous
            response[http_header.name] = http_header.value
Set specific HTTP headers requested by the user. The headers added come from ``projects.models.HTTPHeader`` associated with the ``Domain`` object.
add_user_headers
python
readthedocs/readthedocs.org
readthedocs/proxito/middleware.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/middleware.py
MIT
def add_hsts_headers(self, request, response):
    """
    Set the Strict-Transport-Security (HSTS) header for docs sites.

    * For the public domain, set the HSTS header if settings.PUBLIC_DOMAIN_USES_HTTPS
    * For custom domains, check the HSTS values on the Domain object.
      The domain object should be saved already in request.domain.
    """
    if not request.is_secure():
        # Only set the HSTS header if the request is over HTTPS
        return response

    hsts_header_values = []
    unresolved_domain = request.unresolved_domain
    if (
        settings.PUBLIC_DOMAIN_USES_HTTPS
        and unresolved_domain
        and unresolved_domain.is_from_public_domain
    ):
        # Fixed, strict policy for our own public domain.
        hsts_header_values = [
            "max-age=31536000",
            "includeSubDomains",
            "preload",
        ]
    elif unresolved_domain and unresolved_domain.is_from_custom_domain:
        domain = unresolved_domain.domain
        # TODO: migrate Domains with HSTS set using these fields to
        # ``HTTPHeader`` and remove this chunk of code from here.
        if domain.hsts_max_age:
            hsts_header_values.append(f"max-age={domain.hsts_max_age}")
            # These other options don't make sense without max_age > 0
            if domain.hsts_include_subdomains:
                hsts_header_values.append("includeSubDomains")
            if domain.hsts_preload:
                hsts_header_values.append("preload")

    if hsts_header_values:
        # See https://tools.ietf.org/html/rfc6797
        response["Strict-Transport-Security"] = "; ".join(hsts_header_values)
Set the Strict-Transport-Security (HSTS) header for docs sites. * For the public domain, set the HSTS header if settings.PUBLIC_DOMAIN_USES_HTTPS * For custom domains, check the HSTS values on the Domain object. The domain object should be saved already in request.domain.
add_hsts_headers
python
readthedocs/readthedocs.org
readthedocs/proxito/middleware.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/middleware.py
MIT
def add_cache_headers(self, request, response):
    """
    Add Cache-Control headers.

    If the `CDN-Cache-Control` header isn't already present, set the cache
    level to public or private, depending if we allow private repos or not.
    Or if the request was from the `X-RTD-Slug` header, we don't cache the
    response, since we could be caching a response in another domain.

    We use ``CDN-Cache-Control``, to control caching at the CDN level only.
    This doesn't affect caching at the browser level (``Cache-Control``).

    See https://developers.cloudflare.com/cache/about/cdn-cache-control.
    """
    unresolved_domain = request.unresolved_domain
    # Never trust projects resolving from the X-RTD-Slug header,
    # we don't want to cache their content on domains from other
    # projects, see GHSA-mp38-vprc-7hf5.
    if unresolved_domain and unresolved_domain.is_from_http_header:
        private_response(response, force=True)
        # SECURITY: Return early, we never want to cache this response.
        return

    # Mark the response as private or cache it, if it hasn't been marked as so already.
    apply_cache_policy = (
        private_response if settings.ALLOW_PRIVATE_REPOS else cache_response
    )
    apply_cache_policy(response, force=False)
Add Cache-Control headers. If the `CDN-Cache-Control` header isn't already present, set the cache level to public or private, depending if we allow private repos or not. Or if the request was from the `X-RTD-Slug` header, we don't cache the response, since we could be caching a response in another domain. We use ``CDN-Cache-Control``, to control caching at the CDN level only. This doesn't affect caching at the browser level (``Cache-Control``). See https://developers.cloudflare.com/cache/about/cdn-cache-control.
add_cache_headers
python
readthedocs/readthedocs.org
readthedocs/proxito/middleware.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/middleware.py
MIT
def _set_request_attributes(self, request, unresolved_domain):
    """
    Set attributes in the request from the unresolved domain.

    - Set ``request.unresolved_domain`` to the unresolved domain.
    """
    request.unresolved_domain = unresolved_domain
Set attributes in the request from the unresolved domain. - Set ``request.unresolved_domain`` to the unresolved domain.
_set_request_attributes
python
readthedocs/readthedocs.org
readthedocs/proxito/middleware.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/middleware.py
MIT
def add_hosting_integrations_headers(self, request, response):
    """
    Add HTTP headers to communicate to Cloudflare Workers.

    We have configured Cloudflare Workers to inject the addons and remove
    the old flyout integration based on HTTP headers. This method uses two
    different headers for these purposes:

    - ``X-RTD-Force-Addons``: inject ``readthedocs-addons.js`` and remove
      old flyout integration (via ``readthedocs-doc-embed.js``). Enabled on
      all projects by default starting on Oct 7, 2024.
    """
    project_slug = getattr(request, "path_project_slug", "")
    if not project_slug:
        return
    # Flattened from a redundant `addons = False` sentinel plus two
    # nested ifs; behavior is unchanged.
    addons = AddonsConfig.objects.filter(project__slug=project_slug).first()
    if addons and addons.enabled:
        response["X-RTD-Force-Addons"] = "true"
Add HTTP headers to communicate to Cloudflare Workers. We have configured Cloudflare Workers to inject the addons and remove the old flyout integration based on HTTP headers. This method uses two different headers for these purposes: - ``X-RTD-Force-Addons``: inject ``readthedocs-addons.js`` and remove old flyout integration (via ``readthedocs-doc-embed.js``). Enabled on all projects by default starting on Oct 7, 2024.
add_hosting_integrations_headers
python
readthedocs/readthedocs.org
readthedocs/proxito/middleware.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/middleware.py
MIT
def add_cors_headers(self, request, response): """ Add CORS headers only to files from docs. DocDiff addons requires making a request from ``RTD_EXTERNAL_VERSION_DOMAIN`` to ``PUBLIC_DOMAIN`` to be able to compare both DOMs and show the visual differences. This request needs ``Access-Control-Allow-Origin`` HTTP headers to be accepted by browsers. However, we cannot allow passing credentials, since we don't want cross-origin requests to be able to access private versions. We set this header to `*`, we don't care about the origin of the request. And we don't have the need nor want to allow passing credentials from cross-origin requests. See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Origin. """ # TODO: se should add these headers to files from docs only, # proxied APIs and other endpoints should not have CORS headers. # These attributes aren't currently set for proxied APIs, but we shuold # find a better way to do this. project_slug = getattr(request, "path_project_slug", "") version_slug = getattr(request, "path_version_slug", "") if project_slug and version_slug: response.headers[ACCESS_CONTROL_ALLOW_ORIGIN] = "*" response.headers[ACCESS_CONTROL_ALLOW_METHODS] = "HEAD, OPTIONS, GET" return response
Add CORS headers only to files from docs. DocDiff addons requires making a request from ``RTD_EXTERNAL_VERSION_DOMAIN`` to ``PUBLIC_DOMAIN`` to be able to compare both DOMs and show the visual differences. This request needs ``Access-Control-Allow-Origin`` HTTP headers to be accepted by browsers. However, we cannot allow passing credentials, since we don't want cross-origin requests to be able to access private versions. We set this header to `*`, we don't care about the origin of the request. And we don't have the need nor want to allow passing credentials from cross-origin requests. See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Origin.
add_cors_headers
python
readthedocs/readthedocs.org
readthedocs/proxito/middleware.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/middleware.py
MIT
def _get_https_redirect(self, request):
    """
    Get a redirect response if the request should be redirected to HTTPS.

    A request should be redirected to HTTPS if any of the following conditions are met:

    - It's from a custom domain and the domain has HTTPS enabled.
    - It's from a public domain, and the public domain uses HTTPS.

    :returns: a redirect response, or ``None`` if no redirect applies.
    """
    if request.is_secure():
        # The request is already HTTPS, so we skip redirecting it.
        return None

    unresolved_domain = request.unresolved_domain

    # HTTPS redirect for custom domains.
    if unresolved_domain.is_from_custom_domain:
        domain = unresolved_domain.domain
        if domain.https:
            return redirect_to_https(request, project=unresolved_domain.project)
        return None

    # HTTPS redirect for public domains.
    # External (PR preview) domains follow the same setting as the public domain.
    if (
        unresolved_domain.is_from_public_domain or unresolved_domain.is_from_external_domain
    ) and settings.PUBLIC_DOMAIN_USES_HTTPS:
        return redirect_to_https(request, project=unresolved_domain.project)

    return None
Get a redirect response if the request should be redirected to HTTPS. A request should be redirected to HTTPS if any of the following conditions are met: - It's from a custom domain and the domain has HTTPS enabled. - It's from a public domain, and the public domain uses HTTPS.
_get_https_redirect
python
readthedocs/readthedocs.org
readthedocs/proxito/middleware.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/middleware.py
MIT
def test_canonical_cname_redirect(self):
    """Requests to the public domain URL should redirect to the custom domain if the domain is canonical/https."""
    host = "pip.dev.readthedocs.io"
    cname = "docs.random.com"
    custom_domain = get(
        Domain, project=self.pip, domain=cname, canonical=False, https=False
    )

    # Non-canonical domain: only the / -> /en/latest/ system redirect fires.
    resp = self.client.get(self.url, headers={"host": host})
    self.assertEqual(resp.status_code, 302)
    self.assertEqual(resp["X-RTD-Redirect"], RedirectType.system.name)

    # Once the domain is canonical and HTTPS, every path redirects to it.
    custom_domain.canonical = True
    custom_domain.https = True
    custom_domain.save()
    for path in (self.url, "/subdir/"):
        resp = self.client.get(path, headers={"host": host})
        self.assertEqual(resp.status_code, 302)
        self.assertEqual(resp["location"], f"https://{cname}{path}")
        self.assertEqual(
            resp["X-RTD-Redirect"], RedirectType.to_canonical_domain.name
        )
Requests to the public domain URL should redirect to the custom domain if the domain is canonical/https.
test_canonical_cname_redirect
python
readthedocs/readthedocs.org
readthedocs/proxito/tests/test_middleware.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/tests/test_middleware.py
MIT
def test_subproject_redirect(self):
    """Requests to a subproject should redirect to the domain of the main project."""
    subproject = get(
        Project,
        name="subproject",
        slug="subproject",
        users=[self.owner],
        privacy_level=PUBLIC,
    )
    subproject.versions.update(privacy_level=PUBLIC)
    get(
        ProjectRelationship,
        parent=self.pip,
        child=subproject,
    )

    # Any path on the subproject's own domain redirects to the main
    # project's domain, under the ``/projects/subproject/`` prefix.
    for url in (self.url, "/subdir/", "/en/latest/"):
        resp = self.client.get(
            url, headers={"host": "subproject.dev.readthedocs.io"}
        )
        self.assertEqual(resp.status_code, 302)
        self.assertTrue(
            resp["location"].startswith(
                "http://pip.dev.readthedocs.io/projects/subproject/"
            )
        )
        self.assertEqual(
            resp["X-RTD-Redirect"], RedirectType.subproject_to_main_domain.name
        )

    # Using a custom domain in a subproject isn't supported (or shouldn't be!).
    cname = "docs.random.com"
    get(
        Domain,
        project=subproject,
        domain=cname,
        canonical=True,
        https=True,
    )
    resp = self.client.get(
        self.url, headers={"host": "subproject.dev.readthedocs.io"}
    )
    self.assertEqual(resp.status_code, 302)
    # Fixed: this was an f-string with no placeholders (ruff F541), which
    # misleadingly suggested interpolation; a plain literal is intended.
    self.assertEqual(
        resp["location"], "http://pip.dev.readthedocs.io/projects/subproject/"
    )
    self.assertEqual(
        resp["X-RTD-Redirect"], RedirectType.subproject_to_main_domain.name
    )
Requests to a subproject should redirect to the domain of the main project.
test_subproject_redirect
python
readthedocs/readthedocs.org
readthedocs/proxito/tests/test_middleware.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/tests/test_middleware.py
MIT
def test_canonicalize_public_domain_to_cname_redirect(self):
    """Redirect to the CNAME if it is canonical."""
    self.domain.canonical = True
    self.domain.save()

    r = self.client.get(
        "/", secure=True, headers={"host": "project.dev.readthedocs.io"}
    )
    self.assertEqual(r.status_code, 302)
    self.assertEqual(
        r["Location"],
        f"https://{self.domain.domain}/",
    )
    # The canonical redirect is cacheable and tagged with the project slug.
    self.assertEqual(r.headers["CDN-Cache-Control"], "public")
    self.assertEqual(r.headers["Cache-Tag"], "project")
    self.assertEqual(r["X-RTD-Redirect"], RedirectType.to_canonical_domain.name)

    # We should redirect before 404ing
    r = self.client.get(
        "/en/latest/404after302",
        secure=True,
        headers={"host": "project.dev.readthedocs.io"},
    )
    self.assertEqual(r.status_code, 302)
    self.assertEqual(
        r["Location"],
        f"https://{self.domain.domain}/en/latest/404after302",
    )
    self.assertEqual(r.headers["CDN-Cache-Control"], "public")
    self.assertEqual(r.headers["Cache-Tag"], "project")
    self.assertEqual(r["X-RTD-Redirect"], RedirectType.to_canonical_domain.name)
Redirect to the CNAME if it is canonical.
test_canonicalize_public_domain_to_cname_redirect
python
readthedocs/readthedocs.org
readthedocs/proxito/tests/test_redirects.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/tests/test_redirects.py
MIT
def test_exact_redirect_avoid_infinite_redirect(self):
    """
    Avoid infinite redirects.

    If the URL hit is the same that the URL returned for redirection, we
    return a 404.

    These examples come from this issue:

    * http://github.com/readthedocs/readthedocs.org/issues/4673
    """
    fixture.get(
        Redirect,
        project=self.project,
        redirect_type=EXACT_REDIRECT,
        from_url="/*",
        to_url="/en/latest/:splat",
    )

    r = self.client.get(
        "/redirect.html", headers={"host": "project.dev.readthedocs.io"}
    )
    self.assertEqual(r.status_code, 302)
    self.assertEqual(
        r["Location"],
        "http://project.dev.readthedocs.io/en/latest/redirect.html",
    )
    r = self.client.get(
        "/redirect/", headers={"host": "project.dev.readthedocs.io"}
    )
    self.assertEqual(r.status_code, 302)
    self.assertEqual(
        r["Location"],
        "http://project.dev.readthedocs.io/en/latest/redirect/",
    )

    # The redirect target equals the requested URL: 404 instead of a loop.
    r = self.client.get(
        "/en/latest/redirect/", headers={"host": "project.dev.readthedocs.io"}
    )
    self.assertEqual(r.status_code, 404)

    fixture.get(
        Redirect,
        project=self.project,
        redirect_type=EXACT_REDIRECT,
        from_url="/en/latest/*",
        to_url="/en/latest/subdir/:splat",
    )
    r = self.client.get(
        "/en/latest/redirect.html", headers={"host": "project.dev.readthedocs.io"}
    )
    self.assertEqual(r.status_code, 302)
    self.assertEqual(
        r["Location"],
        "http://project.dev.readthedocs.io/en/latest/subdir/redirect.html",
    )

    # Following this redirect would keep prepending ``subdir/`` forever,
    # so it must be detected and answered with a 404.
    r = self.client.get(
        "/en/latest/subdir/redirect.html",
        headers={"host": "project.dev.readthedocs.io"},
    )
    self.assertEqual(r.status_code, 404)
Avoid infinite redirects. If the URL hit is the same that the URL returned for redirection, we return a 404. These examples come from this issue: * http://github.com/readthedocs/readthedocs.org/issues/4673
test_exact_redirect_avoid_infinite_redirect
python
readthedocs/readthedocs.org
readthedocs/proxito/tests/test_old_redirects.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/tests/test_old_redirects.py
MIT
def test_redirect_exact_with_wildcard(self):
    """
    Exact redirects can have a ``*`` at the end of ``from_url``.

    Use case: we want to deprecate version ``2.0`` and replace it by
    ``3.0``. We write an exact redirect from ``/en/2.0/*`` to
    ``/en/3.0/:splat``.
    """
    fixture.get(
        Redirect,
        project=self.project,
        redirect_type=EXACT_REDIRECT,
        from_url="/en/latest/*",
        to_url="/en/version/:splat",  # change version
    )
    self.assertEqual(self.project.redirects.count(), 1)
    r = self.client.get(
        "/en/latest/guides/install.html",
        headers={"host": "project.dev.readthedocs.io"},
    )
    self.assertEqual(r.status_code, 302)
    self.assertEqual(
        r["Location"],
        "http://project.dev.readthedocs.io/en/version/guides/install.html",
    )

    # NOTE: I had to modify this test to add the Redirect in
    # ``self.translation`` instead of the root project. I think it makes
    # sense, but just wanted to mention to not forget to talk about
    # backward compatibility
    fixture.get(
        Redirect,
        project=self.translation,
        redirect_type=EXACT_REDIRECT,
        from_url="/es/version/*",
        to_url="/en/master/:splat",  # change language and version
    )
    r = self.client.get(
        "/es/version/guides/install.html",
        headers={"host": "project.dev.readthedocs.io"},
    )
    self.assertEqual(r.status_code, 302)
    self.assertEqual(
        r["Location"],
        "http://project.dev.readthedocs.io/en/master/guides/install.html",
    )

    # A ``to_url`` without ``:splat`` collapses every matched path to a
    # single target page.
    fixture.get(
        Redirect,
        project=self.project,
        redirect_type=EXACT_REDIRECT,
        from_url="/en/latest/tutorials/*",
        to_url="/en/latest/tutorial.html",
    )
    r = self.client.get(
        "/en/latest/tutorials/install.html",
        headers={"host": "project.dev.readthedocs.io"},
    )
    self.assertEqual(r.status_code, 302)
    self.assertEqual(
        r["Location"], "http://project.dev.readthedocs.io/en/latest/tutorial.html"
    )
Exact redirects can have a ``*`` at the end of ``from_url``. Use case: we want to deprecate version ``2.0`` and replace it by ``3.0``. We write an exact redirect from ``/en/2.0/*`` to ``/en/3.0/:splat``.
test_redirect_exact_with_wildcard
python
readthedocs/readthedocs.org
readthedocs/proxito/tests/test_old_redirects.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/tests/test_old_redirects.py
MIT
def test_redirect_inactive_version(self):
    """
    Inactive Version (``active=False``) should redirect properly.

    The function that serves the page should return 404 when serving a page
    of an inactive version and the redirect system should work.
    """
    fixture.get(
        Version,
        slug="oldversion",
        project=self.project,
        active=False,
    )
    fixture.get(
        Redirect,
        project=self.project,
        redirect_type=EXACT_REDIRECT,
        from_url="/en/oldversion/",
        to_url="/en/newversion/",
    )
    # Even though ``oldversion`` can't be served (it's inactive), the
    # user-defined redirect must still be applied.
    r = self.client.get(
        "/en/oldversion/", headers={"host": "project.dev.readthedocs.io"}
    )
    self.assertEqual(r.status_code, 302)
    self.assertEqual(
        r["Location"],
        "http://project.dev.readthedocs.io/en/newversion/",
    )
Inactive Version (``active=False``) should redirect properly. The function that serves the page should return 404 when serving a page of an inactive version and the redirect system should work.
test_redirect_inactive_version
python
readthedocs/readthedocs.org
readthedocs/proxito/tests/test_old_redirects.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/tests/test_old_redirects.py
MIT
def test_exact_redirect_with_wildcard(self):
    """
    Test prefix redirect.

    Prefix redirects don't match a version, so they will return 404, and
    the redirect will be handled there.
    """
    fixture.get(
        Redirect,
        project=self.project,
        redirect_type=EXACT_REDIRECT,
        force=True,
        from_url="/woot/*",
        to_url="/en/latest/:splat",
    )
    response = self.client.get(
        "/woot/install.html",
        headers={"host": "project.dev.readthedocs.io"},
    )
    self.assertEqual(response.status_code, 404)
Test prefix redirect. Prefix redirects don't match a version, so they will return 404, and the redirect will be handled there.
test_exact_redirect_with_wildcard
python
readthedocs/readthedocs.org
readthedocs/proxito/tests/test_old_redirects.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/tests/test_old_redirects.py
MIT
def test_redirect_exact_redirect_with_wildcard_crossdomain(self):
    """
    Avoid redirecting to an external site unless the external site is in to_url.

    We also test by trying to bypass the protocol check with the special chars listed at
    https://github.com/python/cpython/blob/c3ffbbdf3d5645ee07c22649f2028f9dffc762ba/Lib/urllib/parse.py#L80-L81.
    """
    fixture.get(
        Redirect,
        project=self.project,
        redirect_type=EXACT_REDIRECT,
        from_url="/*",
        to_url="/en/latest/:splat",
    )

    # Each entry is (requested URL, expected ``Location`` header).
    urls = [
        # Plain protocol, these are caught by the slash redirect.
        (
            "http://project.dev.readthedocs.io/http://my.host/path.html",
            "/http:/my.host/path.html",
        ),
        (
            "http://project.dev.readthedocs.io//my.host/path.html",
            "/my.host/path.html",
        ),
        # Trying to bypass the protocol check by including a `\r` char.
        (
            "http://project.dev.readthedocs.io/http:/%0D/my.host/path.html",
            "http://project.dev.readthedocs.io/en/latest/http://my.host/path.html",
        ),
        (
            "http://project.dev.readthedocs.io/%0D/my.host/path.html",
            "http://project.dev.readthedocs.io/en/latest/my.host/path.html",
        ),
        # Trying to bypass the protocol check by including a `\t` char.
        (
            "http://project.dev.readthedocs.io/http:/%09/my.host/path.html",
            "http://project.dev.readthedocs.io/en/latest/http://my.host/path.html",
        ),
        (
            "http://project.dev.readthedocs.io/%09/my.host/path.html",
            "http://project.dev.readthedocs.io/en/latest/my.host/path.html",
        ),
    ]
    for url, expected_location in urls:
        r = self.client.get(url, headers={"host": "project.dev.readthedocs.io"})
        self.assertEqual(r.status_code, 302, url)
        self.assertEqual(r["Location"], expected_location, url)
Avoid redirecting to an external site unless the external site is in to_url. We also test by trying to bypass the protocol check with the special chars listed at https://github.com/python/cpython/blob/c3ffbbdf3d5645ee07c22649f2028f9dffc762ba/Lib/urllib/parse.py#L80-L81.
test_redirect_exact_redirect_with_wildcard_crossdomain
python
readthedocs/readthedocs.org
readthedocs/proxito/tests/test_old_redirects.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/tests/test_old_redirects.py
MIT
def test_redirect_html_to_clean_url_crossdomain(self):
    """
    Avoid redirecting to an external site unless the external site is in to_url
    """
    fixture.get(
        Redirect,
        project=self.project,
        redirect_type=HTML_TO_CLEAN_URL_REDIRECT,
    )

    # Each entry is (requested URL, expected ``Location`` header).
    urls = [
        # Plain protocol, these are caught by the slash redirect.
        (
            "http://project.dev.readthedocs.io/http://my.host/path.html",
            "/http:/my.host/path.html",
        ),
        (
            "http://project.dev.readthedocs.io//my.host/path.html",
            "/my.host/path.html",
        ),
        # Trying to bypass the protocol check by including a `\r` char.
        (
            "http://project.dev.readthedocs.io/http:/%0D/my.host/path.html",
            "http://project.dev.readthedocs.io/en/latest/http://my.host/path/",
        ),
        (
            "http://project.dev.readthedocs.io/%0D/my.host/path.html",
            "http://project.dev.readthedocs.io/en/latest/my.host/path/",
        ),
    ]
    for url, expected_location in urls:
        r = self.client.get(url, headers={"host": "project.dev.readthedocs.io"})
        self.assertEqual(r.status_code, 302, url)
        self.assertEqual(r["Location"], expected_location, url)
Avoid redirecting to an external site unless the external site is in to_url
test_redirect_html_to_clean_url_crossdomain
python
readthedocs/readthedocs.org
readthedocs/proxito/tests/test_old_redirects.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/tests/test_old_redirects.py
MIT
def test_redirect_clean_url_to_html_crossdomain(self):
    """Avoid redirecting to an external site unless the external site is in to_url."""
    fixture.get(
        Redirect,
        project=self.project,
        redirect_type=CLEAN_URL_TO_HTML_REDIRECT,
    )

    # Each entry is (requested URL, expected ``Location`` header).
    urls = [
        # Plain protocol, these are caught by the slash redirect.
        (
            "http://project.dev.readthedocs.io/http://my.host/path/",
            "/http:/my.host/path/",
        ),
        ("http://project.dev.readthedocs.io//my.host/path/", "/my.host/path/"),
        # Trying to bypass the protocol check by including a `\r` char.
        (
            "http://project.dev.readthedocs.io/http:/%0D/my.host/path/",
            "http://project.dev.readthedocs.io/en/latest/http://my.host/path.html",
        ),
        (
            "http://project.dev.readthedocs.io/%0D/my.host/path/",
            "http://project.dev.readthedocs.io/en/latest/my.host/path.html",
        ),
    ]
    for url, expected_location in urls:
        r = self.client.get(url, headers={"host": "project.dev.readthedocs.io"})
        self.assertEqual(r.status_code, 302, url)
        self.assertEqual(r["Location"], expected_location, url)
Avoid redirecting to an external site unless the external site is in to_url.
test_redirect_clean_url_to_html_crossdomain
python
readthedocs/readthedocs.org
readthedocs/proxito/tests/test_old_redirects.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/tests/test_old_redirects.py
MIT
def test_redirect_using_projects_prefix(self):
    """
    Test that we can support redirects using the ``/projects/`` prefix.

    https://github.com/readthedocs/readthedocs.org/issues/7552
    """
    redirect = fixture.get(
        Redirect,
        project=self.project,
        redirect_type=EXACT_REDIRECT,
        from_url="/projects/*",
        to_url="https://example.com/projects/:splat",
    )
    self.assertEqual(self.project.redirects.count(), 1)
    # Cross-domain target is allowed here because the external host is
    # explicitly part of ``to_url``.
    r = self.client.get(
        "/projects/deleted-subproject/en/latest/guides/install.html",
        headers={"host": "project.dev.readthedocs.io"},
    )
    self.assertEqual(r.status_code, 302)
    self.assertEqual(
        r["Location"],
        "https://example.com/projects/deleted-subproject/en/latest/guides/install.html",
    )

    # Same redirect object, retargeted to a same-domain ``/projects/`` path.
    redirect.from_url = "/projects/not-found/*"
    redirect.to_url = "/projects/subproject/:splat"
    redirect.save()
    r = self.client.get(
        "/projects/not-found/en/latest/guides/install.html",
        headers={"host": "project.dev.readthedocs.io"},
    )
    self.assertEqual(r.status_code, 302)
    self.assertEqual(
        r["Location"],
        "http://project.dev.readthedocs.io/projects/subproject/en/latest/guides/install.html",
    )
Test that we can support redirects using the ``/projects/`` prefix. https://github.com/readthedocs/readthedocs.org/issues/7552
test_redirect_using_projects_prefix
python
readthedocs/readthedocs.org
readthedocs/proxito/tests/test_old_redirects.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/tests/test_old_redirects.py
MIT
def test_external_version_serving_old_slugs(self):
    """
    Test external version serving with projects with `--` in their slug.

    Some old projects may have been created with a slug containing `--`,
    our current code doesn't allow these type of slugs.
    """
    fixture.get(
        Version,
        verbose_name="10",
        slug="10",
        type=EXTERNAL,
        active=True,
        project=self.project,
    )
    # Force a legacy slug that current validation would reject.
    self.project.slug = "test--project"
    self.project.save()

    host = "test--project--10.dev.readthedocs.build"
    resp = self.client.get("/en/10/awesome.html", headers={"host": host})
    self.assertEqual(
        resp["x-accel-redirect"],
        "/proxito/media/external/html/test--project/10/awesome.html",
    )
Test external version serving with projects with `--` in their slug. Some old projects may have been created with a slug containing `--`, our current code doesn't allow these type of slugs.
test_external_version_serving_old_slugs
python
readthedocs/readthedocs.org
readthedocs/proxito/tests/test_full.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/tests/test_full.py
MIT
def test_invalid_download_files(self):
    """
    Making sure we don't serve HTML or other formats here.

    See GHSA-98pf-gfh3-x3mp for more information.
    """
    # Formats like plain HTML must not be served through the downloads
    # endpoint (GHSA-98pf-gfh3-x3mp).
    for artifact_type in ("html", "foo", "zip"):
        response = self.client.get(
            f"/_/downloads/en/latest/{artifact_type}/",
            headers={"host": "project.dev.readthedocs.io"},
        )
        self.assertEqual(response.status_code, 404)
Making sure we don't serve HTML or other formats here. See GHSA-98pf-gfh3-x3mp for more information.
test_invalid_download_files
python
readthedocs/readthedocs.org
readthedocs/proxito/tests/test_full.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/tests/test_full.py
MIT
def test_filename_with_parent_paths(self):
    """
    Ensure the project, version, and language match the request

    See GHSA-5w8m-r7jm-mhp9 for more information.
    """
    headers = {"host": "project.dev.readthedocs.io"}
    # Each path tries to escape the version directory with ``..`` segments
    # (plain, Windows-style ``\`` and doubled variants) to retarget the
    # version, the language and version, or the whole project.
    traversal_paths = (
        "/en/latest/../target/awesome.html",
        "/en/latest/../../en/target/awesome.html",
        "/en/latest/../../../someproject/en/target/awesome.html",
        "/en/latest/..\\../en/target/awesome.html",
        "/en/latest/..\\..\\../someproject/en/target/awesome.html",
        "/en/latest/..\\../someproject/en/target/awesome.html",
        "/en/latest/..\\\\../en/target/awesome.html",
        "/en/latest/..\\\\..\\\\../someproject/en/target/awesome.html",
        "/en/latest/..\\\\../someproject/en/target/awesome.html",
    )
    for path in traversal_paths:
        response = self.client.get(path, headers=headers)
        self.assertEqual(response.status_code, 400)
Ensure the project, version, and language match the request See GHSA-5w8m-r7jm-mhp9 for more information.
test_filename_with_parent_paths
python
readthedocs/readthedocs.org
readthedocs/proxito/tests/test_full.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/tests/test_full.py
MIT
def test_default_robots_txt(self, storage_exists):
    """When no custom robots.txt exists in storage, serve the generated default."""
    storage_exists.return_value = False
    self.project.versions.update(active=True, built=True)

    response = self.client.get(
        reverse("robots_txt"), headers={"host": "project.readthedocs.io"}
    )
    self.assertEqual(response.status_code, 200)

    # The default file allows everything and points at the sitemap.
    expected_content = dedent(
        """
        User-agent: *
        Disallow: # Allow everything

        Sitemap: https://project.readthedocs.io/sitemap.xml
        """
    ).lstrip()
    self.assertEqual(response.content.decode(), expected_content)
User-agent: * Disallow: # Allow everything Sitemap: https://project.readthedocs.io/sitemap.xml
test_default_robots_txt
python
readthedocs/readthedocs.org
readthedocs/proxito/tests/test_full.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/tests/test_full.py
MIT
def test_default_robots_txt_disallow_hidden_versions(self, storage_exists):
    # No custom robots.txt in storage: the default one is generated.
    storage_exists.return_value = False
    self.project.versions.update(active=True, built=True)
    # Hidden + active + public versions: these get ``Disallow`` entries.
    fixture.get(
        Version,
        project=self.project,
        slug="hidden",
        active=True,
        hidden=True,
        privacy_level=PUBLIC,
    )
    fixture.get(
        Version,
        project=self.project,
        slug="hidden-2",
        active=True,
        hidden=True,
        privacy_level=PUBLIC,
    )
    # Inactive or private versions: must not appear in robots.txt at all.
    fixture.get(
        Version,
        project=self.project,
        slug="hidden-and-inactive",
        active=False,
        hidden=True,
        privacy_level=PUBLIC,
    )
    fixture.get(
        Version,
        project=self.project,
        slug="hidden-and-private",
        active=False,
        hidden=True,
        privacy_level=PRIVATE,
    )
    response = self.client.get(
        reverse("robots_txt"), headers={"host": "project.readthedocs.io"}
    )
    self.assertEqual(response.status_code, 200)
    expected = dedent(
        """
        User-agent: *
        Disallow: /en/hidden-2/ # Hidden version
        Disallow: /en/hidden/ # Hidden version

        Sitemap: https://project.readthedocs.io/sitemap.xml
        """
    ).lstrip()
    self.assertEqual(response.content.decode(), expected)
User-agent: * Disallow: /en/hidden-2/ # Hidden version Disallow: /en/hidden/ # Hidden version Sitemap: https://project.readthedocs.io/sitemap.xml
test_default_robots_txt_disallow_hidden_versions
python
readthedocs/readthedocs.org
readthedocs/proxito/tests/test_full.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/tests/test_full.py
MIT
def test_serve_static_files_internal_nginx_redirect_always_appended(self):
    """Test for #11080."""
    static_file = "proxito-static/javascript/readthedocs-doc-embed.js"
    url = reverse("proxito_static_files", args=[static_file])
    response = self.client.get(url, headers={"host": "project.readthedocs.io"})
    self.assertEqual(response.status_code, 200)
    # The internal NGINX media prefix must always be prepended.
    self.assertEqual(
        response.headers["x-accel-redirect"],
        "/proxito-static/media/proxito-static/javascript/readthedocs-doc-embed.js",
    )
    self.assertEqual(
        response.headers["Cache-Tag"],
        "project,project:rtd-staticfiles,rtd-staticfiles",
    )
Test for #11080.
test_serve_static_files_internal_nginx_redirect_always_appended
python
readthedocs/readthedocs.org
readthedocs/proxito/tests/test_full.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/tests/test_full.py
MIT
def _test_cache_control_header_project(self, expected_value, host=None):
    """
    Test the CDN-Cache-Control header on requests for `self.project`.

    :param expected_value: The expected value of the header: 'public' or 'private'.
    :param host: Hostname to use in the requests.
    """
    host = host or "project.dev.readthedocs.io"

    # Normal serving.
    urls = [
        "/en/latest/",
        "/en/latest/foo.html",
    ]
    for url in urls:
        resp = self.client.get(url, secure=True, headers={"host": host})
        self.assertEqual(resp.headers["CDN-Cache-Control"], expected_value, url)
        self.assertEqual(resp.headers["Cache-Tag"], "project,project:latest", url)

    # Page & system redirects are always cached.
    # Authz is done on the redirected URL.
    location = f"https://{host}/en/latest/"
    urls = [
        ["", location],
        ["/", location],
        ["/page/foo.html", f"https://{host}/en/latest/foo.html"],
    ]
    for url, location in urls:
        resp = self.client.get(url, secure=True, headers={"host": host})
        self.assertEqual(resp["Location"], location, url)
        self.assertEqual(resp.headers["CDN-Cache-Control"], "public", url)
        self.assertEqual(resp.headers["Cache-Tag"], "project", url)

    # Proxied static files are always cached.
    resp = self.client.get("/_/static/file.js", secure=True, headers={"host": host})
    self.assertEqual(resp.headers["CDN-Cache-Control"], "public")
    self.assertEqual(
        resp.headers["Cache-Tag"], "project,project:rtd-staticfiles,rtd-staticfiles"
    )

    # Slash redirects can always be cached.
    url = "/en//latest//"
    resp = self.client.get(url, secure=True, headers={"host": host})
    self.assertEqual(resp["Location"], "/en/latest/", url)
    self.assertEqual(resp.headers["CDN-Cache-Control"], "public", url)
    self.assertEqual(resp.headers["Cache-Tag"], "project")

    # Forced redirects will be cached only if the version is public.
    # (``expected_value`` encodes the version's privacy, so it's reused here.)
    get(
        Redirect,
        project=self.project,
        redirect_type="exact",
        from_url="/en/latest/install.html",
        to_url="/en/latest/tutorial/install.html",
        force=True,
    )
    url = "/en/latest/install.html"
    resp = self.client.get(url, secure=True, headers={"host": host})
    self.assertEqual(
        resp["Location"], f"https://{host}/en/latest/tutorial/install.html", url
    )
    self.assertEqual(resp.headers["CDN-Cache-Control"], expected_value, url)
    self.assertEqual(resp.headers["Cache-Tag"], "project,project:latest", url)
Test the CDN-Cache-Control header on requests for `self.project`. :param expected_value: The expected value of the header: 'public' or 'private'. :param host: Hostname to use in the requests.
_test_cache_control_header_project
python
readthedocs/readthedocs.org
readthedocs/proxito/tests/test_full.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/tests/test_full.py
MIT
def _test_cache_control_header_subproject(self, expected_value, host=None):
    """
    Test the CDN-Cache-Control header on requests for `self.subproject`.

    :param expected_value: The expected value of the header: 'public' or 'private'.
    :param host: Hostname to use in the requests.
    """
    host = host or "project.dev.readthedocs.io"

    # Normal serving.
    urls = [
        "/projects/subproject/en/latest/",
        "/projects/subproject/en/latest/foo.html",
    ]
    for url in urls:
        resp = self.client.get(url, secure=True, headers={"host": host})
        self.assertEqual(resp.headers["CDN-Cache-Control"], expected_value, url)
        self.assertEqual(
            resp.headers["Cache-Tag"], "subproject,subproject:latest", url
        )

    # Page & system redirects are always cached.
    # Authz is done on the redirected URL.
    location = f"https://{host}/projects/subproject/en/latest/"
    urls = [
        ["/projects/subproject", location],
        ["/projects/subproject/", location],
    ]
    for url, location in urls:
        resp = self.client.get(url, secure=True, headers={"host": host})
        self.assertEqual(resp["Location"], location, url)
        self.assertEqual(resp.headers["CDN-Cache-Control"], "public", url)
        self.assertEqual(resp.headers["Cache-Tag"], "subproject", url)

    # Proxied static files are always cached.
    # NOTE: the tags use the main project slug here, not the subproject's.
    resp = self.client.get("/_/static/file.js", secure=True, headers={"host": host})
    self.assertEqual(resp.headers["CDN-Cache-Control"], "public")
    self.assertEqual(
        resp.headers["Cache-Tag"], "project,project:rtd-staticfiles,rtd-staticfiles"
    )

    # Slash redirects can always be cached.
    url = "/projects//subproject//"
    resp = self.client.get(url, secure=True, headers={"host": host})
    self.assertEqual(resp["Location"], "/projects/subproject/", url)
    self.assertEqual(resp.headers["CDN-Cache-Control"], "public", url)
    self.assertEqual(resp.headers["Cache-Tag"], "project")
Test the CDN-Cache-Control header on requests for `self.subproject`. :param expected_value: The expected value of the header: 'public' or 'private'. :param host: Hostname to use in the requests.
_test_cache_control_header_subproject
python
readthedocs/readthedocs.org
readthedocs/proxito/tests/test_full.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/tests/test_full.py
MIT
def fast_404(request, *args, **kwargs):
    """
    A fast error page handler.

    This stops us from running RTD logic in our error handling. We already
    do this in RTD prod when we fallback to it.
    """
    # Plain-text body, no template rendering, to keep 404s as cheap as possible.
    response = HttpResponse("Not Found.", status=404)
    return response
A fast error page handler. This stops us from running RTD logic in our error handling. We already do this in RTD prod when we fallback to it.
fast_404
python
readthedocs/readthedocs.org
readthedocs/proxito/views/utils.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/views/utils.py
MIT
def proxito_404_page_handler(request, template_name="errors/proxito/404/base.html", exception=None): """ Serves a 404 error message, handling 404 exception types raised throughout the app. We want to return fast when the 404 is used as an internal NGINX redirect to reach our ``ServeError404`` view. However, if the 404 exception was risen inside ``ServeError404`` view, we want to render a useful HTML response. """ # 404 exceptions that don't originate from our proxito 404 handler should have a fast response # with no HTML rendered, since they will be forwarded to our 404 handler again. if request.resolver_match and request.resolver_match.url_name != "proxito_404_handler": return fast_404(request, exception, template_name) context = {} http_status = 404 # Contextualized 404 exceptions: # Context is defined by the views that raise these exceptions and handled # in their templates. if isinstance(exception, ContextualizedHttp404): context.update(exception.get_context()) template_name = exception.template_name http_status = exception.http_status context["path_not_found"] = context.get("path_not_found") or request.path r = render( request, template_name, context=context, ) r.status_code = http_status return r
Serves a 404 error message, handling 404 exception types raised throughout the app. We want to return fast when the 404 is used as an internal NGINX redirect to reach our ``ServeError404`` view. However, if the 404 exception was risen inside ``ServeError404`` view, we want to render a useful HTML response.
proxito_404_page_handler
python
readthedocs/readthedocs.org
readthedocs/proxito/views/utils.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/views/utils.py
MIT
def get(self, request, subproject_slug=None, filename=""):
    """Handle all page redirects."""
    unresolved_domain = request.unresolved_domain
    is_external = unresolved_domain.is_from_external_domain

    # Start from the domain's project; a subproject slug narrows it down.
    project = unresolved_domain.project
    if subproject_slug:
        relationship = get_object_or_404(
            project.subprojects, alias=subproject_slug
        )
        project = relationship.child

    # External domains carry their own version slug; otherwise use the
    # project's default version.
    version_slug = (
        unresolved_domain.external_version_slug
        if is_external
        else project.get_default_version()
    )

    # TODO: find a better way to pass this to the middleware.
    request.path_project_slug = project.slug

    return self.system_redirect(
        request=request,
        final_project=project,
        version_slug=version_slug,
        filename=filename,
        is_external_version=is_external,
    )
Handle all page redirects.
get
python
readthedocs/readthedocs.org
readthedocs/proxito/views/serve.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/views/serve.py
MIT
def get(self, request, path): """ Serve a file from the resolved project and version from the path. Before trying to serve the file, we check for canonical redirects. If the path isn't valid for the current project, or if the version/translation doesn't exist, we raise a 404. This will be handled by the ``ServeError404`` view. This view handles the following redirects: - Redirect to the default version of the project from the root path or translation (/ -> /en/latest/, /en/ -> /en/latest/). - Trailing slash redirect (/en/latest -> /en/latest/). - Forced redirects (apply a user defined redirect even if the path exists). This view checks if the user is allowed to access the current version, and if the project is marked as spam. """ unresolved_domain = request.unresolved_domain # Protect against bad requests to API hosts that don't set this attribute. if not unresolved_domain: raise Http404 # Handle requests that need canonicalizing first, # e.g. HTTP -> HTTPS, redirect to canonical domain, etc. # We run this here to reduce work we need to do on easily cached responses. # It's slower for the end user to have multiple HTTP round trips, # but reduces chances for URL resolving bugs, # and makes caching more effective because we don't care about authz. redirect_type = self._get_canonical_redirect_type(request) if redirect_type: try: return canonical_redirect( request, project=unresolved_domain.project, redirect_type=redirect_type, external_version_slug=unresolved_domain.external_version_slug, ) except InfiniteRedirectException: # ``canonical_redirect`` raises this when it's redirecting back to itself. # We can safely ignore it here because it's logged in ``canonical_redirect``, # and we don't want to issue infinite redirects. pass # Django doesn't include the leading slash in the path, so we normalize it here. path = "/" + path return self.serve_path(request, path)
Serve a file from the resolved project and version from the path. Before trying to serve the file, we check for canonical redirects. If the path isn't valid for the current project, or if the version/translation doesn't exist, we raise a 404. This will be handled by the ``ServeError404`` view. This view handles the following redirects: - Redirect to the default version of the project from the root path or translation (/ -> /en/latest/, /en/ -> /en/latest/). - Trailing slash redirect (/en/latest -> /en/latest/). - Forced redirects (apply a user defined redirect even if the path exists). This view checks if the user is allowed to access the current version, and if the project is marked as spam.
get
python
readthedocs/readthedocs.org
readthedocs/proxito/views/serve.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/views/serve.py
MIT
def _get_canonical_redirect_type(self, request): """If the current request needs a redirect, return the type of redirect to perform.""" unresolved_domain = request.unresolved_domain project = unresolved_domain.project # Check for subprojects before checking for canonical domains, # so we can redirect to the main domain first. # Custom domains on subprojects are not supported. if project.is_subproject: log.debug( "Proxito Public Domain -> Subproject Main Domain Redirect.", project_slug=project.slug, ) return RedirectType.subproject_to_main_domain if unresolved_domain.is_from_public_domain: canonical_domain = ( Domain.objects.filter(project=project).filter(canonical=True, https=True).exists() ) # For .com we need to check if the project supports custom domains. if canonical_domain and Resolver()._use_cname(project): log.debug( "Proxito Public Domain -> Canonical Domain Redirect.", project_slug=project.slug, ) return RedirectType.to_canonical_domain return None
If the current request needs a redirect, return the type of redirect to perform.
_get_canonical_redirect_type
python
readthedocs/readthedocs.org
readthedocs/proxito/views/serve.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/views/serve.py
MIT
def get(self, request, proxito_path): """ Handler for 404 pages on subdomains. This does a couple of things: * Handles directory indexing for URLs that don't end in a slash * Check for user redirects * Record the broken link for analytics * Handles custom 404 serving For 404's, first search for a 404 page in the current version, then continues with the default version and finally, if none of them are found, the Read the Docs default page (Maze Found) is rendered by Django and served. """ log.bind(proxito_path=proxito_path) log.debug("Executing 404 handler.") unresolved_domain = request.unresolved_domain # We force all storage calls to use the external versions storage, # since we are serving an external version. # The version that results from the unresolve_path() call already is # validated to use the correct manager, this is here to add defense in # depth against serving the wrong version. if unresolved_domain.is_from_external_domain: self.version_type = EXTERNAL project = None version = None # If we weren't able to resolve a filename, # then the path is the filename. filename = proxito_path lang_slug = None version_slug = None # Try to map the current path to a project/version/filename. # If that fails, we fill the variables with the information we have # available in the exceptions. contextualized_404_class = ContextualizedHttp404 try: unresolved = unresolver.unresolve_path( unresolved_domain=unresolved_domain, path=proxito_path, append_indexhtml=False, ) # Inject the UnresolvedURL into the HttpRequest so we can access from the middleware. # We could resolve it again from the middleware, but we would duplicating DB queries. 
request.unresolved_url = unresolved project = unresolved.project version = unresolved.version filename = unresolved.filename lang_slug = project.language version_slug = version.slug contextualized_404_class = ProjectFilenameHttp404 except VersionNotFoundError as exc: project = exc.project lang_slug = project.language version_slug = exc.version_slug filename = exc.filename contextualized_404_class = ProjectVersionHttp404 except TranslationNotFoundError as exc: project = exc.project lang_slug = exc.language version_slug = exc.version_slug filename = exc.filename contextualized_404_class = ProjectTranslationHttp404 except TranslationWithoutVersionError as exc: project = exc.project lang_slug = exc.language # TODO: Use a contextualized 404 except InvalidExternalVersionError as exc: project = exc.project # TODO: Use a contextualized 404 except InvalidPathForVersionedProjectError as exc: project = exc.project filename = exc.path # TODO: Use a contextualized 404 log.bind( project_slug=project.slug, version_slug=version_slug, ) # TODO: find a better way to pass this to the middleware. request.path_project_slug = project.slug request.path_version_slug = version_slug # If we were able to resolve to a valid version, it means that the # current file doesn't exist. So we check if we can redirect to its # index file if it exists before doing anything else. # If the version isn't marked as built, we don't check for index files, # since the version doesn't have any files. # This is /en/latest/foo -> /en/latest/foo/index.html. if version and version.built: response = self._get_index_file_redirect( request=request, project=project, version=version, filename=filename, full_path=proxito_path, ) if response: return response # Check and perform redirects on 404 handler for non-external domains only. # NOTE: This redirect check must be done after trying files like # ``index.html`` to emulate the behavior we had when # serving directly from NGINX without passing through Python. 
if not unresolved_domain.is_from_external_domain: try: redirect_response = self.get_redirect_response( request=request, project=project, language=lang_slug, version_slug=version_slug, filename=filename, path=proxito_path, ) if redirect_response: return redirect_response except InfiniteRedirectException: # ``get_redirect_response`` raises this when it's redirecting back to itself. # We can safely ignore it here because it's logged in ``canonical_redirect``, # and we don't want to issue infinite redirects. pass # Register 404 pages into our database for user's analytics. if not unresolved_domain.is_from_external_domain: self._register_broken_link( project=project, version=version, filename=filename, path=proxito_path, ) response = self._get_custom_404_page( request=request, project=project, version=version, ) if response: return response # Don't use the custom 404 page, use our general contextualized 404 response # Several additional context variables can be added if the templates # or other error handling is developed (version, language, filename). raise contextualized_404_class( project=project, path_not_found=proxito_path, )
Handler for 404 pages on subdomains. This does a couple of things: * Handles directory indexing for URLs that don't end in a slash * Check for user redirects * Record the broken link for analytics * Handles custom 404 serving For 404's, first search for a 404 page in the current version, then continues with the default version and finally, if none of them are found, the Read the Docs default page (Maze Found) is rendered by Django and served.
get
python
readthedocs/readthedocs.org
readthedocs/proxito/views/serve.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/views/serve.py
MIT
def _get_custom_404_page(self, request, project, version=None): """ Try to serve a custom 404 page from this project. If a version is given, try to serve the 404 page from that version first, if it doesn't exist, try to serve the 404 page from the default version. We check for a 404.html or 404/index.html file. We don't check for a custom 404 page in versions that aren't marked as built, since they don't have any files. If a 404 page is found, we return a response with the content of that file, `None` otherwise. """ versions_404 = [version] if version and version.built else [] if not version or version.slug != project.default_version: default_version = project.versions.filter(slug=project.default_version).first() if default_version and default_version.built: versions_404.append(default_version) if not versions_404: return None tryfiles = ["404.html", "404/index.html"] available_404_files = list( HTMLFile.objects.filter(version__in=versions_404, path__in=tryfiles).values_list( "version__slug", "path" ) ) if not available_404_files: return None for version_404 in versions_404: if not self.allowed_user(request, version_404): continue for tryfile in tryfiles: if (version_404.slug, tryfile) not in available_404_files: continue storage_root_path = project.get_storage_path( type_="html", version_slug=version_404.slug, include_file=False, version_type=self.version_type, ) storage_filename_path = build_media_storage.join(storage_root_path, tryfile) log.debug( "Serving custom 404.html page.", version_slug_404=version_404.slug, storage_filename_path=storage_filename_path, ) try: content = build_media_storage.open(storage_filename_path).read() return HttpResponse(content, status=404) except FileNotFoundError: log.warning( "File not found in storage. File out of sync with DB.", file=storage_filename_path, ) return None return None
Try to serve a custom 404 page from this project. If a version is given, try to serve the 404 page from that version first, if it doesn't exist, try to serve the 404 page from the default version. We check for a 404.html or 404/index.html file. We don't check for a custom 404 page in versions that aren't marked as built, since they don't have any files. If a 404 page is found, we return a response with the content of that file, `None` otherwise.
_get_custom_404_page
python
readthedocs/readthedocs.org
readthedocs/proxito/views/serve.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/views/serve.py
MIT
def _get_index_file_redirect(self, request, project, version, filename, full_path): """ Check if a file is a directory and redirect to its index file. For example: - /en/latest/foo -> /en/latest/foo/index.html """ # If the path ends with `/`, we already tried to serve # the `/index.html` file. if full_path.endswith("/"): return None tryfile = (filename.rstrip("/") + "/index.html").lstrip("/") if not HTMLFile.objects.filter(version=version, path=tryfile).exists(): return None log.info("Redirecting to index file.", tryfile=tryfile) # Use urlparse so that we maintain GET args in our redirect parts = urlparse(full_path) new_path = parts.path.rstrip("/") + "/" # `full_path` doesn't include query params.` query = urlparse(request.get_full_path()).query redirect_url = parts._replace( path=new_path, query=query, ).geturl() # TODO: decide if we need to check for infinite redirect here # (from URL == to URL) return HttpResponseRedirect(redirect_url)
Check if a file is a directory and redirect to its index file. For example: - /en/latest/foo -> /en/latest/foo/index.html
_get_index_file_redirect
python
readthedocs/readthedocs.org
readthedocs/proxito/views/serve.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/views/serve.py
MIT
def get(self, request): """ Serve custom user's defined ``/robots.txt``. If the project is delisted or is a spam project, we force a special robots.txt. If the user added a ``robots.txt`` in the "default version" of the project, we serve it directly. """ project = request.unresolved_domain.project if project.delisted: return render( request, "robots.delisted.txt", content_type="text/plain", ) # Verify if the project is marked as spam and return a custom robots.txt if "readthedocsext.spamfighting" in settings.INSTALLED_APPS: from readthedocsext.spamfighting.utils import is_robotstxt_denied # noqa if is_robotstxt_denied(project): return render( request, "robots.spam.txt", content_type="text/plain", ) # Use the ``robots.txt`` file from the default version configured version_slug = project.get_default_version() version = project.versions.get(slug=version_slug) no_serve_robots_txt = any( [ # If the default version is private or, version.privacy_level == PRIVATE, # default version is not active or, not version.active, # default version is not built not version.built, ] ) if no_serve_robots_txt: # ... we do return a 404 raise Http404() log.bind( project_slug=project.slug, version_slug=version.slug, ) try: response = self._serve_docs( request=request, project=project, version=version, filename="robots.txt", check_if_exists=True, ) log.info("Serving custom robots.txt file.") return response except StorageFileNotFound: pass # Serve default robots.txt sitemap_url = "{scheme}://{domain}/sitemap.xml".format( scheme="https", domain=project.subdomain(), ) context = { "sitemap_url": sitemap_url, "hidden_paths": self._get_hidden_paths(project), } return render( request, "robots.txt", context, content_type="text/plain", )
Serve custom user's defined ``/robots.txt``. If the project is delisted or is a spam project, we force a special robots.txt. If the user added a ``robots.txt`` in the "default version" of the project, we serve it directly.
get
python
readthedocs/readthedocs.org
readthedocs/proxito/views/serve.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/views/serve.py
MIT
def _get_hidden_paths(self, project): """Get the absolute paths of the public hidden versions of `project`.""" hidden_versions = Version.internal.public(project=project).filter(hidden=True) resolver = Resolver() hidden_paths = [ resolver.resolve_path(project, version_slug=version.slug) for version in hidden_versions ] return hidden_paths
Get the absolute paths of the public hidden versions of `project`.
_get_hidden_paths
python
readthedocs/readthedocs.org
readthedocs/proxito/views/serve.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/views/serve.py
MIT
def priorities_generator(): """ Generator returning ``priority`` needed by sitemap.xml. It generates values from 1 to 0.1 by decreasing in 0.1 on each iteration. After 0.1 is reached, it will keep returning 0.1. """ priorities = [1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2] yield from itertools.chain(priorities, itertools.repeat(0.1))
Generator returning ``priority`` needed by sitemap.xml. It generates values from 1 to 0.1 by decreasing in 0.1 on each iteration. After 0.1 is reached, it will keep returning 0.1.
get.priorities_generator
python
readthedocs/readthedocs.org
readthedocs/proxito/views/serve.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/views/serve.py
MIT
def hreflang_formatter(lang): """ sitemap hreflang should follow correct format. Use hyphen instead of underscore in language and country value. ref: https://en.wikipedia.org/wiki/Hreflang#Common_Mistakes """ if "_" in lang: return lang.replace("_", "-") return lang
sitemap hreflang should follow correct format. Use hyphen instead of underscore in language and country value. ref: https://en.wikipedia.org/wiki/Hreflang#Common_Mistakes
get.hreflang_formatter
python
readthedocs/readthedocs.org
readthedocs/proxito/views/serve.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/views/serve.py
MIT
def changefreqs_generator(): """ Generator returning ``changefreq`` needed by sitemap.xml. It returns ``weekly`` on first iteration, then ``daily`` and then it will return always ``monthly``. We are using ``monthly`` as last value because ``never`` is too aggressive. If the tag is removed and a branch is created with the same name, we will want bots to revisit this. """ changefreqs = ["weekly", "daily"] yield from itertools.chain(changefreqs, itertools.repeat("monthly"))
Generator returning ``changefreq`` needed by sitemap.xml. It returns ``weekly`` on first iteration, then ``daily`` and then it will return always ``monthly``. We are using ``monthly`` as last value because ``never`` is too aggressive. If the tag is removed and a branch is created with the same name, we will want bots to revisit this.
get.changefreqs_generator
python
readthedocs/readthedocs.org
readthedocs/proxito/views/serve.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/views/serve.py
MIT
def get(self, request): """ Generate and serve a ``sitemap.xml`` for a particular ``project``. The sitemap is generated from all the ``active`` and public versions of ``project``. These versions are sorted by using semantic versioning prepending ``latest`` and ``stable`` (if they are enabled) at the beginning. Following this order, the versions are assigned priorities and change frequency. Starting from 1 and decreasing by 0.1 for priorities and starting from daily, weekly to monthly for change frequency. If the project doesn't have any public version, the view raises ``Http404``. :param request: Django request object :param project: Project instance to generate the sitemap :returns: response with the ``sitemap.xml`` template rendered :rtype: django.http.HttpResponse """ # pylint: disable=too-many-locals def priorities_generator(): """ Generator returning ``priority`` needed by sitemap.xml. It generates values from 1 to 0.1 by decreasing in 0.1 on each iteration. After 0.1 is reached, it will keep returning 0.1. """ priorities = [1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2] yield from itertools.chain(priorities, itertools.repeat(0.1)) def hreflang_formatter(lang): """ sitemap hreflang should follow correct format. Use hyphen instead of underscore in language and country value. ref: https://en.wikipedia.org/wiki/Hreflang#Common_Mistakes """ if "_" in lang: return lang.replace("_", "-") return lang def changefreqs_generator(): """ Generator returning ``changefreq`` needed by sitemap.xml. It returns ``weekly`` on first iteration, then ``daily`` and then it will return always ``monthly``. We are using ``monthly`` as last value because ``never`` is too aggressive. If the tag is removed and a branch is created with the same name, we will want bots to revisit this. 
""" changefreqs = ["weekly", "daily"] yield from itertools.chain(changefreqs, itertools.repeat("monthly")) project = request.unresolved_domain.project public_versions = Version.internal.public( project=project, only_active=True, include_hidden=False, ) if not public_versions.exists(): raise Http404() sorted_versions = sort_version_aware(public_versions) # This is a hack to swap the latest version with # stable version to get the stable version first in the sitemap. # We want stable with priority=1 and changefreq='weekly' and # latest with priority=0.9 and changefreq='daily' # More details on this: https://github.com/rtfd/readthedocs.org/issues/5447 if ( len(sorted_versions) >= 2 and sorted_versions[0].slug == LATEST and sorted_versions[1].slug == STABLE ): sorted_versions[0], sorted_versions[1] = ( sorted_versions[1], sorted_versions[0], ) versions = [] for version, priority, changefreq in zip( sorted_versions, priorities_generator(), changefreqs_generator(), ): element = { "loc": version.get_subdomain_url(), "priority": priority, "changefreq": changefreq, "languages": [], } # Version can be enabled, but not ``built`` yet. 
We want to show the # link without a ``lastmod`` attribute last_build = version.builds.order_by("-date").first() if last_build: element["lastmod"] = last_build.date.isoformat() resolver = Resolver() if project.translations.exists(): for translation in project.translations.all(): translated_version = ( Version.internal.public(project=translation) .filter(slug=version.slug) .first() ) if translated_version: href = resolver.resolve_version( project=translation, version=translated_version, ) element["languages"].append( { "hreflang": hreflang_formatter(translation.language), "href": href, } ) # Add itself also as protocol requires element["languages"].append( { "hreflang": project.language, "href": element["loc"], } ) versions.append(element) context = { "versions": versions, } return render( request, "sitemap.xml", context, content_type="application/xml", )
Generate and serve a ``sitemap.xml`` for a particular ``project``. The sitemap is generated from all the ``active`` and public versions of ``project``. These versions are sorted by using semantic versioning prepending ``latest`` and ``stable`` (if they are enabled) at the beginning. Following this order, the versions are assigned priorities and change frequency. Starting from 1 and decreasing by 0.1 for priorities and starting from daily, weekly to monthly for change frequency. If the project doesn't have any public version, the view raises ``Http404``. :param request: Django request object :param project: Project instance to generate the sitemap :returns: response with the ``sitemap.xml`` template rendered :rtype: django.http.HttpResponse
get
python
readthedocs/readthedocs.org
readthedocs/proxito/views/serve.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/views/serve.py
MIT
def _get_cache_tags(self): """ Add an additional *global* tag. This is so we can purge all files from all projects with one single call. """ tags = super()._get_cache_tags() tags.append(self.project_cache_tag) return tags
Add an additional *global* tag. This is so we can purge all files from all projects with one single call.
_get_cache_tags
python
readthedocs/readthedocs.org
readthedocs/proxito/views/serve.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/views/serve.py
MIT
def get( self, addons_version, project, request, version=None, build=None, filename=None, url=None, ): """ Unique entry point to get the proper API response. It will evaluate the ``addons_version`` passed and decide which is the best JSON structure for that particular version. """ if addons_version.major == 1: return self._v1(project, version, build, filename, url, request) if addons_version.major == 2: return self._v2(project, version, build, filename, url, request)
Unique entry point to get the proper API response. It will evaluate the ``addons_version`` passed and decide which is the best JSON structure for that particular version.
get
python
readthedocs/readthedocs.org
readthedocs/proxito/views/hosting.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/views/hosting.py
MIT
def _get_versions(self, request, project): """ Get all active for a project that the user has access to. This includes versions matching the following conditions: - The user has access to it - They are built - They are active - They are not hidden """ return Version.internal.public( project=project, user=request.user, only_active=True, only_built=True, include_hidden=False, )
Get all active for a project that the user has access to. This includes versions matching the following conditions: - The user has access to it - They are built - They are active - They are not hidden
_get_versions
python
readthedocs/readthedocs.org
readthedocs/proxito/views/hosting.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/views/hosting.py
MIT
def _has_permission(self, request, version): """ Check if user from the request is authorized to access `version`. This is mainly to be overridden in .com to make use of the auth backends in the proxied API. """ return True
Check if user from the request is authorized to access `version`. This is mainly to be overridden in .com to make use of the auth backends in the proxied API.
_has_permission
python
readthedocs/readthedocs.org
readthedocs/proxito/views/hosting.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/views/hosting.py
MIT
def _v1(self, project, version, build, filename, url, request):
    """
    Initial JSON data structure consumed by the JavaScript client.

    This response is definitely in *alpha* state currently and shouldn't be
    used for anyone to customize their documentation or the integration
    with the Read the Docs JavaScript client. It's under active development
    and anything can change without notice.

    It tries to follow some similarity with the APIv3 for already-known resources
    (Project, Version, Build, etc).

    :param project: Project being served.
    :param version: Version being served, or ``None`` when unknown.
    :param build: Build for the version, or ``None``.
    :param filename: Filename being served (used for the resolver and DocDiff),
        or ``None``.
    :param url: Full URL the request was made from (required for DocDiff),
        or ``None``.
    :param request: Current request; its user determines version/translation
        visibility.
    :returns: dict with the whole v1 addons payload.
    """
    resolver = Resolver()
    # NOTE(review): these two ``.none()`` assignments are immediately
    # overwritten below; they only ensure the names are always bound.
    versions_active_built_not_hidden = Version.objects.none()
    sorted_versions_active_built_not_hidden = Version.objects.none()

    user = request.user

    # Versions the requesting user can see: active, built and not hidden.
    versions_active_built_not_hidden = (
        self._get_versions(request, project).select_related("project").order_by("-slug")
    )
    sorted_versions_active_built_not_hidden = versions_active_built_not_hidden
    if not project.supports_multiple_versions:
        # Return only one version when the project doesn't support multiple versions.
        # That version is the only one the project serves.
        sorted_versions_active_built_not_hidden = (
            sorted_versions_active_built_not_hidden.filter(slug=project.get_default_version())
        )
    else:
        # Apply the project's configured flyout sorting strategy.
        if project.addons.flyout_sorting == ADDONS_FLYOUT_SORTING_SEMVER_READTHEDOCS_COMPATIBLE:
            sorted_versions_active_built_not_hidden = sorted(
                versions_active_built_not_hidden,
                key=lambda version: comparable_version(
                    version.verbose_name,
                    repo_type=project.repo_type,
                ),
                reverse=True,
            )
        elif project.addons.flyout_sorting == ADDONS_FLYOUT_SORTING_PYTHON_PACKAGING:
            sorted_versions_active_built_not_hidden = sort_versions_python_packaging(
                versions_active_built_not_hidden,
                project.addons.flyout_sorting_latest_stable_at_beginning,
            )
        elif project.addons.flyout_sorting == ADDONS_FLYOUT_SORTING_CALVER:
            sorted_versions_active_built_not_hidden = sort_versions_calver(
                versions_active_built_not_hidden,
                project.addons.flyout_sorting_latest_stable_at_beginning,
            )
        elif project.addons.flyout_sorting == ADDONS_FLYOUT_SORTING_CUSTOM_PATTERN:
            sorted_versions_active_built_not_hidden = sort_versions_custom_pattern(
                versions_active_built_not_hidden,
                project.addons.flyout_sorting_custom_pattern,
                project.addons.flyout_sorting_latest_stable_at_beginning,
            )

    main_project = project.main_language_project or project
    # Exclude the current project since we don't want to return itself as a translation
    project_translations = (
        Project.objects.public(user=user)
        .filter(pk__in=main_project.translations.all())
        .exclude(slug=project.slug)
    )
    # Include main project as translation if the current project is one of the translations
    if project != main_project:
        project_translations |= Project.objects.public(user=user).filter(slug=main_project.slug)

    project_translations = project_translations.order_by("language").select_related(
        "main_language_project"
    )

    # Base payload; the ``addons`` key is extended further below
    # (filetreediff, doc_diff, ethicalads) depending on the request.
    data = {
        "api_version": "1",
        "projects": {
            "current": ProjectSerializerNoLinks(
                project,
                resolver=resolver,
                version_slug=version.slug if version else None,
            ).data,
            "translations": ProjectSerializerNoLinks(
                project_translations,
                resolver=resolver,
                version_slug=version.slug if version else None,
                many=True,
            ).data,
        },
        "versions": {
            "current": VersionSerializerNoLinks(
                version,
                resolver=resolver,
            ).data
            if version
            else None,
            # These are "sorted active, built, not hidden versions"
            "active": VersionSerializerNoLinks(
                sorted_versions_active_built_not_hidden,
                resolver=resolver,
                many=True,
            ).data,
        },
        "builds": {
            "current": BuildSerializerNoLinks(build).data if build else None,
        },
        # TODO: consider creating one serializer per field here.
        # The resulting JSON will be the same, but maybe it's easier/cleaner?
        "domains": {
            "dashboard": settings.PRODUCTION_DOMAIN,
        },
        "readthedocs": {
            "analytics": {
                "code": settings.GLOBAL_ANALYTICS_CODE,
            },
            "resolver": {
                "filename": filename,
            },
        },
        # TODO: the ``features`` is not polished and we expect to change drastically.
        # Mainly, all the fields including a Project, Version or Build will use the exact same
        # serializer than the keys ``project``, ``version`` and ``build`` from the top level.
        "addons": {
            "options": {
                "load_when_embedded": project.addons.options_load_when_embedded,
                "root_selector": project.addons.options_root_selector,
            },
            "analytics": {
                "enabled": project.addons.analytics_enabled,
                # TODO: consider adding this field into the ProjectSerializer itself.
                # NOTE: it seems we are removing this feature,
                # so we may not need the ``code`` attribute here
                # https://github.com/readthedocs/readthedocs.org/issues/9530
                "code": project.analytics_code,
            },
            "notifications": {
                "enabled": project.addons.notifications_enabled,
                "show_on_latest": project.addons.notifications_show_on_latest,
                "show_on_non_stable": project.addons.notifications_show_on_non_stable,
                "show_on_external": project.addons.notifications_show_on_external,
            },
            "flyout": {
                "enabled": project.addons.flyout_enabled,
                # TODO: find a way to get this data in a reliably way.
                # We don't have a simple way to map a URL to a file in the repository.
                # This feature may be deprecated/removed in this implementation since it relies
                # on data injected at build time and sent as `docroot=`, `source_suffix=` and `page=`.
                # Example URL:
                #    /_/api/v2/footer_html/?project=weblate&version=latest&page=index&theme=furo&docroot=/docs/&source_suffix=.rst
                # Data injected at:
                #  https://github.com/rtfd/readthedocs-sphinx-ext/blob/7c60d1646c12ac0b83d61abfbdd5bcd77d324124/readthedocs_ext/_templates/readthedocs-insert.html.tmpl#L23
                #
                # "vcs": {
                #     "url": "https://github.com",
                #     "username": "readthedocs",
                #     "repository": "test-builds",
                #     "branch": version.identifier if version else None,
                #     "filepath": "/docs/index.rst",
                # },
                "position": project.addons.flyout_position,
            },
            "customscript": {
                "enabled": project.addons.customscript_enabled,
                "src": project.addons.customscript_src,
            },
            "search": {
                "enabled": project.addons.search_enabled,
                # TODO: figure it out where this data comes from.
                #
                # Originally, this was thought to be customizable by the user
                # adding these filters from the Admin UI.
                #
                # I'm removing this feature for now until we implement it correctly.
                "filters": [
                    # NOTE: this is an example of the structure of the this object.
                    # It contains the name of the filter and the search syntax to prepend
                    # to the user's query.
                    # It uses "Search query sintax":
                    # https://docs.readthedocs.io/en/stable/server-side-search/syntax.html
                    # [
                    #     "Include subprojects",
                    #     f"subprojects:{project.slug}/{version.slug}",
                    # ],
                ],
                "default_filter": f"project:{project.slug}/{version.slug}" if version else None,
            },
            "linkpreviews": {
                "enabled": project.addons.linkpreviews_enabled,
            },
            "hotkeys": {
                "enabled": project.addons.hotkeys_enabled,
                "doc_diff": {
                    "enabled": True,
                    "trigger": "KeyD",  # Could be something like "Ctrl + D"
                },
                "search": {
                    "enabled": True,
                    "trigger": "Slash",  # Could be something like "Ctrl + D"
                },
            },
            "filetreediff": {
                "enabled": project.addons.filetreediff_enabled,
            },
        },
    }

    if version:
        response = self._get_filetreediff_response(
            request=request,
            project=project,
            version=version,
            resolver=resolver,
        )
        if response:
            data["addons"]["filetreediff"].update(response)

        # Show the subprojects filter on the parent project and subproject
        # TODO: Remove these queries and try to find a way to get this data
        # from the resolver, which has already done these queries.
        # TODO: Replace this fixed filters with the work proposed in
        # https://github.com/readthedocs/addons/issues/22
        if project.subprojects.exists():
            data["addons"]["search"]["filters"].append(
                [
                    "Include subprojects",
                    f"subprojects:{project.slug}/{version.slug}",
                ]
            )
        elif project.superprojects.exists():
            superproject = project.superprojects.first().parent
            data["addons"]["search"]["filters"].append(
                [
                    "Include subprojects",
                    f"subprojects:{superproject.slug}/{version.slug}",
                ]
            )

    # DocDiff depends on `url=` GET attribute.
    # This attribute allows us to know the exact filename where the request was made.
    # If we don't know the filename, we cannot return the data required by DocDiff to work.
    # In that case, we just don't include the `doc_diff` object in the response.
    if url:
        base_version_slug = (
            project.addons.options_base_version.slug
            if project.addons.options_base_version
            else LATEST
        )
        data["addons"].update(
            {
                "doc_diff": {
                    "enabled": project.addons.doc_diff_enabled,
                    # "http://test-builds-local.devthedocs.org/en/latest/index.html"
                    "base_url": resolver.resolve(
                        project=project,
                        version_slug=base_version_slug,
                        language=project.language,
                        filename=filename,
                    )
                    if filename
                    else None,
                    "inject_styles": True,
                },
            }
        )

    # Update this data with ethicalads
    if "readthedocsext.donate" in settings.INSTALLED_APPS:
        from readthedocsext.donate.utils import (  # noqa
            get_campaign_types,
            get_project_keywords,
            get_publisher,
            is_ad_free_project,
            is_ad_free_user,
        )

        data["addons"].update(
            {
                "ethicalads": {
                    "enabled": project.addons.ethicalads_enabled,
                    # NOTE: this endpoint is not authenticated, the user checks are done over an annonymous user for now
                    #
                    # NOTE: it requires ``settings.USE_PROMOS=True`` to return ``ad_free=false`` here
                    "ad_free": is_ad_free_user(AnonymousUser()) or is_ad_free_project(project),
                    "campaign_types": get_campaign_types(AnonymousUser(), project),
                    "keywords": get_project_keywords(project),
                    "publisher": get_publisher(project),
                },
            }
        )

    return data
Initial JSON data structure consumed by the JavaScript client. This response is definitely in *alpha* state currently and shouldn't be used for anyone to customize their documentation or the integration with the Read the Docs JavaScript client. It's under active development and anything can change without notice. It tries to follow some similarity with the APIv3 for already-known resources (Project, Version, Build, etc).
_v1
python
readthedocs/readthedocs.org
readthedocs/proxito/views/hosting.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/views/hosting.py
MIT
def _get_filetreediff_response(self, *, request, project, version, resolver): """ Get the file tree diff response for the given version. This response is only enabled for external versions, we do the comparison between the current version and the latest version. """ if not version.is_external and not settings.RTD_FILETREEDIFF_ALL: return None if not project.addons.filetreediff_enabled: return None base_version = project.addons.options_base_version or project.get_latest_version() if not base_version or not self._has_permission(request=request, version=base_version): return None diff = get_diff(version_a=version, version_b=base_version) if not diff: return None def _filter_diff_files(files): # Filter out all the files that match the ignored patterns ignore_patterns = project.addons.filetreediff_ignored_files or [] files = [ filename for filename in files if not any( fnmatch.fnmatch(filename, ignore_pattern) for ignore_pattern in ignore_patterns ) ] result = [] for filename in files: result.append( { "filename": filename, "urls": { "current": resolver.resolve_version( project=project, filename=filename, version=version, ), "base": resolver.resolve_version( project=project, filename=filename, version=base_version, ), }, } ) return result return { "outdated": diff.outdated, "diff": { "added": _filter_diff_files(diff.added), "deleted": _filter_diff_files(diff.deleted), "modified": _filter_diff_files(diff.modified), }, }
Get the file tree diff response for the given version. This response is only enabled for external versions, we do the comparison between the current version and the latest version.
_get_filetreediff_response
python
readthedocs/readthedocs.org
readthedocs/proxito/views/hosting.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/views/hosting.py
MIT
def _serve_docs(self, request, project, version, filename, check_if_exists=False):
    """
    Serve a documentation file.

    :param check_if_exists: If `True`, verify the file exists in storage
        before serving it, and raise an exception when it doesn't. Useful
        to make sure we are serving a file that exists in storage; the
        check costs one additional request to the storage.
    """
    storage_root = project.get_storage_path(
        type_=MEDIA_TYPE_HTML,
        version_slug=version.slug,
        include_file=False,
        # Force to always read from the internal or external storage,
        # according to the current request.
        version_type=self.version_type,
    )

    # Our backend storage doesn't support directory indexes,
    # so append index.html when appropriate.
    if not filename or filename.endswith("/"):
        filename += "index.html"

    # A filename starting with `/` would make the join fail,
    # so strip it before joining.
    try:
        storage_path = build_media_storage.join(storage_root, filename.lstrip("/"))
    except ValueError:
        # We expect this exception from the django storages safe_join
        # function, when the filename resolves to a higher relative path.
        # The request is malicious or malformed in this case.
        raise BadRequest("Invalid URL")

    if check_if_exists and not build_media_storage.exists(storage_path):
        raise StorageFileNotFound

    self._track_pageview(
        project=project,
        path=filename,
        request=request,
        download=False,
    )

    return self._serve_file(
        request=request,
        storage_path=storage_path,
        storage_backend=build_media_storage,
    )
Serve a documentation file. :param check_if_exists: If `True` we check if the file exists before trying to serve it. This will raise an exception if the file doesn't exist. Useful to make sure we are serving a file that exists in storage; checking if the file exists will make one additional request to the storage.
_serve_docs
python
readthedocs/readthedocs.org
readthedocs/proxito/views/mixins.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/views/mixins.py
MIT
def _serve_dowload(self, request, project, version, type_):
    """
    Serve downloadable content for the given version.

    The HTTP header ``Content-Disposition`` is added with the proper
    filename (e.g. "pip-pypa-io-en-latest.pdf" or
    "pip-pypi-io-en-v2.0.pdf" or
    "docs-celeryproject-org-kombu-en-stable.pdf").

    :param type_: Media type of the artifact to serve (e.g. PDF/ePub/zip).
    """
    storage_path = project.get_storage_path(
        type_=type_,
        version_slug=version.slug,
        # Force to always read from the internal or external storage,
        # according to the current request.
        version_type=self.version_type,
        include_file=True,
    )
    self._track_pageview(
        project=project,
        path=storage_path,
        request=request,
        download=True,
    )

    response = self._serve_file(
        request=request,
        storage_path=storage_path,
        storage_backend=build_media_storage,
    )

    # Set the filename of the download.
    filename_ext = storage_path.rsplit(".", 1)[-1]
    domain = unicode_slugify(project.subdomain().replace(".", "-"))
    if project.is_subproject:
        filename = f"{domain}-{project.alias}-{project.language}-{version.slug}.{filename_ext}"
    else:
        filename = f"{domain}-{project.language}-{version.slug}.{filename_ext}"
    # BUG FIX: the header previously contained the literal text
    # "(unknown)" instead of the computed ``filename``, leaving all the
    # filename-building code above dead. Use the computed filename so
    # downloads get a meaningful name.
    response["Content-Disposition"] = f"filename={filename}"
    return response
Serve downloadable content for the given version. The HTTP header ``Content-Disposition`` is added with the proper filename (e.g. "pip-pypa-io-en-latest.pdf" or "pip-pypi-io-en-v2.0.pdf" or "docs-celeryproject-org-kombu-en-stable.pdf").
_serve_dowload
python
readthedocs/readthedocs.org
readthedocs/proxito/views/mixins.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/views/mixins.py
MIT
def _serve_file(self, request, storage_path, storage_backend):
    """
    Serve a file from storage.

    When ``PYTHON_MEDIA`` is enabled the file is served from the
    filesystem through Django — we definitely shouldn't do this in
    production, but I don't want to force a check for ``DEBUG``.
    Otherwise the file is handed off to NGINX.

    :param storage_path: Path to file to serve.
    :param storage_backend: Storage backend class from where to serve the file.
    """
    url = self._get_storage_url(
        request=request,
        storage_path=storage_path,
        storage_backend=storage_backend,
    )
    if settings.PYTHON_MEDIA:
        # Development-only path.
        return self._serve_file_from_python(request, url, storage_backend)
    return self._serve_file_from_nginx(
        url,
        root_path=storage_backend.internal_redirect_root_path,
    )
Serve a file from storage. Serve from the filesystem if using ``PYTHON_MEDIA``. We definitely shouldn't do this in production, but I don't want to force a check for ``DEBUG``. :param storage_path: Path to file to serve. :param storage_backend: Storage backend class from where to serve the file.
_serve_file
python
readthedocs/readthedocs.org
readthedocs/proxito/views/mixins.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/views/mixins.py
MIT
def _get_storage_url(self, request, storage_path, storage_backend): """ Get the full storage URL from a storage path. The URL will be without scheme and domain, this is to perform an NGINX internal redirect. Authorization query arguments will stay in place (useful for private buckets). """ # We are catching a broader exception, # since depending on the storage backend, # an invalid path may raise a different exception. try: # NOTE: calling ``.url`` will remove any double slashes. # e.g: '/foo//bar///' -> '/foo/bar/'. storage_url = storage_backend.url(storage_path, http_method=request.method) except Exception as e: log.info("Invalid storage path.", path=storage_path, exc_info=e) raise InvalidPathError parsed_url = urlparse(storage_url)._replace(scheme="", netloc="") return parsed_url.geturl()
Get the full storage URL from a storage path. The URL will be without scheme and domain, this is to perform an NGINX internal redirect. Authorization query arguments will stay in place (useful for private buckets).
_get_storage_url
python
readthedocs/readthedocs.org
readthedocs/proxito/views/mixins.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/views/mixins.py
MIT
def _track_pageview(self, project, path, request, download): """Create an audit log of the page view if audit is enabled.""" # Remove any query args (like the token access from AWS). path_only = urlparse(path).path track_file = path_only.endswith((".html", ".pdf", ".epub", ".zip")) if track_file and self._is_audit_enabled(project): action = AuditLog.DOWNLOAD if download else AuditLog.PAGEVIEW AuditLog.objects.new( action=action, user=request.user, request=request, project=project, )
Create an audit log of the page view if audit is enabled.
_track_pageview
python
readthedocs/readthedocs.org
readthedocs/proxito/views/mixins.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/views/mixins.py
MIT
def _is_audit_enabled(self, project):
    """
    Whether ``project`` has auditing of individual page views enabled.

    This feature is different from page views analytics: it records every
    page view individually, with more metadata like the user, IP, etc.
    """
    feature = get_feature(project, feature_type=TYPE_AUDIT_PAGEVIEWS)
    return bool(feature)
Check if the project has the audit feature enabled to track individual page views. This feature is different from page views analytics, as it records every page view individually with more metadata like the user, IP, etc.
_is_audit_enabled
python
readthedocs/readthedocs.org
readthedocs/proxito/views/mixins.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/views/mixins.py
MIT
def _serve_file_from_nginx(self, path, root_path):
    """
    Hand a file off to NGINX via an ``X-Accel-Redirect`` internal redirect.

    NGINX serves the file directly when it sees the header.

    :param path: The path of the file to serve.
    :param root_path: The root path of the internal redirect.
    """
    internal_path = f"/{root_path}/" + path.lstrip("/")
    log.debug(
        "Nginx serve.",
        original_path=path,
        internal_path=internal_path,
    )

    guessed_type, guessed_encoding = mimetypes.guess_type(internal_path)
    response = HttpResponse(
        f"Serving internal path: {internal_path}",
        content_type=guessed_type or "application/octet-stream",
    )
    if guessed_encoding:
        response["Content-Encoding"] = guessed_encoding

    # NGINX does not support non-ASCII characters in the header, so we
    # convert the IRI path to URI so it's compatible with what NGINX expects
    # as the header value.
    # https://github.com/benoitc/gunicorn/issues/1448
    # https://docs.djangoproject.com/en/1.11/ref/unicode/#uri-and-iri-handling
    response["X-Accel-Redirect"] = iri_to_uri(internal_path)

    # Needed to strip any GET args, etc.
    response.proxito_path = urlparse(internal_path).path
    return response
Serve a file from nginx. Returns a response with ``X-Accel-Redirect``, which will cause nginx to serve it directly as an internal redirect. :param path: The path of the file to serve. :param root_path: The root path of the internal redirect.
_serve_file_from_nginx
python
readthedocs/readthedocs.org
readthedocs/proxito/views/mixins.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/views/mixins.py
MIT
def _serve_file_from_python(self, request, path, storage):
    """
    Serve a file directly from Python/Django.

    .. warning:: Don't use this in production!
    """
    log.debug("Django serve.", path=path)
    return serve(request, path, storage.path(""))
Serve a file from Python. .. warning:: Don't use this in production!
_serve_file_from_python
python
readthedocs/readthedocs.org
readthedocs/proxito/views/mixins.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/views/mixins.py
MIT
def system_redirect(
    self, request, final_project, version_slug, filename, is_external_version=False
):
    """
    Redirect to a URL defined by Read the Docs itself (not by the user).

    This is normally used for `/` and `/page/*` redirects.

    :param request: Request object.
    :param final_project: The current project being served.
    :param version_slug: The current version slug being served.
    :param filename: The filename being served.
    :param is_external_version: If the version is from a pull request preview.
    """
    query = urlparse(request.get_full_path()).query
    to = Resolver().resolve(
        project=final_project,
        version_slug=version_slug,
        filename=filename,
        query_params=query,
        external=is_external_version,
    )
    log.debug("System Redirect.", host=request.get_host(), from_url=filename, to_url=to)

    target = urlparse(to)
    current = urlparse(request.build_absolute_uri())
    # Compare only hostname and path explicitly: a different protocol or
    # query parameters alone could lead to an infinite redirect otherwise.
    if target.hostname == current.hostname and target.path == current.path:
        log.debug(
            "Infinite Redirect: FROM URL is the same than TO URL.",
            url=to,
        )
        raise InfiniteRedirectException()

    # All system redirects can be cached, since the final URL will check for authz.
    self.cache_response = True
    response = HttpResponseRedirect(to)
    response["X-RTD-Redirect"] = RedirectType.system.name
    return response
Return a redirect that is defined by RTD instead of the user. This is normally used for `/` and `/page/*` redirects. :param request: Request object. :param final_project: The current project being served. :param version_slug: The current version slug being served. :param filename: The filename being served. :param external: If the version is from a pull request preview.
system_redirect
python
readthedocs/readthedocs.org
readthedocs/proxito/views/mixins.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/views/mixins.py
MIT
def get_redirect_response(
    self,
    request,
    project,
    language,
    version_slug,
    filename,
    path,
    forced_only=False,
):
    """
    Check for a redirect for this project that matches the current path, and return a response if found.

    :param forced_only: Only consider redirects marked as forced.
    :returns: redirect response with the correct path
    :rtype: HttpResponseRedirect or HttpResponsePermanentRedirect
    """
    # Find a user-defined redirect matching the current request.
    redirect, redirect_path = project.redirects.get_matching_redirect_with_path(
        language=language,
        version_slug=version_slug,
        filename=filename,
        path=path,
        forced_only=forced_only,
    )
    if not redirect or not redirect_path:
        return None

    # `path` doesn't include query params.
    query_list = parse_qsl(
        urlparse(request.get_full_path()).query,
        keep_blank_values=True,
    )
    current_url_parsed = urlparse(request.build_absolute_uri())
    current_url = current_url_parsed.geturl()

    if redirect.redirects_to_external_domain:
        # If the redirect is to an external domain, we use it as is.
        new_url_parsed = urlparse(redirect_path)
        # TODO: Maybe exclude some query params from the redirect,
        # like `ticket` (used by our CAS client) if it's to an external domain.
        # We are logging a warning for now.
        has_ticket_param = any(param == "ticket" for param, _ in query_list)
        if has_ticket_param:
            log.warning(
                "Redirecting to an external domain with a ticket param.",
                from_url=current_url,
                to_url=new_url_parsed.geturl(),
                forced_only=forced_only,
            )
    else:
        parsed_redirect_path = urlparse(redirect_path)
        query = parsed_redirect_path.query
        fragment = parsed_redirect_path.fragment
        # We use geturl() to get path, since the original path may begin with a protocol,
        # but we still want to keep that as part of the path.
        redirect_path = parsed_redirect_path._replace(
            fragment="",
            query="",
        ).geturl()
        # SECURITY: If the redirect doesn't explicitly redirect to an external domain,
        # we force the final redirect to be to the same domain as the current request
        # to avoid open redirects vulnerabilities.
        new_url_parsed = current_url_parsed._replace(
            path=redirect_path, query=query, fragment=fragment
        )

    # Combine the query params from the original request with the ones from the redirect.
    query_list.extend(parse_qsl(new_url_parsed.query, keep_blank_values=True))
    query = urlencode(query_list)
    new_url_parsed = new_url_parsed._replace(query=query)
    new_url = new_url_parsed.geturl()

    log.debug(
        "Redirecting...",
        from_url=current_url,
        to_url=new_url,
        http_status_code=redirect.http_status,
        forced_only=forced_only,
    )

    # Check explicitly only the path and hostname, since a different
    # protocol or query parameters could lead to a infinite redirect.
    if (
        new_url_parsed.hostname == current_url_parsed.hostname
        and new_url_parsed.path == current_url_parsed.path
    ):
        # check that we do have a response and avoid infinite redirect
        log.debug(
            "Infinite Redirect: FROM URL is the same than TO URL.",
            from_url=current_url,
            to_url=new_url,
            forced_only=forced_only,
        )
        raise InfiniteRedirectException()

    # Honor the configured status code: 301 is permanent, anything else is a 302.
    if redirect.http_status == 301:
        resp = HttpResponsePermanentRedirect(new_url)
    else:
        resp = HttpResponseRedirect(new_url)

    # Add a user-visible header to make debugging easier
    resp["X-RTD-Redirect"] = RedirectType.user.name
    return resp
Check for a redirect for this project that matches the current path, and return a response if found. :returns: redirect response with the correct path :rtype: HttpResponseRedirect or HttpResponsePermanentRedirect
get_redirect_response
python
readthedocs/readthedocs.org
readthedocs/proxito/views/mixins.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/proxito/views/mixins.py
MIT
def __init__(
    self,
    query=None,
    filters=None,
    projects=None,
    aggregate_results=True,
    use_advanced_query=True,
    **kwargs,
):
    """
    Custom wrapper around FacetedSearch.

    :param string query: Query to search for
    :param dict filters: Filters to be used with the query.
    :param projects: A dictionary of project slugs mapped to a `VersionData`
        object, or a list of project slugs. Results are filtered with these
        values.
    :param bool aggregate_results: If results should be aggregated,
        this is returning the number of results within other facets.
    :param bool use_advanced_query: If `True`, always use `SimpleQueryString`
        for the text query object. Set to `False` to use the experimental
        fuzzy search.
    """
    self.use_advanced_query = use_advanced_query
    self.aggregate_results = aggregate_results
    self.projects = projects or {}

    # Hack a fix to our broken connection pooling
    # This creates a new connection on every request,
    # but actually works :)
    log.debug("Hacking Elastic to fix search connection pooling")
    self.using = Elasticsearch(**settings.ELASTICSEARCH_DSL["default"])

    # We may receive filter keys we don't have facets for; keep only valid ones.
    valid_filters = {
        facet: value for facet, value in (filters or {}).items() if facet in self.facets
    }
    super().__init__(query=query, filters=valid_filters, **kwargs)
Custom wrapper around FacetedSearch. :param string query: Query to search for :param dict filters: Filters to be used with the query. :param projects: A dictionary of project slugs mapped to a `VersionData` object. Or a list of project slugs. Results are filter with these values. :param use_advanced_query: If `True` forces to always use `SimpleQueryString` for the text query object. :param bool aggregate_results: If results should be aggregated, this is returning the number of results within other facets. :param bool use_advanced_query: Always use SimpleQueryString. Set this to `False` to use the experimental fuzzy search.
__init__
python
readthedocs/readthedocs.org
readthedocs/search/faceted_search.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/faceted_search.py
MIT
def _get_queries(self, *, query, fields): """ Get a list of query objects according to the query. If the query is a single term we try to match partial words and substrings (available only with the DEFAULT_TO_FUZZY_SEARCH feature flag), otherwise we use the SimpleQueryString query. """ get_queries_function = ( self._get_single_term_queries if self._is_single_term(query) else self._get_text_queries ) return get_queries_function( query=query, fields=fields, )
Get a list of query objects according to the query. If the query is a single term we try to match partial words and substrings (available only with the DEFAULT_TO_FUZZY_SEARCH feature flag), otherwise we use the SimpleQueryString query.
_get_queries
python
readthedocs/readthedocs.org
readthedocs/search/faceted_search.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/faceted_search.py
MIT
def _get_text_queries(self, *, query, fields): """ Returns a list of query objects according to the query. SimpleQueryString provides a syntax to let advanced users manipulate the results explicitly. We need to search for both "and" and "or" operators. The score of "and" should be higher as it satisfies both "or" and "and". For valid options, see: - https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-simple-query-string-query.html # noqa """ queries = [] is_advanced_query = self.use_advanced_query or self._is_advanced_query(query) for operator in self.operators: if is_advanced_query: query_string = SimpleQueryString( query=query, fields=fields, default_operator=operator, ) else: query_string = self._get_fuzzy_query( query=query, fields=fields, operator=operator, ) queries.append(query_string) return queries
Returns a list of query objects according to the query. SimpleQueryString provides a syntax to let advanced users manipulate the results explicitly. We need to search for both "and" and "or" operators. The score of "and" should be higher as it satisfies both "or" and "and". For valid options, see: - https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-simple-query-string-query.html # noqa
_get_text_queries
python
readthedocs/readthedocs.org
readthedocs/search/faceted_search.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/faceted_search.py
MIT
def _get_single_term_queries(self, query, fields):
    """
    Return query objects for fuzzy and partial (substring) results.

    We need to search for both "and" and "or" operators.
    The score of "and" should be higher as it satisfies both "or" and "and".

    We use the Wildcard query with the query suffixed by ``*`` to match substrings.

    For valid options, see:

    - https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-wildcard-query.html  # noqa

    .. note::

       Doing a prefix **and** suffix search is slow on big indexes like ours.
    """
    queries = [self._get_fuzzy_query(query=query, fields=fields)]
    for field in fields:
        # Strip the boost suffix (e.g. "title^2" -> "title"); wildcard
        # queries don't accept boosted field names.
        plain_field = re.sub(r"\^.*$", "", field)
        queries.append(Wildcard(**{plain_field: {"value": f"{query}*"}}))
    return queries
Returns a list of query objects for fuzzy and partial results. We need to search for both "and" and "or" operators. The score of "and" should be higher as it satisfies both "or" and "and". We use the Wildcard query with the query suffixed by ``*`` to match substrings. For valid options, see: - https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-wildcard-query.html # noqa .. note:: Doing a prefix **and** suffix search is slow on big indexes like ours.
_get_single_term_queries
python
readthedocs/readthedocs.org
readthedocs/search/faceted_search.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/faceted_search.py
MIT
def _get_fuzzy_query(self, *, query, fields, operator="or"):
    """
    Return a MultiMatch query object used for fuzzy results.

    For valid options, see:

    - https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-match-query.html
    """
    options = {
        "query": query,
        "fields": fields,
        "operator": operator,
        # AUTO:4,6 disables fuzziness for very short terms.
        "fuzziness": "AUTO:4,6",
        # The first character must match exactly, keeping the query cheap.
        "prefix_length": 1,
    }
    return MultiMatch(**options)
Returns a query object used for fuzzy results. For valid options, see: - https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-match-query.html
_get_fuzzy_query
python
readthedocs/readthedocs.org
readthedocs/search/faceted_search.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/faceted_search.py
MIT
def _is_single_term(self, query): """ Check if the query is a single term. A query is a single term if it is a single word, if it doesn't contain the syntax from a simple query string, and if `self.use_advanced_query` is False. """ is_single_term = ( not self.use_advanced_query and query and len(query.split()) <= 1 and not self._is_advanced_query(query) ) return is_single_term
Check if the query is a single term. A query is a single term if it is a single word, if it doesn't contain the syntax from a simple query string, and if `self.use_advanced_query` is False.
_is_single_term
python
readthedocs/readthedocs.org
readthedocs/search/faceted_search.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/faceted_search.py
MIT
def _is_advanced_query(self, query): """ Check if query looks like to be using the syntax from a simple query string. .. note:: We don't check if the syntax is valid. The tokens used aren't very common in a normal query, so checking if the query contains any of them should be enough to determinate if it's an advanced query. Simple query syntax: https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-simple-query-string-query.html#simple-query-string-syntax """ tokens = {"+", "|", "-", '"', "*", "(", ")", "~"} query_tokens = set(query) return not tokens.isdisjoint(query_tokens)
Check if the query looks like it is using the syntax from a simple query string. .. note:: We don't check if the syntax is valid. The tokens used aren't very common in a normal query, so checking if the query contains any of them should be enough to determine if it's an advanced query. Simple query syntax: https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-simple-query-string-query.html#simple-query-string-syntax
_is_advanced_query
python
readthedocs/readthedocs.org
readthedocs/search/faceted_search.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/faceted_search.py
MIT
def aggregate(self, search):
    """Run aggregations only when ``self.aggregate_results`` is set."""
    if not self.aggregate_results:
        return
    super().aggregate(search)
Overridden to decide if we should aggregate or not.
aggregate
python
readthedocs/readthedocs.org
readthedocs/search/faceted_search.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/faceted_search.py
MIT
def query(self, search, query):
    """
    Customize search results to support extra functionality.

    If `self.projects` was given, we use it to filter the documents.
    Only filtering by a list of slugs is supported.

    Also:

    * Adds SimpleQueryString with `self.operators` instead of default query.
    * Adds HTML encoding of results to avoid XSS issues.
    """
    search = search.highlight_options(**self._highlight_options).source(
        excludes=self.excludes
    )

    # "should" makes the bool query match when any of the sub-queries match.
    bool_query = Bool(should=self._get_queries(query=query, fields=self.fields))

    # Filter by project slugs.
    if self.projects:
        if not isinstance(self.projects, list):
            raise ValueError("projects must be a list!")
        bool_query = Bool(
            must=[bool_query, Bool(filter=Terms(slug=self.projects))],
        )

    return search.query(bool_query)
Customize search results to support extra functionality. If `self.projects` was given, we use it to filter the documents. Only filtering by a list of slugs is supported. Also: * Adds SimpleQueryString with `self.operators` instead of default query. * Adds HTML encoding of results to avoid XSS issues.
query
python
readthedocs/readthedocs.org
readthedocs/search/faceted_search.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/faceted_search.py
MIT
def _get_projects_query(self):
    """
    Build the query that filters by project.

    A dict filters by project *and* version pairs;
    a list filters by project slug only.
    """
    if not self.projects:
        return None

    if isinstance(self.projects, dict):
        # Match documents belonging to any of the (project, version) pairs.
        pair_queries = [
            Bool(filter=[Term(project=project), Term(version=version)])
            for project, version in self.projects.items()
        ]
        return Bool(should=pair_queries)

    if isinstance(self.projects, list):
        return Bool(filter=Terms(project=self.projects))

    raise ValueError("projects must be a list or a dict!")
Get filter by projects query. If it's a dict, filter by project and version, if it's a list filter by project.
_get_projects_query
python
readthedocs/readthedocs.org
readthedocs/search/faceted_search.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/faceted_search.py
MIT
def query(self, search, query):
    """
    Manipulates the query to support nested queries and a custom rank for pages.

    If `self.projects` was given, we use it to filter the documents that
    match the same project and version.
    """
    search = search.highlight_options(**self._highlight_options)
    search = search.source(excludes=self.excludes)

    all_queries = self._get_queries(query=query, fields=self.fields)
    # Sections live in a nested field and need their own sub-query.
    all_queries.append(
        self._get_nested_query(
            query=query,
            path="sections",
            fields=self._section_fields,
        )
    )

    combined_query = Bool(should=all_queries)
    projects_query = self._get_projects_query()
    if projects_query:
        combined_query = Bool(must=[combined_query, projects_query])

    # Weight the final score by the page rank stored in each document.
    return search.query(
        FunctionScore(
            query=combined_query,
            script_score=self._get_script_score(),
        )
    )
Manipulates the query to support nested queries and a custom rank for pages. If `self.projects` was given, we use it to filter the documents that match the same project and version.
query
python
readthedocs/readthedocs.org
readthedocs/search/faceted_search.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/faceted_search.py
MIT
def _get_nested_query(self, *, query, path, fields):
    """Generate a nested query with passed parameters."""
    inner_queries = self._get_queries(query=query, fields=fields)

    # Highlighting needs the field names without the boost suffix
    # (e.g. "title^2" -> "title").
    plain_fields = [re.sub(r"\^.*$", "", field) for field in fields]
    highlight = dict(
        self._highlight_options,
        fields={field: {} for field in plain_fields},
    )

    return Nested(
        path=path,
        inner_hits={"highlight": highlight},
        query=Bool(should=inner_queries),
    )
Generate a nested query with passed parameters.
_get_nested_query
python
readthedocs/readthedocs.org
readthedocs/search/faceted_search.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/faceted_search.py
MIT
def _get_script_score(self): """ Gets an ES script to map the page rank to a valid score weight. ES expects the rank to be a number greater than 0, but users can set this between [-10, +10]. We map that range to [0.01, 2] (21 possible values). The first lower rank (0.8) needs to bring the score from the highest boost (sections.title^2) close to the lowest boost (title^1.5), that way exact results take priority: - 2.0 * 0.8 = 1.6 (score close to 1.5, but not lower than it) - 1.5 * 0.8 = 1.2 (score lower than 1.5) The first higher rank (1.2) needs to bring the score from the lowest boost (title^1.5) close to the highest boost (sections.title^2), that way exact results take priority: - 2.0 * 1.3 = 2.6 (score higher thank 2.0) - 1.5 * 1.3 = 1.95 (score close to 2.0, but not higher than it) The next lower and higher ranks need to decrease/increase both scores. See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-script-score-query.html#field-value-factor # noqa """ ranking = [ 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 1, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 1.93, 1.96, 2, ] # Each rank maps to a element in the ranking list. # -10 will map to the first element (-10 + 10 = 0) and so on. source = """ int rank = doc['rank'].size() == 0 ? 0 : (int) doc['rank'].value; return params.ranking[rank + 10] * _score; """ return { "script": { "source": source, "params": {"ranking": ranking}, }, }
Gets an ES script to map the page rank to a valid score weight. ES expects the rank to be a number greater than 0, but users can set this between [-10, +10]. We map that range to [0.01, 2] (21 possible values). The first lower rank (0.8) needs to bring the score from the highest boost (sections.title^2) close to the lowest boost (title^1.5), that way exact results take priority: - 2.0 * 0.8 = 1.6 (score close to 1.5, but not lower than it) - 1.5 * 0.8 = 1.2 (score lower than 1.5) The first higher rank (1.3) needs to bring the score from the lowest boost (title^1.5) close to the highest boost (sections.title^2), that way exact results take priority: - 2.0 * 1.3 = 2.6 (score higher than 2.0) - 1.5 * 1.3 = 1.95 (score close to 2.0, but not higher than it) The next lower and higher ranks need to decrease/increase both scores. See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-script-score-query.html#field-value-factor # noqa
_get_script_score
python
readthedocs/readthedocs.org
readthedocs/search/faceted_search.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/faceted_search.py
MIT
def remove_indexed_files(project_slug, version_slug=None, sync_id=None, index_name=None):
    """
    Remove files from `version_slug` of `project_slug` from the search index.

    :param project_slug: Project slug.
    :param version_slug: Version slug. If isn't given,
                         all index from `project` are deleted.
    :param sync_id: Sync id. If isn't given, all index from `version` are deleted.
    :param index_name: If given, the index name of the document is
                       temporarily replaced with it while deleting.
    """
    log.bind(
        project_slug=project_slug,
        version_slug=version_slug,
    )

    if not DEDConfig.autosync_enabled():
        log.info("Autosync disabled, skipping removal from the search index.")
        return

    # If a search index name is provided, we need to temporarily change
    # the index name of the document.
    document = PageDocument
    old_index_name = document._index._name
    if index_name:
        document._index._name = index_name

    try:
        log.info("Deleting old files from search index.")
        documents = document().search().filter("term", project=project_slug)
        if version_slug:
            documents = documents.filter("term", version=version_slug)
        if sync_id:
            documents = documents.exclude("term", build=sync_id)
        documents.delete()
    except Exception:
        # Best effort: leftover documents are preferable to a failed sync.
        log.exception("Unable to delete a subset of files. Continuing.")
    finally:
        # Restore the old index name even if an unexpected error occurred.
        if index_name:
            document._index._name = old_index_name
Remove files from `version_slug` of `project_slug` from the search index. :param project_slug: Project slug. :param version_slug: Version slug. If isn't given, all index from `project` are deleted. :param sync_id: Sync id. If isn't given, all index from `version` are deleted. :param index_name: If given, the index name of the document is temporarily replaced with it.
remove_indexed_files
python
readthedocs/readthedocs.org
readthedocs/search/utils.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/utils.py
MIT
def _get_index(indices, index_name): """ Get Index from all the indices. :param indices: DED indices list :param index_name: Name of the index :return: DED Index """ for index in indices: if index._name == index_name: return index
Get Index from all the indices. :param indices: DED indices list :param index_name: Name of the index :return: DED Index
_get_index
python
readthedocs/readthedocs.org
readthedocs/search/utils.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/utils.py
MIT
def _get_document(model, document_class):
    """
    Get DED document class object from the model and name of document class.

    :param model: The model class to find the document
    :param document_class: the name of the document class.
    :return: DED DocType object, or None when no document matches
    """
    candidates = registry.get_documents(models=[model])
    matches = (doc for doc in candidates if str(doc) == document_class)
    return next(matches, None)
Get DED document class object from the model and name of document class. :param model: The model class to find the document :param document_class: the name of the document class. :return: DED DocType object
_get_document
python
readthedocs/readthedocs.org
readthedocs/search/utils.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/utils.py
MIT
def _last_30_days_iter():
    """Returns iterator for previous 30 days (including today)."""
    start = timezone.now().date() - timezone.timedelta(days=30)
    # range(31) yields 31 dates: the 30 previous days plus today.
    return (start + timezone.timedelta(days=offset) for offset in range(31))
Returns iterator for previous 30 days (including today).
_last_30_days_iter
python
readthedocs/readthedocs.org
readthedocs/search/utils.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/utils.py
MIT
def _get_last_30_days_str(date_format="%Y-%m-%d"):
    """Returns the list of dates in string format for previous 30 days (including today)."""
    return [
        timezone.datetime.strftime(date, date_format)
        for date in _last_30_days_iter()
    ]
Returns the list of dates in string format for previous 30 days (including today).
_get_last_30_days_str
python
readthedocs/readthedocs.org
readthedocs/search/utils.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/utils.py
MIT
def _search(self, *, user_input, projects, use_advanced_query):
    """
    Return search results and facets given a `user_input` and `projects` to filter by.

    :param user_input: parsed query object; ``user_input.query`` holds the
        query string, and each available facet may be present as an attribute.
    :param projects: projects to restrict the search to.
    :param use_advanced_query: force the simple-query-string syntax.
    :returns: a ``(results, facets)`` tuple; both empty when there is no query.
    """
    if not user_input.query:
        return [], {}

    # Collect only the facets the user actually provided a value for.
    filters = {}

    for avail_facet in self.available_facets:
        value = getattr(user_input, avail_facet, None)
        if value:
            filters[avail_facet] = value

    search = ProjectSearch(
        query=user_input.query,
        filters=filters,
        projects=projects,
        use_advanced_query=use_advanced_query,
    )

    # pep8 and blank don't agree on having a space before :.
    results = search[: self.max_search_results].execute()  # noqa
    facets = results.facets

    # Make sure the selected facets are displayed,
    # even when they return 0 results.
    for facet in facets:
        value = getattr(user_input, facet, None)
        # Each facet entry is a (name, count, selected) tuple; prepend the
        # selected value with a zero count if ES didn't return it.
        if value and value not in (name for name, *_ in facets[facet]):
            facets[facet].insert(0, (value, 0, True))

    return results, facets
Return search results and facets given a `user_input` and `projects` to filter by.
_search
python
readthedocs/readthedocs.org
readthedocs/search/views.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/views.py
MIT
def index_project_save(instance, *args, **kwargs):
    """
    Save a Project instance based on the post_save signal.post_save.

    This uses Celery to do it async, replacing how django-elasticsearch-dsl does it.
    """
    from readthedocs.search.documents import ProjectDocument

    # Do not index if autosync is disabled globally.
    if not DEDConfig.autosync_enabled():
        log.info("Skipping indexing")
        return

    index_objects_to_es.delay(
        app_label=Project._meta.app_label,
        model_name=Project.__name__,
        document_class=str(ProjectDocument),
        objects_id=[instance.id],
    )
Save a Project instance based on the post_save signal.post_save. This uses Celery to do it async, replacing how django-elasticsearch-dsl does it.
index_project_save
python
readthedocs/readthedocs.org
readthedocs/search/signals.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/signals.py
MIT
def _get_page_content(self, page):
    """Gets the page content from storage, or None if it can't be read."""
    try:
        storage_path = self.project.get_storage_path(
            type_="html",
            version_slug=self.version.slug,
            include_file=False,
            version_type=self.version.type,
        )
        file_path = self.storage.join(storage_path, page)
        with self.storage.open(file_path, mode="r") as f:
            return f.read()
    except Exception:
        # Best effort: a missing or unreadable page shouldn't abort indexing.
        log.warning(
            "Failed to get page content.",
            page=page,
        )
    return None
Gets the page content from storage.
_get_page_content
python
readthedocs/readthedocs.org
readthedocs/search/parsers.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/parsers.py
MIT
def _get_page_title(self, body, html): """ Gets the title from the html page. The title is the first section in the document, falling back to the ``title`` tag. """ first_header = body.css_first("h1") if first_header: title, _ = self._parse_section_title(first_header) return title title = html.css_first("title") if title: return self._parse_content(title.text()) return None
Gets the title from the html page. The title is the first section in the document, falling back to the ``title`` tag.
_get_page_title
python
readthedocs/readthedocs.org
readthedocs/search/parsers.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/parsers.py
MIT
def _get_main_node(self, html):
    """
    Gets the main node from where to start indexing content.

    The main node is tested in the following order:

    - A tag with the ``main`` role.
      This role is used by several static sites and themes.
    - The ``main`` tag.
    - The parent of the first ``h1`` node,
      since usually all sections are neighbors under the same parent node.
    - The body element itself if all checks above fail.
    """
    body = html.body

    # TODO: this could be done in smarter way,
    # checking for common parents between all h nodes.
    for selector in ("[role=main]", "main"):
        candidate = body.css_first(selector)
        if candidate:
            return candidate

    first_header = body.css_first("h1")
    if first_header:
        return self._get_header_container(first_header).parent

    return body
Gets the main node from where to start indexing content. The main node is tested in the following order: - Try with a tag with the ``main`` role. This role is used by several static sites and themes. - Try the ``main`` tag. - Try the first ``h1`` node and return its parent Usually all sections are neighbors, so they are children of the same parent node. - Return the body element itself if all checks above fail.
_get_main_node
python
readthedocs/readthedocs.org
readthedocs/search/parsers.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/parsers.py
MIT