Dataset schema (one record per sample):

    code        string, 26 to 870k chars
    docstring   string, 1 to 65.6k chars
    func_name   string, 1 to 194 chars
    language    string, 1 distinct value
    repo        string, 8 to 68 chars
    path        string, 5 to 194 chars
    url         string, 46 to 254 chars
    license     string, 4 distinct values
def _get_header_container(self, h_tag):
    """
    Get the *real* container of a header tag or title.

    If the parent of the ``h`` tag is a ``header`` tag,
    then we return the ``header`` tag,
    since the header tag acts as a container for the title of the section.
    Otherwise, we return the tag itself.
    """
    if h_tag.parent.tag == "header":
        return h_tag.parent
    return h_tag
Get the *real* container of a header tag or title. If the parent of the ``h`` tag is a ``header`` tag, then we return the ``header`` tag, since the header tag acts as a container for the title of the section. Otherwise, we return the tag itself.
_get_header_container
python
readthedocs/readthedocs.org
readthedocs/search/parsers.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/parsers.py
MIT
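As a rough usage sketch of `_get_header_container` (assuming the parser is built on selectolax, which the `HTMLParser`, `css`, and `decompose` calls elsewhere in this file suggest; the snippet and its names are illustrative, not part of the repository):

from selectolax.parser import HTMLParser  # assumed backend, per the API used in parsers.py

html = "<header><h1 id='intro'>Intro</h1></header><p>Body text</p>"
tree = HTMLParser(html)
h1 = tree.css_first("h1")

# Mirror of the method's logic: a <header> parent is the real container.
container = h1.parent if h1.parent.tag == "header" else h1
print(container.tag)  # header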
def _parse_content(self, content):
    """Converts all new line characters and multiple spaces to a single space."""
    content = content.strip().split()
    content = (text.strip() for text in content)
    content = " ".join(text for text in content if text)
    return content
Converts all new line characters and multiple spaces to a single space.
_parse_content
python
readthedocs/readthedocs.org
readthedocs/search/parsers.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/parsers.py
MIT
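The collapsing behavior above can be verified in isolation; a minimal standalone sketch (the function name is hypothetical) relying only on `str.split`, which already splits on any whitespace run:

def collapse_whitespace(text):
    # str.split() with no arguments splits on runs of spaces, tabs and
    # newlines, and drops empty strings, so a single join collapses all
    # whitespace to single spaces, just like _parse_content.
    return " ".join(text.split())

assert collapse_whitespace("  foo\n\nbar\t  baz ") == "foo bar baz"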
def _parse_sections(self, title, body):
    """
    Parses each section into a structured dict.

    Sub-sections are nested, so they are children of the outer section,
    and sections with the same level are neighbors.
    We index the content under a section until the next one starts.

    We can have pages that have content before the first title,
    or that don't have a title at all;
    we index that content first, under the title of the original page.
    """
    document_title = title
    indexed_nodes = []

    for dd, dt, section in self._parse_dls(body):
        indexed_nodes.append(dd)
        indexed_nodes.append(dt)
        yield section

    # Remove all seen and indexed data outside of traversal.
    # We want to avoid modifying the DOM tree while traversing it.
    for node in indexed_nodes:
        node.decompose()

    # Index content for pages that don't start with a title.
    # We check for sections up to 3 levels deep to avoid indexing
    # all the content in this step.
    try:
        content, _ = self._parse_section_content(
            body.child,
            depth=3,
        )
        if content:
            yield {
                "id": "",
                "title": document_title,
                "content": content,
            }
    except Exception as e:
        log.info("Unable to index section", section=str(e))

    # Index content from h1 to h6 headers.
    for section in [body.css(f"h{h}") for h in range(1, 7)]:
        for tag in section:
            try:
                title, _id = self._parse_section_title(tag)
                next_tag = self._get_header_container(tag).next
                content, _ = self._parse_section_content(next_tag, depth=2)
                yield {
                    "id": _id,
                    "title": title,
                    "content": content,
                }
            except Exception:
                log.info("Unable to index section.", exc_info=True)
Parses each section into a structured dict. Sub-sections are nested, so they are children of the outer section, and sections with the same level are neighbors. We index the content under a section until the next one starts. We can have pages that have content before the first title, or that don't have a title at all; we index that content first, under the title of the original page.
_parse_sections
python
readthedocs/readthedocs.org
readthedocs/search/parsers.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/parsers.py
MIT
def _parse_dt(self, tag):
    """
    Parses a definition term <dt>.

    If the <dt> does not have an id attribute,
    it cannot be referenced.
    This should be understood by the caller.
    """
    section_id = tag.attributes.get("id", "")
    return self._parse_content(tag.text()), section_id
Parses a definition term <dt>. If the <dt> does not have an id attribute, it cannot be referenced. This should be understood by the caller.
_parse_dt
python
readthedocs/readthedocs.org
readthedocs/search/parsers.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/parsers.py
MIT
def _get_sections(self, title, body):
    """Get the first `self.max_inner_documents` sections."""
    iterator = self._parse_sections(title=title, body=body)
    sections = list(itertools.islice(iterator, 0, self.max_inner_documents))
    try:
        next(iterator)
    except StopIteration:
        pass
    else:
        log.warning(
            "Limit of inner sections exceeded.",
            project_slug=self.project.slug,
            version_slug=self.version.slug,
            limit=self.max_inner_documents,
        )
    return sections
Get the first `self.max_inner_documents` sections.
_get_sections
python
readthedocs/readthedocs.org
readthedocs/search/parsers.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/parsers.py
MIT
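The truncate-then-probe pattern in `_get_sections` generalizes to any generator; here is a minimal sketch (names are hypothetical) that materializes at most `limit` items and then checks whether the producer had more left:

import itertools

_SENTINEL = object()

def take_with_overflow_check(iterator, limit):
    # Consume at most `limit` items, then probe once more: if the probe
    # yields anything, the limit was exceeded (this is what triggers the
    # log.warning in _get_sections).
    items = list(itertools.islice(iterator, limit))
    overflowed = next(iterator, _SENTINEL) is not _SENTINEL
    return items, overflowed

items, overflowed = take_with_overflow_check(iter(range(10)), 3)
assert items == [0, 1, 2] and overflowed is True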
def _clean_body(self, body):
    """
    Removes nodes with irrelevant content before parsing its sections.

    This method is documented here:
    https://dev.readthedocs.io/page/search-integration.html#irrelevant-content

    .. warning::

       This will mutate the original `body`.
    """
    nodes_to_be_removed = itertools.chain(
        # Navigation nodes
        body.css("nav"),
        body.css("[role=navigation]"),
        body.css("[role=search]"),
        # Permalinks, this is a Sphinx convention.
        body.css(".headerlink"),
        # Line numbers from code blocks, they are very noisy in contents.
        # This convention is popular in Sphinx.
        body.css(".linenos"),
        body.css(".lineno"),
        # Sphinx doesn't wrap the result from the `toctree` directive
        # in a nav tag, so we need to manually remove that content.
        body.css(".toctree-wrapper"),
    )
    for node in nodes_to_be_removed:
        node.decompose()

    return body
Removes nodes with irrelevant content before parsing its sections. This method is documented here: https://dev.readthedocs.io/page/search-integration.html#irrelevant-content .. warning:: This will mutate the original `body`.
_clean_body
python
readthedocs/readthedocs.org
readthedocs/search/parsers.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/parsers.py
MIT
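To see the in-place mutation `_clean_body` performs, a small sketch (again assuming selectolax as the backend; the HTML and names are illustrative) that decomposes navigation and permalink nodes:

import itertools

from selectolax.parser import HTMLParser  # assumed backend

html = "<body><nav>sidebar</nav><p>Real content<a class='headerlink'>#</a></p></body>"
tree = HTMLParser(html)

# Same shape as _clean_body: chain several css() result lists, then
# decompose() each node, mutating the tree in place.
for node in itertools.chain(tree.css("nav"), tree.css(".headerlink")):
    node.decompose()

print(tree.body.text())  # Real content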
def _is_section(self, tag):
    """
    Check if `tag` is a section (linkable header).

    The tag is a section if it's a ``h`` or a ``header`` tag.
    """
    is_h_tag = re.match(r"h\d$", tag.tag)
    return is_h_tag or tag.tag == "header"
Check if `tag` is a section (linkable header). The tag is a section if it's a ``h`` or a ``header`` tag.
_is_section
python
readthedocs/readthedocs.org
readthedocs/search/parsers.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/parsers.py
MIT
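The header check is a plain regex; a standalone sketch (hypothetical name) shows what `r"h\d$"` accepts and rejects:

import re

def is_section(tag_name):
    # Exactly "h" plus one digit: re.match anchors at the start, and the
    # $ anchors the end. A <header> wrapper also counts.
    return bool(re.match(r"h\d$", tag_name)) or tag_name == "header"

assert is_section("h1") and is_section("h6") and is_section("header")
assert not is_section("head")   # "e" is not a digit
assert not is_section("h10")    # $ rejects more than one digit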
def _parse_section_title(self, tag):
    """
    Parses a section title tag and gets its id.

    The id (used to link to the section) is tested in the following order:

    - Get the id from the node itself.
    - Get the id from the parent node.
    """
    section_id = tag.attributes.get("id", "")
    if not section_id:
        parent = tag.parent
        section_id = parent.attributes.get("id", "")
    return self._parse_content(tag.text()), section_id
Parses a section title tag and gets its id. The id (used to link to the section) is tested in the following order: - Get the id from the node itself. - Get the id from the parent node.
_parse_section_title
python
readthedocs/readthedocs.org
readthedocs/search/parsers.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/parsers.py
MIT
def _parse_section_content(self, tag, *, depth=0):
    """
    Gets the content from the tag until a new section starts.

    If depth > 0, recursively check for sections in all of the tag's children.

    Returns a tuple with the parsed content,
    and a boolean indicating whether a section was found.
    """
    contents = []
    section_found = False

    next_tag = tag
    while next_tag:
        if section_found or self._is_section(next_tag):
            section_found = True
            break

        if self._is_code_section(next_tag):
            content = self._parse_code_section(next_tag)
        elif depth <= 0 or not next_tag.child:
            # Calling .text() with deep `True` over a text node will return empty.
            deep = next_tag.tag != "-text"
            content = next_tag.text(deep=deep)
        else:
            content, section_found = self._parse_section_content(
                tag=next_tag.child, depth=depth - 1
            )

        if content:
            is_block_level_element = next_tag.tag in self.block_level_elements
            if is_block_level_element:
                # Add a line break before and after a block level element.
                contents.append(f"\n{content}\n")
            else:
                contents.append(content)

        next_tag = next_tag.next

    return self._parse_content("".join(contents)), section_found
Gets the content from the tag until a new section starts. If depth > 0, recursively check for sections in all of the tag's children. Returns a tuple with the parsed content, and a boolean indicating whether a section was found.
_parse_section_content
python
readthedocs/readthedocs.org
readthedocs/search/parsers.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/parsers.py
MIT
def _is_code_section(self, tag):
    """
    Check if `tag` is a code section.

    Sphinx and MkDocs codeblocks usually have a class named
    ``highlight`` or ``highlight-{language}``.
    """
    if not tag.css_first("pre"):
        return False

    for c in tag.attributes.get("class", "").split():
        if c.startswith("highlight"):
            return True
    return False
Check if `tag` is a code section. Sphinx and MkDocs codeblocks usually have a class named ``highlight`` or ``highlight-{language}``.
_is_code_section
python
readthedocs/readthedocs.org
readthedocs/search/parsers.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/parsers.py
MIT
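The class check in `_is_code_section` reduces to a prefix test over the `class` attribute; a minimal sketch with hypothetical names:

def looks_like_code_section(class_attr, has_pre):
    # Sphinx and MkDocs wrap <pre> blocks in a container whose class is
    # "highlight" or "highlight-{language}"; anything else is treated as prose.
    if not has_pre:
        return False
    return any(c.startswith("highlight") for c in class_attr.split())

assert looks_like_code_section("highlight-python notranslate", has_pre=True)
assert not looks_like_code_section("admonition note", has_pre=True)
assert not looks_like_code_section("highlight", has_pre=False)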
def _parse_code_section(self, tag):
    """
    Parse a code section to fetch relevant content only.

    - Removes line numbers.

    Sphinx and MkDocs may use a table when the code block includes line numbers.
    This table has a td tag with a ``linenos`` class.
    Other implementations put the line number within the code,
    inside span tags with the ``lineno`` class.
    """
    nodes_to_be_removed = itertools.chain(tag.css(".linenos"), tag.css(".lineno"))
    for node in nodes_to_be_removed:
        node.decompose()

    contents = []
    for node in tag.css("pre"):
        # XXX: Don't call to `_parse_content`
        # if we decide to show code results more nicely,
        # so the indentation isn't lost.
        content = node.text().strip("\n")
        contents.append(self._parse_content(content))
    return " ".join(contents)
Parse a code section to fetch relevant content only. - Removes line numbers. Sphinx and MkDocs may use a table when the code block includes line numbers. This table has a td tag with a ``linenos`` class. Other implementations put the line number within the code, inside span tags with the ``lineno`` class.
_parse_code_section
python
readthedocs/readthedocs.org
readthedocs/search/parsers.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/parsers.py
MIT
def parse(self, page):
    """
    Get the parsed JSON for search indexing.

    Returns a dictionary with the following structure.
    {
        'path': 'file path',
        'title': 'Title',
        'sections': [
            {
                'id': 'section-anchor',
                'title': 'Section title',
                'content': 'Section content',
            },
        ],
    }
    """
    try:
        content = self._get_page_content(page)
        if content:
            return self._process_content(page, content)
    except Exception:
        log.info("Failed to index page.", path=page, exc_info=True)
    return {
        "path": page,
        "title": "",
        "sections": [],
        "main_content_hash": None,
    }
Get the parsed JSON for search indexing. Returns a dictionary with the following structure. { 'path': 'file path', 'title': 'Title', 'sections': [ { 'id': 'section-anchor', 'title': 'Section title', 'content': 'Section content', }, ], }
parse
python
readthedocs/readthedocs.org
readthedocs/search/parsers.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/parsers.py
MIT
def _process_content(self, page, content):
    """Parses the content into a structured dict."""
    html = HTMLParser(content)
    body = self._get_main_node(html)
    title = ""
    sections = []
    main_content_hash = None
    if body:
        main_content_hash = hashlib.md5(body.html.encode()).hexdigest()
        body = self._clean_body(body)
        title = self._get_page_title(body, html) or page
        sections = self._get_sections(title=title, body=body)
    else:
        log.info(
            "Page doesn't look like it has valid content, skipping.",
            page=page,
        )
    return {
        "path": page,
        "title": title,
        "sections": sections,
        "main_content_hash": main_content_hash,
    }
Parses the content into a structured dict.
_process_content
python
readthedocs/readthedocs.org
readthedocs/search/parsers.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/parsers.py
MIT
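The `main_content_hash` above is an MD5 over the raw HTML of the main node, so any byte-level change in the markup (not only visible text) yields a new hash; a sketch with a hypothetical helper name:

import hashlib

def main_content_hash(main_html):
    # Same call as _process_content: MD5 over the encoded HTML string.
    return hashlib.md5(main_html.encode()).hexdigest()

a = main_content_hash("<div><p>Content</p></div>")
b = main_content_hash("<div><p>Content </p></div>")  # one extra space
assert a != b  # any markup change produces a different hash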
def get_queryset(self):
    """
    Additional filtering of default queryset.

    Don't include delisted projects.
    This will also break in-doc search for these projects,
    but it's not a priority to find a solution for this
    as long as "delisted" projects are understood to be
    projects with a negative reason for being delisted.
    """
    return super().get_queryset().exclude(delisted=True).exclude(is_spam=True)
Additional filtering of default queryset. Don't include delisted projects. This will also break in-doc search for these projects, but it's not a priority to find a solution for this as long as "delisted" projects are understood to be projects with a negative reason for being delisted.
get_queryset
python
readthedocs/readthedocs.org
readthedocs/search/documents.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/documents.py
MIT
def get_queryset(self):
    """Don't include ignored files and delisted projects."""
    queryset = super().get_queryset()
    queryset = (
        queryset.exclude(ignore=True)
        .exclude(project__delisted=True)
        .exclude(project__is_spam=True)
        .select_related("version", "project")
    )
    return queryset
Don't include ignored files and delisted projects.
get_queryset
python
readthedocs/readthedocs.org
readthedocs/search/documents.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/documents.py
MIT
def index_missing_objects(app_label, model_name, document_class, index_generation_time):
    """
    Task to ensure that no objects are missed from indexing.

    The object ids are sent to the `index_objects_to_es` task for indexing.
    While the task is running, new objects can be created/deleted in the database
    and they will not be in the tasks for indexing into ES.
    This task will index all the objects that got into the DB after the
    `latest_indexed` timestamp to ensure that everything is in the ES index.
    """
    model = apps.get_model(app_label, model_name)
    document = _get_document(model=model, document_class=document_class)
    query_string = "{}__lte".format(document.modified_model_field)
    queryset = document().get_queryset().exclude(**{query_string: index_generation_time})
    document().update(queryset.iterator())

    log.info(
        "Indexed missing objects from model.",
        count=queryset.count(),
        model=model.__name__,
    )
Task to ensure that no objects are missed from indexing. The object ids are sent to the `index_objects_to_es` task for indexing. While the task is running, new objects can be created/deleted in the database and they will not be in the tasks for indexing into ES. This task will index all the objects that got into the DB after the `latest_indexed` timestamp to ensure that everything is in the ES index.
index_missing_objects
python
readthedocs/readthedocs.org
readthedocs/search/tasks.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/tasks.py
MIT
def delete_old_search_queries_from_db():
    """
    Delete SearchQuery objects older than ``RTD_ANALYTICS_DEFAULT_RETENTION_DAYS``.

    This is run by celery beat every day.
    """
    retention_days = settings.RTD_ANALYTICS_DEFAULT_RETENTION_DAYS
    days_ago = timezone.now().date() - timezone.timedelta(days=retention_days)
    search_queries_qs = SearchQuery.objects.filter(
        created__date__lt=days_ago,
    )

    if search_queries_qs.exists():
        log.info(
            "Deleting old search queries.",
            total=search_queries_qs.count(),
        )
        search_queries_qs.delete()
Delete SearchQuery objects older than ``RTD_ANALYTICS_DEFAULT_RETENTION_DAYS``. This is run by celery beat every day.
delete_old_search_queries_from_db
python
readthedocs/readthedocs.org
readthedocs/search/tasks.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/tasks.py
MIT
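The retention cutoff is plain date arithmetic; a sketch (hypothetical helper) mirroring the `created__date__lt=days_ago` filter above:

from datetime import date, timedelta

def retention_cutoff(today, retention_days):
    # Rows whose created date is strictly before this cutoff are deleted,
    # matching created__date__lt=days_ago in the task.
    return today - timedelta(days=retention_days)

assert retention_cutoff(date(2024, 3, 31), 90) == date(2024, 1, 1)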
def record_search_query(project_slug, version_slug, query, total_results, time_string):
    """Record/update a search query for analytics."""
    if not project_slug or not version_slug or not query:
        log.debug(
            "Not recording the search query.",
            project_slug=project_slug,
            version_slug=version_slug,
            query=query,
            total_results=total_results,
            time=time_string,
        )
        return

    time = parse(time_string)
    before_10_sec = time - timezone.timedelta(seconds=10)
    partial_query_qs = SearchQuery.objects.filter(
        project__slug=project_slug,
        version__slug=version_slug,
        modified__gte=before_10_sec,
    ).order_by("-modified")

    # If a partial query exists, then just update that object.
    # Check max 30 queries, in case there is a flood of queries.
    max_queries = 30
    for partial_query in partial_query_qs[:max_queries]:
        if query.startswith(partial_query.query):
            partial_query.query = query
            partial_query.total_results = total_results
            partial_query.save()
            return

    version = (
        Version.objects.filter(slug=version_slug, project__slug=project_slug)
        .prefetch_related("project")
        .first()
    )
    if not version:
        log.debug(
            "Not recording the search query because project does not exist.",
            project_slug=project_slug,
            version_slug=version_slug,
        )
        return

    SearchQuery.objects.create(
        project=version.project,
        version=version,
        query=query,
        total_results=total_results,
    )
Record/update a search query for analytics.
record_search_query
python
readthedocs/readthedocs.org
readthedocs/search/tasks.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/tasks.py
MIT
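The deduplication rule in `record_search_query` -- update a recent row whose query is a prefix of the new one, otherwise create -- can be stated without the ORM; a sketch with hypothetical names and an in-memory list standing in for the queryset:

from datetime import datetime, timedelta

def merge_partial_query(recent, new_query, now, window_seconds=10):
    # `recent` holds (query, modified) pairs, newest first, standing in
    # for partial_query_qs above.
    cutoff = now - timedelta(seconds=window_seconds)
    for query, modified in recent:
        if modified >= cutoff and new_query.startswith(query):
            return ("update", query)  # extend the existing row
    return ("create", new_query)      # no recent prefix: insert a new row

now = datetime(2024, 1, 1, 12, 0, 10)
recent = [("stack over", datetime(2024, 1, 1, 12, 0, 5))]
assert merge_partial_query(recent, "stack overflow", now) == ("update", "stack over")
assert merge_partial_query([], "stack overflow", now) == ("create", "stack overflow")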
def generate_queries_count_of_one_month(cls, project_slug):
    """
    Returns the total queries performed each day of the last 30 days (including today).

    The structure of the returned data is suitable for making graphs.
    Sample returned data::

        {
            'labels': ['01 Jul', '02 Jul', '03 Jul'],
            'int_data': [150, 200, 143]
        }

    This data shows that 150 searches were made on 01 July,
    200 searches on 02 July and 143 searches on 03 July.
    """
    today = timezone.now().date()
    last_30th_day = timezone.now().date() - timezone.timedelta(days=30)

    qs = cls.objects.filter(
        project__slug=project_slug,
        created__date__lte=today,
        created__date__gte=last_30th_day,
    ).order_by("-created")

    # dict containing the total number of queries
    # of each day for the past 30 days (if present in database).
    count_dict = dict(
        qs.annotate(created_date=TruncDate("created"))
        .values("created_date")
        .order_by("created_date")
        .annotate(count=Count("id"))
        .values_list("created_date", "count")
    )

    count_data = [count_dict.get(date) or 0 for date in _last_30_days_iter()]

    # format the date value to a more readable form
    # Eg. `16 Jul`
    last_30_days_str = [
        timezone.datetime.strftime(date, "%d %b") for date in _last_30_days_iter()
    ]

    final_data = {
        "labels": last_30_days_str,
        "int_data": count_data,
    }

    return final_data
Returns the total queries performed each day of the last 30 days (including today). The structure of the returned data is suitable for making graphs. Sample returned data:: { 'labels': ['01 Jul', '02 Jul', '03 Jul'], 'int_data': [150, 200, 143] } This data shows that 150 searches were made on 01 July, 200 searches on 02 July and 143 searches on 03 July.
generate_queries_count_of_one_month
python
readthedocs/readthedocs.org
readthedocs/search/models.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/models.py
MIT
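The zero-filling step (`count_dict.get(date) or 0`) is what keeps the graph labels and data aligned when some days have no queries; a standalone sketch with a hypothetical window helper:

from datetime import date, timedelta

def fill_missing_days(counts, end, days=30):
    # One entry per day in the window, oldest first, defaulting to 0 when
    # the database had no row for that day.
    window = [end - timedelta(days=offset) for offset in range(days, -1, -1)]
    return [counts.get(day, 0) for day in window]

counts = {date(2024, 7, 1): 150, date(2024, 7, 2): 200}
assert fill_missing_days(counts, end=date(2024, 7, 3), days=2) == [150, 200, 0]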
def test_sphinx_local_toc(self, storage_open, storage_exists):
    """
    Test that the local table of contents from the ``contents`` directive
    is not included in the indexed content.
    """
    # Source:
    # https://docs.readthedocs.io/en/stable/security.html
    html_content = data_path / "sphinx/in/local-toc.html"
    storage_open.side_effect = self._mock_open(html_content.open().read())
    storage_exists.return_value = True

    self.version.documentation_type = SPHINX
    self.version.save()

    page_file = get(
        HTMLFile,
        project=self.project,
        version=self.version,
        path="local-toc.html",
    )

    parsed_json = page_file.processed_json
    expected_json = json.load(open(data_path / "sphinx/out/local-toc.json"))
    assert parsed_json == expected_json
Test that the local table of contents from the ``contents`` directive is not included in the indexed content.
test_sphinx_local_toc
python
readthedocs/readthedocs.org
readthedocs/search/tests/test_parsers.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/tests/test_parsers.py
MIT
def test_sphinx_toctree(self, storage_open, storage_exists):
    """
    Test that the table of contents from the ``toctree`` directive
    is not included in the indexed content.
    """
    # Source:
    # https://docs.readthedocs.io/en/stable/api/index.html
    html_content = data_path / "sphinx/in/toctree.html"
    storage_open.side_effect = self._mock_open(html_content.open().read())
    storage_exists.return_value = True

    self.version.documentation_type = SPHINX
    self.version.save()

    page_file = get(
        HTMLFile,
        project=self.project,
        version=self.version,
        path="toctree.html",
    )

    parsed_json = page_file.processed_json
    expected_json = json.load(open(data_path / "sphinx/out/toctree.json"))
    assert parsed_json == expected_json
Test that the table of contents from the ``toctree`` directive is not included in the indexed content.
test_sphinx_toctree
python
readthedocs/readthedocs.org
readthedocs/search/tests/test_parsers.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/tests/test_parsers.py
MIT
def get_search_query_from_project_file(
    project_slug, page_num=0, field="title", type=None
):
    """
    Return search query from the project's page file.

    Query is generated from the value of `data_type`
    """
    html_file = HTMLFile.objects.filter(project__slug=project_slug).order_by("id")[
        page_num
    ]
    file_data = html_file.processed_json
    internal_type = {
        "section": "sections",
        "title": "title",
    }
    query_data = file_data[internal_type[type or field]]

    if not type and field == "title":
        # uses first word of page title as query
        query = query_data.split()[0]
    elif type == "section" and field == "title":
        # generates query from section title
        query_data = query_data[0]["title"].split()
        start = 0
        end = random.randint(1, len(query_data))
        query = query_data[start:end]
        query = " ".join(query)
    elif type == "section" and field == "content":
        # generates query from section content
        query_data = query_data[0]["content"].split()
        start = random.randint(0, 6)
        # 5 words to generate the query, to make sure that
        # the query does not only contain 'is', 'and', 'the'
        # and other stop words
        end = start + 5
        query = query_data[start:end]
        query = " ".join(query)

    return query
Return search query from the project's page file. Query is generated from the value of `data_type`
get_search_query_from_project_file
python
readthedocs/readthedocs.org
readthedocs/search/tests/utils.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/tests/utils.py
MIT
def test_search_exact_match(self, client, project, case):
    """
    Check that a quoted query matches the exact phrase, case-insensitively.

    Making a query with quoted text like ``"foo bar"`` should match
    exactly ``foo bar`` or ``Foo Bar`` etc.
    """
    # `Sphinx` word is present both in `kuma` and `docs` files
    # But the phrase `Sphinx uses` is available only in kuma docs.
    # So search with this phrase to check
    query_text = r'"Sphinx uses"'
    cased_query = getattr(query_text, case)
    query = cased_query()

    page_search = PageSearch(query=query)
    results = page_search.execute()

    assert len(results) == 2
    # Both versions have the same exact content.
    # Order of results is not deterministic anymore for some reason,
    # so we use a set to compare the results.
    assert {result["version"] for result in results} == {"stable", "latest"}
    for result in results:
        assert result["project"] == "kuma"
        assert result["path"] == "testdocumentation"
Check that a quoted query matches the exact phrase, case-insensitively. Making a query with quoted text like ``"foo bar"`` should match exactly ``foo bar`` or ``Foo Bar`` etc.
test_search_exact_match
python
readthedocs/readthedocs.org
readthedocs/search/tests/test_faceted_search.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/tests/test_faceted_search.py
MIT
def test_search_combined_result(self, client, project):
    """
    Check that search results combine the `AND` and `OR` operators.

    If the query is `Foo Bar` then the results should be in the following order:

    - Where both `Foo` and `Bar` are present
    - Where `Foo` or `Bar` is present
    """
    query = "Elasticsearch Query"
    page_search = PageSearch(query=query)
    results = page_search.execute()
    assert len(results) == 6

    result_paths_latest = [r.path for r in results if r.version == "latest"]
    result_paths_stable = [r.path for r in results if r.version == "stable"]

    # ``guides/wipe-environment`` page has both ``Elasticsearch Query`` words
    # ``docker`` page has ``Elasticsearch`` word
    # ``installation`` page has ``Query`` word.
    expected_paths = ["guides/wipe-environment", "docker", "installation"]

    assert result_paths_latest == expected_paths
    assert result_paths_stable == expected_paths
Check that search results combine the `AND` and `OR` operators. If the query is `Foo Bar` then the results should be in the following order: - Where both `Foo` and `Bar` are present - Where `Foo` or `Bar` is present
test_search_combined_result
python
readthedocs/readthedocs.org
readthedocs/search/tests/test_faceted_search.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/tests/test_faceted_search.py
MIT
def test_search_project_have_correct_language_facets(self, client, project):
    """Test that searching for a project returns the correct language facets in the results."""
    # Create a project in bn and add it as a translation
    get(Project, language="bn", name=project.name)

    results, facets = self._get_search_result(
        url=self.url,
        client=client,
        search_params={"q": project.name, "type": "project"},
    )

    lang_facets = facets["language"]
    lang_facets_str = [facet[0] for facet in lang_facets]
    # There should be 2 languages
    assert len(lang_facets) == 2
    assert sorted(lang_facets_str) == sorted(["en", "bn"])
    for facet in lang_facets:
        assert facet[2] == False  # because none of the facets are applied
Test that searching for a project returns the correct language facets in the results.
test_search_project_have_correct_language_facets
python
readthedocs/readthedocs.org
readthedocs/search/tests/test_views.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/tests/test_views.py
MIT
def test_search_project_filter_language(self, client, project):
    """Test that project search results are filtered according to language."""
    # Create a project in bn and add it as a translation
    translate = get(Project, language="bn", name=project.name)
    search_params = {"q": project.name, "language": "bn", "type": "project"}

    results, facets = self._get_search_result(
        url=self.url,
        client=client,
        search_params=search_params,
    )

    # There should be only 1 result
    assert len(results) == 1

    lang_facets = facets["language"]
    lang_facets_str = [facet[0] for facet in lang_facets]

    # There should be 2 languages because both `en` and `bn` should show there
    assert len(lang_facets) == 2
    assert sorted(lang_facets_str) == sorted(["en", "bn"])
Test that project search results are filtered according to language.
test_search_project_filter_language
python
readthedocs/readthedocs.org
readthedocs/search/tests/test_views.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/tests/test_views.py
MIT
def test_file_search_case_insensitive(self, client, project, case, data_type):
    """
    Check that file search is case insensitive.

    It tests with uppercase, lowercase and camelcase.
    """
    type, field = None, None
    data_type = data_type.split(".")
    if len(data_type) < 2:
        field = data_type[0]
    else:
        type, field = data_type

    query_text = get_search_query_from_project_file(
        project_slug=project.slug,
        type=type,
        field=field,
    )
    cased_query = getattr(query_text, case)
    query = cased_query()

    results, _ = self._get_search_result(
        url=self.url, client=client, search_params={"q": query, "type": "file"}
    )
    assert len(results) >= 1

    first_result = results[0]
    highlight = self._get_highlight(first_result, field, type)
    assert len(highlight) == 1

    highlighted_words = self._get_highlighted_words(highlight[0])
    assert len(highlighted_words) >= 1
    for word in highlighted_words:
        assert word.lower() in query.lower()
Check that file search is case insensitive. It tests with uppercase, lowercase and camelcase.
test_file_search_case_insensitive
python
readthedocs/readthedocs.org
readthedocs/search/tests/test_views.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/tests/test_views.py
MIT
def test_file_search_exact_match(self, client, project):
    """
    Check that a quoted query matches the exact phrase.

    Making a query with quoted text like ``"foo bar"`` should match
    exactly the ``foo bar`` phrase.
    """
    # `Sphinx` word is present both in `kuma` and `docs` files
    # But the phrase `Sphinx uses` is present only in `kuma` docs.
    # So search with this phrase to check
    query = r'"Sphinx uses"'

    results, _ = self._get_search_result(
        url=self.url, client=client, search_params={"q": query, "type": "file"}
    )

    # There are two results,
    # one from each version of the "kuma" project.
    assert len(results) == 2
    # Both versions have the same exact content.
    # Order of results is not deterministic anymore for some reason,
    # so we use a set to compare the results.
    assert {result["version"]["slug"] for result in results} == {"stable", "latest"}
    for result in results:
        assert result["project"] == {"alias": None, "slug": "kuma"}
        assert result["domain"] == "http://kuma.readthedocs.io"
        assert result["path"].endswith("/documentation.html")

    blocks = results[0]["blocks"]
    assert len(blocks) == 1
    assert blocks[0]["type"] == "section"

    highlight = self._get_highlight(results[0], "content", "section")
    assert len(highlight) == 1

    highlighted_words = self._get_highlighted_words(highlight[0])
    assert len(highlighted_words) >= 1
    for word in highlighted_words:
        assert word.lower() in query.lower()
Check that a quoted query matches the exact phrase. Making a query with quoted text like ``"foo bar"`` should match exactly the ``foo bar`` phrase.
test_file_search_exact_match
python
readthedocs/readthedocs.org
readthedocs/search/tests/test_views.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/tests/test_views.py
MIT
def test_file_search_filter_by_project(self, client):
    """Test that search results are filtered according to project."""
    # `environment` word is present both in `kuma` and `docs` files
    # so search with this phrase but filter through `kuma` project
    search_params = {
        "q": "project:kuma environment",
        "type": "file",
    }
    results, facets = self._get_search_result(
        url=self.url,
        client=client,
        search_params=search_params,
    )
    project_facets = facets["project"]
    resulted_project_facets = [facet[0] for facet in project_facets]

    # There should be 1 search result as we have filtered
    assert len(results) == 1
    # kuma should be the only one there
    assert {"alias": None, "slug": "kuma"} == results[0]["project"]

    # The project we searched is the only one included in the final results.
    assert resulted_project_facets == ["kuma"]
Test that search results are filtered according to project.
test_file_search_filter_by_project
python
readthedocs/readthedocs.org
readthedocs/search/tests/test_views.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/tests/test_views.py
MIT
def test_search_query_recorded_when_results_not_zero(self, api_client):
    """Test if search query is recorded in a database when a search is made."""
    assert (
        SearchQuery.objects.all().count() == 0
    ), "no SearchQuery should be present if there is no search made."

    # `sphinx` is present in `documentation.json`
    # file of project `kuma`
    search_params = {"q": "sphinx", "project": "kuma", "version": "latest"}
    resp = api_client.get(self.url, search_params)
    assert resp.data["count"] == 1

    assert (
        SearchQuery.objects.all().count() == 1
    ), "there should be 1 obj since a search is made which returns one result."
Test if search query is recorded in a database when a search is made.
test_search_query_recorded_when_results_not_zero
python
readthedocs/readthedocs.org
readthedocs/search/tests/test_search_tasks.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/tests/test_search_tasks.py
MIT
def test_partial_queries_are_not_recorded(self, api_client):
    """Test if partial queries are not recorded."""
    assert (
        SearchQuery.objects.all().count() == 0
    ), "no SearchQuery should be present if there is no search made."

    time = timezone.now()
    search_params = {"q": "stack", "project": "docs", "version": "latest"}
    with mock.patch("django.utils.timezone.now") as test_time:
        test_time.return_value = time
        resp = api_client.get(self.url, search_params)
        assert resp.status_code == 200

    assert (
        SearchQuery.objects.all().count() == 1
    ), "one SearchQuery should be present"

    # update the time and the search query and make another search request
    time = time + timezone.timedelta(seconds=2)
    search_params["q"] = "stack over"
    with mock.patch("django.utils.timezone.now") as test_time:
        test_time.return_value = time
        resp = api_client.get(self.url, search_params)
        assert resp.status_code == 200

    # update the time and the search query and make another search request
    time = time + timezone.timedelta(seconds=2)
    search_params["q"] = "stack overflow"
    with mock.patch("django.utils.timezone.now") as test_time:
        test_time.return_value = time
        resp = api_client.get(self.url, search_params)
        assert resp.status_code == 200

    assert (
        SearchQuery.objects.all().count() == 1
    ), "one SearchQuery should be present"
    assert (
        SearchQuery.objects.all().first().query == "stack overflow"
    ), "one SearchQuery should be there because partial queries get updated"
Test if partial queries are not recorded.
test_partial_queries_are_not_recorded
python
readthedocs/readthedocs.org
readthedocs/search/tests/test_search_tasks.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/tests/test_search_tasks.py
MIT
def test_search_query_recorded_when_results_are_zero(self, api_client):
    """Test that search queries are recorded when they have zero results."""
    assert (
        SearchQuery.objects.all().count() == 0
    ), "no SearchQuery should be present if there is no search made."

    # `readthedo` is NOT present in project `kuma`.
    search_params = {"q": "readthedo", "project": "kuma", "version": "latest"}
    resp = api_client.get(self.url, search_params)
    assert resp.data["count"] == 0

    assert SearchQuery.objects.all().count() == 1
Test that search queries are recorded when they have zero results.
test_search_query_recorded_when_results_are_zero
python
readthedocs/readthedocs.org
readthedocs/search/tests/test_search_tasks.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/tests/test_search_tasks.py
MIT
def test_delete_old_search_queries_from_db(self, project):
    """Test that the old search queries are being deleted."""
    assert (
        SearchQuery.objects.all().count() == 0
    ), "no SearchQuery should be present if there is no search made."

    obj = SearchQuery.objects.create(
        project=project, version=project.versions.all().first(), query="first"
    )
    obj.created = timezone.make_aware(timezone.datetime(2019, 1, 1))
    obj.save()

    assert SearchQuery.objects.all().count() == 1
    tasks.delete_old_search_queries_from_db()
    assert SearchQuery.objects.all().count() == 0
Test that the old search queries are being deleted.
test_delete_old_search_queries_from_db
python
readthedocs/readthedocs.org
readthedocs/search/tests/test_search_tasks.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/tests/test_search_tasks.py
MIT
def test_doc_search_filter_by_project(self, api_client):
    """Test that doc search results are filtered according to project."""
    # `documentation` word is present both in `kuma` and `docs` files
    # and not in `pipeline`, so search with this phrase but filter through project
    search_params = {"q": "documentation", "project": "docs", "version": "latest"}
    resp = self.get_search(api_client, search_params)
    assert resp.status_code == 200

    data = resp.data["results"]
    assert len(data) == 2  # both pages of `docs` contain the word `documentation`

    # all results must be from same project
    for res in data:
        assert res["project"] == "docs"
Test that doc search results are filtered according to project.
test_doc_search_filter_by_project
python
readthedocs/readthedocs.org
readthedocs/search/tests/test_api.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/tests/test_api.py
MIT
def test_doc_search_filter_by_version(self, api_client, project):
    """Test that doc search results are filtered according to version."""
    query = get_search_query_from_project_file(project_slug=project.slug)
    latest_version = project.versions.all()[0]
    # Create another version
    dummy_version = get(
        Version,
        project=project,
        active=True,
        privacy_level=PUBLIC,
    )
    # Create HTMLFile same as the latest version
    latest_version_files = HTMLFile.objects.all().filter(version=latest_version)
    for f in latest_version_files:
        f.version = dummy_version
        # Make primary key None, so django will create a new object
        f.pk = None
        f.save()
        PageDocument().update(f)

    search_params = {
        "q": query,
        "project": project.slug,
        "version": dummy_version.slug,
    }
    resp = self.get_search(api_client, search_params)
    assert resp.status_code == 200

    data = resp.data["results"]
    assert len(data) == 1
    assert data[0]["project"] == project.slug
    assert data[0]["project_alias"] is None
Test that doc search results are filtered according to version.
test_doc_search_filter_by_version
python
readthedocs/readthedocs.org
readthedocs/search/tests/test_api.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/tests/test_api.py
MIT
def test_doc_search_pagination(self, api_client, project):
    """Test that doc search results can be paginated."""
    latest_version = project.versions.all()[0]
    html_file = HTMLFile.objects.filter(version=latest_version)[0]
    title = html_file.processed_json["title"]
    query = title.split()[0]

    # Create 60 more copies of the same HTML file
    for _ in range(60):
        # Make primary key None, so django will create a new object
        html_file.pk = None
        html_file.save()
        PageDocument().update(html_file)

    search_params = {
        "q": query,
        "project": project.slug,
        "version": latest_version.slug,
    }
    resp = self.get_search(api_client, search_params)
    assert resp.status_code == 200

    # Check the count is 61 (1 existing and 60 new created)
    assert resp.data["count"] == 61
    # Check there is a next url
    assert resp.data["next"] is not None
    # There should be only 50 results as the pagination is 50 by default
    assert len(resp.data["results"]) == 50

    # Check for page 2
    search_params["page"] = 2
    resp = self.get_search(api_client, search_params)
    assert resp.status_code == 200

    # Check the count is 61 (1 existing and 60 new created)
    assert resp.data["count"] == 61
    # We don't have more results after this page
    assert resp.data["next"] is None
    # There should be only the 11 left
    assert len(resp.data["results"]) == 11

    # Add `page_size` parameter and check the data is paginated accordingly
    search_params["page_size"] = 5
    resp = self.get_search(api_client, search_params)
    assert resp.status_code == 200

    assert len(resp.data["results"]) == 5
Test that doc search results can be paginated.
test_doc_search_pagination
python
readthedocs/readthedocs.org
readthedocs/search/tests/test_api.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/tests/test_api.py
MIT
def test_doc_search_without_parameters(self, api_client, project):
    """Hitting Document Search endpoint without project and version should return 404."""
    resp = self.get_search(api_client, {})
    assert resp.status_code == 404
Hitting Document Search endpoint without project and version should return 404.
test_doc_search_without_parameters
python
readthedocs/readthedocs.org
readthedocs/search/tests/test_api.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/tests/test_api.py
MIT
def test_doc_search_without_query(self, api_client, project):
    """Hitting Document Search endpoint without a query should return error."""
    resp = self.get_search(
        api_client,
        {"project": project.slug, "version": project.versions.first().slug},
    )
    assert resp.status_code == 400

    # Check error message is there
    assert "q" in resp.data.keys()
Hitting Document Search endpoint without a query should return error.
test_doc_search_without_query
python
readthedocs/readthedocs.org
readthedocs/search/tests/test_api.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/tests/test_api.py
MIT
def test_doc_search_subprojects(self, api_client, all_projects):
    """Test that document search also returns results from subprojects."""
    project = all_projects[0]
    subproject = all_projects[1]
    version = project.versions.all()[0]
    # Add another project as subproject of the project
    project.add_subproject(subproject)

    # Now search with subproject content but explicitly filter by the parent project
    query = get_search_query_from_project_file(project_slug=subproject.slug)
    search_params = {"q": query, "project": project.slug, "version": version.slug}
    resp = self.get_search(api_client, search_params)
    assert resp.status_code == 200

    data = resp.data["results"]
    assert len(data) >= 1  # there may be results from other projects

    # First result should be the subproject
    first_result = data[0]
    assert first_result["project"] == subproject.slug
    assert first_result["project_alias"] == subproject.slug
    # The result is from the same version as the main project.
    assert first_result["version"] == version.slug

    # Check the link is the subproject document link
    document_link = subproject.get_docs_url(version_slug=version.slug)
    link = first_result["domain"] + first_result["path"]
    assert document_link in link
Test that document search also returns results from subprojects.
test_doc_search_subprojects
python
readthedocs/readthedocs.org
readthedocs/search/tests/test_api.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/tests/test_api.py
MIT
def test_doc_search_subprojects_default_version(self, api_client, all_projects):
    """
    Return results from subprojects that match the version from the main project,
    or fall back to its default version.
    """
    project = all_projects[0]
    version = project.versions.all()[0]

    subproject = all_projects[1]
    subproject_version = subproject.versions.all()[0]

    # Change the name of the version, and make it default.
    subproject_version.slug = "different"
    subproject_version.save()
    subproject.default_version = subproject_version.slug
    subproject.save()
    subproject.versions.filter(slug=version.slug).delete()

    # Refresh index
    version_files = HTMLFile.objects.all().filter(version=subproject_version)
    for f in version_files:
        PageDocument().update(f)

    # Add another project as subproject of the project
    project.add_subproject(subproject)

    # Now search with subproject content but explicitly filter by the parent project
    query = get_search_query_from_project_file(project_slug=subproject.slug)
    search_params = {"q": query, "project": project.slug, "version": version.slug}
    resp = self.get_search(api_client, search_params)
    assert resp.status_code == 200

    data = resp.data["results"]
    assert len(data) >= 1  # there may be results from other projects

    # First result should be the subproject
    first_result = data[0]
    assert first_result["project"] == subproject.slug
    assert first_result["version"] == "different"

    # Check the link is the subproject document link
    document_link = subproject.get_docs_url(version_slug=subproject_version.slug)
    link = first_result["domain"] + first_result["path"]
    assert document_link in link
Return results from subprojects that match the version from the main project, or fall back to its default version.
test_doc_search_subprojects_default_version
python
readthedocs/readthedocs.org
readthedocs/search/tests/test_api.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/tests/test_api.py
MIT
def test_get_all_projects_returns_empty_results(self, api_client, project):
    """If there is a case where `_get_projects_to_search` returns empty, we could be querying all projects."""
    # `documentation` word is present both in `kuma` and `docs` files
    # and not in `pipeline`, so search with this phrase but filter through project
    search_params = {"q": "documentation", "project": "docs", "version": "latest"}
    resp = self.get_search(api_client, search_params)
    assert resp.status_code == 200

    data = resp.data["results"]
    assert len(data) == 0
If there is a case where `_get_projects_to_search` returns empty, we could be querying all projects.
test_get_all_projects_returns_empty_results
python
readthedocs/readthedocs.org
readthedocs/search/tests/test_api.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/tests/test_api.py
MIT
def test_doc_search_hidden_versions(self, api_client, all_projects):
    """Test that hidden versions are excluded when searching the parent project, but can still be searched directly."""
    project = all_projects[0]
    subproject = all_projects[1]
    version = project.versions.all()[0]
    # Add another project as subproject of the project
    project.add_subproject(subproject)

    version_subproject = subproject.versions.first()
    version_subproject.hidden = True
    version_subproject.save()

    # Now search with subproject content but explicitly filter by the parent project
    query = get_search_query_from_project_file(project_slug=subproject.slug)
    search_params = {"q": query, "project": project.slug, "version": version.slug}
    resp = self.get_search(api_client, search_params)
    assert resp.status_code == 200

    # The version from the subproject is hidden, so it isn't shown in the results.
    data = resp.data["results"]
    assert len(data) == 0

    # Now search on the subproject with the hidden version
    query = get_search_query_from_project_file(project_slug=subproject.slug)
    search_params = {
        "q": query,
        "project": subproject.slug,
        "version": version_subproject.slug,
    }
    resp = self.get_search(api_client, search_params)
    assert resp.status_code == 200

    # We can still search inside the hidden version
    data = resp.data["results"]
    assert len(data) == 1
    first_result = data[0]
    assert first_result["project"] == subproject.slug
Test that hidden versions are excluded when searching the parent project, but can still be searched directly.
test_doc_search_hidden_versions
python
readthedocs/readthedocs.org
readthedocs/search/tests/test_api.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/tests/test_api.py
MIT
def test_search_single_query(self, api_client):
    """A single query matches substrings."""
    project = Project.objects.get(slug="docs")
    feature, _ = Feature.objects.get_or_create(
        feature_id=Feature.DEFAULT_TO_FUZZY_SEARCH,
    )
    project.feature_set.add(feature)
    project.save()
    version = project.versions.all().first()

    # Query with a partial word should return results
    search_params = {
        "project": project.slug,
        "version": version.slug,
        "q": "ind",
    }
    resp = self.get_search(api_client, search_params)
    assert resp.status_code == 200

    results = resp.data["results"]
    assert len(results) > 0

    assert "Index" in results[0]["title"]
    highlights = results[0]["blocks"][0]["highlights"]
    assert "<span>index</span>" in highlights["content"][0]

    assert "Guides" in results[1]["title"]
    highlights = results[1]["blocks"][0]["highlights"]
    assert "<span>index</span>" in highlights["content"][0]

    # Query with a partial word, but we want to match that
    search_params = {
        "project": project.slug,
        "version": version.slug,
        "q": '"ind"',
    }
    resp = self.get_search(api_client, search_params)
    assert resp.status_code == 200
    assert len(resp.data["results"]) == 0

    # Exact query still works
    search_params = {
        "project": project.slug,
        "version": version.slug,
        "q": '"index"',
    }
    resp = self.get_search(api_client, search_params)
    assert resp.status_code == 200
    results = resp.data["results"]
    assert len(results) > 0
    assert "Index" in results[0]["title"]
A single query matches substrings.
test_search_single_query
python
readthedocs/readthedocs.org
readthedocs/search/tests/test_api.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/tests/test_api.py
MIT
def forwards_func(apps, schema_editor):
    """Make all total_results fields not none."""
    SearchQuery = apps.get_model("search", "SearchQuery")
    SearchQuery.objects.filter(total_results=None).update(total_results=0)
Make all total_results fields not none.
forwards_func
python
readthedocs/readthedocs.org
readthedocs/search/migrations/0003_migrate_total_results_null_values.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/migrations/0003_migrate_total_results_null_values.py
MIT
def _reindex_projects_from(self, days_ago, queue):
    """Reindex projects with recent changes."""
    since = datetime.now() - timedelta(days=days_ago)
    queryset = Project.objects.filter(modified_date__gte=since).distinct()
    app_label = Project._meta.app_label
    model_name = Project.__name__
    apply_async_kwargs = {"queue": queue}

    for doc in registry.get_documents(models=[Project]):
        indexing_tasks = self._get_indexing_tasks(
            app_label=app_label,
            model_name=model_name,
            queryset=queryset,
            index_name=doc._index._name,
            document_class=str(doc),
        )
        for task in indexing_tasks:
            task.apply_async(**apply_async_kwargs)
        log.info(
            "Tasks issued successfully.",
            app_label=app_label,
            model_name=model_name,
            items=queryset.count(),
        )
Reindex projects with recent changes.
_reindex_projects_from
python
readthedocs/readthedocs.org
readthedocs/search/management/commands/reindex_elasticsearch.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/management/commands/reindex_elasticsearch.py
MIT
def _reindex_files_from(self, days_ago, queue):
    """Reindex HTML files from versions with recent builds."""
    since = datetime.now() - timedelta(days=days_ago)
    queryset = (
        Version.objects.for_reindex()
        .filter(builds__date__gte=since)
        .values_list("pk", flat=True)
    )
    for version_id in queryset.iterator():
        reindex_version.apply_async(
            kwargs={"version_id": version_id},
            queue=queue,
        )

    log.info(
        "Tasks issued successfully for re-indexing of versions.",
        number_of_tasks=queryset.count(),
    )
Reindex HTML files from versions with recent builds.
_reindex_files_from
python
readthedocs/readthedocs.org
readthedocs/search/management/commands/reindex_elasticsearch.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/management/commands/reindex_elasticsearch.py
MIT
def handle(self, *args, **options):
    """
    Index models into the Elasticsearch index asynchronously using celery.

    You can specify the model to index by passing the
    `--model <app_label>.<model_name>` parameter.
    Otherwise, it will re-index all the models.
    """
    if options["models"]:
        models = [apps.get_model(model_name) for model_name in options["models"]]
    else:
        models = [Project, HTMLFile]

    queue = options["queue"]
    change_index = options["change_index"]
    update_from = options["update_from"]
    if change_index:
        timestamp = change_index
        print(
            f"You are about to change the index from {models} to `[model]_{timestamp}`",
            "**The old index will be deleted!**",
        )
        if input("Continue? y/n: ") != "y":
            print("Task cancelled")
            sys.exit(1)
        self._change_index(models=models, timestamp=timestamp)
        print(
            textwrap.dedent(
                """
                Indexes have been changed.

                Remember to re-index changed projects and versions with the
                `--update-from n` argument,
                where `n` is the number of days since the re-index.
                """
            )
        )
    elif update_from:
        print(
            "You are about to reindex all changed objects",
            f"from the latest {update_from} days from {models}",
        )
        if input("Continue? y/n: ") != "y":
            print("Task cancelled")
            sys.exit(1)
        self._reindex_from(days_ago=update_from, models=models, queue=queue)
    else:
        print(
            f"You are about to reindex all objects from {models}",
            f"into a new index in the {queue} queue.",
        )
        if input("Continue? y/n: ") != "y":
            print("Task cancelled")
            sys.exit(1)
        timestamp = self._run_reindex_tasks(models=models, queue=queue)
        print(
            textwrap.dedent(
                f"""
                Re-indexing tasks have been created.
                Timestamp: {timestamp}

                Please monitor the tasks.
                After they are completed, run the same command with the
                `--change-index {timestamp}` argument.
                """
            )
        )
Index models into the Elasticsearch index asynchronously using celery. You can specify the model to index by passing the `--model <app_label>.<model_name>` parameter. Otherwise, it will re-index all the models.
handle
python
readthedocs/readthedocs.org
readthedocs/search/management/commands/reindex_elasticsearch.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/management/commands/reindex_elasticsearch.py
MIT
def paginate_queryset(self, queryset, request, view=None):
    """
    Override to get the paginated result from the ES queryset.

    This makes use of our custom paginator and slicing support from the ES DSL object,
    instead of the one used by django's ORM.

    Mostly inspired by https://github.com/encode/django-rest-framework/blob/acbd9d8222e763c7f9c7dc2de23c430c702e06d4/rest_framework/pagination.py#L191  # noqa
    """
    # Needed for other methods of this class.
    self.request = request

    page_size = self.get_page_size(request)
    page_number = request.query_params.get(self.page_query_param, 1)

    original_page_number = page_number
    page_number = self._get_page_number(page_number)

    if page_number <= 0:
        msg = self.invalid_page_message.format(
            page_number=original_page_number,
            message=_("Invalid page"),
        )
        raise NotFound(msg)

    start = (page_number - 1) * page_size
    end = page_number * page_size

    result = []
    total_count = 0
    total_pages = 1

    if queryset:
        result = queryset[start:end].execute()
        total_count = result.hits.total["value"]
        hits = max(1, total_count)
        total_pages = ceil(hits / page_size)

    if total_pages > 1 and self.template is not None:
        # The browsable API should display pagination controls.
        self.display_page_controls = True

    # Needed for other methods of this class.
    self.page = PaginatorPage(
        page_number=page_number,
        total_pages=total_pages,
        count=total_count,
    )

    return result
Override to get the paginated result from the ES queryset. This makes use of our custom paginator and slicing support from the ES DSL object, instead of the one used by django's ORM. Mostly inspired by https://github.com/encode/django-rest-framework/blob/acbd9d8222e763c7f9c7dc2de23c430c702e06d4/rest_framework/pagination.py#L191 # noqa
paginate_queryset
python
readthedocs/readthedocs.org
readthedocs/search/api/pagination.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/api/pagination.py
MIT
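The slicing and page-count arithmetic in `paginate_queryset` is worth isolating; a sketch with hypothetical helper names:

from math import ceil

def page_slice(page_number, page_size):
    # ES slicing is zero-based and end-exclusive, like Python lists.
    return (page_number - 1) * page_size, page_number * page_size

def total_pages(total_count, page_size):
    # max(1, ...) mirrors the paginator: an empty result set still
    # reports one (empty) page instead of zero pages.
    return ceil(max(1, total_count) / page_size)

assert page_slice(2, 50) == (50, 100)
assert total_pages(0, 50) == 1
assert total_pages(61, 50) == 2  # matches the 61-document pagination test above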
def _build_project_data(self, project, version_slug):
    """Build a `ProjectData` object given a project and its version."""
    url = project.get_docs_url(version_slug=version_slug)
    project_alias = project.superprojects.values_list("alias", flat=True).first()
    version_data = VersionData(
        slug=version_slug,
        docs_url=url,
    )
    return ProjectData(
        alias=project_alias,
        version=version_data,
    )
Build a `ProjectData` object given a project and its version.
_build_project_data
python
readthedocs/readthedocs.org
readthedocs/search/api/v2/serializers.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/api/v2/serializers.py
MIT
def _get_project_data(self, obj):
    """
    Get and cache the project data.

    Try to get the data from the ``projects_data`` context,
    and fall back to fetching it from the database.
    If the result is fetched from the database,
    it's cached into ``projects_data``.
    """
    project_data = self.context.get("projects_data", {}).get(obj.project)
    if project_data:
        return project_data

    project = Project.objects.filter(slug=obj.project).first()
    if project:
        projects_data = self.context.setdefault("projects_data", {})
        projects_data[obj.project] = self._build_project_data(project, obj.version)
        return projects_data[obj.project]
    return None
Get and cache the project data. Try to get the data from the ``projects_data`` context, and fall back to fetching it from the database. If the result is fetched from the database, it's cached into ``projects_data``.
_get_project_data
python
readthedocs/readthedocs.org
readthedocs/search/api/v2/serializers.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/api/v2/serializers.py
MIT
def get_blocks(self, obj):
    """Combine and sort inner results (domains and sections)."""
    sections = obj.meta.inner_hits.sections or []
    sorted_results = sorted(
        sections,
        key=attrgetter("meta.score"),
        reverse=True,
    )
    sorted_results = [SectionSearchSerializer(hit).data for hit in sorted_results]
    return sorted_results
Combine and sort inner results (domains and sections).
get_blocks
python
readthedocs/readthedocs.org
readthedocs/search/api/v2/serializers.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/api/v2/serializers.py
MIT
def _validate_query_params(self):
    """
    Validate that all required query params are passed on the request.

    Required query params are: ``q``, ``project`` and ``version``.

    :rtype: None
    :raises: ValidationError if one of them is missing.
    """
    errors = {}
    required_query_params = {"q", "project", "version"}
    request_params = set(self.request.query_params.keys())
    missing_params = required_query_params - request_params
    for param in missing_params:
        errors[param] = [_("This query param is required")]
    if errors:
        raise ValidationError(errors)
Validate all required query params are passed on the request. Query params required are: ``q``, ``project`` and ``version``. :rtype: None :raises: ValidationError if one of them is missing.
_validate_query_params
python
readthedocs/readthedocs.org
readthedocs/search/api/v2/views.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/api/v2/views.py
MIT
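The validation above boils down to a set difference between required and received parameter names. A standalone sketch with DRF's ValidationError swapped for a plain ValueError (the helper name is invented for illustration):

def validate_required_params(params, required=("q", "project", "version")):
    # `params` can be any mapping or iterable of parameter names.
    missing = set(required) - set(params)
    if missing:
        raise ValueError(f"Missing required query params: {sorted(missing)}")

validate_required_params({"q": "term", "project": "docs", "version": "latest"})  # passes
validate_required_params({"q": "term", "project": "docs"})
# ValueError: Missing required query params: ['version']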
def _get_projects_to_search(self): """Get all projects to search.""" main_version = self._get_version() main_project = self._get_project() if not self._has_permission(self.request, main_version): return [] projects_to_search = [(main_project, main_version)] subprojects = Project.objects.filter(superprojects__parent_id=main_project.id) for subproject in subprojects: version = self._get_project_version( project=subproject, version_slug=main_version.slug, include_hidden=False, ) # Fallback to the default version of the subproject. if not version and subproject.default_version: version = self._get_project_version( project=subproject, version_slug=subproject.default_version, include_hidden=False, ) if version and self._has_permission(self.request, version): projects_to_search.append((subproject, version)) return projects_to_search
Get all projects to search.
_get_projects_to_search
python
readthedocs/readthedocs.org
readthedocs/search/api/v2/views.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/api/v2/views.py
MIT
def _get_project_version(self, project, version_slug, include_hidden=True): """ Get a version from a given project. :param project: A `Project` object. :param version_slug: The version slug. :param include_hidden: If hidden versions should be considered. """ return ( Version.internal.public( user=self.request.user, project=project, only_built=True, include_hidden=include_hidden, ) .filter(slug=version_slug) .first() )
Get a version from a given project. :param project: A `Project` object. :param version_slug: The version slug. :param include_hidden: If hidden versions should be considered.
_get_project_version
python
readthedocs/readthedocs.org
readthedocs/search/api/v2/views.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/api/v2/views.py
MIT
def _has_permission(self, request, version):
        """
        Check if `user` is authorized to access `version`.

        The queryset from `_get_project_version` already filters public projects.

        This is mainly to be overridden in .com to make use of the auth backends in the proxied API.
        """
        return True

Check if `user` is authorized to access `version`. The queryset from `_get_project_version` already filters public projects. This is mainly to be overridden in .com to make use of the auth backends in the proxied API.
_has_permission
python
readthedocs/readthedocs.org
readthedocs/search/api/v2/views.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/api/v2/views.py
MIT
def get_queryset(self):
        """
        Returns an Elasticsearch DSL search object or an iterator.

        .. note::

           Calling ``list(search)`` over a DSL search object is the same as
           calling ``search.execute().hits``. This is why a DSL search object
           is compatible with DRF's paginator.
        """
        projects = {
            project.slug: version.slug
            for project, version in self._get_projects_to_search()
        }
        # Check to avoid searching all projects in case it's empty.
        if not projects:
            log.info("Unable to find a version to search")
            return []

        query = self._get_search_query()
        queryset = PageSearch(
            query=query,
            projects=projects,
            aggregate_results=False,
            use_advanced_query=self._use_advanced_query(),
        )
        return queryset

Returns an Elasticsearch DSL search object or an iterator. .. note:: Calling ``list(search)`` over a DSL search object is the same as calling ``search.execute().hits``. This is why a DSL search object is compatible with DRF's paginator.
get_queryset
python
readthedocs/readthedocs.org
readthedocs/search/api/v2/views.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/api/v2/views.py
MIT
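The note in the docstring is what makes this work with DRF's paginator: an elasticsearch-dsl Search object supports slicing and iteration without an explicit execute() call. A hedged sketch, assuming the elasticsearch-dsl package and a reachable cluster; the "page" index name is an assumption for illustration:

from elasticsearch_dsl import Search

s = Search(index="page")
page = s[20:30]   # slicing only sets from/size; no request is sent yet
hits = list(page) # iterating executes, equivalent to page.execute().hits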
def list(self): """List the results using pagination.""" queryset = self.get_queryset() page = self.paginator.paginate_queryset( queryset, self.request, view=self, ) serializer = self.get_serializer(page, many=True, projects=self._get_projects_to_search()) return self.paginator.get_paginated_response(serializer.data)
List the results using pagination.
list
python
readthedocs/readthedocs.org
readthedocs/search/api/v2/views.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/api/v2/views.py
MIT
def description(self): """ Get the view description. Force the description to always be the docstring of this class, even if it's subclassed. """ return SearchAPI.__doc__
Get the view description. Force the description to always be the docstring of this class, even if it's subclassed.
description
python
readthedocs/readthedocs.org
readthedocs/search/api/v3/views.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/api/v3/views.py
MIT
def get_queryset(self):
        """
        Returns an Elasticsearch DSL search object or an iterator.

        .. note::

           Calling ``list(search)`` over a DSL search object is the same as
           calling ``search.execute().hits``. This is why a DSL search object
           is compatible with DRF's paginator.
        """
        use_advanced_query = should_use_advanced_query(self._get_projects_to_search())
        search = self._search_executor.search(
            use_advanced_query=use_advanced_query,
            aggregate_results=False,
        )
        if not search:
            return []
        return search

Returns an Elasticsearch DSL search object or an iterator. .. note:: Calling ``list(search)`` over a DSL search object is the same as calling ``search.execute().hits``. This is why a DSL search object is compatible with DRF's paginator.
get_queryset
python
readthedocs/readthedocs.org
readthedocs/search/api/v3/views.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/api/v3/views.py
MIT
def _add_extra_fields(self, response): """ Add additional fields to the top level response. These are fields that aren't part of the serializers, and are related to the whole list, rather than each element. """ # Add all projects that were used in the final search. response.data["projects"] = [ {"slug": project.slug, "versions": [{"slug": version.slug}]} for project, version in self._get_projects_to_search() ] # Add the query used in the final search, # this doesn't include arguments. response.data["query"] = self._get_search_query()
Add additional fields to the top level response. These are fields that aren't part of the serializers, and are related to the whole list, rather than each element.
_add_extra_fields
python
readthedocs/readthedocs.org
readthedocs/search/api/v3/views.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/api/v3/views.py
MIT
def projects(self):
        """
        Return all projects used in this search.

        If empty, it will search all projects.

        :returns: A list of tuples (project, version).
        """
        projects = islice(self._get_projects_to_search(), self.max_projects)
        # Make sure we are using just one version per project;
        # searching multiple versions of the same project isn't supported yet.
        projects_dict = dict(projects)
        return list(projects_dict.items())
Return all projects used in this search. If empty, it will search all projects. :returns: A list of tuples (project, version).
projects
python
readthedocs/readthedocs.org
readthedocs/search/api/v3/executor.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/api/v3/executor.py
MIT
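The islice/dict combination above both caps the number of projects and deduplicates them, keeping the last version seen for each project. A small illustration of that behavior (the sample data is invented):

from itertools import islice

pairs = [("docs", "v1"), ("docs", "v2"), ("web", "latest"), ("api", "v3")]
limited = dict(islice(pairs, 3))  # stop after max_projects (3 here) items
print(list(limited.items()))      # [('docs', 'v2'), ('web', 'latest')]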
def search(self, **kwargs): """ Perform the search. :param kwargs: All kwargs are passed to the `PageSearch` constructor. """ if not self._has_arguments and self.arguments_required: return None projects = {project.slug: version.slug for project, version in self.projects} # If the search is done without projects, ES will search on all projects. # If we don't have projects and the user provided arguments, # it means we don't have anything to search on (no results). # Or if we don't have projects and we don't allow searching all, # we also just return. if not projects and (self._has_arguments or not self.default_all): return None search = PageSearch( query=self.parser.query, projects=projects, **kwargs, ) return search
Perform the search. :param kwargs: All kwargs are passed to the `PageSearch` constructor.
search
python
readthedocs/readthedocs.org
readthedocs/search/api/v3/executor.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/api/v3/executor.py
MIT
def _get_projects_to_search(self):
        """
        Return an iterator of (project, version) used in this search.

        An iterator (yield syntax) is used so we can stop at
        ``self.max_projects``; this way we avoid fetching projects
        that we won't use.
        """
        if not self._has_arguments:
            if self.arguments_required:
                return None
            yield from self._get_default_projects()
            return None

        for value in self.parser.arguments["project"]:
            project, version = self._get_project_and_version(value)
            if version and self._has_permission(self.request, version):
                yield project, version

        for value in self.parser.arguments["subprojects"]:
            project, version = self._get_project_and_version(value)

            # Add the project itself.
            if version and self._has_permission(self.request, version):
                yield project, version

            if project:
                # If the user didn't provide a version, version_slug will be `None`,
                # and we add all subprojects with their default version,
                # otherwise we will add all projects that match the given version.
                _, version_slug = self._split_project_and_version(value)
                yield from self._get_subprojects(
                    project=project,
                    version_slug=version_slug,
                )

        # Add all projects the user has access to.
        if self.parser.arguments["user"] == "@me":
            yield from self._get_projects_from_user()

Return an iterator of (project, version) used in this search. An iterator (yield syntax) is used so we can stop at ``self.max_projects``; this way we avoid fetching projects that we won't use.
_get_projects_to_search
python
readthedocs/readthedocs.org
readthedocs/search/api/v3/executor.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/api/v3/executor.py
MIT
def _get_subprojects(self, project, version_slug=None):
        """
        Yield (project, version) tuples for all subprojects of `project`.

        If `version_slug` doesn't match a version of the subproject,
        the default version will be used.
        If `version_slug` is None, we will always use the default version.
        """
        subprojects = Project.objects.filter(superprojects__parent=project)
        for subproject in subprojects:
            version = None
            if version_slug:
                version = self._get_project_version(
                    project=subproject,
                    version_slug=version_slug,
                    include_hidden=False,
                )

            # Fallback to the default version of the subproject.
            if not version and subproject.default_version:
                version = self._get_project_version(
                    project=subproject,
                    version_slug=subproject.default_version,
                    include_hidden=False,
                )

            if version and self._has_permission(self.request, version):
                yield subproject, version

Yield (project, version) tuples for all subprojects of `project`. If `version_slug` doesn't match a version of the subproject, the default version will be used. If `version_slug` is None, we will always use the default version.
_get_subprojects
python
readthedocs/readthedocs.org
readthedocs/search/api/v3/executor.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/api/v3/executor.py
MIT
def _has_permission(self, request, version): """ Check if `user` is authorized to access `version`. The queryset from `_get_project_version` already filters public projects. This is mainly to be overridden in .com to make use of the auth backends in the proxied API. """ return True
Check if `user` is authorized to access `version`. The queryset from `_get_project_version` already filters public projects. This is mainly to be overridden in .com to make use of the auth backends in the proxied API.
_has_permission
python
readthedocs/readthedocs.org
readthedocs/search/api/v3/executor.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/api/v3/executor.py
MIT
def _get_project_version(self, project, version_slug, include_hidden=True): """ Get a version from a given project. :param project: A `Project` object. :param version_slug: The version slug. :param include_hidden: If hidden versions should be considered. """ return ( Version.internal.public( user=self.request.user, project=project, only_built=True, include_hidden=include_hidden, ) .filter(slug=version_slug) .first() )
Get a version from a given project. :param project: A `Project` object. :param version_slug: The version slug. :param include_hidden: If hidden versions should be considered.
_get_project_version
python
readthedocs/readthedocs.org
readthedocs/search/api/v3/executor.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/api/v3/executor.py
MIT
def _split_project_and_version(self, term): """ Split a term of the form ``{project}/{version}``. :returns: A tuple of project and version. If the version part isn't found, `None` will be returned in its place. """ parts = term.split("/", maxsplit=1) if len(parts) > 1: return parts return parts[0], None
Split a term of the form ``{project}/{version}``. :returns: A tuple of project and version. If the version part isn't found, `None` will be returned in its place.
_split_project_and_version
python
readthedocs/readthedocs.org
readthedocs/search/api/v3/executor.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/api/v3/executor.py
MIT
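An equivalent formulation using str.partition, normalized so a tuple is returned in both branches (the original returns str.split's list when a slash is present). Shown only as an illustration:

def split_project_and_version(term):
    project, _, version = term.partition("/")
    return project, (version or None)

assert split_project_and_version("docs/stable") == ("docs", "stable")
assert split_project_and_version("docs") == ("docs", None)
assert split_project_and_version("docs/a/b") == ("docs", "a/b")  # maxsplit=1 behavior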
def get_dummy_processed_json(self, extra=None): """ Return a dict to be used as data indexed by ES. :param extra: By default it returns some default values, you can override this passing a dict to extra. """ extra = extra or {} default = { "path": "index.html", "title": "Title", "sections": [ { "id": "first", "title": "First Paragraph", "content": "First paragraph, content of interest: test.", } ], "domain_data": [], } default.update(extra) return default
Return a dict to be used as data indexed by ES. :param extra: By default it returns some default values, you can override this passing a dict to extra.
get_dummy_processed_json
python
readthedocs/readthedocs.org
readthedocs/search/api/v3/tests/test_api.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/api/v3/tests/test_api.py
MIT
def create_index(self, version, files=None): """ Create a search index for `version` with files as content. :param version: Version object :param files: A dictionary with the filename as key and a dict as value to be passed to `get_dummy_processed_json`. """ files = files or {"index.html": {}} for file, extra in files.items(): html_file = HTMLFile.objects.filter( project=version.project, version=version, name=file ).first() if not html_file: html_file = get( HTMLFile, project=version.project, version=version, name=file, ) html_file.get_processed_json = mock.MagicMock( name="get_processed_json", return_value=self.get_dummy_processed_json(extra), ) PageDocument().update(html_file)
Create a search index for `version` with files as content. :param version: Version object :param files: A dictionary with the filename as key and a dict as value to be passed to `get_dummy_processed_json`.
create_index
python
readthedocs/readthedocs.org
readthedocs/search/api/v3/tests/test_api.py
https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/search/api/v3/tests/test_api.py
MIT
def strip(line): """Strip comments and whitespace from a line of text.""" return line.split('#', 1)[0].strip()
Strip comments and whitespace from a line of text.
strip
python
MechanicalSoup/MechanicalSoup
setup.py
https://github.com/MechanicalSoup/MechanicalSoup/blob/master/setup.py
MIT
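Given the strip helper above, a few representative inputs and outputs:

assert strip("requests>=2.0  # HTTP library") == "requests>=2.0"
assert strip("# a full-line comment") == ""
assert strip("   ") == ""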
def requirements_from_file(filename): """Parses a pip requirements file into a list.""" with open(filename, 'r') as fd: return [strip(line) for line in fd if strip(line)]
Parses a pip requirements file into a list.
requirements_from_file
python
MechanicalSoup/MechanicalSoup
setup.py
https://github.com/MechanicalSoup/MechanicalSoup/blob/master/setup.py
MIT
def read(fname, URL, URLImage):
    """Read the content of a file, turning relative links into absolute ones."""
    with open(path.join(path.dirname(__file__), fname)) as fd:
        readme = fd.read()
    if hasattr(readme, 'decode'):
        # In Python 3, turn bytes into str.
        readme = readme.decode('utf8')

    # turn relative links into absolute ones
    readme = re.sub(r'`<([^>]*)>`__',
                    r'`\1 <' + URL + r"/blob/main/\1>`__",
                    readme)
    readme = re.sub(r"\.\. image:: /",
                    ".. image:: " + URLImage + "/",
                    readme)
    return readme

Read the content of a file, turning relative links into absolute ones.
read
python
MechanicalSoup/MechanicalSoup
setup.py
https://github.com/MechanicalSoup/MechanicalSoup/blob/master/setup.py
MIT
def __looks_like_html(response): """Guesses entity type when Content-Type header is missing. Since Content-Type is not strictly required, some servers leave it out. """ text = response.text.lstrip().lower() return text.startswith('<html') or text.startswith('<!doctype')
Guesses entity type when Content-Type header is missing. Since Content-Type is not strictly required, some servers leave it out.
__looks_like_html
python
MechanicalSoup/MechanicalSoup
mechanicalsoup/browser.py
https://github.com/MechanicalSoup/MechanicalSoup/blob/master/mechanicalsoup/browser.py
MIT
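The heuristic only needs the leading characters of the body. A self-contained sketch with a stand-in for requests.Response:

from types import SimpleNamespace

def looks_like_html(response):
    # Same check as __looks_like_html, using startswith with a tuple.
    text = response.text.lstrip().lower()
    return text.startswith(('<html', '<!doctype'))

assert looks_like_html(SimpleNamespace(text='  <!DOCTYPE html><html>'))
assert not looks_like_html(SimpleNamespace(text='{"html": false}'))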
def add_soup(response, soup_config): """Attaches a soup object to a requests response.""" if ("text/html" in response.headers.get("Content-Type", "") or Browser.__looks_like_html(response)): # Note: By default (no charset provided in HTTP headers), requests # returns 'ISO-8859-1' which is the default for HTML4, even if HTML # code specifies a different encoding. In this case, we want to # resort to bs4 sniffing, hence the special handling here. http_encoding = ( response.encoding if 'charset' in response.headers.get("Content-Type", "") else None ) html_encoding = bs4.dammit.EncodingDetector.find_declared_encoding( response.content, is_html=True ) # See https://www.w3.org/International/questions/qa-html-encoding-declarations.en#httphead # noqa: E501 # > The HTTP header has a higher precedence than the in-document # > meta declarations. encoding = http_encoding if http_encoding else html_encoding response.soup = bs4.BeautifulSoup( response.content, from_encoding=encoding, **soup_config ) else: response.soup = None
Attaches a soup object to a requests response.
add_soup
python
MechanicalSoup/MechanicalSoup
mechanicalsoup/browser.py
https://github.com/MechanicalSoup/MechanicalSoup/blob/master/mechanicalsoup/browser.py
MIT
def set_cookiejar(self, cookiejar): """Replaces the current cookiejar in the requests session. Since the session handles cookies automatically without calling this function, only use this when default cookie handling is insufficient. :param cookiejar: Any `http.cookiejar.CookieJar <https://docs.python.org/3/library/http.cookiejar.html#http.cookiejar.CookieJar>`__ compatible object. """ self.session.cookies = cookiejar
Replaces the current cookiejar in the requests session. Since the session handles cookies automatically without calling this function, only use this when default cookie handling is insufficient. :param cookiejar: Any `http.cookiejar.CookieJar <https://docs.python.org/3/library/http.cookiejar.html#http.cookiejar.CookieJar>`__ compatible object.
set_cookiejar
python
MechanicalSoup/MechanicalSoup
mechanicalsoup/browser.py
https://github.com/MechanicalSoup/MechanicalSoup/blob/master/mechanicalsoup/browser.py
MIT
def get_cookiejar(self): """Gets the cookiejar from the requests session.""" return self.session.cookies
Gets the cookiejar from the requests session.
get_cookiejar
python
MechanicalSoup/MechanicalSoup
mechanicalsoup/browser.py
https://github.com/MechanicalSoup/MechanicalSoup/blob/master/mechanicalsoup/browser.py
MIT
def set_user_agent(self, user_agent): """Replaces the current user agent in the requests session headers.""" # set a default user_agent if not specified if user_agent is None: requests_ua = requests.utils.default_user_agent() user_agent = f'{requests_ua} ({__title__}/{__version__})' # the requests module uses a case-insensitive dict for session headers self.session.headers['User-agent'] = user_agent
Replaces the current user agent in the requests session headers.
set_user_agent
python
MechanicalSoup/MechanicalSoup
mechanicalsoup/browser.py
https://github.com/MechanicalSoup/MechanicalSoup/blob/master/mechanicalsoup/browser.py
MIT
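What the resulting default header value looks like, assuming requests 2.x; the version numbers below are illustrative only:

import requests.utils

requests_ua = requests.utils.default_user_agent()
user_agent = f"{requests_ua} (MechanicalSoup/1.3.0)"
# e.g. 'python-requests/2.31.0 (MechanicalSoup/1.3.0)'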
def request(self, *args, **kwargs):
        """Straightforward wrapper around `requests.Session.request
        <http://docs.python-requests.org/en/master/api/#requests.Session.request>`__.

        :return: `requests.Response
            <http://docs.python-requests.org/en/master/api/#requests.Response>`__
            object with a *soup*-attribute added by :func:`add_soup`.

        This is a low-level function that should not be called for
        basic usage (use :func:`get` or :func:`post` instead). Use it if you
        need an HTTP verb that MechanicalSoup doesn't manage (e.g. MKCOL).
        """
        response = self.session.request(*args, **kwargs)
        Browser.add_soup(response, self.soup_config)
        return response

Straightforward wrapper around `requests.Session.request <http://docs.python-requests.org/en/master/api/#requests.Session.request>`__. :return: `requests.Response <http://docs.python-requests.org/en/master/api/#requests.Response>`__ object with a *soup*-attribute added by :func:`add_soup`. This is a low-level function that should not be called for basic usage (use :func:`get` or :func:`post` instead). Use it if you need an HTTP verb that MechanicalSoup doesn't manage (e.g. MKCOL).
request
python
MechanicalSoup/MechanicalSoup
mechanicalsoup/browser.py
https://github.com/MechanicalSoup/MechanicalSoup/blob/master/mechanicalsoup/browser.py
MIT
def get(self, *args, **kwargs): """Straightforward wrapper around `requests.Session.get <http://docs.python-requests.org/en/master/api/#requests.Session.get>`__. :return: `requests.Response <http://docs.python-requests.org/en/master/api/#requests.Response>`__ object with a *soup*-attribute added by :func:`add_soup`. """ response = self.session.get(*args, **kwargs) if self.raise_on_404 and response.status_code == 404: raise LinkNotFoundError() Browser.add_soup(response, self.soup_config) return response
Straightforward wrapper around `requests.Session.get <http://docs.python-requests.org/en/master/api/#requests.Session.get>`__. :return: `requests.Response <http://docs.python-requests.org/en/master/api/#requests.Response>`__ object with a *soup*-attribute added by :func:`add_soup`.
get
python
MechanicalSoup/MechanicalSoup
mechanicalsoup/browser.py
https://github.com/MechanicalSoup/MechanicalSoup/blob/master/mechanicalsoup/browser.py
MIT
def post(self, *args, **kwargs): """Straightforward wrapper around `requests.Session.post <http://docs.python-requests.org/en/master/api/#requests.Session.post>`__. :return: `requests.Response <http://docs.python-requests.org/en/master/api/#requests.Response>`__ object with a *soup*-attribute added by :func:`add_soup`. """ response = self.session.post(*args, **kwargs) Browser.add_soup(response, self.soup_config) return response
Straightforward wrapper around `requests.Session.post <http://docs.python-requests.org/en/master/api/#requests.Session.post>`__. :return: `requests.Response <http://docs.python-requests.org/en/master/api/#requests.Response>`__ object with a *soup*-attribute added by :func:`add_soup`.
post
python
MechanicalSoup/MechanicalSoup
mechanicalsoup/browser.py
https://github.com/MechanicalSoup/MechanicalSoup/blob/master/mechanicalsoup/browser.py
MIT
def put(self, *args, **kwargs): """Straightforward wrapper around `requests.Session.put <http://docs.python-requests.org/en/master/api/#requests.Session.put>`__. :return: `requests.Response <http://docs.python-requests.org/en/master/api/#requests.Response>`__ object with a *soup*-attribute added by :func:`add_soup`. """ response = self.session.put(*args, **kwargs) Browser.add_soup(response, self.soup_config) return response
Straightforward wrapper around `requests.Session.put <http://docs.python-requests.org/en/master/api/#requests.Session.put>`__. :return: `requests.Response <http://docs.python-requests.org/en/master/api/#requests.Response>`__ object with a *soup*-attribute added by :func:`add_soup`.
put
python
MechanicalSoup/MechanicalSoup
mechanicalsoup/browser.py
https://github.com/MechanicalSoup/MechanicalSoup/blob/master/mechanicalsoup/browser.py
MIT
def _get_request_kwargs(method, url, **kwargs): """This method exists to raise a TypeError when a method or url is specified in the kwargs. """ request_kwargs = {"method": method, "url": url} request_kwargs.update(kwargs) return request_kwargs
This method exists to raise a TypeError when a method or url is specified in the kwargs.
_get_request_kwargs
python
MechanicalSoup/MechanicalSoup
mechanicalsoup/browser.py
https://github.com/MechanicalSoup/MechanicalSoup/blob/master/mechanicalsoup/browser.py
MIT
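A demonstration of the TypeError this indirection produces, assuming the helper is reachable as a staticmethod on Browser (as the cls._get_request_kwargs call in get_request_kwargs suggests):

import mechanicalsoup

kwargs = {"method": "POST"}  # a stray method smuggled in through **kwargs
mechanicalsoup.Browser._get_request_kwargs("GET", "http://example.com", **kwargs)
# TypeError: _get_request_kwargs() got multiple values for argument 'method'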
def get_request_kwargs(cls, form, url=None, **kwargs): """Extract input data from the form.""" method = str(form.get("method", "get")) action = form.get("action") url = urllib.parse.urljoin(url, action) if url is None: # This happens when both `action` and `url` are None. raise ValueError('no URL to submit to') # read https://www.w3.org/TR/html52/sec-forms.html if method.lower() == "get": data = kwargs.pop("params", dict()) else: data = kwargs.pop("data", dict()) files = kwargs.pop("files", dict()) # Use a list of 2-tuples to better reflect the behavior of browser QSL. # Requests also retains order when encoding form data in 2-tuple lists. data = [(k, v) for k, v in data.items()] multipart = form.get("enctype", "") == "multipart/form-data" # Process form tags in the order that they appear on the page, # skipping those tags that do not have a name-attribute. selector = ",".join(f"{tag}[name]" for tag in ("input", "button", "textarea", "select")) for tag in form.select(selector): name = tag.get("name") # name-attribute of tag # Skip disabled elements, since they should not be submitted. if tag.has_attr('disabled'): continue if tag.name == "input": if tag.get("type", "").lower() in ("radio", "checkbox"): if "checked" not in tag.attrs: continue value = tag.get("value", "on") else: # browsers use empty string for inputs with missing values value = tag.get("value", "") # If the enctype is not multipart, the filename is put in # the form as a text input and the file is not sent. if is_multipart_file_upload(form, tag): if isinstance(value, io.IOBase): content = value filename = os.path.basename(getattr(value, "name", "")) else: content = "" filename = os.path.basename(value) # If content is the empty string, we still pass it # for consistency with browsers (see # https://github.com/MechanicalSoup/MechanicalSoup/issues/250). files[name] = (filename, content) else: if isinstance(value, io.IOBase): value = os.path.basename(getattr(value, "name", "")) data.append((name, value)) elif tag.name == "button": if tag.get("type", "").lower() in ("button", "reset"): continue else: data.append((name, tag.get("value", ""))) elif tag.name == "textarea": data.append((name, tag.text)) elif tag.name == "select": # If the value attribute is not specified, the content will # be passed as a value instead. options = tag.select("option") selected_values = [i.get("value", i.text) for i in options if "selected" in i.attrs] if "multiple" in tag.attrs: for value in selected_values: data.append((name, value)) elif selected_values: # A standard select element only allows one option to be # selected, but browsers pick last if somehow multiple. data.append((name, selected_values[-1])) elif options: # Selects the first option if none are selected first_value = options[0].get("value", options[0].text) data.append((name, first_value)) if method.lower() == "get": kwargs["params"] = data else: kwargs["data"] = data # The following part of the function is here to respect the # enctype specified by the form, i.e. force sending multipart # content. Since Requests doesn't have yet a feature to choose # enctype, we have to use tricks to make it behave as we want # This code will be updated if Requests implements it. if multipart and not files: # Requests will switch to "multipart/form-data" only if # files pass the `if files:` test, so in this case we use # a modified dict that passes the if test even if empty. 
            class DictThatReturnsTrue(dict):
                def __bool__(self):
                    return True
                __nonzero__ = __bool__

            files = DictThatReturnsTrue()

        return cls._get_request_kwargs(method, url, files=files, **kwargs)
Extract input data from the form.
get_request_kwargs
python
MechanicalSoup/MechanicalSoup
mechanicalsoup/browser.py
https://github.com/MechanicalSoup/MechanicalSoup/blob/master/mechanicalsoup/browser.py
MIT
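The truthiness trick at the end of get_request_kwargs, in isolation: a normal empty dict is falsy, so Requests would skip its multipart encoding path, while the subclass stays truthy even when empty:

class DictThatReturnsTrue(dict):
    def __bool__(self):
        return True

assert not bool({})                 # empty dict: falsy, multipart skipped
assert bool(DictThatReturnsTrue())  # empty but truthy: multipart forced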
def _request(self, form, url=None, **kwargs): """Extract input data from the form to pass to a Requests session.""" request_kwargs = Browser.get_request_kwargs(form, url, **kwargs) return self.session.request(**request_kwargs)
Extract input data from the form to pass to a Requests session.
_request
python
MechanicalSoup/MechanicalSoup
mechanicalsoup/browser.py
https://github.com/MechanicalSoup/MechanicalSoup/blob/master/mechanicalsoup/browser.py
MIT
def submit(self, form, url=None, **kwargs): """Prepares and sends a form request. NOTE: To submit a form with a :class:`StatefulBrowser` instance, it is recommended to use :func:`StatefulBrowser.submit_selected` instead of this method so that the browser state is correctly updated. :param form: The filled-out form. :param url: URL of the page the form is on. If the form action is a relative path, then this must be specified. :param \\*\\*kwargs: Arguments forwarded to `requests.Session.request <http://docs.python-requests.org/en/master/api/#requests.Session.request>`__. If `files`, `params` (with GET), or `data` (with POST) are specified, they will be appended to by the contents of `form`. :return: `requests.Response <http://docs.python-requests.org/en/master/api/#requests.Response>`__ object with a *soup*-attribute added by :func:`add_soup`. """ if isinstance(form, Form): form = form.form response = self._request(form, url, **kwargs) Browser.add_soup(response, self.soup_config) return response
Prepares and sends a form request. NOTE: To submit a form with a :class:`StatefulBrowser` instance, it is recommended to use :func:`StatefulBrowser.submit_selected` instead of this method so that the browser state is correctly updated. :param form: The filled-out form. :param url: URL of the page the form is on. If the form action is a relative path, then this must be specified. :param \\*\\*kwargs: Arguments forwarded to `requests.Session.request <http://docs.python-requests.org/en/master/api/#requests.Session.request>`__. If `files`, `params` (with GET), or `data` (with POST) are specified, they will be appended to by the contents of `form`. :return: `requests.Response <http://docs.python-requests.org/en/master/api/#requests.Response>`__ object with a *soup*-attribute added by :func:`add_soup`.
submit
python
MechanicalSoup/MechanicalSoup
mechanicalsoup/browser.py
https://github.com/MechanicalSoup/MechanicalSoup/blob/master/mechanicalsoup/browser.py
MIT
def launch_browser(self, soup):
        """Launch a browser to display a page, for debugging purposes.

        :param soup: Page contents to display, supplied as a bs4 soup object.
        """
        with tempfile.NamedTemporaryFile(delete=False, suffix='.html') as file:
            file.write(soup.encode())
        webbrowser.open('file://' + file.name)

Launch a browser to display a page, for debugging purposes. :param soup: Page contents to display, supplied as a bs4 soup object.
launch_browser
python
MechanicalSoup/MechanicalSoup
mechanicalsoup/browser.py
https://github.com/MechanicalSoup/MechanicalSoup/blob/master/mechanicalsoup/browser.py
MIT
def close(self): """Close the current session, if still open.""" if self.session is not None: self.session.cookies.clear() self.session.close() self.session = None
Close the current session, if still open.
close
python
MechanicalSoup/MechanicalSoup
mechanicalsoup/browser.py
https://github.com/MechanicalSoup/MechanicalSoup/blob/master/mechanicalsoup/browser.py
MIT
def set_input(self, data): """Fill-in a set of fields in a form. Example: filling-in a login/password form .. code-block:: python form.set_input({"login": username, "password": password}) This will find the input element named "login" and give it the value ``username``, and the input element named "password" and give it the value ``password``. """ for (name, value) in data.items(): i = self.form.find("input", {"name": name}) if not i: raise InvalidFormMethod("No input field named " + name) self._assert_valid_file_upload(i, value) i["value"] = value
Fill-in a set of fields in a form. Example: filling-in a login/password form .. code-block:: python form.set_input({"login": username, "password": password}) This will find the input element named "login" and give it the value ``username``, and the input element named "password" and give it the value ``password``.
set_input
python
MechanicalSoup/MechanicalSoup
mechanicalsoup/form.py
https://github.com/MechanicalSoup/MechanicalSoup/blob/master/mechanicalsoup/form.py
MIT
def uncheck_all(self, name): """Remove the *checked*-attribute of all input elements with a *name*-attribute given by ``name``. """ for option in self.form.find_all("input", {"name": name}): if "checked" in option.attrs: del option.attrs["checked"]
Remove the *checked*-attribute of all input elements with a *name*-attribute given by ``name``.
uncheck_all
python
MechanicalSoup/MechanicalSoup
mechanicalsoup/form.py
https://github.com/MechanicalSoup/MechanicalSoup/blob/master/mechanicalsoup/form.py
MIT
def check(self, data): """For backwards compatibility, this method handles checkboxes and radio buttons in a single call. It will not uncheck any checkboxes unless explicitly specified by ``data``, in contrast with the default behavior of :func:`~Form.set_checkbox`. """ for (name, value) in data.items(): try: self.set_checkbox({name: value}, uncheck_other_boxes=False) continue except InvalidFormMethod: pass try: self.set_radio({name: value}) continue except InvalidFormMethod: pass raise LinkNotFoundError("No input checkbox/radio named " + name)
For backwards compatibility, this method handles checkboxes and radio buttons in a single call. It will not uncheck any checkboxes unless explicitly specified by ``data``, in contrast with the default behavior of :func:`~Form.set_checkbox`.
check
python
MechanicalSoup/MechanicalSoup
mechanicalsoup/form.py
https://github.com/MechanicalSoup/MechanicalSoup/blob/master/mechanicalsoup/form.py
MIT
def set_checkbox(self, data, uncheck_other_boxes=True): """Set the *checked*-attribute of input elements of type "checkbox" specified by ``data`` (i.e. check boxes). :param data: Dict of ``{name: value, ...}``. In the family of checkboxes whose *name*-attribute is ``name``, check the box whose *value*-attribute is ``value``. All boxes in the family can be checked (unchecked) if ``value`` is True (False). To check multiple specific boxes, let ``value`` be a tuple or list. :param uncheck_other_boxes: If True (default), before checking any boxes specified by ``data``, uncheck the entire checkbox family. Consider setting to False if some boxes are checked by default when the HTML is served. """ for (name, value) in data.items(): # Case-insensitive search for type=checkbox selector = 'input[type="checkbox" i][name="{}"]'.format(name) checkboxes = self.form.select(selector) if not checkboxes: raise InvalidFormMethod("No input checkbox named " + name) # uncheck if requested if uncheck_other_boxes: self.uncheck_all(name) # Wrap individual values (e.g. int, str) in a 1-element tuple. if not isinstance(value, list) and not isinstance(value, tuple): value = (value,) # Check or uncheck one or more boxes for choice in value: choice_str = str(choice) # Allow for example literal numbers for checkbox in checkboxes: if checkbox.attrs.get("value", "on") == choice_str: checkbox["checked"] = "" break # Allow specifying True or False to check/uncheck elif choice is True: checkbox["checked"] = "" break elif choice is False: if "checked" in checkbox.attrs: del checkbox.attrs["checked"] break else: raise LinkNotFoundError( "No input checkbox named %s with choice %s" % (name, choice) )
Set the *checked*-attribute of input elements of type "checkbox" specified by ``data`` (i.e. check boxes). :param data: Dict of ``{name: value, ...}``. In the family of checkboxes whose *name*-attribute is ``name``, check the box whose *value*-attribute is ``value``. All boxes in the family can be checked (unchecked) if ``value`` is True (False). To check multiple specific boxes, let ``value`` be a tuple or list. :param uncheck_other_boxes: If True (default), before checking any boxes specified by ``data``, uncheck the entire checkbox family. Consider setting to False if some boxes are checked by default when the HTML is served.
set_checkbox
python
MechanicalSoup/MechanicalSoup
mechanicalsoup/form.py
https://github.com/MechanicalSoup/MechanicalSoup/blob/master/mechanicalsoup/form.py
MIT
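A hedged usage sketch; the form markup and field names are invented for illustration. mechanicalsoup.Form wraps a bs4 <form> tag directly:

import bs4
import mechanicalsoup

html = '''<form>
  <input type="checkbox" name="topping" value="cheese">
  <input type="checkbox" name="topping" value="bacon">
  <input type="checkbox" name="topping" value="onion" checked>
</form>'''
form = mechanicalsoup.Form(bs4.BeautifulSoup(html, "html.parser").form)

# Checks cheese and bacon; onion is unchecked first because
# uncheck_other_boxes defaults to True.
form.set_checkbox({"topping": ("cheese", "bacon")})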
def set_radio(self, data): """Set the *checked*-attribute of input elements of type "radio" specified by ``data`` (i.e. select radio buttons). :param data: Dict of ``{name: value, ...}``. In the family of radio buttons whose *name*-attribute is ``name``, check the radio button whose *value*-attribute is ``value``. Only one radio button in the family can be checked. """ for (name, value) in data.items(): # Case-insensitive search for type=radio selector = 'input[type="radio" i][name="{}"]'.format(name) radios = self.form.select(selector) if not radios: raise InvalidFormMethod("No input radio named " + name) # only one radio button can be checked self.uncheck_all(name) # Check the appropriate radio button (value cannot be a list/tuple) for radio in radios: if radio.attrs.get("value", "on") == str(value): radio["checked"] = "" break else: raise LinkNotFoundError( f"No input radio named {name} with choice {value}" )
Set the *checked*-attribute of input elements of type "radio" specified by ``data`` (i.e. select radio buttons). :param data: Dict of ``{name: value, ...}``. In the family of radio buttons whose *name*-attribute is ``name``, check the radio button whose *value*-attribute is ``value``. Only one radio button in the family can be checked.
set_radio
python
MechanicalSoup/MechanicalSoup
mechanicalsoup/form.py
https://github.com/MechanicalSoup/MechanicalSoup/blob/master/mechanicalsoup/form.py
MIT
def set_textarea(self, data): """Set the *string*-attribute of the first textarea element specified by ``data`` (i.e. set the text of a textarea). :param data: Dict of ``{name: value, ...}``. The textarea whose *name*-attribute is ``name`` will have its *string*-attribute set to ``value``. """ for (name, value) in data.items(): t = self.form.find("textarea", {"name": name}) if not t: raise InvalidFormMethod("No textarea named " + name) t.string = value
Set the *string*-attribute of the first textarea element specified by ``data`` (i.e. set the text of a textarea). :param data: Dict of ``{name: value, ...}``. The textarea whose *name*-attribute is ``name`` will have its *string*-attribute set to ``value``.
set_textarea
python
MechanicalSoup/MechanicalSoup
mechanicalsoup/form.py
https://github.com/MechanicalSoup/MechanicalSoup/blob/master/mechanicalsoup/form.py
MIT
def set_select(self, data): """Set the *selected*-attribute of the first option element specified by ``data`` (i.e. select an option from a dropdown). :param data: Dict of ``{name: value, ...}``. Find the select element whose *name*-attribute is ``name``. Then select from among its children the option element whose *value*-attribute is ``value``. If no matching *value*-attribute is found, this will search for an option whose text matches ``value``. If the select element's *multiple*-attribute is set, then ``value`` can be a list or tuple to select multiple options. """ for (name, value) in data.items(): select = self.form.find("select", {"name": name}) if not select: raise InvalidFormMethod("No select named " + name) # Deselect all options first for option in select.find_all("option"): if "selected" in option.attrs: del option.attrs["selected"] # Wrap individual values in a 1-element tuple. # If value is a list/tuple, select must be a <select multiple>. if not isinstance(value, list) and not isinstance(value, tuple): value = (value,) elif "multiple" not in select.attrs: raise LinkNotFoundError("Cannot select multiple options!") for choice in value: option = select.find("option", {"value": choice}) # try to find with text instead of value if not option: option = select.find("option", string=choice) if not option: raise LinkNotFoundError( f'Option {choice} not found for select {name}' ) option.attrs["selected"] = "selected"
Set the *selected*-attribute of the first option element specified by ``data`` (i.e. select an option from a dropdown). :param data: Dict of ``{name: value, ...}``. Find the select element whose *name*-attribute is ``name``. Then select from among its children the option element whose *value*-attribute is ``value``. If no matching *value*-attribute is found, this will search for an option whose text matches ``value``. If the select element's *multiple*-attribute is set, then ``value`` can be a list or tuple to select multiple options.
set_select
python
MechanicalSoup/MechanicalSoup
mechanicalsoup/form.py
https://github.com/MechanicalSoup/MechanicalSoup/blob/master/mechanicalsoup/form.py
MIT
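A hedged usage sketch for a multi-select; markup and names are invented. A tuple value is only legal here because the <select> carries the multiple attribute:

import bs4
import mechanicalsoup

html = '''<form><select name="color" multiple>
  <option value="red">Red</option>
  <option value="green">Green</option>
  <option value="blue">Blue</option>
</select></form>'''
form = mechanicalsoup.Form(bs4.BeautifulSoup(html, "html.parser").form)
form.set_select({"color": ("red", "blue")})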
def __setitem__(self, name, value): """Forwards arguments to :func:`~Form.set`. For example, :code:`form["name"] = "value"` calls :code:`form.set("name", "value")`. """ return self.set(name, value)
Forwards arguments to :func:`~Form.set`. For example, :code:`form["name"] = "value"` calls :code:`form.set("name", "value")`.
__setitem__
python
MechanicalSoup/MechanicalSoup
mechanicalsoup/form.py
https://github.com/MechanicalSoup/MechanicalSoup/blob/master/mechanicalsoup/form.py
MIT
def set(self, name, value, force=False): """Set a form element identified by ``name`` to a specified ``value``. The type of element (input, textarea, select, ...) does not need to be given; it is inferred by the following methods: :func:`~Form.set_checkbox`, :func:`~Form.set_radio`, :func:`~Form.set_input`, :func:`~Form.set_textarea`, :func:`~Form.set_select`. If none of these methods find a matching element, then if ``force`` is True, a new element (``<input type="text" ...>``) will be added using :func:`~Form.new_control`. Example: filling-in a login/password form with EULA checkbox .. code-block:: python form.set("login", username) form.set("password", password) form.set("eula-checkbox", True) Example: uploading a file through a ``<input type="file" name="tagname">`` field (provide an open file object, and its content will be uploaded): .. code-block:: python form.set("tagname", open(path_to_local_file, "rb")) """ for func in ("checkbox", "radio", "input", "textarea", "select"): try: getattr(self, "set_" + func)({name: value}) return except InvalidFormMethod: pass if force: self.new_control('text', name, value=value) return raise LinkNotFoundError("No valid element named " + name)
Set a form element identified by ``name`` to a specified ``value``. The type of element (input, textarea, select, ...) does not need to be given; it is inferred by the following methods: :func:`~Form.set_checkbox`, :func:`~Form.set_radio`, :func:`~Form.set_input`, :func:`~Form.set_textarea`, :func:`~Form.set_select`. If none of these methods find a matching element, then if ``force`` is True, a new element (``<input type="text" ...>``) will be added using :func:`~Form.new_control`. Example: filling-in a login/password form with EULA checkbox .. code-block:: python form.set("login", username) form.set("password", password) form.set("eula-checkbox", True) Example: uploading a file through a ``<input type="file" name="tagname">`` field (provide an open file object, and its content will be uploaded): .. code-block:: python form.set("tagname", open(path_to_local_file, "rb"))
set
python
MechanicalSoup/MechanicalSoup
mechanicalsoup/form.py
https://github.com/MechanicalSoup/MechanicalSoup/blob/master/mechanicalsoup/form.py
MIT
def new_control(self, type, name, value, **kwargs):
        """Add a new input element to the form.

        The arguments set the attributes of the new element.
        """
        # Remove existing input-like elements with the same name
        for tag in ('input', 'textarea', 'select'):
            for old in self.form.find_all(tag, {'name': name}):
                old.decompose()
        # We don't have access to the original soup object (just the
        # Tag), so we instantiate a new BeautifulSoup() to call
        # new_tag(). We're only building the soup object, not parsing
        # anything, so the parser doesn't matter. Specify the parser
        # included with Python to avoid adding a dependency.
        control = BeautifulSoup("", "html.parser").new_tag('input')
        control['type'] = type
        control['name'] = name
        control['value'] = value
        for k, v in kwargs.items():
            control[k] = v
        self._assert_valid_file_upload(control, value)
        self.form.append(control)
        return control
Add a new input element to the form. The arguments set the attributes of the new element.
new_control
python
MechanicalSoup/MechanicalSoup
mechanicalsoup/form.py
https://github.com/MechanicalSoup/MechanicalSoup/blob/master/mechanicalsoup/form.py
MIT
def choose_submit(self, submit): """Selects the input (or button) element to use for form submission. :param submit: The :class:`bs4.element.Tag` (or just its *name*-attribute) that identifies the submit element to use. If ``None``, will choose the first valid submit element in the form, if one exists. If ``False``, will not use any submit element; this is useful for simulating AJAX requests, for example. To simulate a normal web browser, only one submit element must be sent. Therefore, this does not need to be called if there is only one submit element in the form. If the element is not found or if multiple elements match, raise a :class:`LinkNotFoundError` exception. Example: :: browser = mechanicalsoup.StatefulBrowser() browser.open(url) form = browser.select_form() form.choose_submit('form_name_attr') browser.submit_selected() """ # Since choose_submit is destructive, it doesn't make sense to call # this method twice unless no submit is specified. if self._submit_chosen: if submit is None: return else: raise Exception('Submit already chosen. Cannot change submit!') # All buttons NOT of type (button,reset) are valid submits # Case-insensitive search for type=submit inps = [i for i in self.form.select('input[type="submit" i], button') if i.get("type", "").lower() not in ('button', 'reset')] # If no submit specified, choose the first one if submit is None and inps: submit = inps[0] found = False for inp in inps: if (inp.has_attr('name') and inp['name'] == submit): if found: raise LinkNotFoundError( f"Multiple submit elements match: {submit}" ) found = True elif inp == submit: if found: # Ignore submit element since it is an exact # duplicate of the one we're looking at. del inp['name'] found = True else: # Delete any non-matching element's name so that it will be # omitted from the submitted form data. del inp['name'] if not found and submit is not None and submit is not False: raise LinkNotFoundError( f"Specified submit element not found: {submit}" ) self._submit_chosen = True
Selects the input (or button) element to use for form submission. :param submit: The :class:`bs4.element.Tag` (or just its *name*-attribute) that identifies the submit element to use. If ``None``, will choose the first valid submit element in the form, if one exists. If ``False``, will not use any submit element; this is useful for simulating AJAX requests, for example. To simulate a normal web browser, only one submit element must be sent. Therefore, this does not need to be called if there is only one submit element in the form. If the element is not found or if multiple elements match, raise a :class:`LinkNotFoundError` exception. Example: :: browser = mechanicalsoup.StatefulBrowser() browser.open(url) form = browser.select_form() form.choose_submit('form_name_attr') browser.submit_selected()
choose_submit
python
MechanicalSoup/MechanicalSoup
mechanicalsoup/form.py
https://github.com/MechanicalSoup/MechanicalSoup/blob/master/mechanicalsoup/form.py
MIT
def print_summary(self): """Print a summary of the form. May help finding which fields need to be filled-in. """ for input in self.form.find_all( ("input", "textarea", "select", "button")): input_copy = copy.copy(input) # Text between the opening tag and the closing tag often # contains a lot of spaces that we don't want here. for subtag in input_copy.find_all() + [input_copy]: if subtag.string: subtag.string = subtag.string.strip() print(input_copy)
Print a summary of the form. May help finding which fields need to be filled-in.
print_summary
python
MechanicalSoup/MechanicalSoup
mechanicalsoup/form.py
https://github.com/MechanicalSoup/MechanicalSoup/blob/master/mechanicalsoup/form.py
MIT
def _assert_valid_file_upload(self, tag, value): """Raise an exception if a multipart file input is not an open file.""" if ( is_multipart_file_upload(self.form, tag) and not isinstance(value, io.IOBase) ): raise ValueError( "From v1.3.0 onwards, you must pass an open file object " 'directly, e.g. `form["name"] = open("/path/to/file", "rb")`. ' "This change is to remediate a security vulnerability where " "a malicious web server could read arbitrary files from the " "client (CVE-2023-34457)." )
Raise an exception if a multipart file input is not an open file.
_assert_valid_file_upload
python
MechanicalSoup/MechanicalSoup
mechanicalsoup/form.py
https://github.com/MechanicalSoup/MechanicalSoup/blob/master/mechanicalsoup/form.py
MIT
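The accepted and rejected calling patterns, in a hedged sketch (markup, field name, and file path are invented; the open() line needs the file to exist):

import bs4
import mechanicalsoup

html = '''<form enctype="multipart/form-data">
  <input type="file" name="attachment">
</form>'''
form = mechanicalsoup.Form(bs4.BeautifulSoup(html, "html.parser").form)

form["attachment"] = open("report.pdf", "rb")  # OK: open binary file object
form["attachment"] = "report.pdf"              # ValueError (CVE-2023-34457 mitigation)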