Spaces:
Sleeping
Sleeping
File size: 5,613 Bytes
70b87af |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 |
from typing import Any, Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class KnowledgeBaseWebReader(BaseReader):
"""Knowledge base reader.
Crawls and reads articles from a knowledge base/help center with Playwright.
Tested on Zendesk and Intercom CMS, may work on others.
Can be run in headless mode but it may be blocked by Cloudflare. Run it headed to be safe.
Times out occasionally, just increase the default time out if it does.
Requires the `playwright` package.
Args:
root_url (str): the base url of the knowledge base, with no trailing slash
e.g. 'https://support.intercom.com'
link_selectors (List[str]): list of css selectors to find links to articles while crawling
e.g. ['.article-list a', '.article-list a']
article_path (str): the url path of articles on this domain so the crawler knows when to stop
e.g. '/articles'
title_selector (Optional[str]): css selector to find the title of the article
e.g. '.article-title'
subtitle_selector (Optional[str]): css selector to find the subtitle/description of the article
e.g. '.article-subtitle'
body_selector (Optional[str]): css selector to find the body of the article
e.g. '.article-body'
"""
def __init__(
self,
root_url: str,
link_selectors: List[str],
article_path: str,
title_selector: Optional[str] = None,
subtitle_selector: Optional[str] = None,
body_selector: Optional[str] = None,
) -> None:
"""Initialize with parameters."""
self.root_url = root_url
self.link_selectors = link_selectors
self.article_path = article_path
self.title_selector = title_selector
self.subtitle_selector = subtitle_selector
self.body_selector = body_selector
def load_data(self) -> List[Document]:
"""Load data from the knowledge base."""
from playwright.sync_api import sync_playwright
with sync_playwright() as p:
browser = p.chromium.launch(headless=False)
# Crawl
article_urls = self.get_article_urls(
browser,
self.root_url,
self.root_url,
)
# Scrape
documents = []
for url in article_urls:
article = self.scrape_article(
browser,
url,
)
extra_info = {
"title": article["title"],
"subtitle": article["subtitle"],
"url": article["url"],
}
documents.append(Document(text=article["body"], extra_info=extra_info))
browser.close()
return documents
def scrape_article(
self,
browser: Any,
url: str,
) -> Dict[str, str]:
"""Scrape a single article url.
Args:
browser (Any): a Playwright Chromium browser.
url (str): URL of the article to scrape.
Returns:
Dict[str, str]: a mapping of article attributes to their values.
"""
page = browser.new_page(ignore_https_errors=True)
page.set_default_timeout(60000)
page.goto(url, wait_until="domcontentloaded")
title = (
(
page.query_selector(self.title_selector).evaluate(
"node => node.innerText"
)
)
if self.title_selector
else ""
)
subtitle = (
(
page.query_selector(self.subtitle_selector).evaluate(
"node => node.innerText"
)
)
if self.subtitle_selector
else ""
)
body = (
(page.query_selector(self.body_selector).evaluate("node => node.innerText"))
if self.body_selector
else ""
)
page.close()
print("scraped:", url)
return {"title": title, "subtitle": subtitle, "body": body, "url": url}
def get_article_urls(
self, browser: Any, root_url: str, current_url: str
) -> List[str]:
"""Recursively crawl through the knowledge base to find a list of articles.
Args:
browser (Any): a Playwright Chromium browser.
root_url (str): root URL of the knowledge base.
current_url (str): current URL that is being crawled.
Returns:
List[str]: a list of URLs of found articles.
"""
page = browser.new_page(ignore_https_errors=True)
page.set_default_timeout(60000)
page.goto(current_url, wait_until="domcontentloaded")
# If this is a leaf node aka article page, return itself
if self.article_path in current_url:
print("Found an article: ", current_url)
page.close()
return [current_url]
# Otherwise crawl this page and find all the articles linked from it
article_urls = []
links = []
for link_selector in self.link_selectors:
ahrefs = page.query_selector_all(link_selector)
links.extend(ahrefs)
for link in links:
url = root_url + page.evaluate("(node) => node.getAttribute('href')", link)
article_urls.extend(self.get_article_urls(browser, root_url, url))
page.close()
return article_urls
|