# LLM Extraction with AsyncWebCrawler
Crawl4AI's `AsyncWebCrawler` lets you use Language Models (LLMs) to extract structured data or relevant content from web pages asynchronously. The examples below demonstrate how to use `LLMExtractionStrategy` for different purposes with the `AsyncWebCrawler`.
## Example 1: Extract Structured Data
In this example, we use the `LLMExtractionStrategy` to extract structured data (model names and their fees) from the OpenAI pricing page.
```python
import os
import json
import asyncio
from crawl4ai import AsyncWebCrawler
from crawl4ai.extraction_strategy import LLMExtractionStrategy
from pydantic import BaseModel, Field

class OpenAIModelFee(BaseModel):
    model_name: str = Field(..., description="Name of the OpenAI model.")
    input_fee: str = Field(..., description="Fee for input token for the OpenAI model.")
    output_fee: str = Field(..., description="Fee for output token for the OpenAI model.")

async def extract_openai_fees():
    url = 'https://openai.com/api/pricing/'
    async with AsyncWebCrawler(verbose=True) as crawler:
        result = await crawler.arun(
            url=url,
            word_count_threshold=1,
            extraction_strategy=LLMExtractionStrategy(
                provider="openai/gpt-4o",  # Or use ollama, e.g. provider="ollama/nemotron"
                api_token=os.getenv('OPENAI_API_KEY'),
                schema=OpenAIModelFee.model_json_schema(),
                extraction_type="schema",
                instruction="From the crawled content, extract all mentioned model names along with their "
                            "fees for input and output tokens. Make sure not to miss anything in the entire content. "
                            'One extracted model JSON format should look like this: '
                            '{ "model_name": "GPT-4", "input_fee": "US$10.00 / 1M tokens", "output_fee": "US$30.00 / 1M tokens" }'
            ),
            bypass_cache=True,
        )
        model_fees = json.loads(result.extracted_content)
        print(f"Number of models extracted: {len(model_fees)}")
        os.makedirs(".data", exist_ok=True)  # make sure the output directory exists
        with open(".data/openai_fees.json", "w", encoding="utf-8") as f:
            json.dump(model_fees, f, indent=2)

asyncio.run(extract_openai_fees())
```
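Because `result.extracted_content` comes back as a JSON string, it's worth validating what the LLM returned before relying on it. Since we already defined a Pydantic schema, we can reuse it for that check; here is a minimal sketch (the `validate_fees` helper is illustrative, not part of Crawl4AI):

```python
from pydantic import ValidationError

def validate_fees(raw_items):
    """Validate each extracted record against OpenAIModelFee; skip malformed ones."""
    validated = []
    for item in raw_items:
        try:
            validated.append(OpenAIModelFee.model_validate(item))
        except ValidationError as exc:
            print(f"Skipping malformed record: {exc}")
    return validated

# e.g. fees = validate_fees(model_fees)
```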
## Example 2: Extract Relevant Content
In this example, we instruct the LLM to extract only content related to technology from the NBC News business page.
```python
import os
import json
import asyncio
from crawl4ai import AsyncWebCrawler
from crawl4ai.extraction_strategy import LLMExtractionStrategy

async def extract_tech_content():
    async with AsyncWebCrawler(verbose=True) as crawler:
        result = await crawler.arun(
            url="https://www.nbcnews.com/business",
            extraction_strategy=LLMExtractionStrategy(
                provider="openai/gpt-4o",
                api_token=os.getenv('OPENAI_API_KEY'),
                instruction="Extract only content related to technology"
            ),
            bypass_cache=True,
        )
        tech_content = json.loads(result.extracted_content)
        print(f"Number of tech-related items extracted: {len(tech_content)}")
        os.makedirs(".data", exist_ok=True)  # make sure the output directory exists
        with open(".data/tech_content.json", "w", encoding="utf-8") as f:
            json.dump(tech_content, f, indent=2)

asyncio.run(extract_tech_content())
```
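Without a schema, the exact shape of each extracted item can vary between Crawl4AI versions, so it helps to inspect a few records before processing them further. A small sketch (the `content` key is an assumption about the block format; the helper falls back to printing the raw item):

```python
def preview_items(items, limit=5):
    """Print a short preview of each extracted item, whatever its shape."""
    for i, item in enumerate(items[:limit]):
        text = item.get("content", item) if isinstance(item, dict) else item
        print(f"{i}: {str(text)[:120]}")

preview_items(tech_content)
```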
## Advanced Usage: Combining JS Execution with LLM Extraction
This example demonstrates how to combine JavaScript execution with LLM extraction to handle dynamic content:
```python
import os
import json
import asyncio
from crawl4ai import AsyncWebCrawler
from crawl4ai.extraction_strategy import LLMExtractionStrategy

async def extract_dynamic_content():
    # Click the "Load More" button (if present) and give new content time to load
    js_code = """
    const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More'));
    if (loadMoreButton) {
        loadMoreButton.click();
        await new Promise(resolve => setTimeout(resolve, 2000));
    }
    """
    # Proceed only once more than 10 article cards are in the DOM
    wait_for = """
    () => {
        const articles = document.querySelectorAll('article.tease-card');
        return articles.length > 10;
    }
    """
    async with AsyncWebCrawler(verbose=True) as crawler:
        result = await crawler.arun(
            url="https://www.nbcnews.com/business",
            js_code=js_code,
            wait_for=wait_for,
            css_selector="article.tease-card",
            extraction_strategy=LLMExtractionStrategy(
                provider="openai/gpt-4o",
                api_token=os.getenv('OPENAI_API_KEY'),
                instruction="Summarize each article, focusing on technology-related content"
            ),
            bypass_cache=True,
        )
        summaries = json.loads(result.extracted_content)
        print(f"Number of summarized articles: {len(summaries)}")
        os.makedirs(".data", exist_ok=True)  # make sure the output directory exists
        with open(".data/tech_summaries.json", "w", encoding="utf-8") as f:
            json.dump(summaries, f, indent=2)

asyncio.run(extract_dynamic_content())
```
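If you need the same extraction across several pages, the async API also lets you crawl them concurrently. Below is a hedged sketch using plain `asyncio.gather` with a shared crawler session (it reuses the imports from the example above; whether one session can safely serve concurrent `arun` calls may depend on your Crawl4AI version, and the URL list is a placeholder):

```python
async def extract_many(urls):
    strategy = LLMExtractionStrategy(
        provider="openai/gpt-4o",
        api_token=os.getenv("OPENAI_API_KEY"),
        instruction="Summarize each article, focusing on technology-related content",
    )
    async with AsyncWebCrawler(verbose=True) as crawler:
        tasks = [
            crawler.arun(url=url, extraction_strategy=strategy, bypass_cache=True)
            for url in urls
        ]
        # return_exceptions=True keeps one failed page from cancelling the rest
        results = await asyncio.gather(*tasks, return_exceptions=True)
    return [r for r in results if not isinstance(r, Exception)]
```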
## Customizing LLM Provider
Crawl4AI uses the `litellm` library under the hood, so you can use any LLM provider that `litellm` supports. Just pass the correct `provider/model` string and API token:
```python
extraction_strategy=LLMExtractionStrategy(
provider="your_llm_provider/model_name",
api_token="your_api_token",
instruction="Your extraction instruction"
)
```
This flexibility allows you to integrate with various LLM providers and tailor the extraction process to your specific needs.
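For instance, swapping providers is just a matter of changing the provider string and token. The strings follow litellm's `provider/model` naming convention; the specific model names below are illustrative, so check your provider's current catalog:

```python
import os
from crawl4ai.extraction_strategy import LLMExtractionStrategy

# Anthropic (model name is an example; use whatever your account offers)
strategy = LLMExtractionStrategy(
    provider="anthropic/claude-3-5-sonnet-20240620",
    api_token=os.getenv("ANTHROPIC_API_KEY"),
    instruction="Your extraction instruction",
)

# Local model served by Ollama -- no real API key is required for a local
# server, though some versions still expect a placeholder token
strategy = LLMExtractionStrategy(
    provider="ollama/llama3",
    api_token="no-token-needed",
    instruction="Your extraction instruction",
)
```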
## Error Handling and Retries
When working with external LLM APIs, it's important to handle potential errors and implement retry logic. Here's an example of how you might do this:
```python
import os
import json
import asyncio
from tenacity import retry, stop_after_attempt, wait_exponential
from crawl4ai import AsyncWebCrawler
from crawl4ai.extraction_strategy import LLMExtractionStrategy

class LLMExtractionError(Exception):
    pass

# reraise=True makes the final failure surface as LLMExtractionError
# instead of tenacity's RetryError, so the except clause below catches it
@retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=10), reraise=True)
async def extract_with_retry(crawler, url, extraction_strategy):
    try:
        result = await crawler.arun(url=url, extraction_strategy=extraction_strategy, bypass_cache=True)
        return json.loads(result.extracted_content)
    except Exception as e:
        raise LLMExtractionError(f"Failed to extract content: {e}") from e

async def main():
    async with AsyncWebCrawler(verbose=True) as crawler:
        try:
            content = await extract_with_retry(
                crawler,
                "https://www.example.com",
                LLMExtractionStrategy(
                    provider="openai/gpt-4o",
                    api_token=os.getenv('OPENAI_API_KEY'),
                    instruction="Extract and summarize main points"
                )
            )
            print("Extracted content:", content)
        except LLMExtractionError as e:
            print(f"Extraction failed after retries: {e}")

asyncio.run(main())
```
This example uses the `tenacity` library to implement a retry mechanism with exponential backoff, which can help handle temporary failures or rate limiting from the LLM API. |
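Note that retrying on every exception can mask real bugs: a malformed schema or bad instruction will fail identically on all three attempts. If you want to retry only transient failures, tenacity's `retry_if_exception_type` narrows the retry condition. Here is a sketch, assuming you decide which errors count as transient (the `TransientLLMError` class is illustrative):

```python
import json
from tenacity import retry, retry_if_exception_type, stop_after_attempt, wait_exponential

class TransientLLMError(Exception):
    """Illustrative marker for errors worth retrying (timeouts, rate limits)."""

@retry(
    retry=retry_if_exception_type(TransientLLMError),  # retry only these
    stop=stop_after_attempt(3),
    wait=wait_exponential(multiplier=1, min=4, max=10),
    reraise=True,
)
async def extract_transient_safe(crawler, url, extraction_strategy):
    result = await crawler.arun(url=url, extraction_strategy=extraction_strategy, bypass_cache=True)
    if result.extracted_content is None:
        # Treat an empty response as transient so this attempt is retried
        raise TransientLLMError(f"No content extracted from {url}")
    return json.loads(result.extracted_content)
```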