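"""Async tests for crawl4ai chunking and extraction strategies.

These tests crawl a live site (nbcnews.com), so network access is required;
the LLM extraction test additionally expects an OpenAI API key in the
OPENAI_API_KEY environment variable.
"""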
import json
import os
import sys

import pytest

# Add the parent directory to the Python path so the local crawl4ai package
# is importable when this file is run directly
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(parent_dir)

from crawl4ai.async_webcrawler import AsyncWebCrawler
from crawl4ai.chunking_strategy import RegexChunking
from crawl4ai.extraction_strategy import CosineStrategy, LLMExtractionStrategy

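# Verify that RegexChunking splits the crawled page on blank lines and that
# the crawler returns the resulting chunks as a JSON list.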
@pytest.mark.asyncio
async def test_regex_chunking():
    async with AsyncWebCrawler(verbose=True) as crawler:
        url = "https://www.nbcnews.com/business"
        chunking_strategy = RegexChunking(patterns=["\n\n"])
        result = await crawler.arun(
            url=url,
            chunking_strategy=chunking_strategy,
            bypass_cache=True
        )
        assert result.success
        assert result.extracted_content
        chunks = json.loads(result.extracted_content)
        assert len(chunks) > 1  # Ensure multiple chunks were created

# @pytest.mark.asyncio
# async def test_cosine_strategy():
#     async with AsyncWebCrawler(verbose=True) as crawler:
#         url = "https://www.nbcnews.com/business"
#         extraction_strategy = CosineStrategy(word_count_threshold=10, max_dist=0.2, linkage_method="ward", top_k=3, sim_threshold=0.3)
#         result = await crawler.arun(
#             url=url,
#             extraction_strategy=extraction_strategy,
#             bypass_cache=True
#         )
#         assert result.success
#         assert result.extracted_content
#         extracted_data = json.loads(result.extracted_content)
#         assert len(extracted_data) > 0
#         assert all('tags' in item for item in extracted_data)

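# Verify that LLMExtractionStrategy returns JSON items that each carry a
# 'content' field. Skip, rather than fail, when OPENAI_API_KEY is not set.
@pytest.mark.skipif(
    os.getenv('OPENAI_API_KEY') is None,
    reason="OPENAI_API_KEY environment variable not set"
)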
@pytest.mark.asyncio
async def test_llm_extraction_strategy():
    async with AsyncWebCrawler(verbose=True) as crawler:
        url = "https://www.nbcnews.com/business"
        extraction_strategy = LLMExtractionStrategy(
            provider="openai/gpt-4o-mini",
            api_token=os.getenv('OPENAI_API_KEY'),
            instruction="Extract only content related to technology"
        )
        result = await crawler.arun(
            url=url,
            extraction_strategy=extraction_strategy,
            bypass_cache=True
        )
        assert result.success
        assert result.extracted_content
        extracted_data = json.loads(result.extracted_content)
        assert len(extracted_data) > 0
        assert all('content' in item for item in extracted_data)

# @pytest.mark.asyncio
# async def test_combined_chunking_and_extraction():
#     async with AsyncWebCrawler(verbose=True) as crawler:
#         url = "https://www.nbcnews.com/business"
#         chunking_strategy = RegexChunking(patterns=["\n\n"])
#         extraction_strategy = CosineStrategy(word_count_threshold=10, max_dist=0.2, linkage_method="ward", top_k=3, sim_threshold=0.3)
#         result = await crawler.arun(
#             url=url,
#             chunking_strategy=chunking_strategy,
#             extraction_strategy=extraction_strategy,
#             bypass_cache=True
#         )
#         assert result.success
#         assert result.extracted_content
#         extracted_data = json.loads(result.extracted_content)
#         assert len(extracted_data) > 0
#         assert all('tags' in item for item in extracted_data)
#         assert all('content' in item for item in extracted_data)

# Entry point for debugging
if __name__ == "__main__":
    pytest.main([__file__, "-v"])