import json
import time
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
from crawl4ai.content_filter_strategy import PruningContentFilter
# Define function for scraping with Crawl4AI
async def trigger_scraping_channels(channel_urls, num_of_posts, start_date, end_date, order_by, country):
"""
Trigger scraping for multiple channel URLs with Crawl4AI.
"""
browser_config = BrowserConfig(headless=True, verbose=True)
run_config = CrawlerRunConfig(
cache_mode=CacheMode.ENABLED,
markdown_generator=None, # Optionally, use a Markdown generator if needed
content_filter=PruningContentFilter(threshold=0.5, threshold_type="fixed", min_word_threshold=0),
)
async with AsyncWebCrawler(config=browser_config) as crawler:
results = []
for url in channel_urls:
result = await crawler.arun(
url=url,
config=run_config
)
results.append(result.markdown)
return results
# Function to get the progress of the scraping task
async def get_progress(snapshot_id):
"""
Get the progress of the scraping task.
"""
return {"status": "ready", "snapshot_id": snapshot_id}
# Function to get the output of the scraping task
async def get_output(snapshot_id, format="json"):
"""
Get the output of the scraping task.
"""
# Assuming we fetch the output after scraping and convert to JSON
return [{"url": "https://example.com", "shortcode": "abc123", "formatted_transcript": []}]
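For reference, here is a minimal sketch of how these helpers could be wired together with asyncio. The channel URL, post count, and snapshot ID below are placeholders, not values from the original tutorial:

import asyncio

async def main():
    # Hypothetical inputs for illustration only
    channel_urls = ["https://example.com/channel"]
    markdown_pages = await trigger_scraping_channels(
        channel_urls, num_of_posts=5, start_date=None, end_date=None, order_by=None, country=None
    )
    progress = await get_progress(snapshot_id="demo-snapshot")   # stubbed status check
    output = await get_output(snapshot_id="demo-snapshot", format="json")
    print(len(markdown_pages), progress["status"], output[0]["url"])

if __name__ == "__main__":
    asyncio.run(main())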