from flask import Flask, jsonify, request, Response, stream_with_context
from flask_cors import CORS
import requests
from bs4 import BeautifulSoup
import os
import re
import urllib.parse
import time
import random
import base64
from io import BytesIO
from googlesearch import search
import logging
import queue
# Create a logging filter to suppress socket warnings
class SocketWarningFilter(logging.Filter):
    def filter(self, record):
        return not (record.levelname == 'WARNING' and 'socket.send()' in record.getMessage())
# Create a queue for log messages
log_queue = queue.Queue()
# Custom log handler that puts messages in the queue
class QueueHandler(logging.Handler):
    def emit(self, record):
        log_entry = self.format(record)
        log_queue.put(log_entry)
# Set up logging with the custom handler
logger = logging.getLogger()
queue_handler = QueueHandler()
queue_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
queue_handler.addFilter(SocketWarningFilter()) # Add the filter to the handler
logger.addHandler(queue_handler)
logger.setLevel(logging.INFO)
# Also add the filter to the root logger to catch all socket warnings
logging.getLogger().addFilter(SocketWarningFilter())
app = Flask(__name__)
# Enable CORS with specific settings
CORS(app, resources={
r"/*": {
"origins": "*",
"methods": ["GET", "POST", "OPTIONS"],
"allow_headers": ["Content-Type", "Authorization"]
}
})
def search_images(query, num_images=5):
    # Headers to mimic a browser request
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Accept-Encoding': 'gzip, deflate',
        'DNT': '1',
        'Connection': 'keep-alive',
    }

    # Format the query for URL
    formatted_query = urllib.parse.quote(query)

    # Google Images URL
    url = f"https://www.google.com/search?q={formatted_query}&tbm=isch&safe=active"

    try:
        # Get the HTML content
        response = requests.get(url, headers=headers, timeout=30)
        response.raise_for_status()
        # Find all direct image URLs using regex (require a dot before the
        # extension so paths like ".../jpg-gallery" don't match)
        image_urls = re.findall(r'https?://[^"\']*?\.(?:jpg|jpeg|png|gif)', response.text)
        # Remove duplicates while preserving order
        image_urls = list(dict.fromkeys(image_urls))

        # Store results
        results = []
        downloaded = 0

        for img_url in image_urls:
            if downloaded >= num_images:
                break
            try:
                # Skip small thumbnails and icons
                if 'gstatic.com' in img_url or 'google.com' in img_url:
                    continue

                # Download image
                img_response = requests.get(img_url, headers=headers, timeout=10)
                img_response.raise_for_status()

                # Check if the response is actually an image
                content_type = img_response.headers.get('Content-Type', '')
                if not content_type.startswith('image/'):
                    continue

                # Convert image to base64
                image_base64 = base64.b64encode(img_response.content).decode('utf-8')

                # Add to results
                results.append({
                    'image_url': img_url,
                    'base64_data': f"data:{content_type};base64,{image_base64}"
                })
                downloaded += 1

                # Add a random delay between downloads
                time.sleep(random.uniform(0.5, 1))
            except Exception as e:
                logger.error(f"Error downloading image: {str(e)}")
                continue

        return results
    except Exception as e:
        logger.error(f"An error occurred: {str(e)}")
        return []
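# Illustrative use of the helper above (assumption: run from a Python shell
# with this module imported; the query string is arbitrary):
#
#   >>> images = search_images("aurora borealis", num_images=2)
#   >>> images[0]['image_url']           # source URL of the downloaded image
#   >>> images[0]['base64_data'][:30]    # e.g. "data:image/jpeg;base64,..."
#
# Each entry pairs the original URL with a data URI that can be dropped
# straight into an <img src="..."> attribute.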
@app.route('/search_images', methods=['GET'])
def api_search_images():
    try:
        # Get query parameters
        query = request.args.get('query', '')
        num_images = int(request.args.get('num_images', 5))

        if not query:
            return jsonify({'error': 'Query parameter is required'}), 400
        if num_images < 1 or num_images > 20:
            return jsonify({'error': 'Number of images must be between 1 and 20'}), 400

        # Search for images
        results = search_images(query, num_images)

        response = jsonify({
            'success': True,
            'query': query,
            'results': results
        })

        # Add CORS headers
        response.headers['Access-Control-Allow-Origin'] = '*'
        return response
    except Exception as e:
        logger.error(f"Error in search_images: {str(e)}")
        # Build the Response first so CORS headers can be set, then return it
        # with the 500 status (setting headers on a (response, status) tuple fails)
        response = jsonify({
            'success': False,
            'error': str(e)
        })
        # Add CORS headers
        response.headers['Access-Control-Allow-Origin'] = '*'
        return response, 500
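# Example request against the route above (assumption: the server is running
# locally on port 5001, as configured in the __main__ block below):
#
#   curl "http://localhost:5001/search_images?query=mountain+lake&num_images=3"
#
# On success the JSON body has the shape:
#   {"success": true, "query": "...", "results": [{"image_url": ..., "base64_data": ...}, ...]}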
def scrape_site_content(query, num_sites=5):
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Accept-Encoding': 'gzip, deflate',
        'DNT': '1',
        'Connection': 'keep-alive',
    }

    results = []
    scraped = 0
    max_retries = 3
    base_delay = 5  # Base delay in seconds

    try:
        logger.info(f"Starting to scrape content for query: {query}")

        # Add initial delay before starting searches
        initial_delay = random.uniform(2, 4)
        logger.info(f"Initial delay of {initial_delay:.2f} seconds before starting searches...")
        time.sleep(initial_delay)

        # Use googlesearch-python to get URLs with retry logic
        search_results = []
        retry_count = 0
        while retry_count < max_retries:
            try:
                search_results = list(search(query, num_results=num_sites))
                break
            except Exception as e:
                retry_count += 1
                if "429" in str(e):
                    delay = base_delay * (2 ** retry_count)  # Exponential backoff
                    logger.warning(f"Rate limited by Google. Waiting {delay} seconds before retry {retry_count}/{max_retries}")
                    time.sleep(delay)
                else:
                    logger.error(f"Error during search (attempt {retry_count}/{max_retries}): {str(e)}")
                    if retry_count == max_retries:
                        raise
                    time.sleep(base_delay)

        logger.info(f"Found {len(search_results)} URLs to scrape for query: {query}")

        # Process each found URL
        for url in search_results:
            if scraped >= num_sites:
                break
            try:
                logger.info(f"Attempting to scrape URL: {url}")

                # Add random delay before each request
                delay = random.uniform(1, 3)
                logger.info(f"Waiting {delay:.2f} seconds before request...")
                time.sleep(delay)

                # Get the HTML content with retry logic
                retry_count = 0
                while retry_count < max_retries:
                    try:
                        response = requests.get(url, headers=headers, timeout=10)
                        response.raise_for_status()
                        break
                    except requests.exceptions.RequestException as e:
                        retry_count += 1
                        if "429" in str(e):
                            delay = base_delay * (2 ** retry_count)
                            logger.warning(f"Rate limited. Waiting {delay} seconds before retry {retry_count}/{max_retries}")
                            # Re-raise once retries are exhausted so we never fall
                            # through to parsing without a response
                            if retry_count == max_retries:
                                raise
                            time.sleep(delay)
                        else:
                            logger.error(f"Request failed (attempt {retry_count}/{max_retries}): {str(e)}")
                            if retry_count == max_retries:
                                raise
                            time.sleep(base_delay)
logger.info(f"Successfully retrieved content from: {url}")
# Verify it's HTML content
content_type = response.headers.get('Content-Type', '').lower()
if 'text/html' not in content_type:
logger.info(f"Skipping {url} - not HTML content (Content-Type: {content_type})")
continue
# Parse the HTML content
soup = BeautifulSoup(response.text, 'html.parser')
logger.info(f"Successfully parsed HTML from: {url}")
# Remove script and style elements
for script in soup(["script", "style"]):
script.decompose()
                # Extract text content (limit to the first 10,000 characters)
                text_content = soup.get_text(separator='\n', strip=True)[:10000]
                logger.info(f"Extracted {len(text_content)} characters of text from: {url}")
                # Extract all links (limit to first 10)
                links = []
                for link in soup.find_all('a', href=True)[:10]:
                    href = link['href']
                    if href.startswith('http'):
                        links.append({
                            'text': link.get_text(strip=True),
                            'url': href
                        })
                logger.info(f"Found {len(links)} valid links on: {url}")

                # Extract meta information
                title = soup.title.string if soup.title else ''
                meta_description = ''
                meta_keywords = ''

                meta_desc_tag = soup.find('meta', attrs={'name': 'description'})
                if meta_desc_tag:
                    meta_description = meta_desc_tag.get('content', '')

                meta_keywords_tag = soup.find('meta', attrs={'name': 'keywords'})
                if meta_keywords_tag:
                    meta_keywords = meta_keywords_tag.get('content', '')

                results.append({
                    'url': url,
                    'title': title,
                    'meta_description': meta_description,
                    'meta_keywords': meta_keywords,
                    'text_content': text_content,
                    'links': links
                })

                scraped += 1
                logger.info(f"Successfully scraped {scraped}/{num_sites} sites. Current URL: {url}")

                # Add a random delay between successful scrapes
                delay = random.uniform(2, 4)
                logger.info(f"Waiting {delay:.2f} seconds before next scrape...")
                time.sleep(delay)
            except requests.exceptions.RequestException as e:
                logger.error(f"Request failed for URL {url}: {str(e)}")
                continue
            except Exception as e:
                logger.error(f"Error scraping {url}: {str(e)}")
                continue
    except Exception as e:
        logger.error(f"Error in search: {str(e)}")

    logger.info(f"Completed scraping. Successfully scraped {len(results)} out of {num_sites} sites")
    return results
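# Illustrative use of the scraper helper (assumption: called directly from a
# Python shell; the query is arbitrary). Each result dict bundles the page URL,
# its <title>, meta description/keywords, up to 10 outbound links, and the
# first 10,000 characters of visible text:
#
#   >>> pages = scrape_site_content("flask server sent events", num_sites=2)
#   >>> pages[0]['title'], len(pages[0]['text_content'])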
@app.route('/scrape_sites', methods=['GET'])
def api_scrape_sites():
    try:
        # Get query parameters
        query = request.args.get('query', '')
        num_sites = int(request.args.get('num_sites', 10))

        if not query:
            return jsonify({'error': 'Query parameter is required'}), 400
        if num_sites < 1 or num_sites > 20:
            return jsonify({'error': 'Number of sites must be between 1 and 20'}), 400

        # Scrape the websites
        results = scrape_site_content(query, num_sites)

        response = jsonify({
            'success': True,
            'query': query,
            'results': results
        })

        # Add CORS headers
        response.headers['Access-Control-Allow-Origin'] = '*'
        return response
    except Exception as e:
        logger.error(f"Error in api_scrape_sites: {str(e)}")
        # Build the Response first so CORS headers can be set, then return it
        # with the 500 status (setting headers on a (response, status) tuple fails)
        response = jsonify({
            'success': False,
            'error': str(e)
        })
        # Add CORS headers
        response.headers['Access-Control-Allow-Origin'] = '*'
        return response, 500
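# Example request against the route above (assumption: a local server on
# port 5001; the query is arbitrary). Expect it to be slow, since the scraper
# sleeps between the search and each page fetch to avoid rate limiting:
#
#   curl "http://localhost:5001/scrape_sites?query=web+scraping+ethics&num_sites=2"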
@app.route('/logs/stream')
def stream_logs():
    def generate():
        while True:
            try:
                # Get log message from queue, timeout after 1 second
                log_message = log_queue.get(timeout=1)
                yield f"data: {log_message}\n\n"
            except queue.Empty:
                # Send a heartbeat to keep the connection alive
                yield "data: heartbeat\n\n"
            except GeneratorExit:
                break

    response = Response(stream_with_context(generate()), mimetype='text/event-stream')
    response.headers['Cache-Control'] = 'no-cache'
    response.headers['Connection'] = 'keep-alive'
    return response
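# The endpoint above speaks Server-Sent Events, so a client can follow the log
# stream with curl's no-buffer flag (assumption: local server on port 5001):
#
#   curl -N "http://localhost:5001/logs/stream"
#
# Each log entry arrives as a "data: ..." line; idle seconds produce the
# "data: heartbeat" keep-alive instead.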
if __name__ == '__main__':
logger.info("Starting Flask API server...")
app.run(host='0.0.0.0', port=5001, debug=True)