import gradio as gr
import requests
from bs4 import BeautifulSoup
import os
import json
import logging
import pandas as pd
import numpy as np  # mean calculations and radar-chart geometry
import matplotlib.pyplot as plt  # radar (spider) plotting
from typing import Optional, List, Dict, Any
# ------------------------
# Configuration
# ------------------------
WORDLIFT_API_URL = "https://api.wordlift.io/content-evaluations"
WORDLIFT_API_KEY = os.getenv("WORDLIFT_API_KEY") # Get API key from environment variable
# Set up logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
# ------------------------
# Custom CSS & Theme
# ------------------------
css = """
@import url('https://fonts.googleapis.com/css2?family=Open+Sans:wght@300;400;600;700&display=swap');
body {
font-family: 'Open Sans', sans-serif !important;
}
.primary-btn {
background-color: #3452db !important;
color: white !important;
}
.primary-btn:hover {
background-color: #2a41af !important;
}
.gradio-container {
max-width: 1200px; /* Limit width for better readability */
margin: auto;
}
.plot-container {
min-height: 400px; /* Ensure plot area is visible */
}
"""
theme = gr.themes.Soft(
primary_hue=gr.themes.colors.Color(
name="blue",
c50="#eef1ff",
c100="#e0e5ff",
c200="#c3cbff",
c300="#a5b2ff",
c400="#8798ff",
c500="#6a7eff",
c600="#3452db",
c700="#2a41af",
c800="#1f3183",
c900="#152156",
c950="#0a102b",
)
)
# ------------------------
# Content Fetching Logic
# ------------------------
def fetch_content_from_url(url: str, timeout: int = 15) -> Optional[str]:
    """Fetches the main text content from a URL; returns None on failure."""
logger.info(f"Fetching content from: {url}")
try:
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
}
        # Stream the response so at most ~2 MB of the page is held in memory;
        # accessing response.content directly would download the entire body.
        with requests.get(url, headers=headers, timeout=timeout, stream=True) as response:
            response.raise_for_status()  # Raise an exception for bad status codes
            max_bytes_to_read = 2 * 1024 * 1024  # 2 MB limit
            chunks, bytes_read = [], 0
            for chunk in response.iter_content(chunk_size=8192):
                chunks.append(chunk)
                bytes_read += len(chunk)
                if bytes_read >= max_bytes_to_read:
                    logger.warning(f"Content for {url} is larger than {max_bytes_to_read} bytes, parsing truncated content.")
                    break
            content = b"".join(chunks)[:max_bytes_to_read]
        soup = BeautifulSoup(content, 'html.parser')
        # Attempt to find the main content block, prioritizing semantic tags
        main_content = soup.find('article') or soup.find('main') or soup.find(class_=lambda x: x and ('content' in x.lower() or 'article' in x.lower()))
        if main_content:
            # Extract text from common text-containing tags within the main block
            root = main_content
        else:
            # Fall back to <body> (or the whole document) if no main block is found
            logger.warning(f"No specific content tags (<article>, <main>, etc.) found for {url}, extracting from body.")
            root = soup.body or soup
        text_elements = root.find_all(['p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'li', 'blockquote', 'figcaption'])
        text = ' '.join([elem.get_text() for elem in text_elements])
        # Clean up extra whitespace
        text = ' '.join(text.split())
        # Limit text length *after* extraction and cleaning; adjust based on API
        # limits/cost. WordLift's typical text APIs handle up to ~1M chars.
        max_text_length = 1000000
        if len(text) > max_text_length:
            logger.warning(f"Extracted text for {url} is too long ({len(text)} chars), truncating to {max_text_length} chars.")
            text = text[:max_text_length]
        return text.strip() if text else None  # None if no text survived extraction
except requests.exceptions.RequestException as e:
logger.error(f"Failed to fetch content from {url}: {e}")
return None
except Exception as e:
logger.error(f"Error processing content from {url}: {e}")
return None
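# Quick manual check of the fetcher (assumes network access; the URL is one of
# the demo defaults below):
#   text = fetch_content_from_url("https://www.wordlift.io/blog/what-is-a-knowledge-graph/")
#   if text:
#       print(len(text), text[:200])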
# ------------------------
# WordLift API Call Logic
# ------------------------
def call_wordlift_api(text: str, keywords: Optional[List[str]] = None) -> Optional[Dict[str, Any]]:
"""Calls the WordLift Content Evaluation API."""
if not WORDLIFT_API_KEY:
logger.error("WORDLIFT_API_KEY environment variable not set.")
return {"error": "API key not configured."}
if not text or not text.strip():
return {"error": "No significant content to evaluate."}
payload = {
"text": text,
"keywords": keywords if keywords else []
}
headers = {
'Authorization': f'Key {WORDLIFT_API_KEY}',
'Content-Type': 'application/json',
'Accept': 'application/json'
}
logger.info(f"Calling WordLift API with text length {len(text)} and {len(keywords or [])} keywords.")
try:
        response = requests.post(WORDLIFT_API_URL, headers=headers, json=payload, timeout=90)  # generous timeout; long documents take a while to evaluate
response.raise_for_status() # Raise HTTPError for bad responses (4xx or 5xx)
return response.json()
except requests.exceptions.HTTPError as e:
logger.error(f"WordLift API HTTP error for {e.request.url}: {e.response.status_code} - {e.response.text}")
try:
error_detail = e.response.json()
except json.JSONDecodeError:
error_detail = e.response.text
return {"error": f"API returned status code {e.response.status_code}", "details": error_detail}
except requests.exceptions.Timeout as e:
logger.error(f"WordLift API request timed out for {e.request.url}: {e}")
        return {"error": "API request timed out."}
except requests.exceptions.RequestException as e:
logger.error(f"WordLift API request error for {e.request.url}: {e}")
return {"error": f"API request failed: {e}"}
except Exception as e:
logger.error(f"Unexpected error during API call: {e}")
return {"error": f"An unexpected error occurred: {e}"}
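# For reference, the parsing in evaluate_urls_batch below expects a response
# shaped roughly like this (illustrative values only; the authoritative schema
# is defined by the WordLift API):
# {
#   "quality_score": {
#     "overall": 78.5,
#     "breakdown": {
#       "content": {"purpose": 80, "accuracy": 75, "depth": 70},
#       "readability": {"score": 2.5, "grade_level": 9},
#       "seo": {"score": 65.0}
#     }
#   },
#   "metadata": {"word_count": 1200}
# }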
# ------------------------
# Plotting Logic
# ------------------------
def plot_average_radar(average_scores: Dict[str, float], avg_overall: Optional[float]) -> Any:
"""Return a radar (spider) plot as a Matplotlib figure showing average scores."""
if not average_scores or all(v is None for v in average_scores.values()):
# Return a placeholder figure if no valid data is available
fig, ax = plt.subplots(figsize=(6, 6))
ax.text(0.5, 0.5, "No successful evaluations to plot.", horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, fontsize=12)
ax.axis('off') # Hide axes
plt.title("Average Content Quality Scores", size=16, y=1.05)
plt.tight_layout()
return fig
categories = list(average_scores.keys())
values = [average_scores[cat] for cat in categories]
# Ensure values are floats, replace None with 0 for plotting
values = [float(v) if v is not None else 0 for v in values]
num_vars = len(categories)
# Calculate angles for the radar chart
angles = [n / float(num_vars) * 2 * np.pi for n in range(num_vars)]
angles += angles[:1] # Complete the circle
values += values[:1] # Complete the circle for values
fig, ax = plt.subplots(figsize=(6, 6), subplot_kw=dict(projection='polar'))
line_color = '#3452DB'
fill_color = '#A1A7AF'
background_color = '#F6F6F7'
annotation_color = '#191919'
# Plot data
ax.plot(angles, values, 'o-', linewidth=2, color=line_color, label='Average Scores')
ax.fill(angles, values, alpha=0.4, color=fill_color)
# Set tick locations and labels
ax.set_xticks(angles[:-1])
ax.set_xticklabels(categories, color=line_color, fontsize=10)
# Set y-axis limits. Max score is 100.
ax.set_ylim(0, 100)
# Draw grid lines and axes
ax.grid(True, alpha=0.5, color=fill_color)
ax.set_facecolor(background_color)
# Add score annotations next to points
for angle, value, category in zip(angles[:-1], values[:-1], categories):
        # Offset each label slightly from its point so it doesn't overlap the marker
ax.text(angle, value + 5, f'{value:.1f}', color=annotation_color,
horizontalalignment='center', verticalalignment='bottom' if value > 50 else 'top', fontsize=9)
# Add title
overall_title = f'Average Content Quality Scores\nOverall: {avg_overall:.1f}/100' if avg_overall is not None else 'Average Content Quality Scores'
plt.title(overall_title, size=16, y=1.1, color=annotation_color)
plt.tight_layout()
return fig
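# Standalone preview of the radar chart with made-up scores (not API output):
#   fig = plot_average_radar(
#       {'Purpose': 80, 'Accuracy': 75, 'Depth': 70, 'Readability': 60, 'SEO': 65},
#       avg_overall=72.0,
#   )
#   fig.savefig("radar_preview.png")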
# ------------------------
# Main Evaluation Batch Function
# ------------------------
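# Two small helpers for the evaluator below: fmt_score guards table formatting
# (applying a numeric format spec directly to a missing value would raise),
# and safe_mean averages only the scores that are actually present.
def fmt_score(value: Optional[float], spec: str) -> str:
    """Format a numeric score for the summary table; '-' when the value is missing."""
    return format(value, spec) if value is not None else "-"

def safe_mean(scores: List[Optional[float]]) -> Optional[float]:
    """Mean of the non-None entries of a score list; None if nothing numeric."""
    values = [float(s) for s in scores if s is not None]
    return float(np.mean(values)) if values else None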
def evaluate_urls_batch(url_data: pd.DataFrame):
"""
Evaluates a batch of URLs using the WordLift API.
Args:
url_data: A pandas DataFrame with columns ['URL', 'Target Keywords (comma-separated)'].
Returns:
A tuple containing:
- A pandas DataFrame with the summary results.
- A dictionary containing the full results (including errors) keyed by URL.
- A Matplotlib figure for the average radar chart.
"""
    # Short-circuit on an empty input table
if url_data.empty:
logger.info("Input DataFrame is empty. Returning empty results.")
# Return empty summary DF, empty full results, and an empty placeholder plot
empty_summary_df = pd.DataFrame(columns=[
'URL', 'Status', 'Overall Score', 'Content Purpose',
'Content Accuracy', 'Content Depth', 'Readability Score (API)',
'Readability Grade Level', 'SEO Score', 'Word Count', 'Error/Details'
])
return empty_summary_df, {}, plot_average_radar(None, None) # Pass None to plotting function
summary_results = []
full_results = {}
# Lists to store scores for calculating averages
purpose_scores = []
accuracy_scores = []
depth_scores = []
readability_scores = []
seo_scores = []
overall_scores = []
    # The Dataframe component should enforce these columns, but .get() with an
    # empty-Series default keeps this robust if a column is ever missing
    urls = url_data.get('URL', pd.Series(dtype=str))
    keywords_col = url_data.get('Target Keywords (comma-separated)', pd.Series(dtype=str))
for index, url in enumerate(urls):
        url = str(url).strip() if pd.notna(url) else ""
        raw_kw = keywords_col.iloc[index] if index < len(keywords_col) else None
        keywords_str = str(raw_kw).strip() if pd.notna(raw_kw) else ""
        keywords = [kw.strip() for kw in keywords_str.split(',') if kw.strip()]
        # Key full_results by row index plus URL so empty or duplicate URLs stay unique
        result_key = f"Row_{index}_{url}" if url else f"Row_{index}"
if not url:
summary_results.append(["", "Skipped", "-", "-", "-", "-", "-", "-", "-", "-", "Empty URL"])
full_results[result_key] = {"status": "Skipped", "error": "Empty URL input."}
logger.warning(f"Skipping evaluation for row {index}: Empty URL")
continue # Move to next URL
logger.info(f"Processing URL: {url} (Row {index}) with keywords: {keywords}")
# 1. Fetch Content
content = fetch_content_from_url(url)
if content is None or not content.strip():
status = "Failed"
error_msg = "Failed to fetch or extract content."
summary_results.append([url, status, "-", "-", "-", "-", "-", "-", "-", "-", error_msg])
full_results[result_key] = {"status": status, "error": error_msg}
logger.error(f"Processing failed for {url} (Row {index}): {error_msg}")
continue # Move to next URL
# 2. Call WordLift API
api_result = call_wordlift_api(content, keywords)
# 3. Process API Result
summary_row = [url]
if api_result and "error" not in api_result:
status = "Success"
qs = api_result.get('quality_score', {})
breakdown = qs.get('breakdown', {})
content_breakdown = breakdown.get('content', {})
readability_breakdown = breakdown.get('readability', {})
seo_breakdown = breakdown.get('seo', {})
metadata = api_result.get('metadata', {})
# Append scores for average calculation (only for successful calls)
purpose_scores.append(content_breakdown.get('purpose'))
accuracy_scores.append(content_breakdown.get('accuracy'))
depth_scores.append(content_breakdown.get('depth'))
readability_scores.append(readability_breakdown.get('score')) # API's readability score (e.g. 2.5)
seo_scores.append(seo_breakdown.get('score'))
overall_scores.append(qs.get('overall'))
            # Append formatted values for the summary table row ('-' for missing scores)
            summary_row.extend([
                status,
                fmt_score(qs.get("overall"), ".1f"),
                fmt_score(content_breakdown.get("purpose"), ".0f"),   # integer score
                fmt_score(content_breakdown.get("accuracy"), ".0f"),  # integer score
                fmt_score(content_breakdown.get("depth"), ".0f"),     # integer score
                fmt_score(readability_breakdown.get("score"), ".1f"),
                fmt_score(readability_breakdown.get("grade_level"), ".0f"),  # integer grade
                fmt_score(seo_breakdown.get("score"), ".1f"),
                fmt_score(metadata.get("word_count"), ".0f"),  # integer word count
                None  # No error
            ])
full_results[result_key] = api_result # Store full API result
else:
status = "Failed"
error_msg = api_result.get("error", "Unknown API error.") if api_result else "API call failed."
details = api_result.get("details", "") if api_result else ""
summary_row.extend([
status,
"-", "-", "-", "-", "-", "-", "-", "-",
f"{error_msg} {details}"
])
full_results[result_key] = {"status": status, "error": error_msg, "details": details}
logger.error(f"API call failed for {url} (Row {index}): {error_msg} {details}")
summary_results.append(summary_row)
    # Calculate averages *after* processing all URLs, skipping missing scores
    avg_purpose = safe_mean(purpose_scores)
    avg_accuracy = safe_mean(accuracy_scores)
    avg_depth = safe_mean(depth_scores)
    avg_readability = safe_mean(readability_scores)
    avg_seo = safe_mean(seo_scores)
    avg_overall = safe_mean(overall_scores)
# Prepare scores for the radar plot function
average_scores_dict = {
'Purpose': avg_purpose,
'Accuracy': avg_accuracy,
'Depth': avg_depth,
'Readability': avg_readability,
'SEO': avg_seo
}
# Generate the average radar plot
average_radar_fig = plot_average_radar(average_scores_dict, avg_overall)
# Create pandas DataFrame for summary output
summary_df = pd.DataFrame(summary_results, columns=[
'URL', 'Status', 'Overall Score', 'Content Purpose',
'Content Accuracy', 'Content Depth', 'Readability Score (API)',
'Readability Grade Level', 'SEO Score', 'Word Count', 'Error/Details'
])
    # Scores were already formatted as display strings (with '-' for missing
    # values) when each summary_row was built, so the DataFrame shows them as-is.
return summary_df, full_results, average_radar_fig # Return the plot too
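# Driving the evaluator without the UI (requires WORDLIFT_API_KEY to be set;
# column names must match the Dataframe headers used in the interface below):
#   df = pd.DataFrame({
#       'URL': ["https://www.wordlift.io/blog/what-is-a-knowledge-graph/"],
#       'Target Keywords (comma-separated)': ["knowledge graph, semantic web"],
#   })
#   summary_df, full_results, radar_fig = evaluate_urls_batch(df)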
# ------------------------
# Gradio Blocks Interface Setup
# ------------------------
with gr.Blocks(css=css, theme=theme) as demo:
gr.Markdown("# WordLift Multi-URL Content Evaluator")
gr.Markdown(
"Enter up to 30 URLs in the table below. "
"Optionally, provide comma-separated target keywords for each URL. "
"The app will fetch content from each URL and evaluate it using the WordLift API."
)
with gr.Row():
with gr.Column(scale=1):
url_input_df = gr.Dataframe(
headers=["URL", "Target Keywords (comma-separated)"],
datatype=["str", "str"],
row_count=(1, 30), # Allow adding rows up to 30
col_count=(2, "fixed"),
value=[
["https://www.wordlift.io/blog/google-helpful-content-update-2023/", "helpful content, google update"],
["https://www.wordlift.io/blog/what-is-a-knowledge-graph/", "knowledge graph, semantic web"],
["https://www.example.com/non-existent-page", ""], # Example of a failing URL
["", ""] # Example of an empty row
], # Default examples
label="URLs and Keywords"
)
submit_button = gr.Button("Evaluate All URLs", elem_classes=["primary-btn"])
with gr.Column(scale=1, elem_classes="plot-container"):
            # Radar chart of the average scores across all evaluated URLs
            average_radar_output = gr.Plot(label="Average Content Quality Scores Radar")
gr.Markdown("## Detailed Results")
with gr.Column():
summary_output_df = gr.DataFrame(
label="Summary Results",
        # All columns are strings: scores were pre-formatted, with '-' for missing values
headers=['URL', 'Status', 'Overall Score', 'Content Purpose',
'Content Accuracy', 'Content Depth', 'Readability Score (API)',
'Readability Grade Level', 'SEO Score', 'Word Count', 'Error/Details'],
datatype=["str"] * 11,
wrap=True # Wrap text in columns
)
with gr.Accordion("Full JSON Results", open=False):
        # Raw API responses (or error details) keyed per URL/row
        full_results_json = gr.JSON(label="Raw API Results per URL (or Error)")
submit_button.click(
fn=evaluate_urls_batch,
inputs=[url_input_df],
        # Outputs: summary table, raw JSON results, and the average radar plot
outputs=[summary_output_df, full_results_json, average_radar_output]
)
# Launch the app
if __name__ == "__main__":
if not WORDLIFT_API_KEY:
logger.error("\n----------------------------------------------------------")
logger.error("WORDLIFT_API_KEY environment variable is not set.")
logger.error("Please set it before running the script:")
logger.error(" export WORDLIFT_API_KEY='YOUR_API_KEY'")
logger.error("Or if using a .env file and python-dotenv:")
logger.error(" pip install python-dotenv")
logger.error(" # Add WORDLIFT_API_KEY=YOUR_API_KEY to a .env file")
logger.error(" # import dotenv; dotenv.load_dotenv()")
logger.error(" # in your script before getting the key.")
logger.error("----------------------------------------------------------\n")
# You might want to sys.exit(1) here if the API key is mandatory
logger.info("Launching Gradio app...")
# Consider using share=True for easy sharing, but be mindful of security/costs
# demo.launch(share=True)
demo.launch()