import gradio as gr
import requests
import time
from bs4 import BeautifulSoup
from transformers import pipeline
import PyPDF2
import docx
import os
from typing import List


class ContentAnalyzer:
    def __init__(self):
        print("[DEBUG] Initializing pipelines...")
        self.summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
        self.sentiment_analyzer = pipeline("sentiment-analysis")
        self.zero_shot = pipeline("zero-shot-classification")
        print("[DEBUG] Pipelines initialized.")

    def read_file(self, file_obj) -> str:
        """Read content from different file types (.txt, .pdf, .docx)."""
        if file_obj is None:
            return ""
        file_ext = os.path.splitext(file_obj.name)[1].lower()
        print(f"[DEBUG] File extension: {file_ext}")
        try:
            if file_ext == '.txt':
                # Gradio upload objects expose the temp-file path via .name;
                # reading from that path works across Gradio versions.
                with open(file_obj.name, 'r', encoding='utf-8') as f:
                    return f.read()
            elif file_ext == '.pdf':
                pdf_reader = PyPDF2.PdfReader(file_obj.name)
                text = ""
                for page in pdf_reader.pages:
                    text += page.extract_text() + "\n"
                return text
            elif file_ext == '.docx':
                doc = docx.Document(file_obj.name)
                return "\n".join(paragraph.text for paragraph in doc.paragraphs)
            else:
                return f"Unsupported file type: {file_ext}"
        except Exception as e:
            return f"Error reading file: {str(e)}"

    def fetch_web_content(self, url: str) -> str:
        """Fetch content from URL."""
        print(f"[DEBUG] Attempting to fetch URL: {url}")
        try:
            response = requests.get(url, timeout=10)
            response.raise_for_status()
            soup = BeautifulSoup(response.text, 'html.parser')
            # Remove scripts and styles
            for script in soup(["script", "style"]):
                script.decompose()
            text = soup.get_text(separator='\n')
            lines = (line.strip() for line in text.splitlines())
            final_text = "\n".join(line for line in lines if line)
            return final_text
        except Exception as e:
            return f"Error fetching URL: {str(e)}"

    def analyze_content(
        self,
        content: str,
        analysis_types: List[str],
    ) -> dict:
        """Perform summarization, sentiment analysis, and topic detection on `content`."""
        results = {}
        try:
            truncated = content[:1000] + "..." if len(content) > 1000 else content
            results["original_text"] = truncated
            # Summarize (BART's input is limited, so only the head of the text is used)
            if "summarize" in analysis_types:
                summary = self.summarizer(content[:1024], max_length=130, min_length=30)
                results["summary"] = summary[0]['summary_text']
            # Sentiment
            if "sentiment" in analysis_types:
                sentiment = self.sentiment_analyzer(content[:512])
                results["sentiment"] = {
                    "label": sentiment[0]['label'],
                    "score": round(sentiment[0]['score'], 3)
                }
            # Topics
            if "topics" in analysis_types:
                topics = self.zero_shot(
                    content[:512],
                    candidate_labels=[
                        "technology", "science", "business", "politics",
                        "entertainment", "education", "health", "sports"
                    ]
                )
                results["topics"] = [
                    {"label": label, "score": round(score, 3)}
                    for label, score in zip(topics['labels'], topics['scores'])
                    if score > 0.1
                ]
        except Exception as e:
            # Surface pipeline failures to the UI, which checks for this key
            results["error"] = f"Analysis error: {str(e)}"
        return results
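
# Standalone usage sketch (no UI), assuming the checkpoints above can be
# downloaded on first run:
#
#   analyzer = ContentAnalyzer()
#   results = analyzer.analyze_content(long_text, ["summarize", "topics"])
#   print(results.get("summary"), results.get("topics"))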


def create_interface():
    analyzer = ContentAnalyzer()
    with gr.Blocks(title="Content Analyzer") as demo:
        gr.Markdown("# Content Analyzer")
        gr.Markdown(
            "Analyze text from **Text**, **URL**, or **File** with summarization, "
            "sentiment, and topic detection. A progress bar will appear during processing."
        )
        # Dropdown for input type
        input_choice = gr.Dropdown(
            choices=["Text", "URL", "File"],
            value="Text",
            label="Select Input Type"
        )
        # Three columns, shown or hidden depending on the selected input type
        with gr.Column(visible=True) as text_col:
            text_input = gr.Textbox(
                label="Enter Text",
                placeholder="Paste your text here...",
                lines=5
            )
        with gr.Column(visible=False) as url_col:
            url_input = gr.Textbox(
                label="Enter URL",
                placeholder="https://example.com"
            )
        with gr.Column(visible=False) as file_col:
            file_input = gr.File(
                label="Upload File",
                file_types=[".txt", ".pdf", ".docx"]
            )

        def show_inputs(choice):
            """Map each column to a visibility update for the selected input type."""
            return {
                text_col: gr.update(visible=choice == "Text"),
                url_col: gr.update(visible=choice == "URL"),
                file_col: gr.update(visible=choice == "File")
            }

        input_choice.change(
            fn=show_inputs,
            inputs=[input_choice],
            outputs=[text_col, url_col, file_col]
        )
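        # Equivalently, show_inputs could return a tuple of three
        # gr.update(visible=...) objects in the same order as `outputs`.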

        analysis_types = gr.CheckboxGroup(
            choices=["summarize", "sentiment", "topics"],
            value=["summarize"],
            label="Analysis Types"
        )
        analyze_btn = gr.Button("Analyze", variant="primary")

        # Output tabs
        with gr.Tabs():
            with gr.Tab("Original Text"):
                original_text = gr.Markdown()
            with gr.Tab("Summary"):
                summary_output = gr.Markdown()
            with gr.Tab("Sentiment"):
                sentiment_output = gr.Markdown()
            with gr.Tab("Topics"):
                topics_output = gr.Markdown()

        def process_analysis(choice, text_val, url_val, file_val, types,
                             progress=gr.Progress()):
            """
            Retrieve the selected input, then run the requested analyses.
            Gradio injects a Progress tracker through the `progress` default
            argument; calling it updates the bar at each step. The time.sleep(1)
            calls only exist so the progress bar is visible in a demo
            (otherwise it may appear/disappear too quickly).
            """
            # STEP 1: Retrieve content
            progress(0, desc="Reading input")
            time.sleep(1)  # For demonstration
            if choice == "Text":
                content = text_val or ""
            elif choice == "URL":
                content = analyzer.fetch_web_content(url_val or "")
            else:  # File
                content = analyzer.read_file(file_val)
            if not content or content.startswith("Error"):
                return content or "No content provided", "", "", ""
            # STEPS 2-4: advance the bar; the actual pipeline calls happen below
            progress(0.25, desc="Summarizing content")
            time.sleep(1)  # For demonstration
            progress(0.5, desc="Performing sentiment analysis")
            time.sleep(1)  # For demonstration
            progress(0.75, desc="Identifying topics")
            time.sleep(1)  # For demonstration
            # After the progress steps, do the actual analysis in one shot.
            # (You could interleave the pipeline calls with the progress steps
            # above if you want real-time progress; this is a simplified approach.)
            results = analyzer.analyze_content(content, types)
            if "error" in results:
                return results["error"], "", "", ""
            original = results.get("original_text", "")
            summary = results.get("summary", "")
            sentiment = ""
            if "sentiment" in results:
                s = results["sentiment"]
                sentiment = f"**Sentiment:** {s['label']} (Confidence: {s['score']})"
            topics = ""
            if "topics" in results:
                t_list = "\n".join(
                    f"- {t['label']}: {t['score']}"
                    for t in results["topics"]
                )
                topics = "**Detected Topics:**\n" + t_list
            return original, summary, sentiment, topics
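
        # For loop-based work, progress.tqdm(iterable, desc=...) advances the
        # bar automatically per iteration and avoids manual step bookkeeping.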

        analyze_btn.click(
            fn=process_analysis,
            inputs=[input_choice, text_input, url_input, file_input, analysis_types],
            outputs=[original_text, summary_output, sentiment_output, topics_output],
            show_progress=True  # This ensures the Gradio progress bar is enabled
        )
    return demo


if __name__ == "__main__":
    demo = create_interface()
    # Progress updates are streamed through the queue; older Gradio versions
    # need it enabled explicitly for the bar to render.
    demo.queue()
    demo.launch()