import spaces
import gradio as gr
import logging
import os
import tempfile
import pandas as pd
import requests
from bs4 import BeautifulSoup
import torch
import whisper
import subprocess
from pydub import AudioSegment
import fitz  # PyMuPDF
import docx
import yt_dlp
from functools import lru_cache
import gc
import time
from huggingface_hub import login
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import traceback  # For detailed error logging
# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(filename)s:%(lineno)d - %(message)s'
)
logger = logging.getLogger(__name__)
logger.info("--- Starting App ---")
# Login to Hugging Face Hub if token is available
HUGGINGFACE_TOKEN = os.environ.get('HUGGINGFACE_TOKEN')
if HUGGINGFACE_TOKEN:
    logger.info("HUGGINGFACE_TOKEN environment variable found.")
    try:
        login(token=HUGGINGFACE_TOKEN)
        logger.info("Successfully logged in to Hugging Face Hub.")
    except Exception as e:
        logger.error(f"Failed to login to Hugging Face Hub: {e}")
        logger.error(traceback.format_exc())
else:
    logger.warning("HUGGINGFACE_TOKEN environment variable not set.")
class ModelManager:
    _instance = None

    def __new__(cls):
        if cls._instance is None:
            logger.info("Creating new ModelManager instance.")
            cls._instance = super(ModelManager, cls).__new__(cls)
            cls._instance._initialized = False
        return cls._instance

    def __init__(self):
        if not hasattr(self, '_initialized') or not self._initialized:
            logger.info("Initializing ModelManager attributes.")
            self.tokenizer = None
            self.model = None
            self.text_pipeline = None
            self.whisper_model = None
            self.llm_loaded = False
            self.whisper_loaded = False
            self.last_used = time.time()
            self.llm_loading = False
            self.whisper_loading = False
            self._initialized = True
    def _cleanup_memory(self):
        logger.info("Running garbage collection...")
        collected_count = gc.collect()
        logger.info(f"Garbage collected ({collected_count} objects).")
        if torch.cuda.is_available():
            logger.info("Clearing CUDA cache...")
            torch.cuda.empty_cache()
            logger.info("CUDA cache cleared.")

    def reset_llm(self):
        logger.info("--- Attempting to reset LLM ---")
        try:
            if hasattr(self, 'model') and self.model is not None: del self.model; logger.info("LLM model deleted.")
            if hasattr(self, 'tokenizer') and self.tokenizer is not None: del self.tokenizer; logger.info("LLM tokenizer deleted.")
            if hasattr(self, 'text_pipeline') and self.text_pipeline is not None: del self.text_pipeline; logger.info("LLM pipeline deleted.")
            self.model = None; self.tokenizer = None; self.text_pipeline = None
            self.llm_loaded = False
            self._cleanup_memory()
            logger.info("LLM components reset successfully.")
        except Exception as e: logger.error(f"!!! ERROR during LLM reset: {e}"); logger.error(traceback.format_exc())

    def reset_whisper(self):
        logger.info("--- Attempting to reset Whisper ---")
        try:
            if hasattr(self, 'whisper_model') and self.whisper_model is not None: del self.whisper_model; logger.info("Whisper model deleted.")
            self.whisper_model = None
            self.whisper_loaded = False
            self._cleanup_memory()
            logger.info("Whisper component reset successfully.")
        except Exception as e: logger.error(f"!!! ERROR during Whisper reset: {e}"); logger.error(traceback.format_exc())
    def initialize_llm(self):
        logger.info("Attempting to initialize LLM.")
        if self.llm_loading: logger.info("LLM initialization already in progress."); return True
        if self.llm_loaded: logger.info("LLM already initialized."); self.last_used = time.time(); return True
        self.llm_loading = True
        logger.info("Starting LLM initialization...")
        try:
            MODEL_NAME = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
            logger.info(f"Using LLM model: {MODEL_NAME}")
            self.tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=HUGGINGFACE_TOKEN, use_fast=True)
            if self.tokenizer.pad_token is None: self.tokenizer.pad_token = self.tokenizer.eos_token
            self.model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, token=HUGGINGFACE_TOKEN, device_map="auto", torch_dtype=torch.float16, low_cpu_mem_usage=True, offload_folder="offload", offload_state_dict=True)
            self.text_pipeline = pipeline("text-generation", model=self.model, tokenizer=self.tokenizer, torch_dtype=torch.float16, device_map="auto", max_length=1024)
            logger.info("LLM initialized successfully.")
            self.last_used = time.time(); self.llm_loaded = True; self.llm_loading = False; return True
        except Exception as e: logger.error(f"!!! ERROR during LLM initialization: {e}"); logger.error(traceback.format_exc()); self.reset_llm(); self.llm_loading = False; raise

    def initialize_whisper(self):
        logger.info("Attempting to initialize Whisper.")
        if self.whisper_loading: logger.info("Whisper initialization already in progress."); return True
        if self.whisper_loaded: logger.info("Whisper already initialized."); self.last_used = time.time(); return True
        self.whisper_loading = True
        logger.info("Starting Whisper initialization...")
        try:
            WHISPER_MODEL_NAME = "tiny"
            self.whisper_model = whisper.load_model(WHISPER_MODEL_NAME, device="cuda" if torch.cuda.is_available() else "cpu", download_root="/tmp/whisper")
            logger.info(f"Whisper model '{WHISPER_MODEL_NAME}' loaded successfully.")
            self.last_used = time.time(); self.whisper_loaded = True; self.whisper_loading = False; return True
        except Exception as e: logger.error(f"!!! ERROR during Whisper initialization: {e}"); logger.error(traceback.format_exc()); self.reset_whisper(); self.whisper_loading = False; raise
    def check_llm_initialized(self):
        logger.info("Checking if LLM is initialized.")
        if not self.llm_loaded:
            logger.info("LLM not initialized, attempting initialization...")
            if not self.llm_loading: self.initialize_llm(); logger.info("LLM initialization completed by check_llm_initialized.")
            else:
                logger.info("LLM initialization already in progress. Waiting briefly.")
                time.sleep(10)
                if not self.llm_loaded: raise RuntimeError("LLM initialization timed out or failed after waiting.")
                else: logger.info("LLM seems initialized now after waiting.")
        else: logger.info("LLM was already initialized.")
        self.last_used = time.time()

    def check_whisper_initialized(self):
        logger.info("Checking if Whisper is initialized.")
        if not self.whisper_loaded:
            logger.info("Whisper model not initialized, attempting initialization...")
            if not self.whisper_loading: self.initialize_whisper(); logger.info("Whisper initialization completed by check_whisper_initialized.")
            else:
                logger.info("Whisper initialization already in progress. Waiting briefly.")
                time.sleep(10)
                if not self.whisper_loaded: raise RuntimeError("Whisper initialization timed out or failed after waiting.")
                else: logger.info("Whisper seems initialized now after waiting.")
        else: logger.info("Whisper was already initialized.")
        self.last_used = time.time()
    def reset_models(self, force=False):
        if force: logger.info("Forcing reset of all models."); self.reset_llm(); self.reset_whisper()


# Create global model manager instance
logger.info("Creating global ModelManager instance.")
model_manager = ModelManager()
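
# Models are loaded lazily: nothing is downloaded until one of the check_* helpers runs.
# A minimal usage sketch (the audio path below is illustrative, not part of this app):
#   model_manager.check_whisper_initialized()
#   result = model_manager.whisper_model.transcribe("/tmp/example.mp3")
#   print(result["text"])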
# --- Functions: download_social_media_video, convert_video_to_audio, etc. ---
# --- Kept exactly the same as the previous full version ---
def download_social_media_video(url):
    logger.info(f"Attempting social download: {url}")
    temp_dir = tempfile.mkdtemp()
    output_template = os.path.join(temp_dir, '%(id)s.%(ext)s')
    final_audio_file_path = None
    ydl_opts = {'format': 'bestaudio/best', 'postprocessors': [{'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3', 'preferredquality': '192'}], 'outtmpl': output_template, 'quiet': True, 'no_warnings': True, 'nocheckcertificate': True, 'retries': 3, 'socket_timeout': 15, 'cachedir': False}
    try:
        with yt_dlp.YoutubeDL(ydl_opts) as ydl: info_dict = ydl.extract_info(url, download=True)
        found_files = [f for f in os.listdir(temp_dir) if f.endswith('.mp3')]
        if not found_files: raise FileNotFoundError(f"Downloaded MP3 not found in {temp_dir}")
        final_audio_file_path = os.path.join(temp_dir, found_files[0])
        with open(final_audio_file_path, 'rb') as f: audio_content = f.read()
        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as temp_output_file:
            temp_output_file.write(audio_content); final_path_for_gradio = temp_output_file.name
        logger.info(f"Social audio saved to: {final_path_for_gradio}")
        return final_path_for_gradio
    except yt_dlp.utils.DownloadError as e: logger.error(f"yt-dlp error {url}: {e}"); return None
    except Exception as e: logger.error(f"Download error {url}: {e}"); logger.error(traceback.format_exc()); return None
    finally:
        if os.path.exists(temp_dir):
            try: import shutil; shutil.rmtree(temp_dir)
            except Exception as cleanup_e: logger.warning(f"Cleanup failed {temp_dir}: {cleanup_e}")
def convert_video_to_audio(video_file_path):
    logger.info(f"Converting video: {video_file_path}")
    output_file_path = None
    try:
        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as temp_file: output_file_path = temp_file.name
        command = ["ffmpeg", "-i", video_file_path, "-vn", "-acodec", "libmp3lame", "-ab", "192k", "-ar", "44100", "-ac", "2", output_file_path, "-y", "-loglevel", "error"]
        subprocess.run(command, check=True, capture_output=True, text=True, timeout=120)
        if not os.path.exists(output_file_path) or os.path.getsize(output_file_path) == 0: raise RuntimeError("ffmpeg output empty")
        logger.info(f"Video converted to: {output_file_path}")
        return output_file_path
    except subprocess.CalledProcessError as e: logger.error(f"ffmpeg fail {video_file_path}: {e.stderr}"); raise RuntimeError(f"ffmpeg failed: {e.stderr}") from e
    except subprocess.TimeoutExpired as e: logger.error(f"ffmpeg timeout {video_file_path}"); raise RuntimeError("ffmpeg timed out") from e
    except Exception as e: logger.error(f"Video conversion error {video_file_path}: {e}"); logger.error(traceback.format_exc()); raise
    finally:
        if output_file_path and os.path.exists(output_file_path) and ('e' in locals() or (not os.path.exists(output_file_path) or os.path.getsize(output_file_path) == 0)):
            try: os.remove(output_file_path)
            except: pass
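
# Usage sketch for the converter above (illustrative path; requires ffmpeg on PATH):
#   mp3_path = convert_video_to_audio("/tmp/interview.mp4")
#   # mp3_path points to a temporary .mp3 that the caller is responsible for deleting.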
def preprocess_audio(input_audio_path):
    logger.info(f"Preprocessing audio: {input_audio_path}")
    output_path = None
    try:
        if not os.path.exists(input_audio_path): raise FileNotFoundError(f"Preprocessing input not found: {input_audio_path}")
        audio = AudioSegment.from_file(input_audio_path)
        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as temp_file:
            output_path = temp_file.name; audio.export(output_path, format="mp3")
        logger.info(f"Audio preprocessed to: {output_path}")
        return output_path
    except FileNotFoundError as e: logger.error(f"Preprocessing file not found: {e}"); raise
    except Exception as e: logger.error(f"Preprocessing error {input_audio_path}: {e}"); logger.error(traceback.format_exc()); raise
    finally:
        if 'e' in locals() and output_path and os.path.exists(output_path):
            try: os.remove(output_path)
            except: pass
def transcribe_audio_or_video(file_input):
    logger.info(f"--- Starting transcription: {type(file_input)} ---")
    audio_file_to_transcribe = None; temp_files_to_clean = []; transcription = ""
    try:
        logger.info("Checking Whisper model..."); model_manager.check_whisper_initialized()
        if file_input is None: return ""
        if isinstance(file_input, str): input_path = file_input
        elif hasattr(file_input, 'name') and file_input.name: input_path = file_input.name
        else: raise TypeError("Invalid input type.")
        if not os.path.exists(input_path): raise FileNotFoundError(f"Input not found: {input_path}")
        file_extension = os.path.splitext(input_path)[1].lower()
        if file_extension in ['.mp4', '.avi', '.mov', '.mkv', '.webm']:
            converted_audio_path = convert_video_to_audio(input_path)
            temp_files_to_clean.append(converted_audio_path); audio_file_to_process = converted_audio_path
        elif file_extension in ['.mp3', '.wav', '.ogg', '.flac', '.m4a', '.aac']: audio_file_to_process = input_path
        else: raise ValueError(f"Unsupported type: {file_extension}")
        try:
            preprocessed_audio_path = preprocess_audio(audio_file_to_process)
            if preprocessed_audio_path != audio_file_to_process: temp_files_to_clean.append(preprocessed_audio_path)
            audio_file_to_transcribe = preprocessed_audio_path
        except Exception as preprocess_err: logger.warning(f"Preprocessing failed ({preprocess_err}), using original."); audio_file_to_transcribe = audio_file_to_process
        if not os.path.exists(audio_file_to_transcribe): raise FileNotFoundError(f"File to transcribe lost: {audio_file_to_transcribe}")
        logger.info(f"Transcribing: {audio_file_to_transcribe}")
        with torch.inference_mode():
            use_fp16 = torch.cuda.is_available()
            result = model_manager.whisper_model.transcribe(audio_file_to_transcribe, fp16=use_fp16)
            if not result or "text" not in result: raise RuntimeError("Transcription empty result")
            transcription = result.get("text", "")
        logger.info(f"Transcription success: '{transcription[:100]}...'")
    except Exception as e: logger.error(f"!!! Transcription failed: {e}"); logger.error(traceback.format_exc()); transcription = f"Error during transcription: {e}"
    finally:
        logger.debug(f"--- Cleaning {len(temp_files_to_clean)} temp transcription files ---")
        for temp_file in temp_files_to_clean:
            try:
                if os.path.exists(temp_file): os.remove(temp_file)
            except Exception as e: logger.warning(f"Cleanup failed {temp_file}: {e}")
    return transcription
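
# Usage sketch (illustrative path): accepts a file path or a Gradio file object,
# converts video to audio when needed, and returns the transcription text
# (or an "Error during transcription: ..." string on failure):
#   text = transcribe_audio_or_video("/tmp/press_conference.mp3")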
def read_document(document_path):
    logger.info(f"Reading document: {document_path}")
    try:
        if not os.path.exists(document_path): raise FileNotFoundError(f"Doc not found: {document_path}")
        ext = os.path.splitext(document_path)[1].lower(); logger.debug(f"Doc type: {ext}")
        content = ""
        if ext == ".pdf":
            doc = fitz.open(document_path)
            if doc.is_encrypted and not doc.authenticate(""): raise ValueError("Encrypted PDF")
            content = "\n".join([page.get_text() for page in doc]); doc.close()
        elif ext == ".docx": doc = docx.Document(document_path); content = "\n".join([p.text for p in doc.paragraphs])
        elif ext in (".xlsx", ".xls"):
            xls = pd.ExcelFile(document_path); parts = []
            for sheet in xls.sheet_names: df = pd.read_excel(xls, sheet_name=sheet); parts.append(f"--- {sheet} ---\n{df.to_string()}")
            content = "\n\n".join(parts).strip()
        elif ext == ".csv":
            enc = 'utf-8'  # Fallback if encoding detection fails below
            try:
                with open(document_path, 'rb') as f: import chardet; enc = chardet.detect(f.read())['encoding'] or enc
                df = pd.read_csv(document_path, encoding=enc)
            except Exception as e1:
                logger.warning(f"CSV parse failed ({e1}), trying alternatives...")
                try: df = pd.read_csv(document_path, sep=';', encoding=enc)
                except Exception: df = pd.read_csv(document_path, encoding='latin1')  # Last resort
            content = df.to_string()
        else: return "Unsupported file type."
        logger.info(f"Doc read success. Length: {len(content)}")
        return content
    except Exception as e: logger.error(f"!!! Read doc error: {e}"); logger.error(traceback.format_exc()); return f"Error reading document: {e}"
def read_url(url):
    logger.info(f"Reading URL: {url}")
    if not url or not url.strip().startswith('http'): return ""
    try:
        headers = {'User-Agent': 'Mozilla/5.0 ...', 'Accept': 'text/html...', 'Accept-Language': 'en-US,en;q=0.9', 'Connection': 'keep-alive'}
        response = requests.get(url, headers=headers, timeout=20, allow_redirects=True)
        response.raise_for_status()
        ct = response.headers.get('content-type', '').lower()
        if not ('html' in ct or 'text' in ct): return f"Error: Non-text content type: {ct}"
        enc = response.encoding if response.encoding else response.apparent_encoding
        html = response.content.decode(enc or 'utf-8', errors='ignore')
        soup = BeautifulSoup(html, 'html.parser')
        for tag in soup(["script", "style", "meta", "noscript", "iframe", "header", "footer", "nav", "aside", "form", "button", "link", "head"]): tag.extract()
        main = (soup.find("main") or soup.find("article") or soup.find("div", class_=["content", "main", "post-content", "entry-content", "article-body", "story-content"]) or soup.find("div", id=["content", "main", "article", "story"]))
        text = main.get_text(separator='\n', strip=True) if main else soup.body.get_text(separator='\n', strip=True) if soup.body else soup.get_text(separator='\n', strip=True)
        lines = [line.strip() for line in text.split('\n') if line.strip()]; cleaned = "\n".join(lines)
        if not cleaned: return "Error: Could not extract text."
        max_c = 15000; final = (cleaned[:max_c] + "... [truncated]") if len(cleaned) > max_c else cleaned
        logger.info(f"URL read success. Length: {len(final)}")
        return final
    except Exception as e: logger.error(f"!!! Read URL error: {e}"); logger.error(traceback.format_exc()); return f"Error reading URL: {e}"
def process_social_media_url(url):
    logger.info(f"--- Processing social URL: {url} ---")
    if not url or not url.strip().startswith('http'): return None
    text = None; video = None; audio_file = None
    try: text_res = read_url(url); text = text_res if text_res and not text_res.startswith("Error:") else None
    except Exception as e: logger.error(f"Social text read error: {e}")
    try:
        audio_file = download_social_media_video(url)
        if audio_file: video_res = transcribe_audio_or_video(audio_file); video = video_res if video_res and not video_res.startswith("Error:") else None
    except Exception as e: logger.error(f"Social audio proc error: {e}")
    finally:
        if audio_file and os.path.exists(audio_file):
            try: os.remove(audio_file)
            except Exception as e: logger.warning(f"Social cleanup fail {audio_file}: {e}")
    logger.debug(f"--- Finished social URL: {url} ---")
    if text or video: return {"text": text or "", "video": video or ""}
    else: return None
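
# Usage sketch (illustrative URL): returns {"text": ..., "video": ...}, where "video" holds
# the transcription of any downloadable audio, or None if nothing could be extracted:
#   social = process_social_media_url("https://example.com/post")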
# ==============================================================
# ========= SIMPLIFIED generate_news FOR DEBUGGING =============
# ==============================================================
# Kept short, for testing purposes only
def generate_news(instructions, facts, size, tone, *args):
    request_start_time = time.time()
    logger.info("--- generate_news function started (SIMPLIFIED DEBUG VERSION) ---")
    generated_article = "Debug: Simplified function executed."
    raw_transcriptions = f"Debug info:\nInstructions: {bool(instructions)}\nFacts: {bool(facts)}\nSize: {size}\nTone: {tone}\nNum args: {len(args)}"
    error_to_report = None
    # --- Comment out ALL processing and model loading ---
    try:
        logger.info("Simplified version: Skipping all processing and model loading.")
        # --- Do NOT call check_llm_initialized or check_whisper_initialized ---
        # --- Do NOT process documents, urls, audio, social ---
        # --- Do NOT build the prompt ---
        # --- Do NOT call text_pipeline ---
        pass  # We simply do nothing here
        logger.info("Simplified version: Reached end of try block.")
    except Exception as e:
        total_time = time.time() - request_start_time
        logger.error(f"!!! UNHANDLED Error even in SIMPLIFIED generate_news after {total_time:.2f} seconds: {str(e)}")
        logger.error(traceback.format_exc())
        error_to_report = f"Error in simplified function: {str(e)}"
        generated_article = error_to_report
        raw_transcriptions += f"\n\n[CRITICAL ERROR] Simplified execution failed: {str(e)}"
    total_time = time.time() - request_start_time
    logger.info(f"--- generate_news (SIMPLIFIED DEBUG VERSION) finished in {total_time:.2f} seconds. ---")
    # Make sure to return two strings
    return generated_article, raw_transcriptions
# ==============================================================
# ================= END OF SIMPLIFIED VERSION ==================
# ==============================================================
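
# For orientation only: a rough, commented-out sketch of what the full (non-debug) generation
# step could look like. This is an assumption, not the original implementation; the prompt
# wording and generation parameters below are placeholders.
#
#   model_manager.check_llm_initialized()
#   prompt = (f"Write a news article of about {size} words in a {tone} tone.\n"
#             f"Instructions: {instructions}\nFacts: {facts}\n\nArticle:")
#   outputs = model_manager.text_pipeline(prompt, max_new_tokens=512, do_sample=True, temperature=0.7)
#   generated_article = outputs[0]["generated_text"][len(prompt):].strip()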
# --- create_demo function ---
# --- MODIFIED: Removed file_types from gr.File ---
def create_demo():
    """Creates the Gradio interface"""
    logger.info("--- Creating Gradio interface ---")
    with gr.Blocks(theme=gr.themes.Soft()) as demo:
        gr.Markdown("# 📰 NewsIA - AI News Generator")
        gr.Markdown("Create professional news articles from multiple information sources.")
        all_inputs = []
        with gr.Row():
            with gr.Column(scale=2):
                instructions = gr.Textbox(label="Instructions for the News Article", placeholder="Enter specific instructions...", lines=2)
                all_inputs.append(instructions)
                facts = gr.Textbox(label="Main Facts", placeholder="Describe the most important facts...", lines=4)
                all_inputs.append(facts)
                with gr.Row():
                    size_slider = gr.Slider(label="Approximate Length (words)", minimum=100, maximum=700, value=250, step=50)
                    all_inputs.append(size_slider)
                    tone_dropdown = gr.Dropdown(label="Tone of the News Article", choices=["neutral", "serious", "formal", "urgent", "investigative", "human-interest", "lighthearted"], value="neutral")
                    all_inputs.append(tone_dropdown)
            with gr.Column(scale=3):
                with gr.Tabs():
                    with gr.TabItem("📄 Documents"):
                        gr.Markdown("Upload relevant documents (PDF, DOCX, XLSX, CSV). Max 5.")
                        doc_inputs = []
                        for i in range(1, 6):
                            # *** CHANGED: Removed file_types ***
                            doc_file = gr.File(label=f"Document {i}", file_count="single")
                            doc_inputs.append(doc_file)
                        all_inputs.extend(doc_inputs)
                    with gr.TabItem("🎵 Audio/Video"):
                        gr.Markdown("Upload audio or video files... Max 5 sources.")
                        audio_video_inputs = []
                        for i in range(1, 6):
                            with gr.Group():
                                gr.Markdown(f"**Source {i}**")
                                # *** CHANGED: Removed file_types ***
                                audio_file = gr.File(label=f"Audio/Video File {i}")
                                with gr.Row():
                                    speaker_name = gr.Textbox(label="Speaker Name", placeholder="Name...")
                                    speaker_role = gr.Textbox(label="Role/Position", placeholder="Role...")
                                audio_video_inputs.extend([audio_file, speaker_name, speaker_role])
                        all_inputs.extend(audio_video_inputs)
                    with gr.TabItem("🔗 URLs"):
                        gr.Markdown("Add URLs to relevant web pages... Max 5.")
                        url_inputs = []
                        for i in range(1, 6):
                            url_textbox = gr.Textbox(label=f"URL {i}", placeholder="https://...")
                            url_inputs.append(url_textbox)
                        all_inputs.extend(url_inputs)
                    with gr.TabItem("📱 Social Media"):
                        gr.Markdown("Add URLs to social media posts... Max 3.")
                        social_inputs = []
                        for i in range(1, 4):
                            with gr.Group():
                                gr.Markdown(f"**Social Media Source {i}**")
                                social_url_textbox = gr.Textbox(label="Post URL", placeholder="https://...")
                                with gr.Row():
                                    social_name_textbox = gr.Textbox(label="Account Name/User", placeholder="@username")
                                    social_context_textbox = gr.Textbox(label="Context", placeholder="Context...")
                                social_inputs.extend([social_url_textbox, social_name_textbox, social_context_textbox])
                        all_inputs.extend(social_inputs)
        generate_button = gr.Button("✨ Generate News Article", variant="primary")
        clear_button = gr.Button("🔄 Clear All Inputs")
        with gr.Tabs():
            with gr.TabItem("📝 Generated News Article"):
                news_output = gr.Textbox(label="Draft News Article", lines=20, show_copy_button=True, interactive=False)
            with gr.TabItem("🎙️ Source Transcriptions & Logs"):
                transcriptions_output = gr.Textbox(label="Transcriptions and Processing Log", lines=15, show_copy_button=True, interactive=False)
        outputs_list = [news_output, transcriptions_output]
        generate_button.click(fn=generate_news, inputs=all_inputs, outputs=outputs_list)

        def clear_all_inputs_and_outputs():
            logger.info("--- Clear All button clicked ---")
            reset_values = []
            for input_comp in all_inputs:
                if isinstance(input_comp, gr.Dropdown): reset_values.append("neutral")  # Reset to the declared default choice
                elif isinstance(input_comp, gr.Textbox): reset_values.append("")
                elif isinstance(input_comp, gr.Slider): reset_values.append(250)
                elif isinstance(input_comp, gr.File): reset_values.append(None)
                else: reset_values.append(None)
            reset_values.extend(["", ""])
            try: logger.info("Calling model reset from clear button handler."); model_manager.reset_models(force=True)
            except Exception as e: logger.error(f"Error resetting models during clear: {e}")
            logger.info("--- Clear All operation finished ---")
            return reset_values

        clear_button.click(fn=clear_all_inputs_and_outputs, inputs=None, outputs=all_inputs + outputs_list)
    logger.info("--- Gradio interface creation complete ---")
    return demo
# --- main execution block remains the same ---
if __name__ == "__main__":
    logger.info("--- Running main execution block ---")
    news_demo = create_demo()
    news_demo.queue()
    logger.info("Launching Gradio interface...")
    try:
        news_demo.launch(server_name="0.0.0.0", server_port=7860)
        logger.info("Gradio launch called. Application running.")
    except Exception as launch_err:
        logger.error(f"!!! CRITICAL Error during Gradio launch: {launch_err}")
        logger.error(traceback.format_exc())
    logger.info("--- Main execution block potentially finished ---")