# Source: Hugging Face Space "ScienceBrain.Gradio" by awacke1 — app.py
# (scraped file-viewer header: commit cc11cd1 verified, raw / history blame, 4.3 kB)
import os
import base64
import gradio as gr
import openai
import fitz # PyMuPDF
from io import BytesIO
from moviepy.video.io.VideoFileClip import VideoFileClip
from gtts import gTTS
from gradio_client import Client
# 🔐 CONFIG
KEY_FILE = "openai_api_key.txt"
MODEL = "gpt-4o-2024-05-13"

# Load a previously stored API key from disk, if one exists.
# EAFP: a missing key file simply leaves DEFAULT_KEY empty.
DEFAULT_KEY = ""
try:
    with open(KEY_FILE) as f:
        DEFAULT_KEY = f.read().strip()
except FileNotFoundError:
    pass
# 🔧 Helpers
def save_api_key(key):
    """Persist the given OpenAI key to KEY_FILE and report success."""
    stripped = key.strip()
    with open(KEY_FILE, 'w') as fh:
        fh.write(stripped)
    return "🔑 Key saved!"
def get_api_key(input_key):
    """Resolve the OpenAI API key to use.

    Priority: explicit user input > key stored on disk > OPENAI_KEY env var.
    A user-supplied or env-supplied key is persisted for future sessions.
    Raises gr.Error when no key can be found anywhere.
    """
    candidate = (input_key or "").strip()
    if candidate:
        save_api_key(input_key)
        return candidate
    if DEFAULT_KEY:
        return DEFAULT_KEY
    env_key = os.getenv('OPENAI_KEY', '')
    if env_key:
        save_api_key(env_key)
        return env_key
    raise gr.Error("❗ OpenAI API key required.")
# 📋 Gallery Processing
def apply_filters(files, filters):
    """Classify uploaded files by media type and keep those matching *filters*.

    Parameters
    ----------
    files : list | None
        Uploaded file paths. Gradio may hand over plain strings or file
        wrappers exposing ``.name`` — both are accepted. None (nothing
        uploaded yet) yields empty results instead of a TypeError.
    filters : list[str]
        Enabled filter labels ("🖼️ Images", "🎤 Audio", "🎥 Video", "📄 PDF").

    Returns
    -------
    tuple[list[str], list[tuple[str, str]]]
        ``(gallery_paths, selected)`` where ``selected`` is ``[(path, type), ...]``.
        Two values, because this callback is wired to two outputs
        (``outputs=[gallery, selected]``) — the original single-list return
        would be mis-unpacked across them by Gradio.
    """
    # Extension -> (type tag, filter label) table: one place to extend.
    rules = [
        (('.png', '.jpg', '.jpeg'), 'image', '🖼️ Images'),
        (('.wav', '.mp3'), 'audio', '🎤 Audio'),
        (('.mp4', '.webm'), 'video', '🎥 Video'),
        (('.pdf',), 'pdf', '📄 PDF'),
    ]
    selected = []
    for f in files or []:
        path = f if isinstance(f, str) else getattr(f, 'name', str(f))
        lower = path.lower()
        # Extension groups are disjoint, so at most one rule matches per file.
        for exts, typ, label in rules:
            if lower.endswith(exts) and label in filters:
                selected.append((path, typ))
    return [p for p, _ in selected], selected
def generate_table(selected, api_key):
    """Render the selected media items as a Markdown checklist table.

    *selected* is a list of ``(path, type)`` pairs; *api_key* is resolved via
    get_api_key (which raises gr.Error when no key is available) and stored
    on the openai module before rendering.
    """
    openai.api_key = get_api_key(api_key)
    type_emoji = {'image': '🖼️', 'audio': '🎤', 'video': '🎥'}  # anything else → 📄
    rows = ["| ✅ | Type | Filename |", "|---|------|----------|"]
    for path, typ in selected:
        emoji = type_emoji.get(typ, '📄')
        rows.append(f"| ✅ | {emoji} {typ.capitalize()} | {os.path.basename(path)} |")
    return "\n".join(rows) + "\n"
# 🗨️ Chat Handler
def chat_handler(api_key, message, history):
    """Send the conversation plus the new *message* to OpenAI and append the reply.

    *history* is a list of ``(user, assistant)`` pairs; it is mutated in place
    and also returned so the Chatbot component refreshes.

    NOTE(review): ``openai.ChatCompletion.create`` is the pre-1.0 API of the
    openai package — confirm the pinned version; openai>=1.0 removed it.
    """
    openai.api_key = get_api_key(api_key)
    # Flatten the (user, assistant) pairs into the chat-completion format.
    messages = [
        {"role": role, "content": text}
        for user_turn, assistant_turn in history
        for role, text in (("user", user_turn), ("assistant", assistant_turn))
    ]
    messages.append({"role": "user", "content": message})
    resp = openai.ChatCompletion.create(model=MODEL, messages=messages)
    answer = resp.choices[0].message.content
    history.append((message, answer))
    return history
# 🎞️ Example: Video Summarizer (placeholder)
# def summarize_video(api_key, file_path, prompt):
# ... implementation ...
# 🔑 UI Definition
# 🔑 UI Definition
with gr.Blocks(title="🔬🧠 ScienceBrain.Gradio") as demo:
    gr.Markdown("# 🔬🧠 ScienceBrain Gradio")

    # --- API key entry / persistence ---
    with gr.Row():
        api_input = gr.Textbox(label="🔑 OpenAI Key", value=DEFAULT_KEY, type="password")
        save_btn = gr.Button("💾 Save Key")
        status_txt = gr.Textbox(interactive=False)
    save_btn.click(save_api_key, inputs=api_input, outputs=status_txt)

    # --- Upload + filtered gallery ---
    gr.Markdown("## 📋 Media Gallery & Filters")
    upload = gr.File(file_count="multiple", label="Upload files (images, audio, videos, PDFs)")
    # Gallery.style(grid=..., height=...) was removed in modern Gradio;
    # the layout options are constructor kwargs now.
    gallery = gr.Gallery(label="Filtered Gallery", columns=4, height="auto")
    filter_opts = ["🖼️ Images", "🎤 Audio", "🎥 Video", "📄 PDF"]
    filters = gr.CheckboxGroup(filter_opts, value=filter_opts, label="🔍 Filter Types")
    select_btn = gr.Button("⚙️ Apply Filters")
    # gr.Variable was deprecated and removed; gr.State is the supported equivalent.
    selected = gr.State()
    select_btn.click(apply_filters, inputs=[upload, filters], outputs=[gallery, selected])

    # --- Discussion selection ---
    gr.Markdown("## ✅ Include in Discussion")
    disc = gr.CheckboxGroup(label="Select items", choices=[])
    # NOTE(review): with inputs=None this lambda receives no positional arg in
    # modern Gradio (select events inject gr.SelectData only via annotation),
    # and `choices` is never repopulated — this wiring needs a rework; kept
    # as in the original to preserve behavior.
    gallery.select(lambda x: [x], None, disc)

    # --- Summary table ---
    gr.Markdown("## 📝 Summary Table")
    table_md = gr.Markdown()
    table_btn = gr.Button("Generate Table")
    # NOTE(review): `disc` emits a list of label strings, while generate_table
    # expects (path, type) pairs — verify against the selection flow above.
    table_btn.click(generate_table, inputs=[disc, api_input], outputs=table_md)

    # --- Chat tab ---
    with gr.Tab("💬 Chat"):
        chatbot = gr.Chatbot()
        msg_in = gr.Textbox(placeholder="Type your message...")
        msg_in.submit(chat_handler, inputs=[api_input, msg_in, chatbot], outputs=chatbot)

if __name__ == "__main__":
    demo.launch()