# Video Summarizer — Gradio app that uses Gemini 1.5 Pro to summarize an uploaded video.
import os
import tempfile
import time
from pathlib import Path

import google.generativeai as genai
import gradio as gr
def summarize_video(video_path):
    """Summarize an uploaded video with Gemini 1.5 Pro.

    Args:
        video_path: Filesystem path to the uploaded video. Gradio's
            ``gr.Video`` component passes a ``str`` path on disk, not a
            file object, so no tempfile copy is needed.

    Returns:
        The model's text summary, or a human-readable error message
        (this function never raises — errors are reported in the UI).
    """
    if video_path is None:
        return "Please upload a video file."

    uploaded = None
    try:
        # GenerativeModel() accepts no api_key kwarg; the SDK is
        # authenticated process-wide via configure().
        genai.configure(api_key=os.environ["GOOGLE_API_KEY"])

        # The Gemini File API requires uploading media before it can be
        # referenced in a prompt; a bare local path string is not sent.
        uploaded = genai.upload_file(path=video_path)

        # Video files are processed server-side asynchronously; poll
        # until processing finishes before generating content.
        while uploaded.state.name == "PROCESSING":
            time.sleep(2)
            uploaded = genai.get_file(uploaded.name)
        if uploaded.state.name == "FAILED":
            return "An error occurred: video processing failed."

        model = genai.GenerativeModel(model_name="models/gemini-1.5-pro")

        # Make the LLM request; long timeout because video inference is slow.
        print("Making LLM inference request...")
        response = model.generate_content(
            ["Summarize this video", uploaded],
            request_options={"timeout": 2000},
        )
        return response.text
    except Exception as e:
        return f"An error occurred: {str(e)}"
    finally:
        # Best-effort cleanup of the server-side file to avoid quota buildup.
        if uploaded is not None:
            try:
                genai.delete_file(uploaded.name)
            except Exception:
                pass
# Gradio UI: a single video upload mapped to a read-only text summary.
_interface_config = dict(
    fn=summarize_video,
    inputs=gr.Video(label="Upload Video"),
    outputs=gr.Textbox(label="Summary", lines=10),
    title="Video Summarizer",
    description="Upload a video to get an AI-generated summary using Gemini 1.5 Pro.",
    examples=[],
    cache_examples=False,
)
iface = gr.Interface(**_interface_config)
# Entry point: start the web server with a public share link when run as a script.
if __name__ == "__main__":
    iface.launch(share=True)