import os
import time

import gradio as gr
import google.generativeai as genai

# Configure the Gemini client from the environment (never hard-code secrets).
token = os.environ.get("TOKEN")
genai.configure(api_key=token)


def describe_video(pp, video_file):
    """Upload a video to Gemini, ask the question *pp* about it, return the answer.

    Parameters
    ----------
    pp : str
        The user's question / prompt about the video.
    video_file : str
        Filesystem path of the video, as supplied by the Gradio ``Video`` input.

    Returns
    -------
    str
        The model's text answer, or an ``"An error occurred: ..."`` message
        describing the failure (the Gradio UI displays either one).
    """
    uploaded_video = None
    try:
        print("Uploading file...")
        uploaded_video = genai.upload_file(path=video_file)
        print(f"Completed upload: {uploaded_video.uri}")

        # The file is processed asynchronously server-side; poll until done.
        while uploaded_video.state.name == "PROCESSING":
            print("Waiting for video to be processed.")
            time.sleep(10)
            uploaded_video = genai.get_file(uploaded_video.name)

        if uploaded_video.state.name == "FAILED":
            raise ValueError(uploaded_video.state.name)
        print(f"Video processing complete: {uploaded_video.uri}")

        model = genai.GenerativeModel(model_name="models/gemini-1.5-flash-latest")

        # Long timeout: video understanding requests can take several minutes.
        print("Making LLM inference request...")
        response = model.generate_content(
            [pp, uploaded_video],
            request_options={"timeout": 600},
        )
        print(response.text)
        return response.text
    except Exception as e:
        # Surface the failure as text so the Gradio UI shows it instead of crashing.
        return f"An error occurred: {e}"
    finally:
        # BUGFIX: the original only deleted the file on the success path, leaking
        # uploaded videos whenever polling or inference raised. Always clean up.
        if uploaded_video is not None:
            try:
                genai.delete_file(uploaded_video.name)
                print(f"Deleted file {uploaded_video.uri}")
            except Exception:
                # Best-effort cleanup; never mask the primary result/error.
                pass


# Create the Gradio interface: a question textbox and a video upload in,
# the model's textual answer out.
iface = gr.Interface(
    fn=describe_video,
    inputs=[gr.Textbox(), gr.Video()],
    outputs=gr.Textbox(),
    title="y Video.",
    description="Pose des questions sur la vidéo et obtient une réponse.",
)

# Launch the interface
iface.launch()