"""Voice-chat demo: record microphone audio, transcribe it, answer with an LLM.

Gradio records the microphone to a temp file; the file is transcribed with
OpenAI Whisper, the transcript is sent to a LangChain ``OpenAI`` LLM, and the
reply is both spoken aloud (via ``AudioInterface``) and shown as text.

Requires ``OPENAI_API_KEY`` in the environment (loaded from ``.env``).
"""
import os

import gradio as gr
import openai
from dotenv import load_dotenv
from langchain.llms import OpenAI

from interface import AudioInterface

load_dotenv()
openai.api_key = os.environ["OPENAI_API_KEY"]

interface = AudioInterface()


def process(filepath):
    """Transcribe the audio file at *filepath* and return the LLM's reply.

    The reply is also spoken aloud through ``interface.speak`` as a side
    effect. Returns the reply text for the gradio ``outputs="text"`` pane.
    """
    print(filepath)
    # Context manager guarantees the recording file handle is closed,
    # even if the transcription call raises.
    with open(filepath, "rb") as audio:
        transcript = openai.Audio.transcribe("whisper-1", audio)
    llm = OpenAI(temperature=1)
    # Call the LLM exactly once and reuse the result. The original called it
    # twice (once to speak, once to return), doubling cost/latency and — at
    # temperature=1 — potentially speaking a different answer than the one
    # displayed.
    response = llm(transcript["text"])
    interface.speak(response)
    return response


demo = gr.Interface(
    fn=process,
    inputs=gr.Audio(source="microphone", type="filepath"),
    outputs="text",
)
demo.launch()

# NOTE(review): the string below is dead code kept from an earlier CLI
# version of this demo; it is never executed.
"""
from dotenv import load_dotenv
load_dotenv()
from interface import AudioInterface
from agents import SmartChatAgent
interface = AudioInterface()
agent = SmartChatAgent()
while True:
    text = interface.listen()
    response = agent.run(text)
    interface.speak(response)
"""