# playground/app.py
from pathlib import Path
from dotenv import load_dotenv
load_dotenv()
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.chat_models import ChatOpenAI
import gradio as gr
from elevenlabs import generate, play
import json
# import whisper
# model = whisper.load_model("base", device="cuda")
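
# Build the patient prompt from a template file and wire it to the chat model in an LLMChain.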
prompt = PromptTemplate(
    input_variables=["patient", "user_input"],
    template=Path("prompts/patient.prompt").read_text(),
)
llm = ChatOpenAI(temperature=0.7)
chain = LLMChain(llm=llm, prompt=prompt)
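
# Load the patient records and use the first one for this session.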
with open("data/patients.json") as f:
    patients = json.load(f)

patient = patients[0]
print(patient)
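
# Gradio callback: run the user's message through the chain and append the exchange to the chat history.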
def run_text_prompt(message, chat_history):
    bot_message = chain.run(patient=patient, user_input=message)
    # audio = generate(text=bot_message, voice="Bella")
    # play(audio, notebook=True)
    chat_history.append((message, bot_message))
    return "", chat_history
# def run_audio_prompt(audio, chat_history):
#     if audio is None:
#         return None, chat_history
#     message_transcription = model.transcribe(audio)["text"]
#     _, chat_history = run_text_prompt(message_transcription, chat_history)
#     return None, chat_history
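
# Gradio UI: display the patient record as JSON above a text-based chat interface.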
with gr.Blocks() as demo:
    gr.Markdown(f"```json\n{json.dumps(patient)}\n```")
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    msg.submit(run_text_prompt, [msg, chatbot], [msg, chatbot])

    # with gr.Row():
    #     audio = gr.Audio(source="microphone", type="filepath")
    #     send_audio_button = gr.Button("Send Audio", interactive=True)
    #     send_audio_button.click(run_audio_prompt, [audio, chatbot], [audio, chatbot])
demo.launch(debug=True)