Francesco committed
Commit 296923b · 1 Parent(s): 6d82372

fix for chat + session state

Files changed (2)
  1. app.py +48 -26
  2. prompts/patient.prompt +1 -4
app.py CHANGED
@@ -8,62 +8,80 @@ import json
 import os
 from functools import partial
 from pathlib import Path
+from pprint import pprint
 
 import gradio as gr
-from elevenlabs import generate, play
-from langchain.chains import LLMChain
 from langchain.chat_models import ChatOpenAI
-from langchain.prompts import PromptTemplate
+from langchain.prompts import (HumanMessagePromptTemplate,
+                               PromptTemplate, SystemMessagePromptTemplate)
 
 # import whisper
 
 # model = whisper.load_model("base", device="cuda")
 
 
-prompt = PromptTemplate(
-    input_variables=["patient", "user_input"],
-    template=Path("prompts/patient.prompt").read_text(),
+system_message_prompt = SystemMessagePromptTemplate(
+    prompt=PromptTemplate(
+        input_variables=["patient"],
+        template=Path("prompts/patient.prompt").read_text(),
+    )
 )
 
-llm = ChatOpenAI(temperature=0.7)
+human_template = "Doctor: {text}"
+human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
 
-chain = LLMChain(llm=llm, prompt=prompt)
+chat = ChatOpenAI(temperature=0.7)
 
 with open("data/patients.json") as f:
     patiens = json.load(f)
 
 patients_names = [el["name"] for el in patiens]
-patient = [patiens[0]]
 
 
-def run_text_prompt(message, chat_history, patient):
-    print(message, chat_history, patient[0]["name"])
-    bot_message = chain.run(patient=patient[0], user_input=message)
-
-    chat_history.append((message, bot_message))
-    return "", chat_history
+def run_text_prompt(message, chat_history, messages):
+    if not messages:
+        messages = []
+        messages.append(system_message_prompt.format(patient=patient))
+    messages.append(human_message_prompt.format(text=message))
+    messages.append(chat(messages))
+    pprint(messages)
+    chat_history.append((message, messages[-1].content))
+    return "", chat_history, messages
 
 
-def on_drop_down_change(selected_item):
+def on_clear_button_click(patient, messages):
+    messages = [system_message_prompt.format(patient=patient)]
+    return [], messages
+
+
+def on_drop_down_change(selected_item, messages):
     index = patients_names.index(selected_item)
-    patient[0] = patiens[index]
+    patient = patiens[index]
+    messages = [system_message_prompt.format(patient=patient)]
     print(f"You selected: {selected_item}", index)
-    return f"```json\n{json.dumps(patient[0], indent=2)}\n```"
+    return f"```json\n{json.dumps(patient, indent=2)}\n```", patient, [], messages
 
 
 with gr.Blocks() as demo:
     chatbot = gr.Chatbot()
+    messages = gr.State([])
+    patient = gr.State(patiens[0])
 
     with gr.Row():
         with gr.Column():
             msg = gr.Textbox()
             msg.submit(
-                partial(run_text_prompt, patient=patient),
-                [msg, chatbot],
-                [msg, chatbot],
+                run_text_prompt,
+                [msg, chatbot, messages],
+                [msg, chatbot, messages],
             )
             clear = gr.Button("Clear")
-            clear.click(lambda: None, None, chatbot, queue=False)
+            clear.click(
+                on_clear_button_click,
+                [patient, messages],
+                [chatbot, messages],
+                queue=False,
+            )
 
         with gr.Column():
             patients_names = [el["name"] for el in patiens]
@@ -73,8 +91,12 @@ with gr.Blocks() as demo:
                 interactive=True,
                 label="Patient",
             )
-            markdown = gr.Markdown(f"```json\n{json.dumps(patient[0], indent=2)}\n```")
-            dropdown.change(fn=on_drop_down_change, inputs=dropdown, outputs=markdown)
-            print(patient)
-            # dropdown.value(lambda x: print(x))
-            # patient = dropdown.select(on_dropdown_select)
+            markdown = gr.Markdown(
+                f"```json\n{json.dumps(patient.value, indent=2)}\n```"
+            )
+            dropdown.change(
+                fn=on_drop_down_change,
+                inputs=[dropdown, messages],
+                outputs=[markdown, patient, chatbot, messages],
+            ),
+# demo.launch(debug=True)
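
Below, a minimal, self-contained sketch of the session-state pattern the new app.py relies on: the conversation list lives in a gr.State value that is passed into and returned from the submit handler, so each browser session keeps its own history instead of sharing module-level globals. The respond function and its echo reply are illustrative stand-ins, not the repo's LangChain call.

import gradio as gr


def respond(user_message, chat_history, messages):
    # `messages` plays the role of the per-session message list held in gr.State;
    # a real handler would call the chat model here instead of echoing.
    bot_reply = f"(echo) {user_message}"
    messages = messages + [("user", user_message), ("assistant", bot_reply)]
    chat_history.append((user_message, bot_reply))
    return "", chat_history, messages


with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    messages = gr.State([])  # per-session state, not a shared global
    msg = gr.Textbox()
    # The State is listed as both input and output so the updated list is stored back.
    msg.submit(respond, [msg, chatbot, messages], [msg, chatbot, messages])

if __name__ == "__main__":
    demo.launch()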
prompts/patient.prompt CHANGED
@@ -4,7 +4,4 @@ You will act like a patient with a given persona and a present complain. I will
 
 Please reply using `presentingComplaint`. You should also use `characterSummary` to impersonate the patient. `levelOfUnderstanding` is between 0 and 10 and represent the level of the patient in medicine. A low number indicate somebody who is not familiar with medical terminology.
 
-Send only one reply
-
-Doctor: {user_input}
-Patient:
+Send only one reply
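
For reference, a short sketch of how the trimmed template is used: the persona goes into the system message via SystemMessagePromptTemplate, while each doctor turn is supplied separately through the "Doctor: {text}" human template, so the prompt file no longer needs the trailing Doctor/Patient lines. The patient dict below is a made-up example record; the real values come from data/patients.json.

from pathlib import Path

from langchain.prompts import (HumanMessagePromptTemplate, PromptTemplate,
                               SystemMessagePromptTemplate)

system_message_prompt = SystemMessagePromptTemplate(
    prompt=PromptTemplate(
        input_variables=["patient"],
        template=Path("prompts/patient.prompt").read_text(),
    )
)
human_message_prompt = HumanMessagePromptTemplate.from_template("Doctor: {text}")

# Hypothetical patient record standing in for an entry of data/patients.json.
patient = {
    "name": "Jane Doe",
    "presentingComplaint": "persistent cough for two weeks",
    "characterSummary": "anxious, talkative",
    "levelOfUnderstanding": 2,
}

messages = [
    system_message_prompt.format(patient=patient),             # SystemMessage with the persona
    human_message_prompt.format(text="What brings you in?"),   # HumanMessage for this turn
]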