Update app.py
app.py CHANGED
@@ -42,6 +42,38 @@ def format_prompt(message, history):
     return prompt
 
 def generate_response(prompt, history, temperature=0.9, max_new_tokens=1024, top_p=0.95, repetition_penalty=1.0):
+    temperature = float(temperature)
+    if temperature < 1e-2:
+        temperature = 1e-2
+    top_p = float(top_p)
+
+    generate_kwargs = dict(
+        temperature=temperature,
+        max_new_tokens=max_new_tokens,
+        top_p=top_p,
+        repetition_penalty=repetition_penalty,
+        do_sample=True,
+        seed=42,
+    )
+    runtimeFlag = "cuda:0"
+    formatted_prompt = format_prompt(f"{prompt}", history)
+    inputs = tokenizer([formatted_prompt], return_tensors="pt").to(runtimeFlag)
+
+# UI design
+examples=[
+    ["Patient is feeling stressed due to work and has trouble sleeping.", None, None, None, None, None],
+    ["Client is dealing with relationship issues and is seeking advice on communication strategies.", None, None, None, None, None],
+    ["Individual has recently experienced a loss and is having difficulty coping with grief.", None, None, None, None, None],
+]
+
+gr.ChatInterface(
+    fn=generate,
+    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
+    title="Psychological Assistant: Expert in Assessment and Strategic Planning",
+    description="Enter counseling notes to generate an assessment and plan.",
+    examples=examples,
+    concurrency_limit=20,
+).launch(show_api=False, debug=True)
 
 
 '''
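Note that the hunk above prepares `generate_kwargs` and the tokenized `inputs` but stops short of an actual generation call, and `gr.ChatInterface(fn=generate, ...)` points at a `generate` callable that is not part of this diff. A minimal sketch of how those pieces could be wired together is shown below; it assumes a locally loaded `model` and `tokenizer` (as the `tokenizer(...)` and `cuda:0` lines suggest) and applies the seed with `torch.manual_seed`, since `model.generate()` does not accept a `seed` keyword.

import torch

def generate(prompt, history, temperature=0.9, max_new_tokens=1024, top_p=0.95, repetition_penalty=1.0):
    # Same clamping and sampling kwargs as generate_response in the hunk above
    temperature = max(float(temperature), 1e-2)
    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=float(top_p),
        repetition_penalty=repetition_penalty,
        do_sample=True,
    )
    torch.manual_seed(42)  # seed globally; generate() itself has no seed kwarg
    formatted_prompt = format_prompt(f"{prompt}", history)
    inputs = tokenizer([formatted_prompt], return_tensors="pt").to("cuda:0")
    output_ids = model.generate(**inputs, **generate_kwargs)
    # Return only the newly generated text, not the echoed prompt
    new_tokens = output_ids[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)

With a `generate` like this in scope, each six-element row in `examples` appears shaped to fill the chat message plus five `additional_inputs` (presumably the sampling controls), which would have to be declared elsewhere in app.py; that part is not visible in this diff.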