shekhardhangar committed
Commit 9afdfd9 · verified · Parent: 67c156c

Create app.py

Files changed (1)
  app.py +182 -0
app.py ADDED
@@ -0,0 +1,182 @@
+import gradio as gr
+import torch
+import transformers
+from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
+from peft import PeftConfig, PeftModel
+import warnings
+from threading import Thread
+
+warnings.filterwarnings("ignore")
+
+PEFT_MODEL = "givyboy/phi-2-finetuned-mental-health-conversational"
+
+SYSTEM_PROMPT = """Answer the following question truthfully.
+If you don't know the answer, respond 'Sorry, I don't know the answer to this question.'.
+If the question is too complex, respond 'Kindly, consult a psychiatrist for further queries.'."""
+
+USER_PROMPT = lambda x: f"""<HUMAN>: {x}\n<ASSISTANT>: """
+ADD_RESPONSE = lambda x, y: f"""<HUMAN>: {x}\n<ASSISTANT>: {y}"""
+DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+# 4-bit NF4 quantization config (currently unused; quantization_config is commented out below).
+bnb_config = BitsAndBytesConfig(
+    load_in_4bit=True,
+    bnb_4bit_quant_type="nf4",
+    bnb_4bit_use_double_quant=True,
+    bnb_4bit_compute_dtype=torch.float16,
+)
+
+config = PeftConfig.from_pretrained(PEFT_MODEL)
+
+# Load the base model, then attach the fine-tuned PEFT (LoRA) adapter on top of it.
+peft_base_model = AutoModelForCausalLM.from_pretrained(
+    config.base_model_name_or_path,
+    return_dict=True,
+    # quantization_config=bnb_config,
+    device_map="auto",
+    trust_remote_code=True,
+    offload_folder="offload/",
+    offload_state_dict=True,
+)
+
+peft_model = PeftModel.from_pretrained(
+    peft_base_model,
+    PEFT_MODEL,
+    offload_folder="offload/",
+    offload_state_dict=True,
+)
+peft_model = peft_model.to(DEVICE)
+
+peft_tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
+peft_tokenizer.pad_token = peft_tokenizer.eos_token
+
+pipeline = transformers.pipeline(
+    "text-generation",
+    model=peft_model,
+    tokenizer=peft_tokenizer,
+    torch_dtype=torch.bfloat16,
+    trust_remote_code=True,
+    device_map="auto",
+)
+
+
+# def format_message(message: str, history: list[str], memory_limit: int = 3) -> str:
+#     if len(history) > memory_limit:
+#         history = history[-memory_limit:]
+
+#     if len(history) == 0:
+#         return f"{SYSTEM_PROMPT}\n{USER_PROMPT(message)}"
+
+#     formatted_message = f"{SYSTEM_PROMPT}\n{ADD_RESPONSE(history[0][0], history[0][1])}"
+
+#     for msg, ans in history[1:]:
+#         formatted_message += f"\n{ADD_RESPONSE(msg, ans)}"
+
+#     formatted_message += f"\n{USER_PROMPT(message)}"
+#     return formatted_message
+
+
+# def get_model_response(message: str, history: list[str]) -> str:
+#     formatted_message = format_message(message, history)
+#     sequences = pipeline(
+#         formatted_message,
+#         do_sample=True,
+#         top_k=10,
+#         num_return_sequences=1,
+#         eos_token_id=peft_tokenizer.eos_token_id,
+#         max_length=600,
+#     )[0]
+#     print(sequences["generated_text"])
+#     output = sequences["generated_text"].split("<ASSISTANT>:")[-1].strip()
+#     # print(f"Response: {output}")
+#     return output
+
+
+start_message = ""
+
+
+def user(message, history):
+    # Append the user's message to the conversation history
+    return "", history + [[message, ""]]
+
+
+def chat(message, history):
+    # Rebuild the conversation as a list of role/content messages for the chat template.
+    chat_history = []
+    for item in history:
+        chat_history.append({"role": "user", "content": item[0]})
+        if item[1] is not None:
+            chat_history.append({"role": "assistant", "content": item[1]})
+
+    message = f"{SYSTEM_PROMPT}\n{USER_PROMPT(message)}"
+    chat_history.append({"role": "user", "content": message})
+    messages = peft_tokenizer.apply_chat_template(chat_history, tokenize=False, add_generation_prompt=True)
+
+    # Tokenize the messages string
+    model_inputs = peft_tokenizer([messages], return_tensors="pt").to(DEVICE)
+    streamer = transformers.TextIteratorStreamer(
+        peft_tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True
+    )
+    generate_kwargs = dict(
+        model_inputs,
+        streamer=streamer,
+        max_new_tokens=1024,
+        do_sample=True,
+        top_p=0.95,
+        top_k=1000,
+        temperature=0.75,
+        num_beams=1,
+    )
+    # Run generation on a background thread so tokens can be streamed as they arrive.
+    t = Thread(target=peft_model.generate, kwargs=generate_kwargs)
+    t.start()
+
+    # Initialize an empty string to store the generated text
+    partial_text = ""
+    for new_text in streamer:
+        # print(new_text)
+        partial_text += new_text
+        # Yield the text accumulated so far so the chat window updates incrementally.
+        yield partial_text
+
+
+chat = gr.ChatInterface(fn=chat, title="Mental Health Chatbot - by Jayda Hunte")
+chat.launch(share=True)
+
+# import os
+# from openai import OpenAI
+# from dotenv import load_dotenv
+# import gradio as gr
+
+# load_dotenv()
+# API_KEY = os.getenv("OPENAI_API_KEY")
+# openai = OpenAI(api_key=API_KEY)
+
+# create_msg = lambda x, y: {"role": x, "content": y}
+
+# SYSTEM_PROMPT = create_msg(
+#     "system",
+#     """You are a helpful mental health chatbot, please answer with care. If you don't know the answer, respond 'Sorry, I don't know the answer to this question.'. If the question is too complex, respond 'Kindly, consult a psychiatrist for further queries.'.""".strip(),
+# )
+
+
+# def predict(message, history):
+#     history_openai_format = []
+#     history_openai_format.append(SYSTEM_PROMPT)
+#     for human, assistant in history:
+#         history_openai_format.append({"role": "user", "content": human})
+#         history_openai_format.append({"role": "assistant", "content": assistant})
+#     history_openai_format.append({"role": "user", "content": message})
+
+#     response = openai.chat.completions.create(
+#         model="ft:gpt-3.5-turbo-0613:personal::8kBTG8eh", messages=history_openai_format, temperature=0.35, stream=True
+#     )
+
+#     partial_message = ""
+#     for chunk in response:
+#         if chunk.choices[0].delta.content is not None:
+#             partial_message = partial_message + chunk.choices[0].delta.content
+#             yield partial_message
+
+
+# gr.ChatInterface(fn=predict, title="Mental Health Chatbot").launch(share=True)
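
Note: this commit ships no requirements file. A minimal sketch of the dependencies implied by the imports above might look like the following; the exact package list and the notes are assumptions, not part of the commit:

# requirements.txt — assumed sketch, not included in this commit
gradio
torch
transformers
peft
accelerate      # needed by transformers when device_map="auto" is used
bitsandbytes    # only if the commented-out 4-bit quantization_config is re-enabled
einops          # may be required by the phi-2 remote code (trust_remote_code=True)
openai          # only for the commented-out OpenAI variant
python-dotenv   # only for the commented-out OpenAI variant

Streaming works because gr.ChatInterface accepts a generator fn: each yield of the progressively longer partial_text re-renders the assistant message in place.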