REALME5-pro committed
Commit 0ccf754 · verified · 1 Parent(s): bdb0a6e

Update app.py

Files changed (1)
  1. app.py  (+16 -16)
app.py CHANGED
@@ -31,26 +31,26 @@ def classify_psychiatric_text(txt):
     probabilities = torch.softmax(logits, dim=1).squeeze().tolist()
     return dict(zip(psychiatric_labels, probabilities))
 
-# Load BlenderBot for Lifestyle and Nutrition Chatbot
-blender_model_name = "facebook/blenderbot-3B" # Pre-trained BlenderBot 3B model
-blender_tokenizer = BlenderbotTokenizer.from_pretrained(blender_model_name)
-blender_model = BlenderbotForConditionalGeneration.from_pretrained(blender_model_name)
+# # Load BlenderBot for Lifestyle and Nutrition Chatbot
+# blender_model_name = "facebook/blenderbot-3B" # Pre-trained BlenderBot 3B model
+# blender_tokenizer = BlenderbotTokenizer.from_pretrained(blender_model_name)
+# blender_model = BlenderbotForConditionalGeneration.from_pretrained(blender_model_name)
 
 # Chat function for Lifestyle and Nutrition
 chat_history = []
 
-def chatbot_response(user_input):
-    global chat_history
-    new_input_ids = blender_tokenizer.encode(user_input + blender_tokenizer.eos_token, return_tensors='pt')
-    bot_input_ids = torch.cat([chat_history, new_input_ids], dim=-1) if chat_history else new_input_ids
-    chat_history = blender_model.generate(bot_input_ids, max_length=1000, pad_token_id=blender_tokenizer.eos_token_id)
-    response = blender_tokenizer.decode(chat_history[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)
-    return response
-
-def clear_chat():
-    global chat_history
-    chat_history = []
-    return []
+# def chatbot_response(user_input):
+#     global chat_history
+#     new_input_ids = blender_tokenizer.encode(user_input + blender_tokenizer.eos_token, return_tensors='pt')
+#     bot_input_ids = torch.cat([chat_history, new_input_ids], dim=-1) if chat_history else new_input_ids
+#     chat_history = blender_model.generate(bot_input_ids, max_length=1000, pad_token_id=blender_tokenizer.eos_token_id)
+#     response = blender_tokenizer.decode(chat_history[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)
+#     return response
+
+# def clear_chat():
+#     global chat_history
+#     chat_history = []
+#     return []
 
 # Gradio Interfaces
 medical_text = gr.Textbox(lines=2, label='Describe your symptoms in detail')
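
Side note on the code being commented out: it follows a DialoGPT-style pattern (concatenating token IDs across turns and slicing the generated output), which does not match BlenderBot's encoder-decoder interface, where generate() returns only the reply; chat_history is also initialised as a list but later treated as a tensor. A minimal sketch of an equivalent chatbot is shown below. It is not part of the commit, and it assumes the smaller facebook/blenderbot-400M-distill checkpoint in place of the 3B model, plus a simplified plain-string history join.

from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration

# Assumption: the 400M distilled checkpoint stands in for facebook/blenderbot-3B used in the diff.
model_name = "facebook/blenderbot-400M-distill"
tokenizer = BlenderbotTokenizer.from_pretrained(model_name)
model = BlenderbotForConditionalGeneration.from_pretrained(model_name)

chat_history = []  # list of (user, bot) turns kept as plain strings


def chatbot_response(user_input):
    # BlenderBot is seq2seq: feed the recent conversation as encoder text;
    # generate() then returns only the new reply, so no prompt slicing is needed.
    context = " ".join(f"{user} {bot}" for user, bot in chat_history[-3:])
    prompt = f"{context} {user_input}".strip()
    inputs = tokenizer([prompt], return_tensors="pt", truncation=True)
    reply_ids = model.generate(**inputs, max_new_tokens=128)
    response = tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0]
    chat_history.append((user_input, response))
    return response


def clear_chat():
    # Mutate in place so no global rebinding is required.
    chat_history.clear()
    return []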