REALME5-pro committed on
Commit
dc200c7
·
verified ·
1 Parent(s): ae90b7d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -8
app.py CHANGED
@@ -1,5 +1,5 @@
1
  from fastai.text.all import *
2
- from transformers import AutoModelForCausalLM, AutoTokenizer
3
  import torch
4
  import gradio as gr
5
 
@@ -31,20 +31,20 @@ def classify_psychiatric_text(txt):
31
  probabilities = torch.softmax(logits, dim=1).squeeze().tolist()
32
  return dict(zip(psychiatric_labels, probabilities))
33
 
34
- # Load pre-trained conversational model for Lifestyle and Nutrition Chatbot
35
- lifestyle_model_name = "microsoft/DialoGPT-medium" # Replace with a fine-tuned model if available
36
- lifestyle_tokenizer = AutoTokenizer.from_pretrained(lifestyle_model_name)
37
- lifestyle_model = AutoModelForCausalLM.from_pretrained(lifestyle_model_name)
38
 
39
  # Chat function for Lifestyle and Nutrition
40
  chat_history = []
41
 
42
  def chatbot_response(user_input):
43
  global chat_history
44
- new_input_ids = lifestyle_tokenizer.encode(user_input + lifestyle_tokenizer.eos_token, return_tensors='pt')
45
  bot_input_ids = torch.cat([chat_history, new_input_ids], dim=-1) if chat_history else new_input_ids
46
- chat_history = lifestyle_model.generate(bot_input_ids, max_length=1000, pad_token_id=lifestyle_tokenizer.eos_token_id)
47
- response = lifestyle_tokenizer.decode(chat_history[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)
48
  return response
49
 
50
  def clear_chat():
 
1
  from fastai.text.all import *
2
+ from transformers import BlenderbotForConditionalGeneration, BlenderbotTokenizer
3
  import torch
4
  import gradio as gr
5
 
 
31
  probabilities = torch.softmax(logits, dim=1).squeeze().tolist()
32
  return dict(zip(psychiatric_labels, probabilities))
33
 
34
# --- Lifestyle & Nutrition chatbot: BlenderBot setup ---------------------
# Pre-trained BlenderBot 3B conversational model (downloaded from the Hub).
blender_model_name = "facebook/blenderbot-3B"  # Pre-trained BlenderBot 3B model
blender_tokenizer = BlenderbotTokenizer.from_pretrained(blender_model_name)
blender_model = BlenderbotForConditionalGeneration.from_pretrained(blender_model_name)

# Chat function for Lifestyle and Nutrition
# Running conversation state shared with chatbot_response()/clear_chat().
chat_history = []
41
 
42
def chatbot_response(user_input):
    """Return BlenderBot's reply to *user_input*, keeping conversational context.

    Parameters
    ----------
    user_input : str
        The user's latest message.

    Returns
    -------
    str
        The model's decoded reply, with special tokens stripped.

    Side effects: updates the module-level ``chat_history`` with the
    accumulated token context for the next turn.
    """
    global chat_history
    new_input_ids = blender_tokenizer.encode(
        user_input + blender_tokenizer.eos_token, return_tensors='pt'
    )
    # BUGFIX: `if chat_history` raises RuntimeError once chat_history is a
    # multi-element tensor ("Boolean value of Tensor ... is ambiguous").
    # Test explicitly for a non-empty tensor instead of relying on truthiness.
    if isinstance(chat_history, torch.Tensor) and chat_history.numel() > 0:
        bot_input_ids = torch.cat([chat_history, new_input_ids], dim=-1)
    else:
        bot_input_ids = new_input_ids

    # NOTE(review): max_length=1000 exceeds BlenderBot's 128-token position
    # limit; generate() will cap/warn. Kept for backward compatibility.
    reply_ids = blender_model.generate(
        bot_input_ids, max_length=1000, pad_token_id=blender_tokenizer.eos_token_id
    )

    # BUGFIX: Blenderbot is an encoder-decoder model — generate() returns
    # ONLY the new reply tokens, not input + reply as with DialoGPT. The old
    # `reply_ids[:, bot_input_ids.shape[-1]:]` slice therefore produced an
    # empty/truncated response. Decode the full generated sequence instead.
    response = blender_tokenizer.decode(reply_ids[0], skip_special_tokens=True)

    # Carry both the user turns and the model reply forward as context.
    # TODO(review): confirm whether decoder-side special tokens in reply_ids
    # should be stripped before being reused as encoder context.
    chat_history = torch.cat([bot_input_ids, reply_ids], dim=-1)
    return response
49
 
50
  def clear_chat():