kalyani2599 committed on
Commit
dbaec39
·
verified ·
1 Parent(s): d3cb7f6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +31 -16
app.py CHANGED
@@ -1,22 +1,37 @@
1
- import streamlit as st
2
  from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
 
3
 
4
- # Load model and tokenizer
5
  model_name = "kalyani2599/emotional_support_bot"
6
- model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
7
- tokenizer = AutoTokenizer.from_pretrained(model_name)
8
 
9
- def chatbot_response(input_text):
10
- inputs = tokenizer(input_text, return_tensors="pt")
11
- outputs = model.generate(**inputs, max_length=100)
12
- response = tokenizer.decode(outputs[0], skip_special_tokens=True)
13
- return response
 
14
 
15
- # Streamlit UI
16
- st.title("Emotional Support Chatbot")
17
- st.write("Chat with the emotional support bot below:")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18
 
19
- user_input = st.text_input("Your Message")
20
- if user_input:
21
- response = chatbot_response(user_input)
22
- st.text_area("Chatbot Response", response, height=150)
 
 
1
  from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
2
+ import os
3
 
4
+ # Ensure correct model and tokenizer initialization
5
  model_name = "kalyani2599/emotional_support_bot"
 
 
6
 
7
+ # Clear Hugging Face cache (optional but useful if there are issues with cached files)
8
+ cache_dir = os.path.join(os.path.expanduser("~"), ".cache", "huggingface")
9
+ if os.path.exists(cache_dir):
10
+ for file in os.listdir(cache_dir):
11
+ file_path = os.path.join(cache_dir, file)
12
+ os.remove(file_path)
13
 
14
+ # Load model and tokenizer
15
+ try:
16
+ model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
17
+ tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True) # Using the fast tokenizer version
18
+ except Exception as e:
19
+ print(f"Error loading model or tokenizer: {e}")
20
+ # If the model/tokenizer fails to load, try a different one
21
+ model_name = "facebook/blenderbot-3B" # Example fallback model
22
+ model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
23
+ tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
24
+
25
+ def chatbot_response(input_text):
26
+ try:
27
+ inputs = tokenizer(input_text, return_tensors="pt")
28
+ outputs = model.generate(**inputs, max_length=100)
29
+ response = tokenizer.decode(outputs[0], skip_special_tokens=True)
30
+ return response
31
+ except Exception as e:
32
+ return f"Error in generating response: {e}"
33
 
34
+ # Example chatbot interaction
35
+ input_text = "Hello, how are you?"
36
+ response = chatbot_response(input_text)
37
+ print(response)