from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
import os
import shutil

# Ensure correct model and tokenizer initialization
model_name = "kalyani2599/emotional_support_bot"

# Clear the Hugging Face cache (optional, but useful if cached files are corrupted)
cache_dir = os.path.join(os.path.expanduser("~"), ".cache", "huggingface")
if os.path.exists(cache_dir):
    for entry in os.listdir(cache_dir):
        entry_path = os.path.join(cache_dir, entry)
        # The cache contains both files and directories, so handle each case;
        # os.remove() alone would raise an error on subdirectories
        if os.path.isdir(entry_path):
            shutil.rmtree(entry_path)
        else:
            os.remove(entry_path)
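
# A gentler alternative (a sketch, assuming you only want fresh copies of this
# one model rather than a wiped cache): pass force_download=True to
# from_pretrained, which re-downloads this model's files and leaves the rest
# of the cache untouched:
#
#     model = AutoModelForSeq2SeqLM.from_pretrained(model_name, force_download=True)
#     tokenizer = AutoTokenizer.from_pretrained(model_name, force_download=True)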

# Load model and tokenizer
try:
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)  # Using the fast tokenizer version
except Exception as e:
    print(f"Error loading model or tokenizer: {e}")
    # If the model/tokenizer fails to load, try a different one
    model_name = "facebook/blenderbot-3B"  # Example fallback model
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
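
# Optional sketch: run on a GPU when one is available (assumes torch is
# installed, which it is when using transformers with PyTorch models):
#
#     import torch
#     device = "cuda" if torch.cuda.is_available() else "cpu"
#     model.to(device)
#
# The tokenized inputs in chatbot_response would then also need .to(device).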

def chatbot_response(input_text):
    try:
        # Tokenize the input, generate a reply, and decode it back to text
        inputs = tokenizer(input_text, return_tensors="pt")
        outputs = model.generate(**inputs, max_length=100)
        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
        return response
    except Exception as e:
        return f"Error in generating response: {e}"

# Example chatbot interaction
input_text = "Hello, how are you?"
response = chatbot_response(input_text)
print(response)
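
# Optional: a minimal interactive loop built on the same helper
# (a sketch; type "quit" to exit)
while True:
    user_input = input("You: ")
    if user_input.strip().lower() == "quit":
        break
    print("Bot:", chatbot_response(user_input))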