Update app.py
app.py CHANGED
@@ -4,9 +4,11 @@ import torch
 
 # Load your fine-tuned model and tokenizer from the Hugging Face Hub or local files
 model_name = "Izza-shahzad-13/fine-tuned-flan-t5"
-# Replace with your Hugging Face
-
-model
+access_token = "" # Replace with your Hugging Face token
+
+# Load model and tokenizer with access token
+tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=access_token)
+model = AutoModelForSeq2SeqLM.from_pretrained(model_name, use_auth_token=access_token)
 
 # Set device
 device = "cuda" if torch.cuda.is_available() else "cpu"
@@ -37,4 +39,4 @@ st.write("Type your thoughts or feelings, and let the model respond.")
 user_input = st.text_input("How are you feeling today?")
 if user_input:
     response = generate_response(user_input)
-    st.write("Model Response:", response)
+    st.write("Model Response:", response)
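For context, here is a minimal sketch of how the changed lines fit into the rest of app.py. Only the lines visible in the diff come from the commit itself; the imports, the generate_response body, and the generation settings (such as max_new_tokens) are assumptions filled in for illustration.

import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load your fine-tuned model and tokenizer from the Hugging Face Hub or local files
model_name = "Izza-shahzad-13/fine-tuned-flan-t5"
access_token = "" # Replace with your Hugging Face token

# Load model and tokenizer with access token (as in the diff above)
tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=access_token)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name, use_auth_token=access_token)

# Set device
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

def generate_response(prompt: str) -> str:
    # Tokenize the prompt and move the tensors to the model's device
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    # Generate a reply; max_new_tokens is an assumed setting, not from the commit
    outputs = model.generate(**inputs, max_new_tokens=128)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

st.write("Type your thoughts or feelings, and let the model respond.")
user_input = st.text_input("How are you feeling today?")
if user_input:
    response = generate_response(user_input)
    st.write("Model Response:", response)

Two notes on the token handling shown in the diff: recent transformers releases deprecate the use_auth_token argument in favor of token=, so newer versions may emit a warning here, and rather than committing the token as a string literal it can be read at runtime, for example from an environment variable via os.environ.get("HF_TOKEN") or from the Space's secrets.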