Update app.py
app.py CHANGED
@@ -3,7 +3,7 @@ import torch
 from sentence_transformers import SentenceTransformer, util
 import gradio as gr
 import json
-from transformers import AutoTokenizer,
+from transformers import AutoTokenizer, AutoModelForCausalLM, AutoModelForSequenceClassification
 import spaces
 
 # Ensure you have GPU support
@@ -20,11 +20,11 @@ embeddings = torch.tensor(df['embedding'].tolist(), device=device)
 model = SentenceTransformer('all-MiniLM-L6-v2', device=device)
 
 # Load the ai model for response generation
-tokenizer = AutoTokenizer.from_pretrained("
-model_response =
+tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2-medium")
+model_response = AutoModelForCausalLM.from_pretrained("openai-community/gpt2-medium").to(device)
 
 # Load the NLU model for intent detection
-nlu_model = AutoModelForSequenceClassification.from_pretrained("
+nlu_model = AutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased-finetuned-sst-2-english").to(device)
 
 # Define the function to find the most relevant document
 @spaces.GPU(duration=120)
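The diff ends at the @spaces.GPU(duration=120) decorator, so the body of the retrieval and response code is not shown. The following is a minimal sketch of how the objects loaded above might be wired together; it assumes the df, embeddings, model, tokenizer, model_response, and device names defined earlier in app.py, and the function names, the 'text' column, and the prompt format are illustrative guesses rather than the Space's actual code.

import torch
from sentence_transformers import util
import spaces

@spaces.GPU(duration=120)
def find_relevant_document(query):
    # Embed the query with the same SentenceTransformer used for the corpus
    query_embedding = model.encode(query, convert_to_tensor=True).to(device)
    # Cosine similarity against the precomputed document embeddings
    scores = util.cos_sim(query_embedding, embeddings)[0]
    best_idx = int(torch.argmax(scores))
    # 'text' column name is an assumption about the DataFrame layout
    return df.iloc[best_idx]['text'], float(scores[best_idx])

@spaces.GPU(duration=120)
def generate_response(query):
    # Retrieve the most relevant document and use it as context for GPT-2
    context, _ = find_relevant_document(query)
    prompt = f"Context: {context}\nQuestion: {query}\nAnswer:"
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    output = model_response.generate(
        **inputs,
        max_new_tokens=100,
        pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no pad token
    )
    return tokenizer.decode(output[0], skip_special_tokens=True)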