Update app.py
app.py CHANGED
@@ -10,13 +10,15 @@ tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModel.from_pretrained(model_name)
 
 def encode_text(text):
-    inputs = tokenizer(text, return_tensors='pt', padding=True, truncation=True)
+    inputs = tokenizer(text, return_tensors='pt', padding=True, truncation=True, max_length=128)
     outputs = model(**inputs)
+    # Ensure the output is 2D by averaging the last hidden state along the sequence dimension
     return outputs.last_hidden_state.mean(dim=1).detach().numpy()
 
 def find_best_response(user_input, response_pool):
     user_embedding = encode_text(user_input)
     response_embeddings = np.array([encode_text(resp) for resp in response_pool])
+    # Check if response_embeddings need reshaping
     similarities = cosine_similarity(user_embedding, response_embeddings).flatten()
     best_response_index = np.argmax(similarities)
     return response_pool[best_response_index]
@@ -45,3 +47,4 @@ iface = gr.Interface(
 
 # Launch the interface
 iface.launch()
+
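The added comment about reshaping response_embeddings points at a real shape issue: encode_text returns a (1, hidden_size) array per text, so stacking the results with np.array yields an (N, 1, hidden_size) array, which scikit-learn's cosine_similarity rejects because it expects 2-D inputs. Below is a minimal sketch of that reshape, with a random stand-in encoder in place of the real model (the 768 hidden size and this version of find_best_response are assumptions for illustration, not the app's exact code):

import numpy as np
from sklearn.metrics.pairwise import cosine_similarity

HIDDEN_SIZE = 768  # assumed; the actual model's hidden size may differ

def encode_text(text):
    # Stand-in for app.py's encoder: returns a (1, HIDDEN_SIZE) array, matching
    # the shape of outputs.last_hidden_state.mean(dim=1).detach().numpy().
    rng = np.random.default_rng(abs(hash(text)) % (2**32))
    return rng.standard_normal((1, HIDDEN_SIZE))

def find_best_response(user_input, response_pool):
    user_embedding = encode_text(user_input)                                   # (1, H)
    response_embeddings = np.array([encode_text(r) for r in response_pool])    # (N, 1, H)
    # Collapse the singleton axis so cosine_similarity gets a 2-D matrix.
    response_embeddings = response_embeddings.reshape(len(response_pool), -1)  # (N, H)
    similarities = cosine_similarity(user_embedding, response_embeddings).flatten()
    return response_pool[int(np.argmax(similarities))]

print(find_best_response("hello", ["hi there", "goodbye", "what's up"]))

As for the tokenizer change, passing an explicit max_length=128 together with truncation=True caps every input at 128 tokens instead of falling back to the model's maximum length, which keeps encoding time and memory bounded for long user inputs.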