Update app.py
app.py
CHANGED
@@ -36,18 +36,72 @@ def preprocess_text(text):
     # Print the length of cleaned_chunks
     len(cleaned_chunks)
 
     # Return the cleaned_chunks
     return cleaned_chunks
 
 # Call the preprocess_text function and store the result in a cleaned_chunks variable
 cleaned_chunks = preprocess_text(safe_restaurant)
 
+# Step 4 from semantic search (embed chunks)
+# Load the pre-trained embedding model that converts text to vectors
+model = SentenceTransformer('all-MiniLM-L6-v2')
+
+def create_embeddings(text_chunks):
+    # Convert each text chunk into a vector embedding and store as a tensor
+    chunk_embeddings = model.encode(text_chunks, convert_to_tensor=True)
+
+    # Print the chunk embeddings
+    print(chunk_embeddings)
+
+    # Print the shape of chunk_embeddings
+    print(chunk_embeddings.shape)
+
+    # Return the chunk_embeddings
+    return chunk_embeddings
+
+# Call the create_embeddings function and store the result in a new chunk_embeddings variable
+chunk_embeddings = create_embeddings(cleaned_chunks)
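The embedding step relies on sentence-transformers returning one fixed-size vector per input string; with convert_to_tensor=True the result is a single PyTorch tensor of shape (num_chunks, 384), since all-MiniLM-L6-v2 produces 384-dimensional embeddings. A minimal standalone sketch (the chunk strings are invented for illustration):

from sentence_transformers import SentenceTransformer

model = SentenceTransformer('all-MiniLM-L6-v2')
# Invented example chunks; app.py uses the cleaned_chunks built by preprocess_text
chunks = ["Vegan ramen bar in Midtown", "Gluten-free pizzeria near the park"]
embeddings = model.encode(chunks, convert_to_tensor=True)
print(embeddings.shape)  # torch.Size([2, 384])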
+
+# Step 5 from semantic search (find and print the top chunks)
+# Define a function to find the most relevant text chunks for a given query, chunk_embeddings, and text_chunks
+def get_top_chunks(query, chunk_embeddings, text_chunks):
+    # Convert the query text into a vector embedding
+    query_embedding = model.encode(query, convert_to_tensor=True)
+
+    # Normalize the query embedding to unit length for accurate similarity comparison
+    query_embedding_normalized = query_embedding / query_embedding.norm()
+
+    # Normalize all chunk embeddings to unit length for consistent comparison
+    chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
+
+    # Calculate cosine similarity between the query and all chunks using matrix multiplication
+    similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)
+
+    # Print the similarities
+    print(similarities)
+
+    # Find the indices of the 3 chunks with the highest similarity scores
+    top_indices = torch.topk(similarities, k=3).indices
+
+    # Print the top indices
+    print(top_indices)
+
+    # Collect the most relevant chunks in a list
+    top_chunks = []
+    for i in top_indices:
+        top_chunks.append(text_chunks[i])
+
+    # Return the list of most relevant chunks
+    return top_chunks
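Why the normalization: dividing each embedding by its norm makes every vector unit length, so the matrix product of the (num_chunks, dim) chunk matrix with the (dim,) query vector computes the cosine similarity between the query and every chunk in a single operation, and torch.topk then selects the k best matches. A toy example with 2-d vectors:

import torch

chunk_vecs = torch.tensor([[1.0, 0.0], [0.6, 0.8], [0.0, 1.0]])  # three toy "chunk" embeddings
query_vec = torch.tensor([1.0, 0.0])                             # a toy "query" embedding
chunk_norm = chunk_vecs / chunk_vecs.norm(dim=1, keepdim=True)
query_norm = query_vec / query_vec.norm()
sims = torch.matmul(chunk_norm, query_norm)
print(sims)                           # tensor([1.0000, 0.6000, 0.0000])
print(torch.topk(sims, k=2).indices)  # tensor([0, 1])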
 
 def respond(message, history):
     #responses = ["Yes", "No"]
     #return random.choice(responses)
 
-    messages= [{"role": "system", "content": "
+    messages = [{"role": "system", "content": "You are a friendly chatbot that helps people find restaurants that fit their needs/dietary restrictions and location range!"}]
 
+    # Retrieve the chunks most relevant to the user's message
+    best_restaurant_chunks = get_top_chunks(message, chunk_embeddings, cleaned_chunks)
+    print(best_restaurant_chunks)
 
     if history:
         messages.extend(history)
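This hunk retrieves best_restaurant_chunks but ends before showing how they reach the model, and messages.extend(history) assumes history already arrives as a list of {"role": ..., "content": ...} dicts. One common pattern, sketched here purely as an assumption rather than as what this commit does, is to fold the retrieved chunks into the system prompt and append the user's turn:

def build_messages(message, history, best_restaurant_chunks):
    # Hypothetical helper, not part of this commit
    context = "\n".join(best_restaurant_chunks)
    messages = [{"role": "system",
                 "content": "You are a friendly chatbot that helps people find restaurants. "
                            "Use this context when answering:\n" + context}]
    if history:
        messages.extend(history)
    messages.append({"role": "user", "content": message})
    return messages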