riyamalshe committed
Commit d00a5c1 · verified · 1 Parent(s): ef44b58

Added code from semantic search colab

Files changed (1): app.py +111 -0
app.py CHANGED
@@ -1,6 +1,117 @@
  import gradio as gr
  from huggingface_hub import InferenceClient
 
+ # STEP 1 FROM SEMANTIC SEARCH
+ from sentence_transformers import SentenceTransformer
+ import torch
+
+ # STEP 2 FROM SEMANTIC SEARCH
+ # Open the water_cycle.txt file in read mode with UTF-8 encoding
+ with open("water_cycle.txt", "r", encoding="utf-8") as file:
+     # Read the entire contents of the file and store it in a variable
+     water_cycle_text = file.read()
+
+ # Print the text
+ print(water_cycle_text)
+
+
+ # STEP 3 FROM SEMANTIC SEARCH
+ def preprocess_text(text):
+     # Strip extra whitespace from the beginning and the end of the text
+     cleaned_text = text.strip()
+
+     # Split the cleaned_text at every newline character (\n)
+     chunks = cleaned_text.split("\n")
+
+     # Create an empty list to store cleaned chunks
+     cleaned_chunks = []
+
+     # Strip each chunk and keep only the non-empty ones
+     for chunk in chunks:
+         chunk = chunk.strip()
+         if chunk != "":
+             cleaned_chunks.append(chunk)
+
+     # Print cleaned_chunks and its length
+     print(cleaned_chunks)
+     print(len(cleaned_chunks))
+
+     # Return the cleaned_chunks
+     return cleaned_chunks
+
+ # Call the preprocess_text function and store the result in a cleaned_chunks variable
+ cleaned_chunks = preprocess_text(water_cycle_text)
+
+ # STEP 4 FROM SEMANTIC SEARCH
+ # Load the pre-trained embedding model that converts text to vectors
+ model = SentenceTransformer('all-MiniLM-L6-v2')
+
+ def create_embeddings(text_chunks):
+     # Convert each text chunk into a vector embedding and store as a tensor
+     chunk_embeddings = model.encode(text_chunks, convert_to_tensor=True)
+
+     # Print the chunk embeddings and their shape
+     print(chunk_embeddings)
+     print(chunk_embeddings.shape)
+
+     # Return the chunk_embeddings
+     return chunk_embeddings
+
+ # Call the create_embeddings function and store the result in a new chunk_embeddings variable
+ chunk_embeddings = create_embeddings(cleaned_chunks)
+
+ # STEP 5 FROM SEMANTIC SEARCH
+ # Find the most relevant text chunks for a given query
+ def get_top_chunks(query, chunk_embeddings, text_chunks):
+     # Convert the query text into a vector embedding
+     query_embedding = model.encode(query, convert_to_tensor=True)
+
+     # Normalize the query embedding to unit length for accurate similarity comparison
+     query_embedding_normalized = query_embedding / query_embedding.norm()
+
+     # Normalize all chunk embeddings to unit length for consistent comparison
+     chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
+
+     # Calculate cosine similarity between the query and all chunks using matrix multiplication
+     similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)
+
+     # Print the similarities
+     print(similarities)
+
+     # Find the indices of the 3 chunks with the highest similarity scores
+     top_indices = torch.topk(similarities, k=3).indices
+
+     # Print the top indices
+     print(top_indices)
+
+     # Create an empty list to store the most relevant chunks
+     top_chunks = []
+
+     # Loop through the top indices and retrieve the corresponding text chunks
+     for i in top_indices:
+         top_chunks.append(text_chunks[i])
+
+     # Return the list of most relevant chunks
+     return top_chunks
+
+ print(get_top_chunks("How are you", chunk_embeddings, cleaned_chunks))
+
+ # STEP 6 FROM SEMANTIC SEARCH
+ # Call the get_top_chunks function with the original query
+ top_results = get_top_chunks("How does water get into the sky?", chunk_embeddings, cleaned_chunks)
+
+ # Print the top results
+ print(top_results)
+
+
  client = InferenceClient("Qwen/Qwen2.5-72B-Instruct")
 
  def respond(message, history):
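
The diff stops at the respond stub, so the retrieved chunks are not yet wired into the chat handler. Below is a minimal sketch of how that could look; the system-prompt wording, the max_tokens value, and the use of chat_completion are assumptions for illustration, not part of this commit. Note also that the normalize-then-matmul in get_top_chunks computes exactly cosine similarity, so sentence_transformers.util.cos_sim would produce the same scores.

# A minimal sketch (assumed, not from this commit) of a retrieval-augmented respond:
def respond(message, history):
    # Retrieve the three chunks most similar to the user's message
    top_chunks = get_top_chunks(message, chunk_embeddings, cleaned_chunks)
    context = "\n".join(top_chunks)

    # Ask the Qwen model to answer using the retrieved context
    messages = [
        {"role": "system", "content": f"Answer using this context:\n{context}"},
        {"role": "user", "content": message},
    ]
    response = client.chat_completion(messages, max_tokens=512)  # max_tokens is an assumed value
    return response.choices[0].message.content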