aditijuluri committed
Commit 26f0f1e · verified · 1 Parent(s): 7cd192c

Update app.py

Files changed (1)
  1. app.py +64 -67
app.py CHANGED
@@ -1,79 +1,76 @@
-from huggingface_hub import InferenceClient
 from sentence_transformers import SentenceTransformer
 import torch
 import gradio as gr
-
-# Initialize the Hugging Face Inference Client
 client = InferenceClient("Qwen/Qwen2.5-72B-Instruct")
-
-# Step 1: Load and preprocess the context file
 with open("reconext_file.txt", "r", encoding="utf-8") as file:
-    reconext_file_text = file.read()
-
 def preprocess_text(text):
-    cleaned_text = text.strip()
-    chunks = cleaned_text.split("\n")
-    cleaned_chunks = [chunk.strip() for chunk in chunks if len(chunk.strip()) > 0]
-    return cleaned_chunks
-
-cleaned_chunks = preprocess_text(reconext_file_text)
-
-# Step 2: Create embeddings for the text chunks
 model = SentenceTransformer('all-MiniLM-L6-v2')
-
 def create_embeddings(text_chunks):
-    chunk_embeddings = model.encode(text_chunks, convert_to_tensor=True)
-    return chunk_embeddings
-
-chunk_embeddings = create_embeddings(cleaned_chunks)
-
-# Step 3: Semantic search to get relevant chunks for a user query
 def get_top_chunks(query, chunk_embeddings, text_chunks):
-    query_embedding = model.encode(query, convert_to_tensor=True)
-    query_embedding_normalized = query_embedding / query_embedding.norm()
-    chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
-    similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)
-    top_indices = torch.topk(similarities, k=3).indices
-    top_chunks = [text_chunks[i] for i in top_indices]
-    return top_chunks
-
-# Step 4: Generate a response using the Hugging Face model
-def respond(message, history):
-    best_next_watch = get_top_chunks(message, chunk_embeddings, cleaned_chunks)
-    str_watch_chunks = "\n".join(best_next_watch)
-
-    # Build messages for prompt
-    messages = [
-        {
-            "role": "system",
-            "content": (
-                "You are a gen-z helpful chatbot that helps teenagers find their next best watch. "
-                "Speak in gen-z terms and be natural. Answer the user's question based on:\n" + str_watch_chunks
-            )
-        }
-    ]
-    if history:
-        messages.extend(history)
-
-    messages.append({"role": "user", "content": message})
-
-    response = client.chat_completion(
-        messages, max_tokens=300, temperature=1.3, top_p=0.6
-    )
-    return response['choices'][0]['message']['content'].strip()
-
-# Step 5: Create the Gradio interface
-with gr.Blocks() as demo:
-    chatbot = gr.Chatbot()
-    msg = gr.Textbox(placeholder="Ask me what to watch...", label="Your Message")
-    state = gr.State([])  # Track conversation history
-
-    # Initial assistant message
-    def startup():
-        greeting = (
-            "hey there! its RecoNext, your favorite binge buddy! get started by telling me you age, preferred genre, and past tv shows or movies you have liked!"
-        )
-        return [("", greeting)], [{"role": "assistant", "content": greeting}]
 def respond(message, history):
     best_next_watch = get_top_chunks(message, chunk_embeddings, cleaned_chunks)
     print(best_next_watch)
 
+from huggingface_hub import InferenceClient
+#step 1 from semantic search
 from sentence_transformers import SentenceTransformer
 import torch
 import gradio as gr
+import random
 client = InferenceClient("Qwen/Qwen2.5-72B-Instruct")
+#step 2 from semantic search read file
+# Open the reconext_file.txt file in read mode with UTF-8 encoding
 with open("reconext_file.txt", "r", encoding="utf-8") as file:
+    # Read the entire contents of the file and store it in a variable
+    reconext_file_text = file.read()
+# Print the text below
+print(reconext_file_text)
+#step 3 from semantic search
 def preprocess_text(text):
+    # Strip extra whitespace from the beginning and the end of the text
+    cleaned_text = text.strip()
+    # Split the cleaned_text by every newline character (\n)
+    chunks = cleaned_text.split("\n")
+    # Create an empty list to store cleaned chunks
+    cleaned_chunks = []
+    # Write your for-in loop below to clean each chunk and add it to the cleaned_chunks list
+    for chunk in chunks:
+        clean_chunk = chunk.strip()
+        if len(clean_chunk) > 0:
+            cleaned_chunks.append(clean_chunk)
+    # Print cleaned_chunks
+    print(cleaned_chunks)
+    # Print the length of cleaned_chunks
+    print(len(cleaned_chunks))
+    # Return the cleaned_chunks
+    return cleaned_chunks
+# Call the preprocess_text function and store the result in a cleaned_chunks variable
+cleaned_chunks = preprocess_text(reconext_file_text) # Complete this line
+#step 4 from semantic search
+# Load the pre-trained embedding model that converts text to vectors
 model = SentenceTransformer('all-MiniLM-L6-v2')
 def create_embeddings(text_chunks):
+    # Convert each text chunk into a vector embedding and store as a tensor
+    chunk_embeddings = model.encode(text_chunks, convert_to_tensor=True) # Replace ... with the text_chunks list
+    # Print the chunk embeddings
+    print(chunk_embeddings)
+    # Print the shape of chunk_embeddings
+    print(chunk_embeddings.shape)
+    # Return the chunk_embeddings
+    return chunk_embeddings
+# Call the create_embeddings function and store the result in a new chunk_embeddings variable
+chunk_embeddings = create_embeddings(cleaned_chunks) # Complete this line
+#step 5 from semantic search
+# Define a function to find the most relevant text chunks for a given query, chunk_embeddings, and text_chunks
 def get_top_chunks(query, chunk_embeddings, text_chunks):
+    # Convert the query text into a vector embedding
+    query_embedding = model.encode(query, convert_to_tensor=True) # Complete this line
+    # Normalize the query embedding to unit length for accurate similarity comparison
+    query_embedding_normalized = query_embedding / query_embedding.norm()
+    # Normalize all chunk embeddings to unit length for consistent comparison
+    chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
+    # Calculate cosine similarity between query and all chunks using matrix multiplication
+    similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized) # Complete this line
+    # Print the similarities
+    print(similarities)
+    # Find the indices of the 3 chunks with highest similarity scores
+    top_indices = torch.topk(similarities, k=3).indices
+    # Print the top indices
+    print(top_indices)
+    # Create an empty list to store the most relevant chunks
+    top_chunks = []
+    # Loop through the top indices and retrieve the corresponding text chunks
+    for i in top_indices:
+        top_chunks.append(text_chunks[i])
+    # Return the list of most relevant chunks
+    return top_chunks
 def respond(message, history):
     best_next_watch = get_top_chunks(message, chunk_embeddings, cleaned_chunks)
     print(best_next_watch)