aditijuluri committed on
Commit 731387e · verified · 1 Parent(s): 18f6482

Update app.py

Files changed (1)
  1. app.py +72 -66
app.py CHANGED
@@ -1,88 +1,94 @@
 from huggingface_hub import InferenceClient
+# step 1 from semantic search
 from sentence_transformers import SentenceTransformer
 import torch
 import gradio as gr
 import random
-
-# Load model for chat
 client = InferenceClient("Qwen/Qwen2.5-72B-Instruct")
-
-# Step 2: Load TV show database
+# step 2 from semantic search: read the file
+# Open the reconext_file.txt file in read mode with UTF-8 encoding
 with open("reconext_file.txt", "r", encoding="utf-8") as file:
-    reconext_file_text = file.read()
-
-# Step 3: Preprocess the text
+    # Read the entire contents of the file and store it in a variable
+    reconext_file_text = file.read()
+# Print the text that was read
+print(reconext_file_text)
+# step 3 from semantic search
 def preprocess_text(text):
-    cleaned_text = text.strip()
-    chunks = cleaned_text.split("\n")
-    cleaned_chunks = []
-    for chunk in chunks:
-        clean_chunk = chunk.strip()
-        if len(clean_chunk) > 0:
-            cleaned_chunks.append(clean_chunk)
-    return cleaned_chunks
-
-cleaned_chunks = preprocess_text(reconext_file_text)
-
-# Step 4: Create embeddings
+    # Strip extra whitespace from the beginning and the end of the text
+    cleaned_text = text.strip()
+    # Split the cleaned_text at every newline character (\n)
+    chunks = cleaned_text.split("\n")
+    # Create an empty list to store cleaned chunks
+    cleaned_chunks = []
+    # Clean each chunk and keep it only if it is non-empty
+    for chunk in chunks:
+        clean_chunk = chunk.strip()
+        if len(clean_chunk) > 0:
+            cleaned_chunks.append(clean_chunk)
+    # Print cleaned_chunks
+    print(cleaned_chunks)
+    # Print the length of cleaned_chunks
+    print(len(cleaned_chunks))
+    # Return the cleaned_chunks
+    return cleaned_chunks
+# Call the preprocess_text function and store the result in a cleaned_chunks variable
+cleaned_chunks = preprocess_text(reconext_file_text)
+# step 4 from semantic search
+# Load the pre-trained embedding model that converts text to vectors
 model = SentenceTransformer('all-MiniLM-L6-v2')
-
 def create_embeddings(text_chunks):
-    chunk_embeddings = model.encode(text_chunks, convert_to_tensor=True)
-    return chunk_embeddings
-
-chunk_embeddings = create_embeddings(cleaned_chunks)
-
-# Step 5: Semantic search
+    # Convert each text chunk into a vector embedding and store the result as a tensor
+    chunk_embeddings = model.encode(text_chunks, convert_to_tensor=True)
+    # Print the chunk embeddings
+    print(chunk_embeddings)
+    # Print the shape of chunk_embeddings
+    print(chunk_embeddings.shape)
+    # Return the chunk_embeddings
+    return chunk_embeddings
+# Call the create_embeddings function and store the result in a new chunk_embeddings variable
+chunk_embeddings = create_embeddings(cleaned_chunks)
+# step 5 from semantic search
+# Define a function to find the most relevant text chunks for a given query
 def get_top_chunks(query, chunk_embeddings, text_chunks):
-    query_embedding = model.encode(query, convert_to_tensor=True)
-    query_embedding_normalized = query_embedding / query_embedding.norm()
-    chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
-    similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)
-    top_indices = torch.topk(similarities, k=3).indices
-    top_chunks = [text_chunks[i] for i in top_indices]
-    return top_chunks
-
-# Response function
+    # Convert the query text into a vector embedding
+    query_embedding = model.encode(query, convert_to_tensor=True)
+    # Normalize the query embedding to unit length for accurate similarity comparison
+    query_embedding_normalized = query_embedding / query_embedding.norm()
+    # Normalize all chunk embeddings to unit length for consistent comparison
+    chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
+    # Calculate cosine similarity between the query and all chunks using matrix multiplication
+    similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)
+    # Print the similarities
+    print(similarities)
+    # Find the indices of the 3 chunks with the highest similarity scores
+    top_indices = torch.topk(similarities, k=3).indices
+    # Print the top indices
+    print(top_indices)
+    # Create an empty list to store the most relevant chunks
+    top_chunks = []
+    # Loop through the top indices and retrieve the corresponding text chunks
+    for i in top_indices:
+        top_chunks.append(text_chunks[i])
+    # Return the list of most relevant chunks
+    return top_chunks
 def respond(message, history):
     best_next_watch = get_top_chunks(message, chunk_embeddings, cleaned_chunks)
+    print(best_next_watch)
     str_watch_chunks = "\n".join(best_next_watch)
-
     messages = [
-        {
-            "role": "system",
-            "content": (
-                "You are a Gen Z and Gen Alpha-friendly chatbot that helps teenagers find their next best TV show to watch. "
-                "Speak naturally and casually, like someone from Gen Z. Only recommend TV shows, never movies. Use only the shows in our database. "
-                "YOU CAN NEVER USE OUTSIDE DATA — ONLY TAKE DATA FROM OUR DATABASE! Match show suggestions to the user's age using TV ratings: "
-                "TV-G is for all ages, TV-PG is for ages 6 and up, TV-14 is for 14 and up, and TV-MA is for 18 and up. "
-                "If they don’t share their age, assume they’re Gen Z or Gen Alpha and use those guidelines. "
-                "If the user is not Gen Z or Gen Alpha, you can recommend any show from the database. "
-                "If they give you a genre, use it to guide your recommendation. If they don’t, pick something fun or relevant. "
-                "If they mention a show they liked, match the genre of that show to recommend something similar. "
-                "If they mention a specific streaming platform, only give shows that are available there. "
-                "If nothing matches all their preferences, suggest the most similar show from the database. You got this! "
-                "Remember you can ONLY take data from:\n\n" + str_watch_chunks
-            )
+        {"role": "system",
+         "content": "You are a Gen Z and Gen Alpha-friendly chatbot that helps teenagers find their next best TV show to watch. Speak naturally and casually, like someone from Gen Z. Only recommend TV shows, never movies. Use only the shows in our database; YOU CAN NEVER USE OUTSIDE DATA, ONLY TAKE DATA FROM OUR DATABASE! Match show suggestions to the user's age using TV ratings: TV-G is for all ages, TV-PG is for ages 6 and up, TV-14 is for 14 and up, and TV-MA is for 18 and up. If they don’t share their age, assume they’re Gen Z or Gen Alpha and use those guidelines. If the user is not Gen Z or Gen Alpha, you can recommend any show from the database. If they give you a genre, use it to guide your recommendation. If they don’t, pick something fun or relevant. If they mention a show they liked, match the genre of that show to recommend something similar. If they mention a specific streaming platform, only give shows that are available there. If nothing matches all their preferences, suggest the most similar show from the database. You got this! Remember you can ONLY take data from " + str_watch_chunks + "."
         }
     ]
-
     if history:
         messages.extend(history)
-
-    messages.append({"role": "user", "content": message})
-
+    messages.append(
+        {"role": "user",
+         "content": message}
+    )
     response = client.chat_completion(
         messages, max_tokens=700, temperature=1.3, top_p=0.6
     )
-
     return response['choices'][0]['message']['content'].strip()
-
-# Create a Chatbot component with an initial message
-chatbot_ui = gr.Chatbot(value=[("🤖", "Hey! I’m your Gen-Z watch buddy.\nI help you find your next favorite TV show based on what you like, your age, or your favorite genre. Just tell me what you're into!")])
-
-# Create the ChatInterface using the custom chatbot component
-chat_interface = gr.ChatInterface(fn=respond, chatbot=chatbot_ui, type="messages")
-
-# Launch it
-chat_interface.launch(share=True)
+chatbot = gr.ChatInterface(respond, type="messages")
+chatbot.launch()
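
For a quick sanity check of the retrieval step outside Gradio, here is a minimal standalone sketch (not part of the commit): the chunks and query are made-up stand-ins for lines of reconext_file.txt, while the model name, normalization, and k=3 mirror app.py.

# Standalone sketch of the cosine-similarity retrieval used in app.py.
# The chunks and query below are hypothetical examples, not real database rows.
import torch
from sentence_transformers import SentenceTransformer

model = SentenceTransformer('all-MiniLM-L6-v2')

chunks = [
    "Show A | comedy | TV-14 | Netflix",
    "Show B | sci-fi | TV-PG | Hulu",
    "Show C | drama | TV-MA | Max",
    "Show D | animation | TV-G | Disney+",
]
# Encode the chunks once; each row of the tensor is one chunk's embedding
chunk_embeddings = model.encode(chunks, convert_to_tensor=True)

# Encode the query, then normalize both sides so the dot product equals cosine similarity
query_embedding = model.encode("a funny show for a 15 year old on Netflix", convert_to_tensor=True)
similarities = torch.matmul(
    chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True),
    query_embedding / query_embedding.norm(),
)

# Take the 3 most similar chunks, as get_top_chunks does
top_indices = torch.topk(similarities, k=3).indices
print([chunks[i] for i in top_indices])

Two caveats worth noting: torch.topk raises an error when k exceeds the number of chunks, so k=min(3, len(text_chunks)) would be a safer variant if reconext_file.txt cleans down to fewer than three lines; and with type="messages", gr.ChatInterface passes history as a list of {"role": ..., "content": ...} dicts, which is what lets messages.extend(history) feed client.chat_completion directly.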