aditijuluri committed
Commit 6324ff2 · verified · 1 Parent(s): 07237fe

Update app.py

Files changed (1):
  1. app.py +57 -72
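
In short, the commit strips the tutorial scaffolding from app.py (the step-by-step comments and debug print() calls), fixes the empty-chunk filter in preprocess_text (>= 0 kept every line, blanks included; > 0 drops them), rewrites the chunk lookup in get_top_chunks as a list comprehension, and reflows the long system prompt into a parenthesized multi-line string.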
app.py CHANGED
@@ -1,97 +1,82 @@
Old version:

  from huggingface_hub import InferenceClient
- #step 1 from semantic search
  from sentence_transformers import SentenceTransformer
  import torch
  import gradio as gr
  import random
  client = InferenceClient("Qwen/Qwen2.5-72B-Instruct")

-
- #step 2 from semantic search read file
- # Open the water_cycle.txt file in read mode with UTF-8 encoding
  with open("reconext_file.txt", "r", encoding="utf-8") as file:
-     # Read the entire contents of the file and store it in a variable
-     reconext_file_text = file.read()
- # Print the text below
- print(reconext_file_text)
- #step 3 from semantix search
  def preprocess_text(text):
-     # Strip extra whitespace from the beginning and the end of the text
-     cleaned_text = text.strip()
-     # Split the cleaned_text by every newline character (\n)
-     chunks = cleaned_text.split("\n")
-     # Create an empty list to store cleaned chunks
-     cleaned_chunks = []
-     # Write your for-in loop below to clean each chunk and add it to the cleaned_chunks list
-     for chunk in chunks:
-         clean_chunk = chunk.strip()
-         if(len(clean_chunk) >= 0):
-             cleaned_chunks.append(clean_chunk)
-     # Print cleaned_chunks
-     print(cleaned_chunks)
-     # Print the length of cleaned_chunks
-     print(len(cleaned_chunks))
-     # Return the cleaned_chunks
-     return cleaned_chunks
- # Call the preprocess_text function and store the result in a cleaned_chunks variable
- cleaned_chunks = preprocess_text(reconext_file_text) # Complete this line
- #step 4 from semantic search
- # Load the pre-trained embedding model that converts text to vectors
  model = SentenceTransformer('all-MiniLM-L6-v2')
  def create_embeddings(text_chunks):
-     # Convert each text chunk into a vector embedding and store as a tensor
-     chunk_embeddings = model.encode(text_chunks, convert_to_tensor=True) # Replace ... with the text_chunks list
-     # Print the chunk embeddings
-     print(chunk_embeddings)
-     # Print the shape of chunk_embeddings
-     print(chunk_embeddings.shape)
-     # Return the chunk_embeddings
-     return chunk_embeddings
- # Call the create_embeddings function and store the result in a new chunk_embeddings variable
- chunk_embeddings = create_embeddings(cleaned_chunks) # Complete this line
- #step 5 from semantic search
- # Define a function to find the most relevant text chunks for a given query, chunk_embeddings, and text_chunks
  def get_top_chunks(query, chunk_embeddings, text_chunks):
-     # Convert the query text into a vector embedding
-     query_embedding = model.encode(query, convert_to_tensor=True) # Complete this line
-     # Normalize the query embedding to unit length for accurate similarity comparison
-     query_embedding_normalized = query_embedding / query_embedding.norm()
-     # Normalize all chunk embeddings to unit length for consistent comparison
-     chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
-     # Calculate cosine similarity between query and all chunks using matrix multiplication
-     similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized) # Complete this line
-     # Print the similarities
-     print(similarities)
-     # Find the indices of the 3 chunks with highest similarity scores
-     top_indices = torch.topk(similarities, k=3).indices
-     # Print the top indices
-     print(top_indices)
-     # Create an empty list to store the most relevant chunks
-     top_chunks = []
-     # Loop through the top indices and retrieve the corresponding text chunks
-     for i in top_indices:
-         top_chunks.append(text_chunks[i])
-     # Return the list of most relevant chunks
-     return top_chunks
  def respond(message, history):
      best_next_watch = get_top_chunks(message, chunk_embeddings, cleaned_chunks)
-     print(best_next_watch)
      str_watch_chunks = "\n".join(best_next_watch)
      messages = [
-         {"role":"system",
-          "content": "You are a Gen Z and Gen Alpha-friendly chatbot that helps teenagers find their next best TV show to watch. Speak naturally and casually, like someone from Gen Z. Only recommend TV shows, never movies. Use only the shows in our database YOU CAN NEVER USE OUTSIDE DATA ONLY TAKE DATA FROM OUR DATABASE! Match show suggestions to the user's age using TV ratings: TV-G is for all ages, TV-PG is for ages 6 and up, TV-14 is for 14 and up, and TV-MA is for 18 and up. If they don’t share their age, assume they’re Gen Z or Gen Alpha and use those guidelines. If the user is not Gen Z or Gen Alpha, you can recommend any show from the database. If they give you a genre, use it to guide your recommendation. If they don’t, pick something fun or relevant. If they mention a show they liked, match the genre of that show to recommend something similar. If they mention a specific streaming platform, only give shows that are available there. If nothing matches all their preferences, suggest the most similar show from the database. You got this! Remember you can ONLY take data from " + str_watch_chunks + " ."
          }
      ]
      if history:
          messages.extend(history)
-     messages.append(
-         {'role':'user',
-          'content':message}
-     )
      response = client.chat_completion(
-         messages, max_tokens = 700, temperature=1.3, top_p=0.6
      )
      return response['choices'][0]['message']['content'].strip()
- initial_message = [("🤖", "Hey! I’m your Gen-Z watch buddy.\nI help you find your next favorite TV show based on what you like, your age, or favorite genre. Just tell me what you're into!")]
  chatbot = gr.ChatInterface(respond, type="messages", chatbot=initial_message)
  chatbot.launch()
 
New version:

  from huggingface_hub import InferenceClient
  from sentence_transformers import SentenceTransformer
  import torch
  import gradio as gr
  import random
+
+ # Load model for chat
  client = InferenceClient("Qwen/Qwen2.5-72B-Instruct")

+ # Read the TV show database
  with open("reconext_file.txt", "r", encoding="utf-8") as file:
+     reconext_file_text = file.read()
+
+ # Preprocess text chunks
  def preprocess_text(text):
+     cleaned_text = text.strip()
+     chunks = cleaned_text.split("\n")
+     cleaned_chunks = []
+     for chunk in chunks:
+         clean_chunk = chunk.strip()
+         if len(clean_chunk) > 0:
+             cleaned_chunks.append(clean_chunk)
+     return cleaned_chunks
+
+ cleaned_chunks = preprocess_text(reconext_file_text)
+
+ # Create embeddings
  model = SentenceTransformer('all-MiniLM-L6-v2')
+
  def create_embeddings(text_chunks):
+     chunk_embeddings = model.encode(text_chunks, convert_to_tensor=True)
+     return chunk_embeddings
+
+ chunk_embeddings = create_embeddings(cleaned_chunks)
+
+ # Semantic search function
  def get_top_chunks(query, chunk_embeddings, text_chunks):
+     query_embedding = model.encode(query, convert_to_tensor=True)
+     query_embedding_normalized = query_embedding / query_embedding.norm()
+     chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
+     similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)
+     top_indices = torch.topk(similarities, k=3).indices
+     top_chunks = [text_chunks[i] for i in top_indices]
+     return top_chunks
+
+ # Chat response function
  def respond(message, history):
      best_next_watch = get_top_chunks(message, chunk_embeddings, cleaned_chunks)
      str_watch_chunks = "\n".join(best_next_watch)
      messages = [
+         {
+             "role": "system",
+             "content": (
+                 "You are a Gen Z and Gen Alpha-friendly chatbot that helps teenagers find their next best TV show to watch. "
+                 "Speak naturally and casually, like someone from Gen Z. Only recommend TV shows, never movies. Use only the shows in our database "
+                 "YOU CAN NEVER USE OUTSIDE DATA ONLY TAKE DATA FROM OUR DATABASE! Match show suggestions to the user's age using TV ratings: "
+                 "TV-G is for all ages, TV-PG is for ages 6 and up, TV-14 is for 14 and up, and TV-MA is for 18 and up. "
+                 "If they don’t share their age, assume they’re Gen Z or Gen Alpha and use those guidelines. "
+                 "If the user is not Gen Z or Gen Alpha, you can recommend any show from the database. "
+                 "If they give you a genre, use it to guide your recommendation. If they don’t, pick something fun or relevant. "
+                 "If they mention a show they liked, match the genre of that show to recommend something similar. "
+                 "If they mention a specific streaming platform, only give shows that are available there. "
+                 "If nothing matches all their preferences, suggest the most similar show from the database. You got this! "
+                 "Remember you can ONLY take data from:\n\n" + str_watch_chunks
+             )
          }
      ]
      if history:
          messages.extend(history)
+     messages.append({"role": "user", "content": message})
+
      response = client.chat_completion(
+         messages, max_tokens=700, temperature=1.3, top_p=0.6
      )
      return response['choices'][0]['message']['content'].strip()
+
+ # Initial chatbot message
+ initial_message = [("🤖", "Hey! I’m your Gen-Z watch buddy.\nI help you find your next favorite TV show based on what you like, your age, or your favorite genre. Just tell me what you're into!")]
+
+ # Gradio chat interface
  chatbot = gr.ChatInterface(respond, type="messages", chatbot=initial_message)
  chatbot.launch()
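
For anyone reviewing the change, here is a minimal, self-contained sketch of the retrieval step that the new get_top_chunks implements, run against a toy three-show database instead of reconext_file.txt. The show entries, the query, and the k clamp are illustrative additions, not part of the commit:

    from sentence_transformers import SentenceTransformer
    import torch

    model = SentenceTransformer('all-MiniLM-L6-v2')

    # Toy stand-in for the chunks parsed out of reconext_file.txt.
    shows = [
        "Avatar: The Last Airbender | TV-PG | fantasy adventure | Netflix",
        "Stranger Things | TV-14 | sci-fi horror | Netflix",
        "Bluey | TV-G | family comedy | Disney+",
    ]
    chunk_embeddings = model.encode(shows, convert_to_tensor=True)

    def get_top_chunks(query, chunk_embeddings, text_chunks, k=3):
        query_embedding = model.encode(query, convert_to_tensor=True)
        # Normalizing both sides makes the dot product equal cosine similarity.
        query_normalized = query_embedding / query_embedding.norm()
        chunks_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
        similarities = torch.matmul(chunks_normalized, query_normalized)
        # Clamp k so topk never asks for more chunks than exist (the committed
        # code hardcodes k=3, which fails on a database with fewer than 3 rows).
        top_indices = torch.topk(similarities, k=min(k, len(text_chunks))).indices
        return [text_chunks[i] for i in top_indices]

    print(get_top_chunks("something spooky with sci-fi vibes", chunk_embeddings, shows))

One caveat the diff leaves untouched: initial_message is a list of (sender, text) tuples, but the chatbot argument of gr.ChatInterface expects a gr.Chatbot component, and type="messages" expects role/content dicts rather than tuples. If the greeting fails to render, a sketch along these lines (an assumed fix, untested against this app) is closer to what Gradio expects:

    import gradio as gr

    def respond(message, history):  # stand-in for the respond() defined in app.py
        return "stub reply"

    greeting = [{"role": "assistant", "content": "Hey! I’m your Gen-Z watch buddy."}]
    demo = gr.ChatInterface(
        respond,
        type="messages",
        chatbot=gr.Chatbot(value=greeting, type="messages"),
    )
    demo.launch()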