Yuvalamitay committed on
Commit 86faa0f · verified · 1 Parent(s): 7e8646b

Update app.py

Files changed (1):
  app.py +76 -72
app.py CHANGED
@@ -1,92 +1,96 @@
  from huggingface_hub import InferenceClient
  from sentence_transformers import SentenceTransformer
  import torch
  import gradio as gr
- import requests
- import os
- TMDB_TOKEN = os.getenv("TMDB_BEARER_TOKEN")

- # Hugging Face model
  client = InferenceClient("Qwen/Qwen2.5-72B-Instruct")
-
- # Load and clean reconext text
  with open("reconext_file.txt", "r", encoding="utf-8") as file:
-     reconext_file_text = file.read()
-
  def preprocess_text(text):
-     chunks = [chunk.strip() for chunk in text.strip().split("\n") if chunk.strip()]
-     return chunks
-
- cleaned_chunks = preprocess_text(reconext_file_text)
-
- # Convert text chunks to embeddings
  model = SentenceTransformer('all-MiniLM-L6-v2')
-
  def create_embeddings(text_chunks):
-     return model.encode(text_chunks, convert_to_tensor=True)
-
- chunk_embeddings = create_embeddings(cleaned_chunks)
-
- # Semantic search for top relevant chunks
  def get_top_chunks(query, chunk_embeddings, text_chunks):
-     query_embedding = model.encode(query, convert_to_tensor=True)
-     query_embedding_normalized = query_embedding / query_embedding.norm()
-     chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
-     similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)
-     top_indices = torch.topk(similarities, k=3).indices
-     return [text_chunks[i] for i in top_indices]
-
- # TMDB API function
- def get_tmdb_recommendation(query):
-     url = "https://api.themoviedb.org/3/search/multi"
-     headers = {
- "Authorization": f"Bearer {TMDB_BEARER_TOKEN}"
-     }
-     params = {
-         "query": query,
-         "include_adult": False,
-         "language": "en-US",
-         "page": 1
-     }
-
-     response = requests.get(url, headers=headers, params=params)
-     if response.status_code == 200:
-         results = response.json().get("results", [])
-         if not results:
-             return "Nothin' popped up on TMDB for that 🫠"
-
-         top = results[0]
-         title = top.get("title") or top.get("name") or "a mystery show"
-         overview = top.get("overview", "No description available.")
-         return f"🔥 Try watching **{title}** — {overview}"
-     else:
-         return "TMDB ghosted us 👻 Try again later."
-
- # Chatbot response function
  def respond(message, history):
-     if any(word in message.lower() for word in ["recommend", "suggest", "watch", "movie", "show"]):
-         return get_tmdb_recommendation(message)
-
-     best_chunks = get_top_chunks(message, chunk_embeddings, cleaned_chunks)
-     str_chunks = "\n".join(best_chunks)
-
      messages = [
-         {
-             "role": "system",
-             "content": f"You are a gen-z helpful chatbot that helps teenagers find their next best watch as in TV shows and movies. Speak in a chill, funny, and relatable tone, but not too long of replies. Use the info below to answer:\n{str_chunks}"
          }
      ]
-
      if history:
          messages.extend(history)
-
-     messages.append({'role': 'user', 'content': message})
-
      response = client.chat_completion(
-         messages, max_tokens=300, temperature=1.3, top_p=0.6
      )
      return response['choices'][0]['message']['content'].strip()
-
- # Gradio app
- chatbot = gr.ChatInterface(respond, title="📺 Gen-Z Watch Buddy")
- chatbot.launch()
 
  from huggingface_hub import InferenceClient
+
+ # step 1 from semantic search
  from sentence_transformers import SentenceTransformer
  import torch
  import gradio as gr
+ import random

  client = InferenceClient("Qwen/Qwen2.5-72B-Instruct")
+ # step 2 from semantic search: read the file
+ # Open the reconext_file.txt file in read mode with UTF-8 encoding
  with open("reconext_file.txt", "r", encoding="utf-8") as file:
+     # Read the entire contents of the file and store it in a variable
+     reconext_file_text = file.read()
+ # Print the text
+ print(reconext_file_text)
+ # step 3 from semantic search
  def preprocess_text(text):
+     # Strip extra whitespace from the beginning and the end of the text
+     cleaned_text = text.strip()
+     # Split the cleaned_text at every newline character (\n)
+     chunks = cleaned_text.split("\n")
+     # Create an empty list to store cleaned chunks
+     cleaned_chunks = []
+     # Clean each chunk and keep it only if it is non-empty
+     for chunk in chunks:
+         clean_chunk = chunk.strip()
+         if len(clean_chunk) > 0:
+             cleaned_chunks.append(clean_chunk)
+     # Print cleaned_chunks and its length
+     print(cleaned_chunks)
+     print(len(cleaned_chunks))
+     # Return the cleaned_chunks
+     return cleaned_chunks
+ # Call the preprocess_text function and store the result in a cleaned_chunks variable
+ cleaned_chunks = preprocess_text(reconext_file_text)
+ # step 4 from semantic search
+ # Load the pre-trained embedding model that converts text to vectors
  model = SentenceTransformer('all-MiniLM-L6-v2')

  def create_embeddings(text_chunks):
+     # Convert each text chunk into a vector embedding and store as a tensor
+     chunk_embeddings = model.encode(text_chunks, convert_to_tensor=True)
+     # Print the chunk embeddings and their shape
+     print(chunk_embeddings)
+     print(chunk_embeddings.shape)
+     # Return the chunk_embeddings
+     return chunk_embeddings
+ # Call the create_embeddings function and store the result in a new chunk_embeddings variable
+ chunk_embeddings = create_embeddings(cleaned_chunks)
+ # step 5 from semantic search
+ # Define a function to find the most relevant text chunks for a given query
  def get_top_chunks(query, chunk_embeddings, text_chunks):
+     # Convert the query text into a vector embedding
+     query_embedding = model.encode(query, convert_to_tensor=True)
+     # Normalize the query embedding to unit length for accurate similarity comparison
+     query_embedding_normalized = query_embedding / query_embedding.norm()
+     # Normalize all chunk embeddings to unit length for consistent comparison
+     chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
+     # Calculate cosine similarity between the query and all chunks using matrix multiplication
+     similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)
+     # Print the similarities
+     print(similarities)
+     # Find the indices of the 3 chunks with the highest similarity scores
+     top_indices = torch.topk(similarities, k=3).indices
+     # Print the top indices
+     print(top_indices)
+     # Collect the corresponding text chunks in order of relevance
+     top_chunks = []
+     for i in top_indices:
+         top_chunks.append(text_chunks[i])
+     # Return the list of most relevant chunks
+     return top_chunks

  def respond(message, history):
+     best_next_watch = get_top_chunks(message, chunk_embeddings, cleaned_chunks)
+     print(best_next_watch)
+     str_watch_chunks = "\n".join(best_next_watch)
      messages = [
+         {"role": "system",
+          "content": "You are a gen-z helpful chatbot that helps teenagers find their next best watch. Speak in gen-z terms and be natural. You should answer the user's question based on " + str_watch_chunks + "."
          }
      ]
      if history:
          messages.extend(history)
+     messages.append(
+         {'role': 'user',
+          'content': message}
+     )
      response = client.chat_completion(
+         messages, max_tokens=300, temperature=1.3, top_p=0.6
      )
      return response['choices'][0]['message']['content'].strip()
+ chatbot = gr.ChatInterface(respond, type="messages")
+ chatbot.launch()
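
A quick way to sanity-check the retrieval step in the new version: the normalize-then-matmul computation inside get_top_chunks is plain cosine similarity, so it should agree with the util.cos_sim helper that ships with sentence_transformers. A minimal sketch, assuming the same all-MiniLM-L6-v2 model; the chunks and query below are invented stand-ins for cleaned_chunks and a user message:

import torch
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer('all-MiniLM-L6-v2')

# Toy stand-ins for cleaned_chunks and a user message.
chunks = [
    "A cozy baking competition show.",
    "A sci-fi thriller about time loops.",
    "A high-school drama with great music.",
]
embeddings = model.encode(chunks, convert_to_tensor=True)
query = model.encode("something funny and light to watch", convert_to_tensor=True)

# Manual route, as in get_top_chunks: normalize both sides, then dot products.
manual = torch.matmul(
    embeddings / embeddings.norm(dim=1, keepdim=True),
    query / query.norm(),
)

# Library route: util.cos_sim returns a (1, 3) matrix here, so take row 0.
builtin = util.cos_sim(query, embeddings)[0]

print(torch.allclose(manual, builtin, atol=1e-6))  # expected: True
print(torch.topk(manual, k=3).indices)             # same ranking get_top_chunks uses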
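
One detail worth flagging in this commit: gr.ChatInterface(respond, type="messages") should make Gradio pass history as a list of OpenAI-style role/content dicts, which is what lets messages.extend(history) feed it straight into client.chat_completion; without that flag, history arrives as [user, assistant] pairs and the extend would build a malformed payload. A small sketch of the shape involved (the history contents are invented for illustration):

# What respond(message, history) receives with type="messages":
history = [
    {"role": "user", "content": "recommend me a comedy"},
    {"role": "assistant", "content": "say less, try a sitcom marathon"},
]

# The same assembly respond() performs before calling chat_completion:
messages = [{"role": "system", "content": "persona + retrieved chunks go here"}]
messages.extend(history)
messages.append({"role": "user", "content": "something shorter this time?"})

for m in messages:
    print(m["role"], "->", m["content"])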