Yuvalamitay committed on
Commit 3e01343 · verified · 1 Parent(s): 2ae1b89

Update app.py

Files changed (1)
app.py +94 -1
app.py CHANGED
@@ -1 +1,94 @@
- import gradio as gr
+ from huggingface_hub import InferenceClient
+ # Step 1 from semantic search
+ from sentence_transformers import SentenceTransformer
+ import torch
+ import gradio as gr
+ import random
+ client = InferenceClient("Qwen/Qwen2.5-72B-Instruct")
+ # Step 2 from semantic search: read the file
+ # Open the books_file.txt file in read mode with UTF-8 encoding
+ with open("books_file.txt", "r", encoding="utf-8") as file:
+     # Read the entire contents of the file and store it in a variable
+     books_file_text = file.read()
+ # Print the text below
+ print(books_file_text)
+ # Step 3 from semantic search
+ def preprocess_text(text):
+     # Strip extra whitespace from the beginning and the end of the text
+     cleaned_text = text.strip()
+     # Split the cleaned_text by every newline character (\n)
+     chunks = cleaned_text.split("\n")
+     # Create an empty list to store cleaned chunks
+     cleaned_chunks = []
+     # Clean each chunk and add it to the cleaned_chunks list, skipping empty lines
+     for chunk in chunks:
+         clean_chunk = chunk.strip()
+         if len(clean_chunk) > 0:
+             cleaned_chunks.append(clean_chunk)
+     # Print cleaned_chunks
+     print(cleaned_chunks)
+     # Print the length of cleaned_chunks
+     print(len(cleaned_chunks))
+     # Return the cleaned_chunks
+     return cleaned_chunks
+ # Call the preprocess_text function and store the result in a cleaned_chunks variable
+ cleaned_chunks = preprocess_text(books_file_text)
+ # Step 4 from semantic search
+ # Load the pre-trained embedding model that converts text to vectors
+ model = SentenceTransformer('all-MiniLM-L6-v2')
+ def create_embeddings(text_chunks):
+     # Convert each text chunk into a vector embedding and store as a tensor
+     chunk_embeddings = model.encode(text_chunks, convert_to_tensor=True)
+     # Print the chunk embeddings
+     print(chunk_embeddings)
+     # Print the shape of chunk_embeddings
+     print(chunk_embeddings.shape)
+     # Return the chunk_embeddings
+     return chunk_embeddings
+ # Call the create_embeddings function and store the result in a new chunk_embeddings variable
+ chunk_embeddings = create_embeddings(cleaned_chunks)
+ # Step 5 from semantic search
+ # Define a function to find the most relevant text chunks for a given query, chunk_embeddings, and text_chunks
+ def get_top_chunks(query, chunk_embeddings, text_chunks):
+     # Convert the query text into a vector embedding
+     query_embedding = model.encode(query, convert_to_tensor=True)
+     # Normalize the query embedding to unit length for accurate similarity comparison
+     query_embedding_normalized = query_embedding / query_embedding.norm()
+     # Normalize all chunk embeddings to unit length for consistent comparison
+     chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
+     # Calculate cosine similarity between the query and all chunks using matrix multiplication
+     similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)
+     # Print the similarities
+     print(similarities)
+     # Find the indices of the 3 chunks with the highest similarity scores
+     top_indices = torch.topk(similarities, k=3).indices
+     # Print the top indices
+     print(top_indices)
+     # Create an empty list to store the most relevant chunks
+     top_chunks = []
+     # Loop through the top indices and retrieve the corresponding text chunks
+     for i in top_indices:
+         top_chunks.append(text_chunks[i])
+     # Return the list of most relevant chunks
+     return top_chunks
+ def respond(message, history):
+     # Retrieve the most relevant book entries for the user's message
+     best_next_read = get_top_chunks(message, chunk_embeddings, cleaned_chunks)
+     print(best_next_read)
+     str_read_chunks = "\n".join(best_next_read)
+     messages = [
+         {"role": "system",
+          "content": "You are a Gen Z and Gen Alpha-friendly chatbot that helps teenagers find their next best book to read. Speak naturally and casually, like someone from Gen Z. Only recommend books, never anything else. Use only the books in our database; you can NEVER use outside data, only take data from our database! Match book suggestions to the user's age. If they don't share their age, assume they're Gen Z or Gen Alpha and use those guidelines. If the user is not Gen Z or Gen Alpha, you can recommend any book from the database. If they give you a genre, use it to guide your recommendation. If they don't, pick something fun or relevant. If they mention a book they liked, match the genre of that book to recommend something similar. If nothing matches all their preferences, suggest the most similar book from the database. You got this! Remember you can ONLY take data from " + str_read_chunks + "."
+         }
+     ]
+     if history:
+         messages.extend(history)
+     messages.append(
+         {'role': 'user',
+          'content': message}
+     )
+     response = client.chat_completion(
+         messages, max_tokens=700, temperature=1.3, top_p=0.6
+     )
+     return response.choices[0].message.content.strip()
+ chatbot = gr.ChatInterface(respond, type="messages")
+ chatbot.launch()
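
A note on the data file, not part of the commit: preprocess_text splits books_file.txt on newline characters and get_top_chunks returns the three most similar lines, so the database is assumed to hold one book entry per line. Below is a minimal sketch of how the retrieval step could be exercised on its own once the code above has run; the query string and the entry format shown in the comments are hypothetical, and the actual contents of books_file.txt are not shown in this commit.

# Hypothetical sketch: each line of books_file.txt is assumed to describe one book,
# for example something like
#   Placeholder Title 1 | Author A | fantasy | ages 13-16 | short description
#   Placeholder Title 2 | Author B | sci-fi | ages 16-18 | short description
# The file needs at least 3 entries, since get_top_chunks uses torch.topk with k=3.
top = get_top_chunks("a cozy fantasy for a 14 year old", chunk_embeddings, cleaned_chunks)
print(top)  # prints the 3 book entries most similar to the query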