Cachoups committed on
Commit e724b8f · verified · 1 Parent(s): 6d183c6

Create comparison1.py

Files changed (1)
  1. lib/comparison1.py +42 -0
lib/comparison1.py ADDED
@@ -0,0 +1,42 @@
+ from sentence_transformers import SentenceTransformer
+ from sklearn.metrics.pairwise import cosine_similarity
+ import numpy as np
+
+ # Load the pre-trained sentence transformer model
+ model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
+
+ # Get sentence embeddings for a single paragraph
+ def get_single_sentence_embedding(paragraph):
+     """Obtain embeddings for a single paragraph using a sentence transformer."""
+     embedding = model.encode([paragraph], convert_to_tensor=True)
+     return embedding
+
+ # Get sentence embeddings for a list of paragraphs
+ def get_sentence_embeddings(paragraphs):
+     """Obtain embeddings for a list of paragraphs using a sentence transformer."""
+     embeddings = model.encode(paragraphs, convert_to_tensor=True)
+     return embeddings
+
+ # Compute similarity matrices over embeddings
+ def compute_similarity(embeddings1, embeddings2):
+     """Compute pairwise cosine similarity between two sets of embeddings."""
+     return cosine_similarity(embeddings1.cpu().numpy(), embeddings2.cpu().numpy())
+
+ # Compare a single selected paragraph with a list of stored paragraphs
+ def compare_selected_paragraph(paragraph, stored_paragraphs):
+     """Compare the selected paragraph with stored paragraphs."""
+     # Get embedding for the selected paragraph
+     embedding1 = get_single_sentence_embedding(paragraph)
+
+     # Get embeddings for the stored paragraphs
+     embeddings2 = get_sentence_embeddings(stored_paragraphs)
+
+     # Compute similarity
+     similarity_matrix = compute_similarity(embedding1, embeddings2)
+
+     # Find the most similar paragraph
+     most_similar_index = np.argmax(similarity_matrix[0])
+     most_similar_paragraph = stored_paragraphs[most_similar_index]
+     similarity_score = similarity_matrix[0][most_similar_index]
+
+     return f"Most similar paragraph {most_similar_index + 1}: {most_similar_paragraph}\nSimilarity score: {similarity_score:.2f}"