from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
# Load the pre-trained sentence transformer model
model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
# Get sentence embeddings for a single paragraph
def get_single_sentence_embedding(paragraph):
    """Obtain the embedding for a single paragraph using a sentence transformer."""
    embedding = model.encode([paragraph], convert_to_tensor=True)
    return embedding
# Get sentence embeddings for a list of paragraphs
def get_sentence_embeddings(paragraphs):
    """Obtain embeddings for a list of paragraphs using a sentence transformer."""
    embeddings = model.encode(paragraphs, convert_to_tensor=True)
    return embeddings
# Compute similarity matrices over embeddings
def compute_similarity(embeddings1, embeddings2):
    """Compute pairwise cosine similarity between two sets of embeddings."""
    return cosine_similarity(embeddings1.cpu().numpy(), embeddings2.cpu().numpy())
# Compare a single selected paragraph with a list of stored paragraphs
def compare_selected_paragraph(paragraph, stored_paragraphs):
    """Compare the selected paragraph with stored paragraphs."""
    # Get embedding for the selected paragraph
    embedding1 = get_single_sentence_embedding(paragraph)
    # Get embeddings for the stored paragraphs
    embeddings2 = get_sentence_embeddings(stored_paragraphs)
    # Compute similarity
    similarity_matrix = compute_similarity(embedding1, embeddings2)
    # Find the most similar paragraph
    most_similar_index = np.argmax(similarity_matrix[0])
    most_similar_paragraph = stored_paragraphs[most_similar_index]
    similarity_score = similarity_matrix[0][most_similar_index]
    return f"Most similar paragraph {most_similar_index + 1}: {most_similar_paragraph}\nSimilarity score: {similarity_score:.2f}"
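# A minimal usage sketch (illustrative only; the paragraphs below are
# hypothetical placeholders, not data from the original Space):
if __name__ == "__main__":
    stored_paragraphs = [
        "The cat sat quietly on the mat.",
        "Stock markets fell sharply on Monday morning.",
        "A kitten was resting on the rug.",
    ]
    selected = "A small cat was lying on the carpet."
    # Expected to report the most semantically similar stored paragraph
    # along with its cosine similarity score.
    print(compare_selected_paragraph(selected, stored_paragraphs))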