# FinanceReport / lib / comparison1.py
# Created by Cachoups (commit e724b8f, verified)
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
# Load the pre-trained sentence transformer model once at import time.
# all-MiniLM-L6-v2 is a small, fast general-purpose embedding model;
# every helper below shares this single instance.
model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
# Get sentence embeddings for a single paragraph
def get_single_sentence_embedding(paragraph):
    """Encode one paragraph with the shared sentence-transformer model.

    The paragraph is wrapped in a one-element list so the model returns a
    batch of shape (1, dim) as a tensor.
    """
    return model.encode([paragraph], convert_to_tensor=True)
# Get sentence embeddings for a list of paragraphs
def get_sentence_embeddings(paragraphs):
    """Encode a list of paragraphs into a (len(paragraphs), dim) tensor.

    Uses the module-level sentence-transformer model.
    """
    return model.encode(paragraphs, convert_to_tensor=True)
# Compute similarity matrices over embeddings
def compute_similarity(embeddings1, embeddings2):
    """Compute pairwise cosine similarity between two sets of embeddings.

    Args:
        embeddings1: tensor of shape (m, dim) (e.g. from ``model.encode``).
        embeddings2: tensor of shape (n, dim).

    Returns:
        A (m, n) numpy array where entry [i, j] is the cosine similarity
        between embeddings1[i] and embeddings2[j].
    """
    a = embeddings1.cpu().numpy()
    b = embeddings2.cpu().numpy()
    # Row-normalize, clamping zero norms to 1 so all-zero vectors yield a
    # similarity of 0 (same convention as sklearn's cosine_similarity);
    # avoids pulling in sklearn for what is a normalized matmul in numpy.
    norms_a = np.linalg.norm(a, axis=1, keepdims=True)
    norms_b = np.linalg.norm(b, axis=1, keepdims=True)
    norms_a[norms_a == 0] = 1.0
    norms_b[norms_b == 0] = 1.0
    return (a / norms_a) @ (b / norms_b).T
# Compare a single selected paragraph with a list of stored paragraphs
def compare_selected_paragraph(paragraph, stored_paragraphs):
    """Find the stored paragraph most similar to the selected one.

    Embeds the query paragraph and all stored paragraphs, scores them with
    cosine similarity, and returns a formatted report naming the best match
    (1-based position) and its similarity score.
    """
    query_embedding = get_single_sentence_embedding(paragraph)
    corpus_embeddings = get_sentence_embeddings(stored_paragraphs)
    # First (and only) row of the similarity matrix: query vs. every stored paragraph.
    scores = compute_similarity(query_embedding, corpus_embeddings)[0]
    best = np.argmax(scores)
    return (
        f"Most similar paragraph {best + 1}: {stored_paragraphs[best]}"
        f"\nSimilarity score: {scores[best]:.2f}"
    )