# lib/comparison.py
from transformers import BertTokenizer, BertModel
import torch
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
# Load BERT tokenizer and model
bert_model_name = "bert-base-uncased"  # alternative: "yiyanghkust/finbert-tone"
tokenizer = BertTokenizer.from_pretrained(bert_model_name)
model = BertModel.from_pretrained(bert_model_name)
model.eval() # Set to evaluation mode
# Function to obtain BERT embeddings
def get_bert_embeddings(texts):
    """Obtain BERT embeddings for a list of texts."""
    embeddings = []
    with torch.no_grad():
        for text in texts:
            inputs = tokenizer(text, return_tensors='pt', truncation=True, padding=True)
            outputs = model(**inputs)
            # Take the mean of token embeddings as the sentence embedding
            embedding = outputs.last_hidden_state.mean(dim=1).squeeze().numpy()
            embeddings.append(embedding)
    return np.array(embeddings)
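
# Note: the mean pooling above averages over every token, including [CLS]/[SEP],
# and runs one forward pass per text. A sketch of a batched, attention-mask-aware
# variant (a hypothetical alternative; the rest of this module does not use it):
def get_bert_embeddings_masked(texts):
    """Batched, padding-aware mean pooling (illustrative sketch)."""
    inputs = tokenizer(texts, return_tensors='pt', truncation=True, padding=True)
    with torch.no_grad():
        outputs = model(**inputs)
    mask = inputs['attention_mask'].unsqueeze(-1).float()   # (batch, seq, 1)
    summed = (outputs.last_hidden_state * mask).sum(dim=1)  # zero out padding tokens
    counts = mask.sum(dim=1).clamp(min=1e-9)                # real tokens per text
    return (summed / counts).numpy()                        # (batch, hidden_size)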
# Compute the similarity matrix between two sets of embeddings
def compute_similarity(embeddings1, embeddings2):
    """Compute pairwise cosine similarity between two sets of embeddings."""
    return cosine_similarity(embeddings1, embeddings2)
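
# For example (illustrative values only): two query texts against three stored
# texts yield a (2, 3) matrix, where entry [i][j] is the cosine similarity
# between query i and stored text j:
#   sim = compute_similarity(get_bert_embeddings(["a", "b"]),
#                            get_bert_embeddings(["x", "y", "z"]))
#   sim.shape  # (2, 3)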
def compare_selected_paragraph(paragraph, stored_paragraphs):
    """Compare the selected paragraph against the stored paragraphs."""
    embeddings1 = get_bert_embeddings([paragraph])        # embedding for the selected paragraph
    embeddings2 = get_bert_embeddings(stored_paragraphs)  # embeddings for the stored paragraphs
    similarity_matrix = compute_similarity(embeddings1, embeddings2)
    # Find the most similar stored paragraph
    most_similar_index = np.argmax(similarity_matrix[0])
    most_similar_paragraph = stored_paragraphs[most_similar_index]
    similarity_score = similarity_matrix[0][most_similar_index]
    return (f"Most similar paragraph {most_similar_index + 1}: {most_similar_paragraph}\n"
            f"Similarity score: {similarity_score:.2f}")