from transformers import BertTokenizer, BertModel
import torch
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np

# Load BERT tokenizer and model
bert_model_name = "bert-base-uncased"  # alternative: "yiyanghkust/finbert-tone"
tokenizer = BertTokenizer.from_pretrained(bert_model_name)
model = BertModel.from_pretrained(bert_model_name)
model.eval()  # Set to evaluation mode

# Function to obtain BERT embeddings
def get_bert_embeddings(texts):
    """Obtain BERT embeddings for a list of texts."""
    embeddings = []
    with torch.no_grad():
        for text in texts:
            # Tokenize one text at a time; truncation caps inputs at BERT's 512-token limit
            inputs = tokenizer(text, return_tensors='pt', truncation=True, padding=True)
            outputs = model(**inputs)
            # Take the mean of token embeddings as the sentence embedding
            embedding = outputs.last_hidden_state.mean(dim=1).squeeze().numpy()
            embeddings.append(embedding)
    return np.array(embeddings)

# Compute a cosine similarity matrix between two sets of embeddings
def compute_similarity(embeddings1, embeddings2):
    """Compute pairwise cosine similarity between two sets of embeddings."""
    return cosine_similarity(embeddings1, embeddings2)

def compare_selected_paragraph(paragraph, stored_paragraphs):
    """Compare the selected paragraph with stored paragraphs and return the best match."""
    embeddings1 = get_bert_embeddings([paragraph])        # embedding for the selected paragraph
    embeddings2 = get_bert_embeddings(stored_paragraphs)  # embeddings for the stored paragraphs

    similarity_matrix = compute_similarity(embeddings1, embeddings2)
    
    # Find the most similar paragraph
    most_similar_index = np.argmax(similarity_matrix[0])
    most_similar_paragraph = stored_paragraphs[most_similar_index]
    similarity_score = similarity_matrix[0][most_similar_index]
    
    return f"Most similar paragraph {most_similar_index+1}: {most_similar_paragraph}\nSimilarity score: {similarity_score:.2f}"