import spaces
import torch
import torch.nn.functional as F
from torch import Tensor
from transformers import AutoTokenizer, AutoModel
import gradio as gr
import os

title = """
# 👋🏻Welcome to 🙋🏻‍♂️Tonic's 🐣e5-mistral🛌🏻Embeddings """
description = """
You can use this ZeroGPU Space to test out the current model, [intfloat/e5-mistral-7b-instruct](https://huggingface.co/intfloat/e5-mistral-7b-instruct). 🐣e5-mistral🛌🏻 has a larger context🪟window, a different prompting/return🛠️mechanism, and generally better results than other embedding models. Use it via the API to create embeddings, or try the Sentence Similarity tab to compare pairs of sentences.
You can also use 🐣e5-mistral🛌🏻 by cloning this space. 🧬🔬🔍 Simply click here: <a style="display:inline-block" href="https://huggingface.co/spaces/Tonic/e5?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a>
Join us: 🌟TeamTonic🌟 is always making cool demos! Join our active builders'🛠️community 👻  [![Join us on Discord](https://img.shields.io/discord/1109943800132010065?label=Discord&logo=discord&style=flat-square)](https://discord.gg/GWpVpekp) On 🤗Hugging Face: [TeamTonic](https://huggingface.co/TeamTonic) & [MultiTransformer](https://huggingface.co/MultiTransformer) On 🌐GitHub: [Polytonic](https://github.com/tonic-ai) & contribute to 🌟 [Poly](https://github.com/tonic-ai/poly) 🤗 Big thanks to Yuvi Sharma and all the folks at Hugging Face for the community grant 🤗
"""
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:30'
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

tasks = {
        'ArguAna': 'Given a claim, find documents that refute the claim',
        'ClimateFEVER': 'Given a claim about climate change, retrieve documents that support or refute the claim',
        'DBPedia': 'Given a query, retrieve relevant entity descriptions from DBPedia',
        'FEVER': 'Given a claim, retrieve documents that support or refute the claim',
        'FiQA2018': 'Given a financial question, retrieve user replies that best answer the question',
        'HotpotQA': 'Given a multi-hop question, retrieve documents that can help answer the question',
        'MSMARCO': 'Given a web search query, retrieve relevant passages that answer the query',
        'NFCorpus': 'Given a question, retrieve relevant documents that best answer the question',
        'NQ': 'Given a question, retrieve Wikipedia passages that answer the question',
        'QuoraRetrieval': 'Given a question, retrieve questions that are semantically equivalent to the given question',
        'SCIDOCS': 'Given a scientific paper title, retrieve paper abstracts that are cited by the given paper',
        'SciFact': 'Given a scientific claim, retrieve documents that support or refute the claim',
        'Touche2020': 'Given a question, retrieve detailed and persuasive arguments that answer the question',
        'TRECCOVID': 'Given a query on COVID-19, retrieve documents that answer the query',
}
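
# The model card for intfloat/e5-mistral-7b-instruct formats each query as
# "Instruct: {task_description}\nQuery: {query}" and adds no instruction prefix to documents.
# This helper mirrors the inline formatting used in compute_embeddings below.
def get_detailed_instruct(task_description: str, query: str) -> str:
    return f'Instruct: {task_description}\nQuery: {query}'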


def last_token_pool(last_hidden_states: Tensor, attention_mask: Tensor) -> Tensor:
    """Pool the hidden state of the last non-padding token in each sequence."""
    # If every sequence has a real token in the final position, the batch is left-padded
    # and the last column already holds each sequence's final token.
    left_padding = (attention_mask[:, -1].sum() == attention_mask.shape[0])
    if left_padding:
        return last_hidden_states[:, -1]
    else:
        # Right-padded: index each row at its last non-padding position.
        sequence_lengths = attention_mask.sum(dim=1) - 1
        batch_size = last_hidden_states.shape[0]
        return last_hidden_states[torch.arange(batch_size, device=last_hidden_states.device), sequence_lengths]
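
# Lightweight shape check (illustrative only): one right-padded row in a (2, 4, 3) hidden-state
# tensor should pool to one vector per sequence, i.e. shape (2, 3).
_demo_hidden = torch.zeros(2, 4, 3)
_demo_mask = torch.tensor([[1, 1, 1, 1], [1, 1, 1, 0]])
assert last_token_pool(_demo_hidden, _demo_mask).shape == (2, 3)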

class EmbeddingModel:
    def __init__(self):
        self.tokenizer = AutoTokenizer.from_pretrained('intfloat/e5-mistral-7b-instruct')
        self.model = AutoModel.from_pretrained('intfloat/e5-mistral-7b-instruct', torch_dtype=torch.float16, device_map=device)

    @spaces.GPU
    def compute_embeddings(self, selected_task, input_text, system_prompt):
        # system_prompt is accepted from the UI but not used by the embedding model.
        max_length = 2042
        task_description = tasks[selected_task]
        processed_texts = [f'Instruct: {task_description}\nQuery: {input_text}']

        # Tokenize without padding, append the EOS token, then pad the batch (per the model card).
        batch_dict = self.tokenizer(processed_texts, max_length=max_length - 1, return_attention_mask=False, padding=False, truncation=True)
        batch_dict['input_ids'] = [input_ids + [self.tokenizer.eos_token_id] for input_ids in batch_dict['input_ids']]
        batch_dict = self.tokenizer.pad(batch_dict, padding=True, return_attention_mask=True, return_tensors='pt')
        batch_dict = {k: v.to(device) for k, v in batch_dict.items()}
        with torch.no_grad():
            outputs = self.model(**batch_dict)
        embeddings = last_token_pool(outputs.last_hidden_state, batch_dict['attention_mask'])
        embeddings = F.normalize(embeddings, p=2, dim=1)
        return embeddings.detach().cpu().numpy().tolist()

    @spaces.GPU
    def compute_similarity(self, sentence1, sentence2, extra_sentence1, extra_sentence2):
        # Sentences are embedded as-is; the model card does not require an instruction prefix for documents.
        sentences = [sentence1, sentence2, extra_sentence1, extra_sentence2]
        encoded_input = self.tokenizer(sentences, padding=True, truncation=True, return_tensors='pt').to(device)
        with torch.no_grad():
            model_output = self.model(**encoded_input)

        embeddings = last_token_pool(model_output.last_hidden_state, encoded_input['attention_mask'])
        embeddings = F.normalize(embeddings, p=2, dim=1)

        # Cosine similarity for the pairs (Sentence 1, Sentence 2) and (Sentence 3, Sentence 4),
        # returned as a dict so the gr.Label output can display both scores.
        similarity1 = F.cosine_similarity(embeddings[0].unsqueeze(0), embeddings[1].unsqueeze(0)).item()
        similarity2 = F.cosine_similarity(embeddings[2].unsqueeze(0), embeddings[3].unsqueeze(0)).item()
        return {"Sentence 1 vs Sentence 2": similarity1, "Sentence 3 vs Sentence 4": similarity2}

    
embedding_model = EmbeddingModel()

def app_interface():
    with gr.Blocks() as demo:
        gr.Markdown(title)
        gr.Markdown(description)
        with gr.Row():
            task_dropdown = gr.Dropdown(list(tasks.keys()), label="Select a Task", value=list(tasks.keys())[0])

        with gr.Tab("Embedding Generation"):
            input_text_box = gr.Textbox(label="📖Input Text")
            system_prompt_box = gr.Textbox(label="🤖System Prompt (Optional)")
            compute_button = gr.Button("Try🐣🛌🏻e5")
            output_display = gr.Textbox(label="🐣e5-mistral🛌🏻 Embeddings")
            compute_button.click(
                fn=embedding_model.compute_embeddings,
                inputs=[task_dropdown, input_text_box, system_prompt_box],
                outputs=output_display
            )

        with gr.Tab("Sentence Similarity"):
            sentence1_box = gr.Textbox(label="Sentence 1")
            sentence2_box = gr.Textbox(label="Sentence 2")
            extra_sentence1_box = gr.Textbox(label="Sentence 3")
            extra_sentence2_box = gr.Textbox(label="Sentence 4")
            similarity_button = gr.Button("Compute Similarity")
            similarity_output = gr.Label(label="🐣e5-mistral🛌🏻 Similarity Scores")
            similarity_button.click(
                fn=embedding_model.compute_similarity,
                inputs=[sentence1_box, sentence2_box, extra_sentence1_box, extra_sentence2_box],
                outputs=similarity_output
            )

    return demo

# Run the Gradio app
app_interface().launch()