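"""Spanish summarization and question answering over web pages.

Extracts the text of a URL, summarizes it with a LoRA-tuned OPT-6.7B model
(hackathon-somos-nlp-2023), and generates or answers questions about it using
a sentence-transformers embeddings model with a FAISS index for retrieval.
"""
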
import os
import random

import requests
import torch
from bs4 import BeautifulSoup
from datasets import Dataset
from langchain.docstore.document import Document
from langchain.llms import HuggingFacePipeline
from langchain.text_splitter import CharacterTextSplitter
from peft import PeftConfig, PeftModel
from transformers import (AutoModel, AutoModelForCausalLM, AutoTokenizer,
                          GenerationConfig, pipeline)

# os.environ["CUDA_VISIBLE_DEVICES"] = "0"


generation_config = GenerationConfig(temperature=.8,
                                     top_p=0.75,
                                     top_k=40)
device = 'cuda'

shared = {
    'answer_context': None,
    'embeddings_dataset': None,
    'full_text': None,
}

text_splitter = CharacterTextSplitter()


def get_nearest_examples(question: str, k: int):
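    """Return the k indexed chunks whose embeddings are closest to the question."""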
    print(['get_nearest_examples', 'start'])
    question_embedding = get_embeddings([question]).cpu().detach().numpy()
    embeddings_dataset = shared['embeddings_dataset']
    scores, samples = embeddings_dataset.get_nearest_examples(
        "embeddings", question_embedding, k)
    print(['get_nearest_examples', 'scores and samples'])
    print(scores)
    print(samples['id'])
    print(['get_nearest_examples', 'end'])
    return samples


def get_embeddings(text):
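    """Embed text (a string or list of strings) with the embeddings model, returning the [CLS] token representation."""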
    print(['get_embeddings', 'start'])
    encoded_input = emb_tokenizer(text,
                                  padding=True,
                                  truncation=True,
                                  return_tensors="pt")
    encoded_input = {k: v.to(device) for k, v in encoded_input.items()}
    model_output = emb_model(**encoded_input)
    model_output = model_output.last_hidden_state[:, 0]
    print(['get_embeddings', 'end'])
    return model_output


def build_faiss_index(text):
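    """Split the text into chunks, embed each one, and build a FAISS index over the embeddings."""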
    print(['build_faiss_index', 'start'])
    text_list = split_text(text)
    emb_list = []
    for i, item in enumerate(text_list):
        emb_list.append({
            "embeddings": get_embeddings(item).cpu().detach().numpy()[0],
            'id': i
        })
    dataset = Dataset.from_list(emb_list)
    dataset.add_faiss_index(column="embeddings")
    shared['embeddings_dataset'] = dataset
    print(['build_faiss_index', 'end'])


def extract_text(url: str):
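    """Download a web page and return the concatenated text of its <p> elements."""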
    print(['extract_text', 'start'])
    if url is None or url.strip() == '':
        return ''
    response = requests.get(url)
    soup = BeautifulSoup(response.text, "html.parser")
    text = '\n\n'.join(p.text for p in soup.find_all('p'))
    shared['full_text'] = text
    print(['extract_text', 'end'])
    return text


def split_text(text: str):
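    """Split text into non-empty, stripped lines (the chunks used for indexing and retrieval)."""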
    lines = text.split('\n')
    lines = [line.strip() for line in lines if line.strip()]
    return lines


def remove_prompt(text: str) -> str:
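    """Strip the instruction prompt from a generation, keeping only the part after 'Output: '."""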
    output_prompt = 'Output: '
    idx = text.index(output_prompt)
    res = text[idx + len(output_prompt):].strip()
    res = res.replace('Input: ', '')
    return res


def summarize_text(text: str) -> str:
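    """Summarize the text chunk by chunk with the generation pipeline and join the partial summaries."""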
    print(['summarize_text', 'start'])

    print(['summarize_text', 'splitting text'])
    texts = text_splitter.split_text(text)
    docs = [Document(page_content=t) for t in texts]
    prompts = [f'<s>Instruction: Elabora un resume del siguiente texto.\nInput: {d.page_content}\nOutput: '
               for d in docs]

    print(['summarize_text', 'generating'])
    cleaned_summaries = [remove_prompt(
        s['generated_text']) for s in pipe(prompts)]
    summaries = '\n\n'.join(cleaned_summaries)

    print(['summarize_text', 'end'])
    return summaries


def summarize_text_v1(text: str):
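    """Single-call variant of summarize_text: summarize the whole text with one generate() call."""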
    print(['summarize_text', 'start'])
    input_text = f'<s>Instruction: Elabora un resume del siguiente texto.\nInput: {text}\nOutput: '
    batch = tokenizer(input_text, return_tensors='pt')
    batch = batch.to(device)
    print(['summarize_text', 'generating'])
    with torch.cuda.amp.autocast():
        output_tokens = model.generate(**batch,
                                       max_new_tokens=512,
                                       generation_config=generation_config
                                       )
    output = tokenizer.decode(output_tokens[0], skip_special_tokens=True)
    output = output.replace(input_text, '')
    print(['summarize_text', 'end'])
    return output


def generate_question(text: str):
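    """Pick a random chunk of the text and generate a question answerable from it."""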
    print(['generate_question', 'start'])
    # Get a random section of the whole text to generate a question
    fragments = split_text(text)
    rnd_text = random.choice(fragments)
    shared['answer_context'] = rnd_text

    input_text = f'<s>Instruction: Dado el siguiente texto quiero que generes una pregunta cuya respuesta se encuentre en él.\nInput: {rnd_text}\nOutput: '
    batch = tokenizer(input_text, return_tensors='pt')
    batch = batch.to(device)
    print(['generate_question', 'generating'])
    with torch.cuda.amp.autocast():
        output_tokens = model.generate(**batch,
                                       max_new_tokens=256,
                                       generation_config=generation_config)
    output = tokenizer.decode(output_tokens[0], skip_special_tokens=True)
    output = output.replace(input_text, '')
    print(['generate_question', 'end'])
    return output


def get_answer_context():
    return shared['answer_context']


def answer_question(question: str):
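    """Answer a question using the most relevant chunks of the extracted text as context, building the FAISS index on first use."""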
    print(['answer_question', 'start'])
    full_text = shared['full_text']

    if shared['embeddings_dataset'] is None:
        build_faiss_index(full_text)
    top_k_samples = get_nearest_examples(question, k=3)

    index_text = dict(enumerate(split_text(full_text)))

    context = '\n'.join([index_text[id] for id in top_k_samples['id']])

    input_text = f"""<s>Instruction: Te voy a proporcionar un texto del cual deseo que me respondas una pregunta. 
    El texto es el siguiente: `{context}`\nInput: {question}\nOutput: """
    batch = tokenizer(input_text, return_tensors='pt')
    batch = batch.to(device)
    print(['answer_question', 'generating'])
    with torch.cuda.amp.autocast():
        output_tokens = model.generate(**batch,
                                       max_new_tokens=256,
                                       generation_config=generation_config)
    output = tokenizer.decode(output_tokens[0], skip_special_tokens=True)
    output = output.replace(input_text, '')
    print(['answer_question', 'end'])
    return output


def load_model(peft_model_id):
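    """Load the 8-bit base model and its tokenizer, then apply the LoRA adapter with PEFT."""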
    print(['load_model', 'start'])
    config = PeftConfig.from_pretrained(peft_model_id)
    print(['load_model', 'loading model'])
    model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path,
                                                 return_dict=True,
                                                 load_in_8bit=True,
                                                 device_map='auto')
    print(['load_model', 'loading tokenizer'])
    tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
    model = PeftModel.from_pretrained(model, peft_model_id)
    model.config.use_cache = True
    print(['load_model', 'end'])
    return model, tokenizer


def load_embeddings_model(model_ckpt: str):
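    """Load the embeddings model and tokenizer and move the model to the GPU."""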
    print(['load_embeddings_model', 'start'])
    print(['load_embeddings_model', 'loading tokenizer'])
    tokenizer = AutoTokenizer.from_pretrained(model_ckpt)
    print(['load_embeddings_model', 'loading model'])
    model = AutoModel.from_pretrained(model_ckpt)
    model = model.to(device)
    print(['load_embeddings_model', 'end'])
    return model, tokenizer


model, tokenizer = load_model(
    "hackathon-somos-nlp-2023/opt-6.7b-lora-sag-t3000-v300-v2")
pipe = pipeline("text2text-generation", model=model,
                tokenizer=tokenizer, max_new_tokens=100)
llm = HuggingFacePipeline(pipeline=pipe)
emb_model, emb_tokenizer = load_embeddings_model(
    "sentence-transformers/multi-qa-mpnet-base-dot-v1")