import os
import random

import requests
import torch
from bs4 import BeautifulSoup
from datasets import Dataset
from langchain.docstore.document import Document
from langchain.llms import HuggingFacePipeline
from langchain.text_splitter import CharacterTextSplitter
from peft import PeftConfig, PeftModel
from transformers import (AutoModel, AutoModelForCausalLM, AutoTokenizer,
                          GenerationConfig, pipeline)

# os.environ["CUDA_VISIBLE_DEVICES"] = "0"


generation_config = GenerationConfig(temperature=0.8,
                                     top_p=0.75,
                                     top_k=40)
device = 'cuda'

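# Mutable module-level state shared across the helpers below: the full text of
# the last processed URL, its FAISS embeddings dataset, and the fragment used
# as context for the most recently generated question.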
shared = {
    'answer_context': None,
    'embeddings_dataset': None,
    'full_text': None,
}

text_splitter = CharacterTextSplitter()


def get_nearest_examples(question: str, k: int):
    """
    Returns the k nearest examples to a given question.

    Args:
        question (str): The input question to find nearest examples for.
        k (int): The number of nearest examples to retrieve.

    Returns:
        The k nearest examples to the given question.
    """
    print(['get_nearest_examples', 'start'])
    question_embedding = get_embeddings([question]).cpu().detach().numpy()
    embeddings_dataset = shared['embeddings_dataset']
    scores, samples = embeddings_dataset.get_nearest_examples(
        "embeddings", question_embedding, k)
    print(['get_nearest_examples', 'scores and samples'])
    print(scores)
    print(samples['id'])
    print(['get_nearest_examples', 'end'])
    return samples


def get_embeddings(text):
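    """
    Computes embeddings for the given text using the embeddings model.

    Args:
        text: The input text (or list of texts) to embed.

    Returns:
        torch.Tensor: One embedding vector per input (the first-token hidden state).
    """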
    print(['get_embeddings', 'start'])
    encoded_input = emb_tokenizer(text,
                                  padding=True,
                                  truncation=True,
                                  return_tensors="pt")
    encoded_input = {k: v.to(device) for k, v in encoded_input.items()}
    model_output = emb_model(**encoded_input)
    model_output = model_output.last_hidden_state[:, 0]
    print(['get_embeddings', 'end'])
    return model_output


def build_faiss_index(text):
    """
    Builds a FAISS index for the given text.

    Args:
        text (str): The input text to build a FAISS index for.

    Returns:
        None.
    """
    print(['build_faiss_index', 'start'])
    text_list = split_text(text)
    emb_list = []
    for i, item in enumerate(text_list):
        emb_list.append({
            "embeddings": get_embeddings(item).cpu().detach().numpy()[0],
            'id': i
        })
    dataset = Dataset.from_list(emb_list)
    dataset.add_faiss_index(column="embeddings")
    shared['embeddings_dataset'] = dataset
    print(['build_faiss_index', 'end'])


def extract_text(url: str):
    """
    Extracts the text content from a given URL and returns it as a string.

    Args:
        url (str): The URL to extract text content from.

    Returns:
        str: The text content extracted from the URL, or an empty string if the URL is invalid.
    """
    print(['extract_text', 'start'])
    if url is None or url.strip() == '':
        return ''
    response = requests.get(url)
    soup = BeautifulSoup(response.text, "html.parser")
    text = '\n\n'.join(map(lambda p: p.text, soup.find_all('p')))
    shared['full_text'] = text
    print(['extract_text', 'end'])
    return text


def split_text(text: str):
    """
    Splits a given text into a list of individual lines.

    Args:
        text (str): The input text to split into lines.

    Returns:
        List[str]: A list of individual lines in the input text.
    """
    lines = text.split('\n')
    lines = [line.strip() for line in lines if line.strip()]
    return lines


def remove_prompt(text: str) -> str:
    """
    Removes the prompt from a given text and returns the resulting text.

    Args:
        text (str): The input text to remove the prompt from.

    Returns:
        str: The input text with the prompt removed, or the original text if the prompt is not found.
    """
    output_prompt = 'Output: '
    try:
        idx = text.index(output_prompt)
        res = text[idx + len(output_prompt):].strip()
        res = res.replace('Input: ', '')
    except ValueError:
        res = text
    return res


def summarize_text(text: str) -> str:
    """
    Generates a summary of the given text using a pre-trained language model.

    Args:
        text (str): The input text to generate a summary for.

    Returns:
        str: The generated summary for the input text.
    """
    print(['summarize_text', 'start'])

    print(['summarize_text', 'splitting text'])
    texts = text_splitter.split_text(text)
    docs = [Document(page_content=t) for t in texts]
    prompts = [f'<s>Instruction: Elabora un resume del siguiente texto.\nInput: {d.page_content}\nOutput: '
               for d in docs]

    print(['summarize_text', 'generating'])
    cleaned_summaries = [remove_prompt(
        s['generated_text']) for s in pipe(prompts)]
    summaries = '\n\n'.join(cleaned_summaries)

    print(['summarize_text', 'end'])
    return summaries


def summarize_text_v1(text: str):
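    """
    Legacy (v1) summarization that calls model.generate directly on the full
    text instead of splitting it into chunks and using the pipeline.

    Args:
        text (str): The input text to generate a summary for.

    Returns:
        str: The generated summary for the input text.
    """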
    print(['summarize_text', 'start'])
    input_text = f'<s>Instruction: Elabora un resume del siguiente texto.\nInput: {text}\nOutput: '
    batch = tokenizer(input_text, return_tensors='pt')
    batch = batch.to(device)
    print(['summarize_text', 'generating'])
    with torch.cuda.amp.autocast():
        output_tokens = model.generate(**batch,
                                       max_new_tokens=512,
                                       generation_config=generation_config
                                       )
    output = tokenizer.decode(output_tokens[0], skip_special_tokens=True)
    output = output.replace(input_text, '')
    print(['summarize_text', 'end'])
    return output


def generate_question(text: str):
    """
    Generates a question based on a random section of the input text using a pre-trained language model.

    Args:
        text (str): The input text to generate a question for.

    Returns:
        str: The generated question for the input text.
    """
    print(['generate_question', 'start'])
    # Get a random section of the whole text to generate a question
    fragments = split_text(text)
    rnd_text = random.choice(fragments)
    shared['answer_context'] = rnd_text

    input_text = f'<s>Instruction: Dado el siguiente texto quiero que generes una pregunta cuya respuesta se encuentre en él.\nInput: {rnd_text}\nOutput: '
    batch = tokenizer(input_text, return_tensors='pt')
    batch = batch.to(device)
    print(['generate_question', 'generating'])
    with torch.cuda.amp.autocast():
        output_tokens = model.generate(**batch,
                                       max_new_tokens=256,
                                       generation_config=generation_config)
    output = tokenizer.decode(output_tokens[0], skip_special_tokens=True)
    output = output.replace(input_text, '')
    print(['generate_question', 'end'])
    return output


def get_answer_context():
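    """Returns the text fragment used as context for the last generated question."""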
    return shared['answer_context']


def answer_question(question: str):
    """
    Generates an answer to the given question based on a pre-trained language model and a pre-built Faiss index.

    Args:
        question (str): The question to generate an answer for.

    Returns:
        str: The generated answer for the question.
    """
    print(['answer_question', 'start'])
    full_text = shared['full_text']

    if shared['embeddings_dataset'] is None:
        build_faiss_index(full_text)
    top_k_samples = get_nearest_examples(question, k=3)

    index_text = {}
    for i, t in enumerate(split_text(full_text)):
        index_text[i] = t

    context = '\n'.join([index_text[id] for id in top_k_samples['id']])

    input_text = f"""<s>Instruction: Te voy a proporcionar un texto del cual deseo que me respondas una pregunta. 
    El texto es el siguiente: `{context}`\nInput: {question}\nOutput: """
    batch = tokenizer(input_text, return_tensors='pt')
    batch = batch.to(device)
    print(['answer_question', 'generating'])
    with torch.cuda.amp.autocast():
        output_tokens = model.generate(**batch,
                                       max_new_tokens=256,
                                       generation_config=generation_config)
    output = tokenizer.decode(output_tokens[0], skip_special_tokens=True)
    output = output.replace(input_text, '')
    print(['answer_question', 'end'])
    return output


def load_model(peft_model_id):
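    """
    Loads the 8-bit base model and applies the LoRA adapter given by peft_model_id.

    Args:
        peft_model_id (str): The Hugging Face Hub id of the PEFT/LoRA adapter.

    Returns:
        Tuple[PeftModel, PreTrainedTokenizer]: The adapted model and its tokenizer.
    """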
    print(['load_model', 'start'])
    config = PeftConfig.from_pretrained(peft_model_id)
    print(['load_model', 'loading model'])
    model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path,
                                                 return_dict=True,
                                                 load_in_8bit=True,
                                                 device_map='auto')
    print(['load_model', 'loading tokenizer'])
    tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
    model = PeftModel.from_pretrained(model, peft_model_id)
    model.config.use_cache = True
    print(['load_model', 'end'])
    return model, tokenizer


def load_embeddings_model(model_ckpt: str):
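    """
    Loads the sentence-embeddings model and tokenizer and moves the model to the device.

    Args:
        model_ckpt (str): The checkpoint name of the embeddings model.

    Returns:
        Tuple[AutoModel, PreTrainedTokenizer]: The embeddings model and its tokenizer.
    """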
    print(['load_embeddings_model', 'start'])
    print(['load_embeddings_model', 'loading tokenizer'])
    tokenizer = AutoTokenizer.from_pretrained(model_ckpt)
    print(['load_embeddings_model', 'loading model'])
    model = AutoModel.from_pretrained(model_ckpt)
    model = model.to(device)
    print(['load_embeddings_model', 'end'])
    return model, tokenizer


# Models trained with LoRA
# - hackathon-somos-nlp-2023/opt-6.7b-lora-sag-t3000-v300-v2
# - hackathon-somos-nlp-2023/opt-6.7b-lora-sag-t14000-v1400-v1
model, tokenizer = load_model("hackathon-somos-nlp-2023/opt-6.7b-lora-sag-t14000-v1400-v1")
pipe = pipeline("text-generation", model=model,
                tokenizer=tokenizer, max_new_tokens=100)
llm = HuggingFacePipeline(pipeline=pipe)

# Sentence Transformers models
# - paraphrase-multilingual-MiniLM-L12-v2
# - multi-qa-mpnet-base-dot-v1
emb_model, emb_tokenizer = load_embeddings_model("sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2")
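

# Minimal usage sketch of the helpers above, assuming a CUDA GPU is available
# and that the example URL below (a hypothetical choice, not taken from the
# original code) is reachable.
if __name__ == '__main__':
    sample_url = 'https://es.wikipedia.org/wiki/Procesamiento_de_lenguajes_naturales'  # hypothetical example
    article = extract_text(sample_url)      # download and clean the page text
    print(summarize_text(article))          # chunked summary via the LoRA model
    question = generate_question(article)   # question about a random fragment
    print(question)
    print(answer_question(question))        # answer using the FAISS-retrieved context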