import os
import requests
import random
import torch
from bs4 import BeautifulSoup
from peft import PeftConfig, PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig, AutoModel
from datasets import Dataset
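# Retrieval-augmented question answering over a web page: the page text is split into
# fragments, embedded with a sentence-transformers model, and indexed with FAISS; a
# Spanish instruction-tuned LoRA model then summarizes the page, generates questions,
# and answers questions using the retrieved fragments as context.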

# os.environ["CUDA_VISIBLE_DEVICES"] = "0"


# Sampling parameters shared by every generate() call below;
# do_sample=True is required for temperature/top_p/top_k to take effect.
generation_config = GenerationConfig(do_sample=True,
                                     temperature=0.8,
                                     top_p=0.75,
                                     top_k=40)
device = 'cuda'
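
# Mutable module-level state shared across the functions below.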

shared = {
    'answer_context': None,
    'embeddings_dataset': None,
    'full_text': None,
}


def get_nearest_examples(question: str, k: int):
    """Return the k text fragments whose embeddings are closest to the question."""
    print(['get_nearest_examples', 'start'])
    # get_embeddings already returns a NumPy array, so no extra .cpu()/.numpy() conversion is needed
    question_embedding = get_embeddings([question])
    embeddings_dataset = shared['embeddings_dataset']
    scores, samples = embeddings_dataset.get_nearest_examples(
        "embeddings", question_embedding, k=k)
    print(['get_nearest_examples', 'scores and samples'])
    # samples is a dict of columns; pair each score with its retrieved text fragment
    for score, fragment in zip(scores, samples['text']):
        print([score, fragment])
    print(['get_nearest_examples', 'end'])
    return samples['text']


def get_embeddings(text):
    """Embed `text` with the sentence-transformers model and return a single vector."""
    print(['get_embeddings', 'start'])
    encoded_input = emb_tokenizer(text,
                                  padding=True,
                                  truncation=True,
                                  return_tensors="pt")
    encoded_input = {k: v.to(device) for k, v in encoded_input.items()}
    # No gradients are needed at inference time
    with torch.no_grad():
        model_output = emb_model(**encoded_input)
    # Use the [CLS] token representation as the sentence embedding
    emb_item = model_output.last_hidden_state[:, 0].detach().cpu().numpy()[0]
    print(['get_embeddings', 'end'])
    return emb_item


def build_faiss_index(text):
    """Split the text into fragments, embed them, and build a FAISS index for retrieval."""
    print(['build_faiss_index', 'start'])
    text_list = split_text(text)
    # Keep the raw fragment alongside its embedding so retrieval can return the text itself
    dataset = Dataset.from_dict({
        "text": text_list,
        "embeddings": [get_embeddings(item) for item in text_list],
    })
    dataset.add_faiss_index(column="embeddings")
    shared['embeddings_dataset'] = dataset
    print(['build_faiss_index', 'end'])


def extract_text(url: str):
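    """Download the URL, cache its text in shared['full_text'], and return the concatenated <p> contents."""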
    print(['extract_text', 'start'])
    if url is None or url.strip() == '':
        return ''
    response = requests.get(url)
    soup = BeautifulSoup(response.text, "html.parser")
    text = '\n\n'.join(map(lambda p: p.text, soup.find_all('p')))
    shared['full_text'] = text
    print(['extract_text', 'end'])
    return text


def split_text(text: str):
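    """Split text into non-empty, stripped lines to use as retrieval fragments."""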
    lines = text.split('\n')
    lines = [line.strip() for line in lines if line.strip()]
    return lines


def summarize_text(text: str):
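    """Ask the model for a summary of the text (the instruction prompt is in Spanish, matching the fine-tuned model)."""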
    print(['summarize_text', 'start'])
    input_text = f'<s>Instruction: Elabora un resumen del siguiente texto.\nInput: {text}\nOutput: '
    batch = tokenizer(input_text, return_tensors='pt')
    batch = batch.to(device)
    print(['summarize_text', 'generating'])
    with torch.cuda.amp.autocast():
        output_tokens = model.generate(**batch,
                                       max_new_tokens=512,
                                       generation_config=generation_config
                                       )
    output = tokenizer.decode(output_tokens[0], skip_special_tokens=True)
    output = output.replace(input_text, '')
    print(['summarize_text', 'end'])
    return output


def generate_question(text: str):
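    """Pick a random fragment of the text, remember it as the answer context, and generate a question about it."""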
    print(['generate_question', 'start'])
    # Get a random section of the whole text to generate a question
    fragments = split_text(text)
    rnd_text = random.choice(fragments)
    shared['answer_context'] = rnd_text

    input_text = f'<s>Instruction: Dado el siguiente texto quiero que generes una pregunta cuya respuesta se encuentre en él.\nInput: {rnd_text}\nOutput: '
    batch = tokenizer(input_text, return_tensors='pt')
    # Move the tokenized prompt to the same device as the model
    batch = batch.to(device)
    print(['generate_question', 'generating'])
    with torch.cuda.amp.autocast():
        output_tokens = model.generate(**batch,
                                       max_new_tokens=256,
                                       generation_config=generation_config)
    output = tokenizer.decode(output_tokens[0], skip_special_tokens=True)
    output = output.replace(input_text, '')
    print(['generate_question', 'end'])
    return output


def get_answer_context():
    return shared['answer_context']


def answer_question(question: str):
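    """Answer the question using the k most similar fragments of the page as context."""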
    # return ', '.join([len(shared['base_text']), len(question)])
    print(['answer_question', 'start'])
    if not shared['embeddings_dataset']:
        build_faiss_index(shared['full_text'])
    top_k_samples = get_nearest_examples(question, k=5)

    context = '\n'.join(top_k_samples)

    input_text = f"""<s>Instruction: Te voy a proporcionar un texto del cual deseo que me respondas una pregunta. 
    El texto es el siguiente: `{context}`\nInput: {question}\nOutput: """
    batch = tokenizer(input_text, return_tensors='pt')
    # Move the tokenized prompt to the same device as the model
    batch = batch.to(device)
    print(['answer_question', 'generating'])
    with torch.cuda.amp.autocast():
        output_tokens = model.generate(**batch,
                                       max_new_tokens=256,
                                       generation_config=generation_config)
    output = tokenizer.decode(output_tokens[0], skip_special_tokens=True)
    # Strip the prompt so only the generated answer is returned, as in the other helpers
    output = output.replace(input_text, '')
    print(['answer_question', 'end'])
    return output


def load_model(peft_model_id):
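    """Load the 8-bit base model, attach the LoRA adapter from peft_model_id, and return it with its tokenizer."""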
    print(['load_model', 'start'])
    config = PeftConfig.from_pretrained(peft_model_id)
    print(['load_model', 'loading model'])
    model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path,
                                                 return_dict=True,
                                                 load_in_8bit=True,
                                                 device_map='auto')
    print(['load_model', 'loading tokenizer'])
    tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
    model = PeftModel.from_pretrained(model, peft_model_id)
    model.config.use_cache = True
    print(['load_model', 'end'])
    return model, tokenizer


def load_embeddings_model():
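    """Load the sentence-transformers checkpoint used to embed text fragments."""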
    print(['load_embeddings_model', 'start'])
    model_ckpt = "sentence-transformers/multi-qa-mpnet-base-dot-v1"
    print(['load_embeddings_model', 'loading tokenizer'])
    tokenizer = AutoTokenizer.from_pretrained(model_ckpt)
    print(['load_embeddings_model', 'loading model'])
    model = AutoModel.from_pretrained(model_ckpt)
    model = model.to(device)
    print(['load_embeddings_model', 'end'])
    return model, tokenizer


model, tokenizer = load_model(
    "hackathon-somos-nlp-2023/opt-6.7b-lora-sag-t3000-v300-v2")

emb_model, emb_tokenizer = load_embeddings_model()
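

# Minimal usage sketch (not part of the original app wiring): the URL below is only a
# hypothetical example, and the flow assumes a CUDA device is available, since the
# models above are loaded on GPU.
if __name__ == '__main__':
    example_url = 'https://es.wikipedia.org/wiki/Procesamiento_de_lenguajes_naturales'  # hypothetical example
    page_text = extract_text(example_url)
    print(summarize_text(page_text))

    question = generate_question(page_text)
    print(question)
    print(get_answer_context())

    # answer_question builds the FAISS index on first use
    print(answer_question(question))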