Uhhy committed
Commit 88ae463
1 Parent(s): beeb26c

Create app.py

Files changed (1):
  app.py (+246, -0)
app.py ADDED
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from llama_cpp import Llama
from concurrent.futures import ThreadPoolExecutor, as_completed
from tqdm import tqdm
import uvicorn
from dotenv import load_dotenv
from difflib import SequenceMatcher
import re
import logging
import os
from cachetools import TTLCache
from multiprocessing import cpu_count
import queue

# Logging configuration: suppress anything below ERROR to keep startup output clean
logging.basicConfig(level=logging.ERROR)

# Load environment variables
load_dotenv()

# Initialize the FastAPI application
app = FastAPI()

# Cache configuration
cache_size = 2000
cache_ttl = 7200
cache = TTLCache(maxsize=cache_size, ttl=cache_ttl)

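# The TTLCache holds up to 2000 generated responses, each for 7200 seconds (2 hours).
# Entries are keyed by message, sampling parameters and model name (see generate_chat_response),
# so repeated identical requests are answered without re-running inference.
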
# Global dictionary holding the loaded models in RAM
global_data = {
    'models': {}
}

# Model configuration
model_configs = [
    {"repo_id": "Ffftdtd5dtft/gpt2-xl-Q2_K-GGUF", "filename": "gpt2-xl-q2_k.gguf", "name": "GPT-2 XL"},
    {"repo_id": "Ffftdtd5dtft/Meta-Llama-3.1-8B-Instruct-Q2_K-GGUF", "filename": "meta-llama-3.1-8b-instruct-q2_k.gguf", "name": "Meta Llama 3.1-8B Instruct"},
    {"repo_id": "Ffftdtd5dtft/gemma-2-9b-it-Q2_K-GGUF", "filename": "gemma-2-9b-it-q2_k.gguf", "name": "Gemma 2-9B IT"},
    {"repo_id": "Ffftdtd5dtft/gemma-2-27b-Q2_K-GGUF", "filename": "gemma-2-27b-q2_k.gguf", "name": "Gemma 2-27B"},
    {"repo_id": "Ffftdtd5dtft/Phi-3-mini-128k-instruct-Q2_K-GGUF", "filename": "phi-3-mini-128k-instruct-q2_k.gguf", "name": "Phi-3 Mini 128K Instruct"},
    {"repo_id": "Ffftdtd5dtft/Meta-Llama-3.1-8B-Q2_K-GGUF", "filename": "meta-llama-3.1-8b-q2_k.gguf", "name": "Meta Llama 3.1-8B"},
    {"repo_id": "Ffftdtd5dtft/Qwen2-7B-Instruct-Q2_K-GGUF", "filename": "qwen2-7b-instruct-q2_k.gguf", "name": "Qwen2 7B Instruct"},
    {"repo_id": "Ffftdtd5dtft/starcoder2-3b-Q2_K-GGUF", "filename": "starcoder2-3b-q2_k.gguf", "name": "Starcoder2 3B"},
    {"repo_id": "Ffftdtd5dtft/Qwen2-1.5B-Instruct-Q2_K-GGUF", "filename": "qwen2-1.5b-instruct-q2_k.gguf", "name": "Qwen2 1.5B Instruct"}
]

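# Each entry references a Q2_K-quantized GGUF build by its Hugging Face Hub repo id;
# Llama.from_pretrained (used below) downloads the file via the Hub cache on first use
# and loads it with llama.cpp.
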
# Model manager class
class ModelManager:
    def __init__(self):
        self.models = {}

    def load_model(self, model_config):
        try:
            model = Llama.from_pretrained(repo_id=model_config['repo_id'], filename=model_config['filename'])
            self.models[model_config['name']] = model
            return model
        except Exception as e:
            logging.error(f"Error loading model {model_config['name']}: {e}")
            return None

    def load_all_models(self):
        with ThreadPoolExecutor(max_workers=min(len(model_configs), cpu_count())) as executor:
            futures = [executor.submit(self.load_model, config) for config in model_configs]
            for future in tqdm(as_completed(futures), total=len(model_configs), desc="Loading models", unit="model"):
                future.result()
        return self.models

# Instantiate the ModelManager and load every model
model_manager = ModelManager()
model_manager.load_all_models()
global_data['models'] = model_manager.models

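# All nine models are kept resident in memory at the same time; RAM use grows with the
# combined size of the GGUF files, so the host must have enough memory for every model at once.
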
# Request schema for the chat endpoints
class ChatRequest(BaseModel):
    message: str
    top_k: int = 50
    top_p: float = 0.95
    temperature: float = 0.7

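# Example request body (only "message" is required; the sampling fields fall back to the defaults above):
# {"message": "Hello, how are you?", "top_k": 50, "top_p": 0.95, "temperature": 0.7}
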
# Generate a chat response with a single model
def generate_chat_response(request: ChatRequest, model_name: str):
    # Key the cache on the message, sampling parameters and model so different settings do not collide
    cache_key = f"{request.message}_{request.top_k}_{request.top_p}_{request.temperature}_{model_name}"

    if cache_key in cache:
        return cache[cache_key]

    model = global_data['models'].get(model_name)
    if not model:
        return {"response": "Error: model not found.", "literal": request.message, "model_name": model_name}

    user_input = normalize_input(request.message)
    try:
        response = model.create_chat_completion(
            messages=[{"role": "user", "content": user_input}],
            top_k=request.top_k,
            top_p=request.top_p,
            temperature=request.temperature
        )
        reply = response['choices'][0]['message']['content']

        # Cache the response
        cache[cache_key] = {"response": reply, "literal": user_input, "model_name": model_name}

        return cache[cache_key]
    except Exception as e:
        logging.error(f"Error generating a response with model {model_name}: {e}")
        return {"response": f"Error: {str(e)}", "literal": user_input, "model_name": model_name}

def normalize_input(input_text):
    return input_text.strip().lower()

def remove_duplicates(text):
    text = re.sub(r'(Hello there, how are you\? \[/INST\]){2,}', 'Hello there, how are you? [/INST]', text)
    text = re.sub(r'(How are you\? \[/INST\]){2,}', 'How are you? [/INST]', text)
    text = text.replace('[/INST]', '')
    lines = text.split('\n')
    unique_lines = list(dict.fromkeys(lines))
    return '\n'.join(unique_lines).strip()

def remove_repetitive_responses(responses):
    seen = set()
    unique_responses = []
    for response in responses:
        normalized_response = remove_duplicates(response['response'])
        if normalized_response not in seen:
            seen.add(normalized_response)
            unique_responses.append(response)
    return unique_responses

def select_best_response(responses):
    responses = remove_repetitive_responses(responses)
    responses = [remove_duplicates(response['response']) for response in responses]
    unique_responses = list(set(responses))
    coherent_responses = filter_by_coherence(unique_responses)
    best_response = filter_by_similarity(coherent_responses)
    return best_response

def filter_by_coherence(responses):
    responses.sort(key=len, reverse=True)
    return responses

def filter_by_similarity(responses):
    if not responses:
        return ""
    best_response = responses[0]
    for i in range(1, len(responses)):
        ratio = SequenceMatcher(None, best_response, responses[i]).ratio()
        if ratio < 0.9:
            best_response = responses[i]
            break
    return best_response

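# Selection heuristic: candidates are deduplicated, sorted longest-first, and the first
# candidate that differs from the longest one by more than 10% (SequenceMatcher ratio < 0.9)
# is returned; if every candidate is near-identical, the longest response wins.
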
def worker_function(model_name, request, response_queue):
    try:
        response = generate_chat_response(request, model_name)
        response_queue.put((model_name, response))
    except Exception as e:
        logging.error(f"Error generating a response with model {model_name}: {e}")
        response_queue.put((model_name, {"response": f"Error: {str(e)}", "literal": request.message, "model_name": model_name}))

@app.post("/generate_chat")
async def generate_chat(request: ChatRequest):
    if not request.message.strip():
        raise HTTPException(status_code=400, detail="The message cannot be empty.")

    responses = []
    num_models = len(global_data['models'])
    response_queue = queue.Queue()

    with ThreadPoolExecutor(max_workers=min(num_models, cpu_count())) as executor:
        futures = [executor.submit(worker_function, model_name, request, response_queue) for model_name in global_data['models']]
        for future in tqdm(as_completed(futures), total=num_models, desc="Generating responses", unit="model"):
            future.result()

    while not response_queue.empty():
        model_name, response = response_queue.get()
        responses.append(response)

    best_response = select_best_response(responses)

    return {
        "best_response": best_response,
        "all_responses": responses
    }

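# Example call against a locally running instance (host/port as configured in the
# uvicorn.run call at the bottom of this file):
#
#   import requests
#   r = requests.post("http://localhost:8000/generate_chat",
#                     json={"message": "Hello, how are you?", "temperature": 0.7})
#   print(r.json()["best_response"])
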
# Keep the models resident in RAM; the weights are already loaded by Llama.from_pretrained,
# so only invoke the (hypothetical) warm-up hook if the binding actually provides it
def pre_load_models():
    for model_name, model in global_data['models'].items():
        if hasattr(model, '_load_model'):
            model._load_model()  # guarded so a missing attribute does not crash startup

pre_load_models()

# Batched model loading with bounded concurrency
def optimize_model_loading():
    batch_size = min(len(model_configs), cpu_count() * 2)
    for i in range(0, len(model_configs), batch_size):
        batch_configs = model_configs[i:i + batch_size]
        with ThreadPoolExecutor(max_workers=batch_size) as executor:
            # Map each future back to its config so results land under the right model name
            future_to_config = {executor.submit(model_manager.load_model, config): config for config in batch_configs}
            for future in tqdm(as_completed(future_to_config), total=len(batch_configs), desc="Optimizing model loading", unit="model"):
                config = future_to_config[future]
                try:
                    model = future.result()
                    if model is not None:
                        global_data['models'][config['name']] = model
                except Exception as e:
                    logging.error(f"Error while optimizing model loading for {config['name']}: {e}")

optimize_model_loading()

# Parallel response generation across all loaded models
def parallelize_response_generation(request: ChatRequest):
    response_queue = queue.Queue()
    with ThreadPoolExecutor(max_workers=min(len(global_data['models']), cpu_count())) as executor:
        futures = [executor.submit(worker_function, model_name, request, response_queue) for model_name in global_data['models']]
        for future in tqdm(as_completed(futures), total=len(futures), desc="Generating responses in parallel", unit="model"):
            future.result()

    responses = []
    while not response_queue.empty():
        # The queue holds (model_name, response) tuples; keep only the response dicts,
        # which is what select_best_response expects
        model_name, response = response_queue.get()
        responses.append(response)
    return responses

@app.post("/generate_chat_parallel")
async def generate_chat_parallel(request: ChatRequest):
    if not request.message.strip():
        raise HTTPException(status_code=400, detail="The message cannot be empty.")

    responses = parallelize_response_generation(request)
    best_response = select_best_response(responses)

    return {
        "best_response": best_response,
        "all_responses": responses
    }

# Reduce memory pressure by forcing a garbage-collection pass
def optimize_memory_usage():
    import gc
    gc.collect()

# Run the FastAPI server
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)