Update app.py
app.py CHANGED
@@ -3,7 +3,7 @@ import gc
 import io
 from llama_cpp import Llama
 from concurrent.futures import ThreadPoolExecutor, as_completed
-from fastapi import FastAPI, Request, HTTPException
+from fastapi import FastAPI, Request, HTTPException, Lifespan
 from fastapi.responses import JSONResponse
 from tqdm import tqdm
 from dotenv import load_dotenv
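Review note on the changed import: current FastAPI releases do not export a `Lifespan` name, so `from fastapi import ..., Lifespan` should raise an ImportError at startup. The supported pattern passes an async context manager to the app via the `lifespan` keyword. A minimal sketch of that pattern (the startup/shutdown comments are placeholders, not code from this Space):

from contextlib import asynccontextmanager
from fastapi import FastAPI

@asynccontextmanager
async def lifespan(app: FastAPI):
    # Startup: e.g. preload models into memory here.
    yield
    # Shutdown: e.g. release model memory here.

app = FastAPI(lifespan=lifespan)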
@@ -17,6 +17,7 @@ import nltk
 import uvicorn
 import psutil
 import torch
+import tempfile
 
 nltk.download('punkt')
 nltk.download('stopwords')
@@ -28,22 +29,29 @@ HUGGINGFACE_TOKEN = os.getenv("HUGGINGFACE_TOKEN")
 if HUGGINGFACE_TOKEN:
     login(token=HUGGINGFACE_TOKEN)
 
-… (15 removed lines; their content is not rendered in this diff view)
-}
+model_configs = [
+    {"repo_id": "Ffftdtd5dtft/gpt2-xl-Q2_K-GGUF", "filename": "gpt2-xl-q2_k.gguf", "name": "GPT-2 XL"},
+    {"repo_id": "Ffftdtd5dtft/Meta-Llama-3.1-8B-Instruct-Q2_K-GGUF", "filename": "meta-llama-3.1-8b-instruct-q2_k.gguf", "name": "Meta Llama 3.1-8B Instruct"},
+    {"repo_id": "Ffftdtd5dtft/gemma-2-9b-it-Q2_K-GGUF", "filename": "gemma-2-9b-it-q2_k.gguf", "name": "Gemma 2-9B IT"},
+    {"repo_id": "Ffftdtd5dtft/gemma-2-27b-Q2_K-GGUF", "filename": "gemma-2-27b-q2_k.gguf", "name": "Gemma 2-27B"},
+    {"repo_id": "Ffftdtd5dtft/Phi-3-mini-128k-instruct-Q2_K-GGUF", "filename": "phi-3-mini-128k-instruct-q2_k.gguf", "name": "Phi-3 Mini 128K Instruct"},
+    {"repo_id": "Ffftdtd5dtft/Meta-Llama-3.1-8B-Q2_K-GGUF", "filename": "meta-llama-3.1-8b-q2_k.gguf", "name": "Meta Llama 3.1-8B"},
+    {"repo_id": "Ffftdtd5dtft/Qwen2-7B-Instruct-Q2_K-GGUF", "filename": "qwen2-7b-instruct-q2_k.gguf", "name": "Qwen2 7B Instruct"},
+    {"repo_id": "Ffftdtd5dtft/starcoder2-3b-Q2_K-GGUF", "filename": "starcoder2-3b-q2_k.gguf", "name": "Starcoder2 3B"},
+    {"repo_id": "Ffftdtd5dtft/Qwen2-1.5B-Instruct-Q2_K-GGUF", "filename": "qwen2-1.5b-instruct-q2_k.gguf", "name": "Qwen2 1.5B Instruct"},
+    {"repo_id": "Ffftdtd5dtft/Meta-Llama-3.1-70B-Q2_K-GGUF", "filename": "meta-llama-3.1-70b-q2_k.gguf", "name": "Meta Llama 3.1-70B"},
+    {"repo_id": "Ffftdtd5dtft/Mistral-Nemo-Instruct-2407-Q2_K-GGUF", "filename": "mistral-nemo-instruct-2407-q2_k.gguf", "name": "Mistral Nemo Instruct 2407"},
+    {"repo_id": "Ffftdtd5dtft/Hermes-3-Llama-3.1-8B-IQ1_S-GGUF", "filename": "hermes-3-llama-3.1-8b-iq1_s-imat.gguf", "name": "Hermes 3 Llama 3.1-8B"},
+    {"repo_id": "Ffftdtd5dtft/Phi-3.5-mini-instruct-Q2_K-GGUF", "filename": "phi-3.5-mini-instruct-q2_k.gguf", "name": "Phi 3.5 Mini Instruct"},
+    {"repo_id": "Ffftdtd5dtft/Meta-Llama-3.1-70B-Instruct-Q2_K-GGUF", "filename": "meta-llama-3.1-70b-instruct-q2_k.gguf", "name": "Meta Llama 3.1-70B Instruct"},
+    {"repo_id": "Ffftdtd5dtft/codegemma-2b-IQ1_S-GGUF", "filename": "codegemma-2b-iq1_s-imat.gguf", "name": "Codegemma 2B"},
+    {"repo_id": "Ffftdtd5dtft/Phi-3-mini-128k-instruct-IQ2_XXS-GGUF", "filename": "phi-3-mini-128k-instruct-iq2_xxs-imat.gguf", "name": "Phi 3 Mini 128K Instruct XXS"},
+    {"repo_id": "Ffftdtd5dtft/TinyLlama-1.1B-Chat-v1.0-IQ1_S-GGUF", "filename": "tinyllama-1.1b-chat-v1.0-iq1_s-imat.gguf", "name": "TinyLlama 1.1B Chat"},
+    {"repo_id": "Ffftdtd5dtft/Mistral-NeMo-Minitron-8B-Base-IQ1_S-GGUF", "filename": "mistral-nemo-minitron-8b-base-iq1_s-imat.gguf", "name": "Mistral NeMo Minitron 8B Base"},
+    {"repo_id": "Ffftdtd5dtft/Mistral-Nemo-Instruct-2407-Q2_K-GGUF", "filename": "mistral-nemo-instruct-2407-q2_k.gguf", "name": "Mistral Nemo Instruct 2407"}
+]
+
+global_data = {'model_configs': model_configs, 'training_data': io.StringIO()}
 
 class ModelManager:
     def __init__(self):
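For context, this diff never shows the code that walks `global_data['model_configs']`; the `ThreadPoolExecutor`/`as_completed` imports at the top of app.py suggest the models are meant to load concurrently. A hypothetical driver along those lines (the `load_model` method name is an assumption, since the diff only shows the method's body):

manager = ModelManager()
with ThreadPoolExecutor(max_workers=4) as pool:
    # One download-and-load task per configured model.
    futures = [pool.submit(manager.load_model, cfg) for cfg in global_data['model_configs']]
    for fut in as_completed(futures):
        fut.result()  # re-raise any exception from a worker thread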
@@ -55,9 +63,12 @@ class ModelManager:
         model_name = config['name']
         if model_name not in self.models:
             try:
-… (3 removed lines; their content is not rendered in this diff view)
+                with tempfile.NamedTemporaryFile(suffix=".gguf", delete=False) as temp_file:
+                    model_path = hf_hub_download(repo_id=config['repo_id'], filename=temp_file.name, use_auth_token=HUGGINGFACE_TOKEN)
+                    model = Llama.from_file(model_path, n_ctx=512, n_gpu=1)
+                    self.models[model_name] = model
+                    print(f"Model '{model_name}' loaded successfully.")
+                os.remove(temp_file.name)
             except Exception as e:
                 print(f"Error loading model {model_name}: {e}")
                 self.models[model_name] = None
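Review note on the block added above: `hf_hub_download` is passed `temp_file.name` (a random temporary path) as the `filename` to fetch, where `config['filename']` was presumably intended, and llama-cpp-python has no `Llama.from_file` constructor (models are loaded with `Llama(model_path=...)`, whose GPU-offload parameter is `n_gpu_layers`, not `n_gpu`). The temporary file is also unnecessary, since `hf_hub_download` already writes to the local Hugging Face cache and returns the resulting path. A minimal corrected sketch under those assumptions:

from huggingface_hub import hf_hub_download
from llama_cpp import Llama

def load_gguf(config, token=None):
    # Fetch the GGUF named in the config (reusing the local HF cache if present).
    model_path = hf_hub_download(
        repo_id=config['repo_id'],
        filename=config['filename'],
        token=token,
    )
    # n_gpu_layers=-1 asks llama.cpp to offload all layers to the GPU.
    return Llama(model_path=model_path, n_ctx=512, n_gpu_layers=-1)

Recent llama-cpp-python releases also provide `Llama.from_pretrained(repo_id=..., filename=...)`, which combines the download and load steps in one call.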