Spaces:
Runtime error
Ffftdtd5dtft
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -2,30 +2,29 @@ import os
 import shutil
 import subprocess
 import signal
- import re
-
- os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
 import gradio as gr
-
- from huggingface_hub import create_repo, HfApi
- from huggingface_hub import snapshot_download
- from huggingface_hub import whoami
- from huggingface_hub import ModelCard
- from huggingface_hub.utils import RepositoryNotFoundError
-
 from gradio_huggingfacehub_search import HuggingfaceHubSearch
-
- from apscheduler.schedulers.background import BackgroundScheduler
-
 from textwrap import dedent

-

 def generate_importance_matrix(model_path, train_data_path):
     imatrix_command = f"./llama-imatrix -m ../{model_path} -f {train_data_path} -ngl 99 --output-frequency 10"
     os.chdir("llama.cpp")
     if not os.path.isfile(f"../{model_path}"):
-        raise
     process = subprocess.Popen(imatrix_command, shell=True)
     try:
         process.wait(timeout=60)
@@ -35,21 +34,26 @@ def generate_importance_matrix(model_path, train_data_path):
         process.wait(timeout=5)
     except subprocess.TimeoutExpired:
         process.kill()
     os.chdir("..")

 def split_upload_model(model_path, repo_id, oauth_token, split_max_tensors=256, split_max_size=None):
-
-
     split_cmd = f"llama.cpp/llama-gguf-split --split --split-max-tensors {split_max_tensors}"
     if split_max_size:
         split_cmd += f" --split-max-size {split_max_size}"
     split_cmd += f" {model_path} {model_path.split('.')[0]}"
     result = subprocess.run(split_cmd, shell=True, capture_output=True, text=True)
     if result.returncode != 0:
-        raise
     sharded_model_files = [f for f in os.listdir('.') if f.startswith(model_path.split('.')[0])]
     if sharded_model_files:
-        api = HfApi(token=oauth_token
         for file in sharded_model_files:
             file_path = os.path.join('.', file)
             try:
@@ -59,142 +63,141 @@ def split_upload_model(model_path, repo_id, oauth_token, split_max_tensors=256,
                     repo_id=repo_id,
                 )
             except Exception as e:
-                raise
     else:
-        raise
-
 def process_model(model_id, q_method, use_imatrix, imatrix_q_method, private_repo, train_data_file, split_model, split_max_tensors, split_max_size, oauth_token):
-
-
     model_name = model_id.split('/')[-1]
     try:
-        api = HfApi(token=
-
-
-
-
-
-
-
-
-
-
-
-
-

-
-
-            if filename.endswith((".bin", ".pt", ".safetensors")):
-                model_file = os.path.join(model_name, filename)
-                break
-        else:
-            raise ValueError("No model file found in the downloaded files.")

-        # Convert to fp16
-        fp16 = f"{model_name}.fp16.gguf"
         conversion_script = "convert_hf_to_gguf.py"
-        fp16_conversion = f"python llama.cpp/{conversion_script} {
         result = subprocess.run(fp16_conversion, shell=True, capture_output=True)
         if result.returncode != 0:
-            raise

-        # Quantization
         imatrix_path = "llama.cpp/imatrix.dat"
         if use_imatrix:
             if train_data_file:
                 train_data_path = train_data_file.name
             else:
                 train_data_path = "groups_merged.txt"
             if not os.path.isfile(train_data_path):
-                raise
             generate_importance_matrix(fp16, train_data_path)
-
-
-        quantized_gguf_name = f"{model_name.lower()}-{quant_method.lower()}-imat.gguf" if use_imatrix else f"{model_name.lower()}-{quant_method.lower()}.gguf"
         quantized_gguf_path = quantized_gguf_name
-
         result = subprocess.run(quantise_ggml, shell=True, capture_output=True)
         if result.returncode != 0:
-            raise

-
-        new_repo_url = api.create_repo(repo_id=f"{username}/{model_name}-{
         new_repo_id = new_repo_url.repo_id
         try:
-            card = ModelCard.load(model_id, token=
-        except
             card = ModelCard("")
         if card.data.tags is None:
             card.data.tags = []
-        card.data.tags.
         card.data.base_model = model_id
         card.text = dedent(
             f"""
 # {new_repo_id}
-
-
-
- ## Use with llama.cpp
- Install llama.cpp through brew (works on Mac and Linux)

 ```bash
 brew install llama.cpp
 ```
-

 ### CLI:
 ```bash
- llama-cli --hf-repo {new_repo_id} --hf-file {quantized_gguf_name} -p "
 ```

- ###
 ```bash
 llama-server --hf-repo {new_repo_id} --hf-file {quantized_gguf_name} -c 2048
 ```

-
- Step 1: Clone llama.cpp from GitHub.
- ```
- git clone https://github.com/ggerganov/llama.cpp
- ```
- Step 2: Move into the llama.cpp folder and build it with `LLAMA_CURL=1` flag along with other hardware-specific flags (for ex: LLAMA_CUDA=1 for Nvidia GPUs on Linux).
- ```
- cd llama.cpp && LLAMA_CURL=1 make
- ```
- Step 3: Run inference through the main binary.
- ```
- ./llama-cli --hf-repo {new_repo_id} --hf-file {quantized_gguf_name} -p "The meaning to life and the universe is"
- ```
- or
- ```
- ./llama-server --hf-repo {new_repo_id} --hf-file {quantized_gguf_name} -c 2048
- ```
 """
         )
         card.save(f"README.md")

         if split_model:
-            split_upload_model(quantized_gguf_path, new_repo_id,
         else:
-
-
-
-
-
         if os.path.isfile(imatrix_path):
-
-
-
-
-
         api.upload_file(
             path_or_fileobj=f"README.md",
             path_in_repo=f"README.md",
             repo_id=new_repo_id,
         )
         return (
-            f'
             "llama.png",
         )
     except Exception as e:
@@ -202,121 +205,32 @@ def process_model(model_id, q_method, use_imatrix, imatrix_q_method, private_rep
     finally:
         shutil.rmtree(model_name, ignore_errors=True)

-
-    .
-
-
- with gr.
-
-
-
-
-            label="
-
-
         )

-
-            ["Q2_K", "Q3_K_S", "Q3_K_M", "Q3_K_L", "Q4_0", "Q4_K_S", "Q4_K_M", "Q5_0", "Q5_K_S", "Q5_K_M", "Q6_K", "Q8_0"],
-            label="Quantization Method",
-            info="GGML quantization type",
-            value="Q4_K_M",
-            filterable=False,
-            visible=True
-        )
-
-        imatrix_q_method = gr.Dropdown(
-            ["IQ3_M", "IQ3_XXS", "Q4_K_M", "Q4_K_S", "IQ4_NL", "IQ4_XS", "Q5_K_M", "Q5_K_S"],
-            label="Imatrix Quantization Method",
-            info="GGML imatrix quants type",
-            value="IQ4_NL",
-            filterable=False,
-            visible=False
-        )
-
-        use_imatrix = gr.Checkbox(
-            value=False,
-            label="Use Imatrix Quantization",
-            info="Use importance matrix for quantization."
-        )
-
-        private_repo = gr.Checkbox(
-            value=False,
-            label="Private Repo",
-            info="Create a private repo under your username."
-        )
-
-        train_data_file = gr.File(
-            label="Training Data File",
-            file_types=["txt"],
-            visible=False
-        )
-
-        split_model = gr.Checkbox(
-            value=False,
-            label="Split Model",
-            info="Shard the model using gguf-split."
-        )
-
-        split_max_tensors = gr.Number(
-            value=256,
-            label="Max Tensors per File",
-            info="Maximum number of tensors per file when splitting model.",
-            visible=False
-        )
-
-        split_max_size = gr.Textbox(
-            label="Max File Size",
-            info="Maximum file size when splitting model (--split-max-size). May leave empty to use the default.",
-            visible=False
-        )
-
-        use_imatrix.change(
-            fn=lambda use_imatrix: {
-                q_method: gr.update(visible=not use_imatrix),
-                imatrix_q_method: gr.update(visible=use_imatrix),
-                train_data_file: gr.update(visible=use_imatrix),
-            },
-            inputs=use_imatrix,
-            outputs=[q_method, imatrix_q_method, train_data_file]
-        )
-
-        split_model.change(
-            fn=lambda split_model: {
-                split_max_tensors: gr.update(visible=split_model),
-                split_max_size: gr.update(visible=split_model),
-            },
-            inputs=split_model,
-            outputs=[split_max_tensors, split_max_size]
-        )
-
-        iface = gr.Interface(
-            fn=process_model,
-            inputs=[
-                model_id,
-                q_method,
-                use_imatrix,
-                imatrix_q_method,
-                private_repo,
-                train_data_file,
-                split_model,
-                split_max_tensors,
-                split_max_size,
-            ],
-            outputs=[
-                gr.Markdown(label="output"),
-                gr.Image(show_label=False),
-            ],
-            title="Create your own GGUF Quants, blazingly fast ⚡!",
-            description="The space takes an HF repo as an input, quantizes it and creates a Public repo containing the selected quant under your HF user namespace.",
-            api_name=False
-        )
-
- def restart_space():
-     HfApi().restart_space(repo_id="ggml-org/gguf-my-repo", token=HF_TOKEN, factory_reboot=True)
-
- scheduler = BackgroundScheduler()
- scheduler.add_job(restart_space, "interval", seconds=21600)
- scheduler.start()
-
- demo.queue(default_concurrency_limit=1, max_size=5).launch(debug=True, show_api=False)
 import shutil
 import subprocess
 import signal
 import gradio as gr
+ from huggingface_hub import create_repo, HfApi, snapshot_download, whoami, ModelCard
 from gradio_huggingfacehub_search import HuggingfaceHubSearch
 from textwrap import dedent

+ # Get the Hugging Face token from the environment
+ HF_TOKEN = os.getenv("HF_TOKEN", "")
+
+ def ensure_valid_token(oauth_token):
+     """Check that the provided token is valid."""
+     if not oauth_token or not oauth_token.strip():
+         raise ValueError("A valid token must be provided.")
+     return oauth_token.strip()

 def generate_importance_matrix(model_path, train_data_path):
+     """Generate the importance matrix using llama-imatrix."""
     imatrix_command = f"./llama-imatrix -m ../{model_path} -f {train_data_path} -ngl 99 --output-frequency 10"
+
     os.chdir("llama.cpp")
+
     if not os.path.isfile(f"../{model_path}"):
+         raise FileNotFoundError(f"Model file not found: {model_path}")
+
     process = subprocess.Popen(imatrix_command, shell=True)
     try:
         process.wait(timeout=60)
         process.wait(timeout=5)
     except subprocess.TimeoutExpired:
         process.kill()
+
     os.chdir("..")

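For illustration only, this is roughly the shell command the f-string above assembles; the file names are placeholders, not part of the commit:

```python
# Illustrative expansion of the imatrix command string (placeholder file names).
model_path = "model.fp16.gguf"          # assumed: output of the fp16 conversion step
train_data_path = "groups_merged.txt"   # assumed: the default calibration file
imatrix_command = f"./llama-imatrix -m ../{model_path} -f {train_data_path} -ngl 99 --output-frequency 10"
print(imatrix_command)
# ./llama-imatrix -m ../model.fp16.gguf -f groups_merged.txt -ngl 99 --output-frequency 10
```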
 def split_upload_model(model_path, repo_id, oauth_token, split_max_tensors=256, split_max_size=None):
+     """Split the model into shards and upload them."""
+     if not oauth_token or not oauth_token.strip():
+         raise ValueError("A valid token must be provided.")
+
     split_cmd = f"llama.cpp/llama-gguf-split --split --split-max-tensors {split_max_tensors}"
     if split_max_size:
         split_cmd += f" --split-max-size {split_max_size}"
     split_cmd += f" {model_path} {model_path.split('.')[0]}"
+
     result = subprocess.run(split_cmd, shell=True, capture_output=True, text=True)
     if result.returncode != 0:
+         raise RuntimeError(f"Error splitting the model: {result.stderr}")
+
     sharded_model_files = [f for f in os.listdir('.') if f.startswith(model_path.split('.')[0])]
     if sharded_model_files:
+         api = HfApi(token=oauth_token)
         for file in sharded_model_files:
             file_path = os.path.join('.', file)
             try:
                     repo_id=repo_id,
                 )
             except Exception as e:
+                 raise RuntimeError(f"Error uploading file {file_path}: {e}")
     else:
+         raise FileNotFoundError("No split files were found.")
+
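As a rough sketch (file names assumed, not from the commit), the split command built above expands like this; note that the shard prefix is everything before the first dot of the input path:

```python
# Illustrative expansion of the gguf-split command (placeholder names).
model_path = "model.fp16.gguf"   # assumed quantized GGUF file
split_max_tensors = 256
split_max_size = None            # e.g. "2G" would cap shard size instead
split_cmd = f"llama.cpp/llama-gguf-split --split --split-max-tensors {split_max_tensors}"
if split_max_size:
    split_cmd += f" --split-max-size {split_max_size}"
split_cmd += f" {model_path} {model_path.split('.')[0]}"
print(split_cmd)
# llama.cpp/llama-gguf-split --split --split-max-tensors 256 model.fp16.gguf model
```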
 def process_model(model_id, q_method, use_imatrix, imatrix_q_method, private_repo, train_data_file, split_model, split_max_tensors, split_max_size, oauth_token):
+     """Process the downloaded model: convert, quantize, and upload it."""
+     token = ensure_valid_token(oauth_token)
+
     model_name = model_id.split('/')[-1]
+     fp16 = f"{model_name}.fp16.gguf"
+
     try:
+         api = HfApi(token=token)
+         dl_pattern = [
+             "*.safetensors", "*.bin", "*.pt", "*.onnx", "*.h5", "*.tflite",
+             "*.ckpt", "*.pb", "*.tar", "*.xml", "*.caffemodel",
+             "*.md", "*.json", "*.model"
+         ]
+
+         pattern = (
+             "*.safetensors"
+             if any(
+                 file.path.endswith(".safetensors")
+                 for file in api.list_repo_tree(
+                     repo_id=model_id,
+                     recursive=True,
+                 )
+             )
+             else "*.bin"
+         )
+         dl_pattern += [pattern]

+         snapshot_download(repo_id=model_id, local_dir=model_name, local_dir_use_symlinks=False, allow_patterns=dl_pattern)
+         print("Model downloaded successfully!")

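For context, the selected pattern is appended to the download allow-list, so for a repository that ships .safetensors weights the list handed to `snapshot_download` would end up roughly as follows (purely illustrative):

```python
# Illustrative final allow_patterns for a repo whose tree contains .safetensors files.
dl_pattern = [
    "*.safetensors", "*.bin", "*.pt", "*.onnx", "*.h5", "*.tflite",
    "*.ckpt", "*.pb", "*.tar", "*.xml", "*.caffemodel",
    "*.md", "*.json", "*.model",
    "*.safetensors",  # the pattern selected by the repo-tree check, appended last
]
```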
         conversion_script = "convert_hf_to_gguf.py"
+         fp16_conversion = f"python llama.cpp/{conversion_script} {model_name} --outtype f16 --outfile {fp16}"
         result = subprocess.run(fp16_conversion, shell=True, capture_output=True)
         if result.returncode != 0:
+             raise RuntimeError(f"Error converting to fp16: {result.stderr}")

         imatrix_path = "llama.cpp/imatrix.dat"
         if use_imatrix:
             if train_data_file:
                 train_data_path = train_data_file.name
             else:
                 train_data_path = "groups_merged.txt"
+
             if not os.path.isfile(train_data_path):
+                 raise FileNotFoundError(f"Training data file not found: {train_data_path}")
+
             generate_importance_matrix(fp16, train_data_path)
+
+         quantized_gguf_name = f"{model_name.lower()}-{imatrix_q_method.lower()}-imat.gguf" if use_imatrix else f"{model_name.lower()}-{q_method.lower()}.gguf"
         quantized_gguf_path = quantized_gguf_name
+
+         quantise_ggml = f"./llama.cpp/llama-quantize {'--imatrix ' + imatrix_path if use_imatrix else ''} {fp16} {quantized_gguf_path} {imatrix_q_method if use_imatrix else q_method}"
+
         result = subprocess.run(quantise_ggml, shell=True, capture_output=True)
         if result.returncode != 0:
+             raise RuntimeError(f"Error during quantization: {result.stderr}")

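To make the quantization step concrete, here is how the command string evaluates in both branches; every name below is assumed for illustration:

```python
# Illustrative expansion of the llama-quantize command (placeholder names).
fp16 = "model.fp16.gguf"
imatrix_path = "llama.cpp/imatrix.dat"
use_imatrix, imatrix_q_method, q_method = True, "IQ4_NL", "Q4_K_M"
quantized_gguf_path = "model-iq4_nl-imat.gguf" if use_imatrix else "model-q4_k_m.gguf"
quantise_ggml = f"./llama.cpp/llama-quantize {'--imatrix ' + imatrix_path if use_imatrix else ''} {fp16} {quantized_gguf_path} {imatrix_q_method if use_imatrix else q_method}"
print(quantise_ggml)
# use_imatrix=True:  ./llama.cpp/llama-quantize --imatrix llama.cpp/imatrix.dat model.fp16.gguf model-iq4_nl-imat.gguf IQ4_NL
# use_imatrix=False: ./llama.cpp/llama-quantize  model.fp16.gguf model-q4_k_m.gguf Q4_K_M
```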
+         username = whoami(token)["name"]
+         new_repo_url = api.create_repo(repo_id=f"{username}/{model_name}-{imatrix_q_method if use_imatrix else q_method}-GGUF", exist_ok=True, private=private_repo)
         new_repo_id = new_repo_url.repo_id
+
         try:
+             card = ModelCard.load(model_id, token=token)
+         except:
             card = ModelCard("")
         if card.data.tags is None:
             card.data.tags = []
+         card.data.tags.append("llama-cpp")
+         card.data.tags.append("gguf-my-repo")
         card.data.base_model = model_id
         card.text = dedent(
             f"""
 # {new_repo_id}
+ This model was converted to GGUF format from [`{model_id}`](https://huggingface.co/{model_id}) using llama.cpp via the GGUF-my-repo space.
+ Refer to the [original model card](https://huggingface.co/{model_id}) for more details on the model.

+ ## Use with llama.cpp
+ Install llama.cpp through brew (works on Mac and Linux)
+
 ```bash
 brew install llama.cpp
 ```
+
+ Invoke the llama.cpp server or the CLI.

 ### CLI:
 ```bash
+ llama-cli --hf-repo {new_repo_id} --hf-file {quantized_gguf_name} -p "The meaning to life and the universe is"
 ```

+ ### Server:
 ```bash
 llama-server --hf-repo {new_repo_id} --hf-file {quantized_gguf_name} -c 2048
 ```

+ Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the llama.cpp repo.
 """
         )
         card.save(f"README.md")

         if split_model:
+             split_upload_model(quantized_gguf_path, new_repo_id, token, split_max_tensors, split_max_size)

         else:
+             try:
+                 api.upload_file(
+                     path_or_fileobj=quantized_gguf_path,
+                     path_in_repo=quantized_gguf_name,
+                     repo_id=new_repo_id,
+                 )
+             except Exception as e:
+                 raise RuntimeError(f"Error uploading the quantized model: {e}")
+
         if os.path.isfile(imatrix_path):
+             try:
+                 api.upload_file(
+                     path_or_fileobj=imatrix_path,
+                     path_in_repo="imatrix.dat",
+                     repo_id=new_repo_id,
+                 )
+             except Exception as e:
+                 raise RuntimeError(f"Error uploading imatrix.dat: {e}")
+
         api.upload_file(
             path_or_fileobj=f"README.md",
             path_in_repo=f"README.md",
             repo_id=new_repo_id,
         )
+
         return (
+             f'Find your repo <a href=\'{new_repo_url}\' target="_blank" style="text-decoration:underline">here</a>',
             "llama.png",
         )
     except Exception as e:
     finally:
         shutil.rmtree(model_name, ignore_errors=True)

+ with gr.Blocks() as app:
+     gr.Markdown("# Model Processing")
+
+     # Input fields for model processing
+     with gr.Row():
+         model_id = gr.Textbox(label="Model ID", placeholder="e.g., user/model_name")
+         q_method = gr.Dropdown(["method1", "method2"], label="Quantization Method")
+         use_imatrix = gr.Checkbox(label="Use Importance Matrix")
+         imatrix_q_method = gr.Dropdown(["methodA", "methodB"], label="Importance Matrix Method")
+         private_repo = gr.Checkbox(label="Private Repo")
+         train_data_file = gr.File(label="Training Data File", type="file")
+         split_model = gr.Checkbox(label="Split Model")
+         split_max_tensors = gr.Number(label="Max Tensors (for splitting)", value=256)
+         split_max_size = gr.Number(label="Max Size (for splitting)", value=None)
+         oauth_token = gr.Textbox(label="Hugging Face Token", type="password")
+
+     # Output fields
+     result = gr.HTML()
+     img = gr.Image()
+
+     # Process button
+     process_button = gr.Button("Process Model")
+     process_button.click(
+         process_model,
+         inputs=[model_id, q_method, use_imatrix, imatrix_q_method, private_repo, train_data_file, split_model, split_max_tensors, split_max_size, oauth_token],
+         outputs=[result, img]
     )

+ app.launch()
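Finally, a minimal sketch of driving the same pipeline without the Gradio UI, assuming the llama.cpp binaries are already built under ./llama.cpp and an HF_TOKEN is exported; the repo id and settings are placeholders, not part of the commit:

```python
import os

# Hypothetical direct call to the process_model function defined above.
html_out, image_out = process_model(
    model_id="user/model_name",   # placeholder source repo
    q_method="Q4_K_M",
    use_imatrix=False,
    imatrix_q_method="IQ4_NL",
    private_repo=False,
    train_data_file=None,
    split_model=False,
    split_max_tensors=256,
    split_max_size=None,
    oauth_token=os.environ["HF_TOKEN"],
)
print(html_out)
```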