Commit 237774d
Parent(s): 72a2b4c
Add implementation of a development environment and model loading with accuracy evaluation
Files changed:
- .gitignore +177 -0
- app.py +14 -216
- dataset.py +39 -0
- evaluation.py +49 -0
- generate_models.py +23 -0
- models.py +121 -0
- shell.nix +19 -0
- utils.py +23 -0
.gitignore
ADDED
@@ -0,0 +1,177 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# UV
+# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+#uv.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
+.pdm.toml
+.pdm-python
+.pdm-build/
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
+
+# Ruff stuff:
+.ruff_cache/
+
+# PyPI configuration file
+.pypirc
+
+# Model test directory
+model_test/
app.py
CHANGED
@@ -7,229 +7,26 @@ from torch.utils.data import Dataset, DataLoader
 from torchvision import transforms
 from safetensors.torch import load_model
 from datasets import load_dataset
-
-
-#
-class Stem(nn.Module):
-    def __init__(self):
-        super(Stem, self).__init__()
-        self.conv = nn.Sequential(
-            nn.Conv2d(3, 64, kernel_size=7, stride=2),
-            nn.MaxPool2d(kernel_size=3, stride=2),
-        )
-
-    def forward(self, x):
-        x = self.conv(x)
-        return x
-
-
-class ResidualBlock(nn.Module):
-    def __init__(self, in_channels, out_channels, stride=1):
-        super(ResidualBlock, self).__init__()
-        self.conv1 = nn.Sequential(
-            nn.Conv2d(in_channels, out_channels // 4, stride=1, kernel_size=1),
-            nn.BatchNorm2d(out_channels // 4),
-            nn.ReLU(inplace=True),
-        )
-        self.conv2 = nn.Sequential(
-            nn.Conv2d(
-                out_channels // 4,
-                out_channels // 4,
-                stride=stride,
-                kernel_size=3,
-                padding=1,
-            ),
-            nn.BatchNorm2d(out_channels // 4),
-            nn.ReLU(inplace=True),
-        )
-
-        self.conv3 = nn.Sequential(
-            nn.Conv2d(out_channels // 4, out_channels, kernel_size=1, stride=1),
-            nn.BatchNorm2d(out_channels),
-        )
-
-        self.shortcut = (
-            nn.Identity()
-            if in_channels == out_channels
-            else nn.Sequential(
-                nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride),
-                nn.BatchNorm2d(out_channels),
-            )
-        )
-
-        self.relu = nn.ReLU(inplace=True)
-
-    def forward(self, x):
-        identity = self.shortcut(x)
-        x = self.conv1(x)
-        x = self.conv2(x)
-        x = self.conv3(x)
-        x += identity
-        x = self.relu(x)
-        return x
-
-
-def make_layer(in_channels, out_channels, block, num_blocks):
-    layers = []
-    for i in range(num_blocks):
-        layers.append(block(in_channels, out_channels))
-        in_channels = out_channels
-
-    return layers
-
-
-class FromZero(nn.Module):
-    def __init__(self, num_classes=10):
-        super(FromZero, self).__init__()
-        self.stem = Stem()
-        self.layer1 = nn.Sequential(*make_layer(64, 64, ResidualBlock, 2))
-        self.layer2 = nn.Sequential(
-            ResidualBlock(64, 128, stride=2), ResidualBlock(128, 128)
-        )
-        self.layer3 = nn.Sequential(
-            ResidualBlock(128, 256, stride=2), ResidualBlock(256, 256)
-        )
-        self.layer4 = nn.Sequential(
-            ResidualBlock(256, 512, stride=2), ResidualBlock(512, 512)
-        )
-
-        self.flatten = nn.Flatten()
-        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
-        self.fc = nn.Linear(512, num_classes)
-
-    def forward(self, x):
-        x = self.stem(x)
-        x = self.layer1(x)
-        x = self.layer2(x)
-        x = self.layer3(x)
-        x = self.layer4(x)
-        x = self.avgpool(x)
-        x = self.flatten(x)
-        x = self.fc(x)
-        return x
-
-
-class PreTrained(nn.Module):
-    def __init__(self, num_classes):
-        super().__init__()
-        self.model = models.resnet18(
-            weights=models.ResNet18_Weights.IMAGENET1K_V1, progress=True
-        )
-        for param in self.model.parameters():
-            param.requires_grad = False
-
-        self.model.fc = nn.Sequential(
-            nn.Linear(self.model.fc.in_features, 512),
-            nn.ReLU(inplace=True),
-            nn.Linear(512, num_classes),
-        )
-
-    def forward(self, x):
-        return self.model(x)
-
-
-with open("etiquetas.txt", "r") as f:
-    etiquetas = f.read().splitlines()[1:]
-num_clases = len(etiquetas)
-codigo = {etiqueta.lower(): i for i, etiqueta in enumerate(etiquetas)}
-
-
-def codificar_etiqueta(etiqueta):
-    return codigo[etiqueta]
-
-import os
-
-key = os.environ.get("HFKEY")
-dataset = load_dataset(
-    "minoruskore/elementosparaevaluarclases", split="train",
-    token=key
-)
-
-
-class imagenDataset(Dataset):
-    def __init__(self, dt, transform):
-        self.dt = dt
-        self.tr = transform
-
-    def __len__(self):
-        return len(self.dt)
-
-    def __getitem__(self, idx):
-        row = self.dt[idx]
-        imagen = row["image"].convert("RGB")
-        label = row["etiqueta"].lower()
-        label = codificar_etiqueta(label)
-        imagen = self.tr(imagen)
-        return imagen, label
-
-
-tr = transforms.Compose([transforms.Resize([256, 256]), transforms.ToTensor()])
-test_dataset = imagenDataset(dataset, transform=tr)
-cpus = os.cpu_count()
-test_dataloader = DataLoader(test_dataset, batch_size=500, num_workers=cpus)
-
-
-def multiclass_accuracy(predictions, labels):
-
-    # Obtén las clases predichas (la clase con la mayor probabilidad)
-    _, predicted_classes = torch.max(predictions, 1)
-
-    # Compara las clases predichas con las etiquetas verdaderas
-    correct_predictions = (predicted_classes == labels).sum().item()
-
-    # Calcula la precisión
-    accuracy = correct_predictions / labels.size(0)
-
-    return accuracy
-
-
-def cargar_evaluar_modelo(archivo, tipo_modelo):
-    try:
-        if tipo_modelo == "tarea_7":
-            modelo = FromZero(num_clases)
-
-        elif tipo_modelo == "tarea_8":
-            modelo = PreTrained(num_clases)
-
-        load_model(modelo, archivo)
-        modelo.eval()
-        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-        modelo.to(device)
-        accuracy = 0
-        with torch.no_grad():
-            for imagenes, etiquetas in test_dataloader:
-                imagenes = imagenes.to(device)
-                etiquetas = etiquetas.to(device)
-                predictions = modelo(imagenes)
-                accuracy += multiclass_accuracy(predictions, etiquetas)
-        accuracy = accuracy / len(test_dataloader)
-        return accuracy
-    except Exception as e:
-        return f"Error: {str(e)}"
-
-
-def evaluate_interface(model_file, model_type):
-    if model_file is None:
-        return "Por favor, carga un archivo .safetensor"
-
-    # Verificamos que el archivo sea .safetensor
-    if not model_file.name.endswith(".safetensor"):
-        return "Por favor, carga un archivo con extensión .safetensor"
-
-    # Evaluamos el modelo
-    accuracy = cargar_evaluar_modelo(
-        model_file.name,
-        model_type,
-    )
-
-    if isinstance(accuracy, float):
-        return f"Precisión del modelo: {accuracy*100:.2f}%"
-    else:
-        return accuracy
+from models import FromZero, PreTrained
+
+from utils import cargar_etiquetas
+from dataset import cargar_dataset
+from evaluation import evaluate_interface
+
+# Cargar etiquetas
+etiquetas, num_clases, codigo = cargar_etiquetas()
+
+# Cargar dataset
+test_dataloader = cargar_dataset(codigo)
+
+
+def interface_wrapper(model_file, model_type):
+    return evaluate_interface(model_file, model_type, num_clases, test_dataloader)
+
+
+# Interfaz de Gradio
 demo = gr.Interface(
-    fn=
+    fn=interface_wrapper,
     inputs=[
         gr.File(label="Archivo del modelo (.safetensor)"),
         gr.Radio(["tarea_7", "tarea_8"], label="Tipo de modelo", value="tarea_7"),
@@ -239,4 +36,5 @@ demo = gr.Interface(
     description="Carga un archivo .safetensor de la tarea 7 o 8 y evalúa su precisión en el conjunto de datos de evaluación.",
 )
 
-
+if __name__ == "__main__":
+    demo.launch()
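For reviewers who want to exercise the refactored entry point without the web UI, a minimal sketch (not part of the commit) is shown below. It assumes etiquetas.txt, an HFKEY environment variable with access to the private dataset, and a checkpoint produced by generate_models.py; it fakes Gradio's uploaded-file object with a SimpleNamespace carrying only the .name attribute that evaluate_interface reads.

from types import SimpleNamespace

import app  # importing app loads the labels, builds the DataLoader and the Interface

# Hypothetical checkpoint path; any .safetensor file saved from these models works.
fake_upload = SimpleNamespace(name="model_test/pretrained_model.safetensor")
print(app.interface_wrapper(fake_upload, "tarea_7"))
# -> "Precisión del modelo: XX.XX%" on success, or an error string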
dataset.py
ADDED
@@ -0,0 +1,39 @@
+import os
+import torch
+from torch.utils.data import Dataset, DataLoader
+from torchvision import transforms
+from datasets import load_dataset
+
+
+class ImagenDataset(Dataset):
+    def __init__(self, dt, transform, codigo_etiquetas):
+        self.dt = dt
+        self.tr = transform
+        self.codigo = codigo_etiquetas
+
+    def __len__(self):
+        return len(self.dt)
+
+    def __getitem__(self, idx):
+        row = self.dt[idx]
+        imagen = row["image"].convert("RGB")
+        label = row["etiqueta"].lower()
+        label = self.codigo[label]
+        imagen = self.tr(imagen)
+        return imagen, label
+
+
+def cargar_dataset(codigo_etiquetas):
+    key = os.environ.get("HFKEY")
+    dataset = load_dataset(
+        "minoruskore/elementosparaevaluarclases", split="train", token=key
+    )
+
+    tr = transforms.Compose([transforms.Resize([256, 256]), transforms.ToTensor()])
+    test_dataset = ImagenDataset(
+        dataset, transform=tr, codigo_etiquetas=codigo_etiquetas
+    )
+    cpus = os.cpu_count()
+    test_dataloader = DataLoader(test_dataset, batch_size=500, num_workers=cpus)
+
+    return test_dataloader
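A usage sketch (assumptions: HFKEY is set with access to the private dataset and etiquetas.txt is present) showing how cargar_dataset is meant to be fed the codigo mapping from utils.cargar_etiquetas, exactly as app.py does:

from utils import cargar_etiquetas
from dataset import cargar_dataset

etiquetas, num_clases, codigo = cargar_etiquetas()  # label list and lowercase->index map
test_dataloader = cargar_dataset(codigo)            # batches of (image, encoded label)

imagenes, labels = next(iter(test_dataloader))
print(imagenes.shape)  # (batch, 3, 256, 256) after Resize + ToTensor
print(labels[:5])      # integer labels produced by the codigo mapping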
evaluation.py
ADDED
@@ -0,0 +1,49 @@
+import torch
+from safetensors.torch import load_model
+from models import FromZero, PreTrained
+from utils import multiclass_accuracy
+
+
+def cargar_evaluar_modelo(archivo, tipo_modelo, num_clases, test_dataloader):
+    try:
+        if tipo_modelo == "tarea_7":
+            modelo = PreTrained(num_clases)
+        elif tipo_modelo == "tarea_8":
+            modelo = FromZero(num_clases)
+
+        load_model(modelo, archivo)
+        modelo.eval()
+        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        modelo.to(device)
+        accuracy = 0
+
+        with torch.no_grad():
+            for imagenes, etiquetas in test_dataloader:
+                imagenes = imagenes.to(device)
+                etiquetas = etiquetas.to(device)
+                predictions = modelo(imagenes)
+                accuracy += multiclass_accuracy(predictions, etiquetas)
+
+        accuracy = accuracy / len(test_dataloader)
+        return accuracy
+    except Exception as e:
+        return f"Error: {str(e)}"
+
+
+def evaluate_interface(model_file, model_type, num_clases, test_dataloader):
+    if model_file is None:
+        return "Por favor, carga un archivo .safetensor"
+
+    # Verificamos que el archivo sea .safetensor
+    if not model_file.name.endswith(".safetensor"):
+        return "Por favor, carga un archivo con extensión .safetensor"
+
+    # Evaluamos el modelo
+    accuracy = cargar_evaluar_modelo(
+        model_file.name, model_type, num_clases, test_dataloader
+    )
+
+    if isinstance(accuracy, float):
+        return f"Precisión del modelo: {accuracy*100:.2f}%"
+    else:
+        return accuracy
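A sketch of calling the evaluator directly, bypassing the Gradio layer (assumptions: a checkpoint from generate_models.py exists and the same etiquetas.txt/HFKEY prerequisites as above are met):

from utils import cargar_etiquetas
from dataset import cargar_dataset
from evaluation import cargar_evaluar_modelo

_, num_clases, codigo = cargar_etiquetas()
test_dataloader = cargar_dataset(codigo)

# As wired above, "tarea_7" instantiates PreTrained and "tarea_8" instantiates FromZero.
acc = cargar_evaluar_modelo(
    "model_test/pretrained_model.safetensor", "tarea_7", num_clases, test_dataloader
)
print(acc)  # float accuracy on success, "Error: ..." string otherwise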
generate_models.py
ADDED
@@ -0,0 +1,23 @@
+import os
+import torch
+from models import FromZero, PreTrained
+from safetensors.torch import save_model
+from utils import cargar_etiquetas
+
+def main():
+    # Crear la carpeta model_test si no existe
+    os.makedirs("model_test", exist_ok=True)
+    _,num_classes,_ = cargar_etiquetas()
+    # Crear instancias de los modelos
+    from_zero_model = FromZero(num_classes=num_classes)
+    pretrained_model = PreTrained(num_classes=num_classes)
+
+    # Guardar los modelos
+    save_model(from_zero_model, "model_test/from_zero_model.safetensor")
+    save_model(pretrained_model, "model_test/pretrained_model.safetensor")
+
+    print("Los modelos han sido creados y guardados en la carpeta 'model_test'")
+
+
+if __name__ == "__main__":
+    main()
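The .safetensor files written here are meant to round-trip through safetensors' load_model, which is what evaluation.py relies on. A sketch (assuming the script has been run once and etiquetas.txt exists):

from safetensors.torch import load_model
from models import PreTrained
from utils import cargar_etiquetas

_, num_classes, _ = cargar_etiquetas()
model = PreTrained(num_classes)
load_model(model, "model_test/pretrained_model.safetensor")  # weights back into the module
model.eval()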
models.py
ADDED
@@ -0,0 +1,121 @@
+import torch
+import torch.nn as nn
+from torchvision import models
+
+
+class Stem(nn.Module):
+    def __init__(self):
+        super(Stem, self).__init__()
+        self.conv = nn.Sequential(
+            nn.Conv2d(3, 64, kernel_size=7, stride=2),
+            nn.MaxPool2d(kernel_size=3, stride=2),
+        )
+
+    def forward(self, x):
+        x = self.conv(x)
+        return x
+
+
+class ResidualBlock(nn.Module):
+    def __init__(self, in_channels, out_channels, stride=1):
+        super(ResidualBlock, self).__init__()
+        self.conv1 = nn.Sequential(
+            nn.Conv2d(in_channels, out_channels // 4, stride=1, kernel_size=1),
+            nn.BatchNorm2d(out_channels // 4),
+            nn.ReLU(inplace=True),
+        )
+        self.conv2 = nn.Sequential(
+            nn.Conv2d(
+                out_channels // 4,
+                out_channels // 4,
+                stride=stride,
+                kernel_size=3,
+                padding=1,
+            ),
+            nn.BatchNorm2d(out_channels // 4),
+            nn.ReLU(inplace=True),
+        )
+
+        self.conv3 = nn.Sequential(
+            nn.Conv2d(out_channels // 4, out_channels, kernel_size=1, stride=1),
+            nn.BatchNorm2d(out_channels),
+        )
+
+        self.shortcut = (
+            nn.Identity()
+            if in_channels == out_channels
+            else nn.Sequential(
+                nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride),
+                nn.BatchNorm2d(out_channels),
+            )
+        )
+
+        self.relu = nn.ReLU(inplace=True)
+
+    def forward(self, x):
+        identity = self.shortcut(x)
+        x = self.conv1(x)
+        x = self.conv2(x)
+        x = self.conv3(x)
+        x += identity
+        x = self.relu(x)
+        return x
+
+
+def make_layer(in_channels, out_channels, block, num_blocks):
+    layers = []
+    for i in range(num_blocks):
+        layers.append(block(in_channels, out_channels))
+        in_channels = out_channels
+
+    return layers
+
+
+class FromZero(nn.Module):
+    def __init__(self, num_classes=10):
+        super(FromZero, self).__init__()
+        self.stem = Stem()
+        self.layer1 = nn.Sequential(*make_layer(64, 64, ResidualBlock, 2))
+        self.layer2 = nn.Sequential(
+            ResidualBlock(64, 128, stride=2), ResidualBlock(128, 128)
+        )
+        self.layer3 = nn.Sequential(
+            ResidualBlock(128, 256, stride=2), ResidualBlock(256, 256)
+        )
+        self.layer4 = nn.Sequential(
+            ResidualBlock(256, 512, stride=2), ResidualBlock(512, 512)
+        )
+
+        self.flatten = nn.Flatten()
+        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
+        self.fc = nn.Linear(512, num_classes)
+
+    def forward(self, x):
+        x = self.stem(x)
+        x = self.layer1(x)
+        x = self.layer2(x)
+        x = self.layer3(x)
+        x = self.layer4(x)
+        x = self.avgpool(x)
+        x = self.flatten(x)
+        x = self.fc(x)
+        return x
+
+
+class PreTrained(nn.Module):
+    def __init__(self, num_classes):
+        super().__init__()
+        self.model = models.resnet18(
+            weights=models.ResNet18_Weights.IMAGENET1K_V1, progress=True
+        )
+        for param in self.model.parameters():
+            param.requires_grad = False
+
+        self.model.fc = nn.Sequential(
+            nn.Linear(self.model.fc.in_features, 512),
+            nn.ReLU(inplace=True),
+            nn.Linear(512, num_classes),
+        )
+
+    def forward(self, x):
+        return self.model(x)
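A quick shape sanity check for both architectures (a sketch, not part of the commit; PreTrained downloads the ImageNet ResNet-18 weights on first use, and num_classes=6 is an arbitrary example value):

import torch
from models import FromZero, PreTrained

x = torch.randn(2, 3, 256, 256)  # batch of two 256x256 RGB images, as produced by dataset.py

print(FromZero(num_classes=6)(x).shape)    # torch.Size([2, 6])
print(PreTrained(num_classes=6)(x).shape)  # torch.Size([2, 6])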
shell.nix
ADDED
@@ -0,0 +1,19 @@
+{ pkgs ? import <nixpkgs> {} }:
+
+pkgs.mkShell {
+  buildInputs = [
+    (pkgs.python3.withPackages(ps: with ps; [
+      datasets
+      torch
+      torchvision
+      torchaudio
+      safetensors
+      gradio
+    ]))
+
+  ];
+
+  shellHook = ''
+    exec fish
+  '';
+}
utils.py
ADDED
@@ -0,0 +1,23 @@
+import torch
+
+
+def cargar_etiquetas():
+    with open("etiquetas.txt", "r") as f:
+        etiquetas = f.read().splitlines()[1:]
+    num_clases = len(etiquetas)
+    codigo = {etiqueta.lower(): i for i, etiqueta in enumerate(etiquetas)}
+
+    return etiquetas, num_clases, codigo
+
+
+def multiclass_accuracy(predictions, labels):
+    # Obtén las clases predichas (la clase con la mayor probabilidad)
+    _, predicted_classes = torch.max(predictions, 1)
+
+    # Compara las clases predichas con las etiquetas verdaderas
+    correct_predictions = (predicted_classes == labels).sum().item()
+
+    # Calcula la precisión
+    accuracy = correct_predictions / labels.size(0)
+
+    return accuracy
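Note that cargar_etiquetas skips the first line of etiquetas.txt, so the file is expected to start with a header row followed by one label per line. A small sanity check of multiclass_accuracy on made-up tensors (a sketch, not part of the commit):

import torch
from utils import multiclass_accuracy

# 3 samples, 4 classes; row-wise argmax is [2, 0, 1]
predictions = torch.tensor(
    [[0.1, 0.2, 0.9, 0.0],
     [0.8, 0.1, 0.0, 0.1],
     [0.2, 0.7, 0.0, 0.1]]
)
labels = torch.tensor([2, 0, 3])  # the third sample is wrong on purpose

print(multiclass_accuracy(predictions, labels))  # 2/3 ≈ 0.6667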