yoel committed
Commit d57909e · 1 Parent(s): 24b78f9

Refactor: remove support for the pretrained model, simplify interfaces, and add flake.nix

Files changed (7)
  1. app.py +3 -6
  2. evaluation.py +5 -10
  3. flake.lock +78 -0
  4. flake.nix +40 -0
  5. generate_models.py +3 -4
  6. models.py +18 -56
  7. shell.nix +0 -19
app.py CHANGED
@@ -10,8 +10,8 @@ etiquetas, num_clases, codigo = cargar_etiquetas()
 test_dataloader = cargar_dataset(codigo)
 
 
-def interface_wrapper(model_file, model_type):
-    return evaluate_interface(model_file, model_type, num_clases, test_dataloader)
+def interface_wrapper(model_file):
+    return evaluate_interface(model_file, num_clases, test_dataloader)
 
 
 # Interfaz de Gradio
@@ -19,12 +19,9 @@ demo = gr.Interface(
     fn=interface_wrapper,
     inputs=[
         gr.File(label="Archivo del modelo (.safetensor)"),
-        gr.Radio(
-            ["pre_entrenado", "desde_cero"], label="Tipo de modelo", value="desde_cero"
-        ),
     ],
     outputs=gr.Textbox(label="Resultado", lines=1),
-    title="Evaluador de modelos pre-entrenado y desde cero",
+    title="Evaluador de modelos",
     description="Carga un archivo .safetensor y evalúa su precisión en el conjunto de datos de evaluación.",
 )
 
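With the model-type selector removed, app.py reduces to a single evaluation path. A minimal sketch of the resulting file, assuming the helper import paths and the demo.launch() entry point, neither of which is shown in the diff:

import gradio as gr

from evaluation import evaluate_interface  # assumed import path
from utils import cargar_etiquetas, cargar_dataset  # assumed import path

etiquetas, num_clases, codigo = cargar_etiquetas()
test_dataloader = cargar_dataset(codigo)


def interface_wrapper(model_file):
    # Every uploaded checkpoint is now evaluated as a FromZero model.
    return evaluate_interface(model_file, num_clases, test_dataloader)


# Interfaz de Gradio
demo = gr.Interface(
    fn=interface_wrapper,
    inputs=[gr.File(label="Archivo del modelo (.safetensor)")],
    outputs=gr.Textbox(label="Resultado", lines=1),
    title="Evaluador de modelos",
    description="Carga un archivo .safetensor y evalúa su precisión en el conjunto de datos de evaluación.",
)

if __name__ == "__main__":
    demo.launch()  # assumed entry point, not part of this diff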
evaluation.py CHANGED
@@ -1,15 +1,12 @@
 import torch
 from safetensors.torch import load_model
-from models import FromZero, PreTrained
+from models import FromZero
 from utils import multiclass_accuracy
 
 
-def cargar_evaluar_modelo(archivo, tipo_modelo, num_clases, test_dataloader):
+def cargar_evaluar_modelo(archivo, num_clases, test_dataloader):
     try:
-        if tipo_modelo == "pre_entrenado":
-            modelo = PreTrained(num_clases)
-        elif tipo_modelo == "desde_cero":
-            modelo = FromZero(num_clases)
+        modelo = FromZero(num_clases)
 
         load_model(modelo, archivo)
         modelo.eval()
@@ -30,7 +27,7 @@ def cargar_evaluar_modelo(archivo, tipo_modelo, num_clases, test_dataloader):
         return f"Error: {str(e)}"
 
 
-def evaluate_interface(model_file, model_type, num_clases, test_dataloader):
+def evaluate_interface(model_file, num_clases, test_dataloader):
     if model_file is None:
         return "Por favor, carga un archivo .safetensor"
 
@@ -41,9 +38,7 @@ def evaluate_interface(model_file, model_type, num_clases, test_dataloader):
         return "Por favor, carga un archivo con extensión .safetensor o .safetensors"
 
     # Evaluamos el modelo
-    accuracy = cargar_evaluar_modelo(
-        model_file.name, model_type, num_clases, test_dataloader
-    )
+    accuracy = cargar_evaluar_modelo(model_file.name, num_clases, test_dataloader)
 
     if isinstance(accuracy, float):
         return f"Precisión del modelo: {accuracy*100:.2f}%"
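With the tipo_modelo argument gone, the evaluator can also be exercised outside Gradio with nothing but a checkpoint path and the test dataloader. A hedged sketch; the checkpoint path matches what generate_models.py writes, and the utils helpers are assumed from app.py:

from evaluation import cargar_evaluar_modelo
from utils import cargar_etiquetas, cargar_dataset  # assumed helpers, as used by app.py

_, num_clases, codigo = cargar_etiquetas()
test_dataloader = cargar_dataset(codigo)

# Returns a float accuracy on success, or an "Error: ..." string on failure.
accuracy = cargar_evaluar_modelo(
    "model_test/from_zero_model.safetensor", num_clases, test_dataloader
)
print(accuracy)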
flake.lock ADDED
@@ -0,0 +1,78 @@
+{
+  "nodes": {
+    "flake-utils": {
+      "inputs": {
+        "systems": "systems"
+      },
+      "locked": {
+        "lastModified": 1731533236,
+        "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
+        "owner": "numtide",
+        "repo": "flake-utils",
+        "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
+        "type": "github"
+      },
+      "original": {
+        "owner": "numtide",
+        "repo": "flake-utils",
+        "type": "github"
+      }
+    },
+    "nixpkgs": {
+      "locked": {
+        "lastModified": 1753749649,
+        "narHash": "sha256-+jkEZxs7bfOKfBIk430K+tK9IvXlwzqQQnppC2ZKFj4=",
+        "owner": "NixOS",
+        "repo": "nixpkgs",
+        "rev": "1f08a4df998e21f4e8be8fb6fbf61d11a1a5076a",
+        "type": "github"
+      },
+      "original": {
+        "owner": "NixOS",
+        "ref": "nixos-25.05",
+        "repo": "nixpkgs",
+        "type": "github"
+      }
+    },
+    "root": {
+      "inputs": {
+        "flake-utils": "flake-utils",
+        "nixpkgs": "nixpkgs",
+        "unstable": "unstable"
+      }
+    },
+    "systems": {
+      "locked": {
+        "lastModified": 1681028828,
+        "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
+        "owner": "nix-systems",
+        "repo": "default",
+        "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
+        "type": "github"
+      },
+      "original": {
+        "owner": "nix-systems",
+        "repo": "default",
+        "type": "github"
+      }
+    },
+    "unstable": {
+      "locked": {
+        "lastModified": 1753694789,
+        "narHash": "sha256-cKgvtz6fKuK1Xr5LQW/zOUiAC0oSQoA9nOISB0pJZqM=",
+        "owner": "NixOS",
+        "repo": "nixpkgs",
+        "rev": "dc9637876d0dcc8c9e5e22986b857632effeb727",
+        "type": "github"
+      },
+      "original": {
+        "owner": "NixOS",
+        "ref": "nixos-unstable",
+        "repo": "nixpkgs",
+        "type": "github"
+      }
+    }
+  },
+  "root": "root",
+  "version": 7
+}
flake.nix ADDED
@@ -0,0 +1,40 @@
+{
+  description = "Flake para un entorno Jupyter + IA + Fish 💻🐟";
+
+  inputs = {
+    nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.05";
+    unstable.url = "github:NixOS/nixpkgs/nixos-unstable";
+    flake-utils.url = "github:numtide/flake-utils";
+  };
+
+  outputs = {
+    self,
+    nixpkgs,
+    unstable,
+    flake-utils,
+  }:
+    flake-utils.lib.eachDefaultSystem (system: let
+      pkgs = import nixpkgs {inherit system;};
+      unpkgs = import unstable {inherit system;};
+    in {
+      devShells.default = pkgs.mkShell {
+        name = "impurejupyterenv";
+
+        buildInputs = [
+          (pkgs.python3.withPackages (ps:
+            with ps; [
+              gradio
+              torch
+              torchvision
+              safetensors
+              torchaudio
+              datasets
+            ]))
+        ];
+
+        shellHook = ''
+          exec fish
+        '';
+      };
+    });
+}
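With flake.nix and the flake.lock pins above (nixos-25.05, nixos-unstable, flake-utils), the deleted shell.nix is presumably replaced by running `nix develop` (flakes enabled), which builds the same Python environment and drops into fish via the shellHook.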
generate_models.py CHANGED
@@ -1,20 +1,19 @@
 import os
 import torch
-from models import FromZero, PreTrained
+from models import FromZero
 from safetensors.torch import save_model
 from utils import cargar_etiquetas
 
+
 def main():
     # Crear la carpeta model_test si no existe
     os.makedirs("model_test", exist_ok=True)
-    _,num_classes,_ = cargar_etiquetas()
+    _, num_classes, _ = cargar_etiquetas()
     # Crear instancias de los modelos
     from_zero_model = FromZero(num_classes=num_classes)
-    pretrained_model = PreTrained(num_classes=num_classes)
 
     # Guardar los modelos
     save_model(from_zero_model, "model_test/from_zero_model.safetensor")
-    save_model(pretrained_model, "model_test/pretrained_model.safetensor")
 
     print("Los modelos han sido creados y guardados en la carpeta 'model_test'")
 
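Since generate_models.py now only writes the FromZero checkpoint, a quick round-trip check (not part of the commit) is to load the file back with safetensors and confirm the state dict still matches the model definition:

from safetensors.torch import load_model
from models import FromZero
from utils import cargar_etiquetas

_, num_classes, _ = cargar_etiquetas()
modelo = FromZero(num_classes=num_classes)
load_model(modelo, "model_test/from_zero_model.safetensor")  # raises if keys or shapes mismatch
print("El checkpoint carga correctamente")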
models.py CHANGED
@@ -1,4 +1,5 @@
-import torch
+
+import torch
 import torch.nn as nn
 from torchvision import models
 
@@ -15,67 +16,44 @@ class Stem(nn.Module):
         x = self.conv(x)
         return x
 
-
 class ResidualBlock(nn.Module):
     def __init__(self, in_channels, out_channels, stride=1):
-        super(ResidualBlock, self).__init__()
+        super().__init__()
         self.conv1 = nn.Sequential(
-            nn.Conv2d(in_channels, out_channels // 4, stride=1, kernel_size=1),
-            nn.BatchNorm2d(out_channels // 4),
-            nn.ReLU(inplace=True),
+            nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False),
+            nn.BatchNorm2d(out_channels),
+            nn.LeakyReLU(inplace=True),
         )
         self.conv2 = nn.Sequential(
-            nn.Conv2d(
-                out_channels // 4,
-                out_channels // 4,
-                stride=stride,
-                kernel_size=3,
-                padding=1,
-            ),
-            nn.BatchNorm2d(out_channels // 4),
-            nn.ReLU(inplace=True),
-        )
-
-        self.conv3 = nn.Sequential(
-            nn.Conv2d(out_channels // 4, out_channels, kernel_size=1, stride=1),
+            nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False),
             nn.BatchNorm2d(out_channels),
         )
 
         self.shortcut = (
             nn.Identity()
-            if in_channels == out_channels
+            if in_channels == out_channels and stride == 1
             else nn.Sequential(
-                nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride),
+                nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False),
                 nn.BatchNorm2d(out_channels),
             )
         )
 
-        self.relu = nn.ReLU(inplace=True)
+        self.act = nn.LeakyReLU(inplace=True)
 
     def forward(self, x):
         identity = self.shortcut(x)
         x = self.conv1(x)
         x = self.conv2(x)
-        x = self.conv3(x)
         x += identity
-        x = self.relu(x)
-        return x
-
-
-def make_layer(in_channels, out_channels, block, num_blocks):
-    layers = []
-    for i in range(num_blocks):
-        layers.append(block(in_channels, out_channels))
-        in_channels = out_channels
-
-    return layers
-
+        return self.act(x)
 
 class FromZero(nn.Module):
     def __init__(self, num_classes=10):
         super(FromZero, self).__init__()
         self.stem = Stem()
-        self.layer1 = nn.Sequential(*make_layer(64, 64, ResidualBlock, 2))
+        self.layer1 = nn.Sequential(
+            ResidualBlock(64, 64), ResidualBlock(64, 64)
+        )
         self.layer2 = nn.Sequential(
             ResidualBlock(64, 128, stride=2), ResidualBlock(128, 128)
         )
@@ -84,12 +62,14 @@ class FromZero(nn.Module):
         )
         self.layer4 = nn.Sequential(
             ResidualBlock(256, 512, stride=2), ResidualBlock(512, 512)
-        )
+        ,nn.Dropout(0.2))
 
         self.flatten = nn.Flatten()
         self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
-        self.fc = nn.Linear(512, num_classes)
+        self.fc = nn.Sequential(
 
+            nn.Linear(512, num_classes),
+        )
     def forward(self, x):
         x = self.stem(x)
         x = self.layer1(x)
@@ -101,21 +81,3 @@ class FromZero(nn.Module):
         x = self.fc(x)
         return x
 
-
-class PreTrained(nn.Module):
-    def __init__(self, num_classes):
-        super().__init__()
-        self.model = models.resnet18(
-            weights=models.ResNet18_Weights.IMAGENET1K_V1, progress=True
-        )
-        for param in self.model.parameters():
-            param.requires_grad = False
-
-        self.model.fc = nn.Sequential(
-            nn.Linear(self.model.fc.in_features, 512),
-            nn.ReLU(inplace=True),
-            nn.Linear(512, num_classes),
-        )
-
-    def forward(self, x):
-        return self.model(x)
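The reworked ResidualBlock uses two 3x3 convolutions with LeakyReLU and a stride-aware shortcut, and FromZero keeps the visible 64 -> 128 -> 256 -> 512 channel progression, so a dummy forward pass is an easy sanity check. A sketch assuming a CIFAR-style 3x32x32 input; the real input size depends on utils.cargar_dataset, which this commit does not touch:

import torch
from models import FromZero

modelo = FromZero(num_classes=10)
modelo.eval()
with torch.no_grad():
    salida = modelo(torch.randn(1, 3, 32, 32))  # dummy batch of one image
print(salida.shape)  # expected: torch.Size([1, 10])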
shell.nix DELETED
@@ -1,19 +0,0 @@
-{ pkgs ? import <nixpkgs> {} }:
-
-pkgs.mkShell {
-  buildInputs = [
-    (pkgs.python3.withPackages(ps: with ps; [
-      datasets
-      torch
-      torchvision
-      torchaudio
-      safetensors
-      gradio
-    ]))
-
-  ];
-
-  shellHook = ''
-    exec fish
-  '';
-}