taellinglin committed on
Commit 3e23f63 · verified · 1 Parent(s): 86e03ae

Create App.py
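Adds a single-file Gradio app that renders synthetic training data from an uploaded .ttf/.otf font, trains a small CNN + BiLSTM OCR model with CTC loss, runs greedy-decoded predictions on word-strip images, and generates printable grids of random labels.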

Files changed (1)
  1. app.py +342 -0
app.py ADDED
@@ -0,0 +1,342 @@
+ import gradio as gr
+ import torch
+ import torch.nn as nn
+ import torch.optim as optim
+ from torch.utils.data import Dataset, DataLoader
+ from torchvision import transforms
+ from PIL import Image, ImageFont, ImageDraw
+ import numpy as np
+ import os
+ import string
+ import cv2
+ from torchvision.transforms.functional import to_pil_image
+ import matplotlib.pyplot as plt
+ import math
+
+ # --------- Globals --------- #
+ CHARS = string.ascii_uppercase + string.digits
+ CHAR2IDX = {c: i + 1 for i, c in enumerate(CHARS)}
+ CHAR2IDX["<BLANK>"] = 0
+ BLANK_IDX = 0
+ IDX2CHAR = {v: k for k, v in CHAR2IDX.items()}
+ IMAGE_HEIGHT = 32
+ IMAGE_WIDTH = 128
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ font_path = None
+ ocr_model = None
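+ # NOTE: index 0 is reserved for the CTC blank symbol, so real characters
+ # are numbered from 1 and IDX2CHAR is the exact inverse of CHAR2IDX.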
+
+
+ # --------- Dataset --------- #
+ class OCRDataset(Dataset):
+     def __init__(self, font_path, size=1000):
+         self.font = ImageFont.truetype(font_path, 32)
+         self.samples = ["".join(np.random.choice(list(CHARS), np.random.randint(4, 7)))
+                         for _ in range(size)]
+
+         self.transform = transforms.Compose([
+             transforms.Grayscale(),
+             transforms.Resize((IMAGE_HEIGHT, IMAGE_WIDTH)),
+             transforms.ToTensor(),
+             transforms.Normalize((0.5,), (0.5,))
+         ])
+
+     def __len__(self):
+         return len(self.samples)
+
+     def __getitem__(self, idx):
+         text = self.samples[idx]
+         img = self.render_text(text)
+         img = self.transform(img)  # convert PIL to tensor with normalization
+
+         label = torch.tensor([CHAR2IDX[c] for c in text], dtype=torch.long)
+         return img, label
+
+     def render_text(self, text):
+         img = Image.new("L", (IMAGE_WIDTH, IMAGE_HEIGHT), color=255)
+         draw = ImageDraw.Draw(img)
+         bbox = self.font.getbbox(text)
+         w, h = bbox[2] - bbox[0], bbox[3] - bbox[1]
+         draw.text(((IMAGE_WIDTH - w) // 2, (IMAGE_HEIGHT - h) // 2), text, font=self.font, fill=0)
+         return img
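+ # Note: render_text draws directly at (IMAGE_WIDTH, IMAGE_HEIGHT), so the
+ # Resize in the transform is a no-op safeguard and Grayscale keeps the
+ # single "L" channel unchanged.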
+
+
+ # --------- Model --------- #
+ class OCRModel(nn.Module):
+     def __init__(self, num_classes):
+         super().__init__()
+         self.conv = nn.Sequential(
+             nn.Conv2d(1, 32, 3, padding=1), nn.ReLU(), nn.MaxPool2d((2, 2), (2, 1)),  # halves height, shrinks width by 1
+             nn.Conv2d(32, 64, 3, padding=1), nn.ReLU(), nn.MaxPool2d((2, 2), (2, 1))  # halves height again, shrinks width by 1
+         )
+
+         self.rnn = nn.LSTM(64 * 8, 128, bidirectional=True, num_layers=2, batch_first=True)
+         self.fc = nn.Linear(256, num_classes)
+         with torch.no_grad():
+             self.fc.bias[0] = -5.0  # discourage the blank class early in training
+
+     def forward(self, x):
+         b, c, h, w = x.size()
+         x = self.conv(x)
+         x = x.permute(0, 3, 1, 2)        # (B, C, H, W) -> (B, W, C, H)
+         x = x.reshape(b, x.size(1), -1)  # (B, W, C * H): one feature vector per column
+         x, _ = self.rnn(x)
+         x = self.fc(x)
+         return x
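+ # Shape check for a (B, 1, 32, 128) input: conv -> (B, 64, 8, 126), the
+ # permute/reshape -> (B, 126, 512) so each time step is one image column,
+ # then bi-LSTM -> (B, 126, 256) and fc -> per-step logits over num_classes.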
+
+
+ def greedy_decode(log_probs):
+     # log_probs shape: (T, B, C); B is expected to be 1 at inference time
+     pred = log_probs.argmax(2).squeeze(1).tolist()  # best class index per time step
+     print(f"Decoded indices: {pred}")  # debug: inspect the raw argmax path
+
+     decoded = []
+     prev = BLANK_IDX
+     for p in pred:
+         if p != prev and p != BLANK_IDX:
+             decoded.append(IDX2CHAR.get(p, ""))
+         prev = p
+     return ''.join(decoded)
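+ # Example: an argmax path of [0, 1, 1, 0, 1, 2] collapses to "AAB": repeats
+ # merge, blanks (0) drop, and a blank between repeats lets the same
+ # character appear twice in a row.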
+
+
+ # --------- Custom Collate --------- #
+ def custom_collate_fn(batch):
+     images, labels = zip(*batch)
+     images = torch.stack(images, 0)
+
+     flat_labels = []
+     label_lengths = []
+
+     for label in labels:
+         flat_labels.append(label)
+         label_lengths.append(len(label))
+
+     targets = torch.cat(flat_labels)
+     return images, targets, torch.tensor(label_lengths, dtype=torch.long)
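+ # nn.CTCLoss accepts targets as one flat 1-D tensor plus per-sample lengths,
+ # which is why labels are concatenated here rather than padded.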
+
+
+ # --------- Model Save/Load --------- #
+ def list_saved_models():
+     return [f for f in os.listdir() if f.endswith(".pth")]
+
+
+ def save_model(model, path):
+     torch.save(model.state_dict(), path)
+
+
+ def load_model(path):
+     global ocr_model
+     model = OCRModel(num_classes=len(CHAR2IDX))
+     model.load_state_dict(torch.load(path, map_location=device))
+     model.to(device)
+     model.eval()
+     ocr_model = model
+     return f"Model '{path}' loaded."
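+ # map_location=device lets a checkpoint trained on GPU load on a CPU-only
+ # host, and eval() switches the model to inference mode before use.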
+
+
+ # --------- Gradio Functions --------- #
+ def train_model(font_file, epochs=100, learning_rate=0.001):
+     global font_path, ocr_model
+
+     epochs = int(epochs)  # Gradio sliders may deliver floats; range() needs an int
+
+     # Save the uploaded font file
+     font_name = os.path.splitext(os.path.basename(font_file.name))[0]
+     font_path = f"./{font_name}.ttf"
+     with open(font_file.name, "rb") as uploaded:
+         with open(font_path, "wb") as f:
+             f.write(uploaded.read())
+
+     # Load dataset
+     dataset = OCRDataset(font_path)
+     dataloader = DataLoader(dataset, batch_size=16, shuffle=True, collate_fn=custom_collate_fn)
+
+     # Visualize one sample for a sanity check (only shows when run locally with a GUI backend)
+     img, label = dataset[0]
+     print("Label:", ''.join([IDX2CHAR[i.item()] for i in label]))
+     plt.imshow(img.permute(1, 2, 0).squeeze(), cmap='gray')
+     plt.show()
+
+     # Initialize model
+     model = OCRModel(num_classes=len(CHAR2IDX)).to(device)
+     criterion = nn.CTCLoss(blank=BLANK_IDX)
+     optimizer = optim.Adam(model.parameters(), lr=learning_rate)
+
+     # Training loop
+     for epoch in range(epochs):
+         for img, targets, target_lengths in dataloader:
+             img = img.to(device)
+             targets = targets.to(device)
+             target_lengths = target_lengths.to(device)
+
+             output = model(img)
+             batch_size = img.size(0)
+             seq_len = output.size(1)
+             input_lengths = torch.full(size=(batch_size,), fill_value=seq_len, dtype=torch.long).to(device)
+
+             loss = criterion(output.log_softmax(2).transpose(0, 1), targets, input_lengths, target_lengths)
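+             # CTCLoss consumes (T, B, C) log-probabilities, hence the
+             # log_softmax + transpose; input_lengths is the full output
+             # width for every sample since no padding is masked out.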
+             optimizer.zero_grad()
+             loss.backward()
+             optimizer.step()
+
+         print(f"Epoch {epoch + 1}, Loss: {loss.item():.4f}")
+
+     # Save model with a structured name
+     model_name = f"{font_name}_{epochs}epochs_lr{learning_rate:.0e}.pth"
+     save_model(model, model_name)
+     ocr_model = model
+     return f"Training complete! Model saved as '{model_name}'."
+
+
+ def preprocess_image(image: Image.Image):
+     img_cv = np.array(image.convert("L"))
+
+     img_bin = cv2.adaptiveThreshold(img_cv, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
+                                     cv2.THRESH_BINARY_INV, 25, 15)
+
+     # Invert if the background came out dark
+     white_px = (img_bin == 255).sum()
+     black_px = (img_bin == 0).sum()
+     if black_px > white_px:
+         img_bin = 255 - img_bin
+
+     # Resize to the training height, then pad or crop to (IMAGE_HEIGHT, IMAGE_WIDTH)
+     h, w = img_bin.shape
+     scale = IMAGE_HEIGHT / h
+     new_w = int(w * scale)
+     resized = cv2.resize(img_bin, (new_w, IMAGE_HEIGHT), interpolation=cv2.INTER_AREA)
+
+     if new_w < IMAGE_WIDTH:
+         pad_width = IMAGE_WIDTH - new_w
+         padded = np.pad(resized, ((0, 0), (0, pad_width)), constant_values=255)
+     else:
+         padded = resized[:, :IMAGE_WIDTH]
+
+     return to_pil_image(padded)
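+ # The polarity check above aims to match the training data (dark glyphs on
+ # a light background), and padding with 255 keeps the right edge white.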
+
+
+ def predict_text(image: Image.Image):
+     if ocr_model is None:
+         return "Please load or train a model first."
+
+     processed = preprocess_image(image)
+
+     transform = transforms.Compose([
+         transforms.ToTensor(),
+         transforms.Normalize((0.5,), (0.5,))
+     ])
+     img_tensor = transform(processed).unsqueeze(0).to(device)  # (1, C, H, W)
+
+     with torch.no_grad():
+         output = ocr_model(img_tensor)                       # (B, T, C)
+         log_probs = output.log_softmax(2).permute(1, 0, 2)   # (T, B, C)
+
+     pred = greedy_decode(log_probs)  # collapsed string
+
+     probs = log_probs.exp()
+     max_probs = probs.max(2)[0].squeeze(1)  # (T,)
+     avg_conf = max_probs.mean().item()
+
+     return f"Prediction: {pred}\nConfidence: {avg_conf:.2%}"
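+ # The reported confidence is the mean of the per-step max probabilities, a
+ # rough heuristic rather than a calibrated sequence probability.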
+
+
+ # New helper function: generate a grid of label images
+ def generate_labels(font_file=None, num_labels: int = 20):
+     global font_path
+
+     try:
+         num_labels = int(num_labels)  # gr.Number may deliver a float
+
+         if font_file:
+             font_path = "./temp_font_labels.ttf"
+             with open(font_file.name, "rb") as uploaded:
+                 with open(font_path, "wb") as f:
+                     f.write(uploaded.read())
+
+         if font_path is None or not os.path.exists(font_path):
+             font = ImageFont.load_default()
+         else:
+             font = ImageFont.truetype(font_path, 32)
+
+         labels = ["".join(np.random.choice(list(CHARS), np.random.randint(4, 7))) for _ in range(num_labels)]
+
+         cols = min(5, num_labels)
+         rows = math.ceil(num_labels / cols)
+         cell_w, cell_h = IMAGE_WIDTH, IMAGE_HEIGHT
+
+         grid_img = Image.new("L", (cols * cell_w, rows * cell_h), color=255)
+         draw = ImageDraw.Draw(grid_img)
+
+         spacing = 0  # extra horizontal space between characters
+         font_size = getattr(font, "size", 16)  # the default bitmap font has no .size attribute
+
+         for idx, label in enumerate(labels):
+             x = (idx % cols) * cell_w
+             y = (idx // cols) * cell_h
+
+             # Draw each character individually so spacing can be controlled
+             char_x = x + 10  # small left margin
+             char_y = y + (cell_h - font_size) // 2
+
+             for char in label:
+                 draw.text((char_x, char_y), char, font=font, fill=0)
+                 char_w = font.getbbox(char)[2] - font.getbbox(char)[0]
+                 char_x += char_w + spacing  # move right with spacing
+
+         return grid_img
+
+     except Exception as e:
+         print("Error in generate_labels:", e)
+         error_img = Image.new("RGB", (512, 128), color=(255, 255, 255))
+         draw = ImageDraw.Draw(error_img)
+         draw.text((10, 50), f"Error: {str(e)}", fill=(255, 0, 0))
+         return error_img
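+ # generate_labels falls back to Pillow's built-in bitmap font when no
+ # .ttf/.otf is available, and renders any exception into an image so the
+ # Gradio Image output always receives something displayable.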
+
+
+ # --------- Updated Gradio UI with new tab --------- #
+ with gr.Blocks() as demo:
+     with gr.Tab("1. Upload Font & Train"):
+         font_file = gr.File(label="Upload .ttf or .otf font", file_types=[".ttf", ".otf"])
+         epochs_input = gr.Slider(minimum=1, maximum=4096, value=256, step=1, label="Epochs")
+         lr_input = gr.Slider(minimum=0.001, maximum=0.1, value=0.05, step=0.001, label="Learning Rate")
+         train_button = gr.Button("Train OCR Model")
+         train_status = gr.Textbox(label="Status")
+
+         train_button.click(fn=train_model, inputs=[font_file, epochs_input, lr_input], outputs=train_status)
+
+     with gr.Tab("2. Use Trained Model"):
+         model_list = gr.Dropdown(choices=list_saved_models(), label="Select OCR Model")
+         refresh_btn = gr.Button("🔄 Refresh Models")
+         load_model_btn = gr.Button("Load Model")  # <-- new button
+
+         image_input = gr.Image(type="pil", label="Upload word strip")
+         predict_btn = gr.Button("Predict")
+         output_text = gr.Textbox(label="Recognized Text")
+         model_status = gr.Textbox(label="Model Load Status")
+
+         # Refresh dropdown choices
+         refresh_btn.click(fn=lambda: gr.update(choices=list_saved_models()), outputs=model_list)
+
+         # Load the model on button click, NOT on dropdown change
+         load_model_btn.click(fn=load_model, inputs=model_list, outputs=model_status)
+
+         predict_btn.click(fn=predict_text, inputs=image_input, outputs=output_text)
+
+     with gr.Tab("3. Generate Labels"):
+         font_file_labels = gr.File(label="Optional font for label image", file_types=[".ttf", ".otf"])
+         num_labels = gr.Number(value=20, label="Number of labels to generate", precision=0, interactive=True)
+         gen_button = gr.Button("Generate Label Grid")
+         label_image = gr.Image(label="Generated Labels", type="pil")
+
+         gen_button.click(fn=generate_labels, inputs=[font_file_labels, num_labels], outputs=label_image)
+
+
+ if __name__ == "__main__":
+     demo.launch()
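+ # demo.launch() starts a local Gradio server; on Hugging Face Spaces the
+ # same call is expected to be picked up automatically without extra args.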