testmail-gmail committed
Commit f7cedb3 · 1 Parent(s): a82adc9

Update app.py

Files changed (1): app.py (+10 -40)
app.py CHANGED
@@ -4,18 +4,11 @@ import time
 import torch
 import argparse
 import gradio as gr
-import io
 from PIL import Image
 from numpy import random
 from pathlib import Path
 import torch.backends.cudnn as cudnn
 from models.experimental import attempt_load
-import keras_ocr
-import matplotlib.pyplot as plt
-from numpy import asarray
-import pytesseract
-from datetime import date
-
 
 from utils.datasets import LoadStreams, LoadImages
 from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \
@@ -24,15 +17,15 @@ from utils.plots import plot_one_box
 from utils.torch_utils import select_device, load_classifier, time_synchronized, TracedModel
 os.system("wget https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7.pt")
 os.system("wget https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-e6.pt")
-pipeline = keras_ocr.pipeline.Pipeline()
 
-def detect_Custom(img):
-    model='passport_mrz' # Naming Convention for yolov7. See output file of https://www.kaggle.com/code/owaiskhan9654/training-yolov7-on-kaggle-on-custom-dataset/data
+def detect_Custom(img, model):
+    if model == 'Yolo_v7_Custom_trained_By_Owais':
+        model = 'best' # Naming Convention for yolov7. See output file of https://www.kaggle.com/code/owaiskhan9654/training-yolov7-on-kaggle-on-custom-dataset/data
     parser = argparse.ArgumentParser()
     parser.add_argument('--weights', nargs='+', type=str, default=model+".pt", help='model.pt path(s)')
     parser.add_argument('--source', type=str, default='Inference/', help='source')
     parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
-    parser.add_argument('--conf-thres', type=float, default=0.45, help='object confidence threshold')
+    parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
     parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
     parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
     parser.add_argument('--view-img', action='store_true', help='display results')
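
Note on the hunk above: the hard-coded 'passport_mrz' checkpoint becomes a model argument supplied by the UI dropdown, and the detection confidence threshold is loosened from 0.45 to 0.25. A minimal sketch of the resulting name-to-weights mapping (resolve_weights is a hypothetical name; the app inlines this logic just before building the argparse config):

    def resolve_weights(model: str) -> str:
        # Hypothetical helper mirroring the inlined logic: map the dropdown
        # value to the .pt checkpoint used as the --weights default.
        if model == 'Yolo_v7_Custom_trained_By_Owais':
            model = 'best'  # custom-trained checkpoint from the linked Kaggle notebook
        return model + '.pt'  # 'yolov7.pt' and 'yolov7-e6.pt' are fetched by the wget calls above

    assert resolve_weights('yolov7-e6') == 'yolov7-e6.pt'
    assert resolve_weights('Yolo_v7_Custom_trained_By_Owais') == 'best.pt'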
@@ -100,7 +93,7 @@ def detect_Custom(img):
 
         if classify:
             pred = apply_classifier(pred, modelc, img, im0s)
-
+
         for i, det in enumerate(pred):
             if webcam:
                 p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count
@@ -131,15 +124,6 @@ def detect_Custom(img):
                     if save_img or view_img:
                         label = f'{names[int(cls)]} {conf:.2f}'
                         plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3)
-                        if(cls == 1):
-                            x1 = int(xyxy[0].item())
-                            y1 = int(xyxy[1].item())
-                            x2 = int(xyxy[2].item())
-                            y2 = int(xyxy[3].item())
-                            orig_img = im0
-                            crop_img = im0[y1:y2, x1:x2]
-                            cv2.imwrite('MRZ_1.png', crop_img)
-
             if view_img:
                 cv2.imshow(str(p), im0)
                 cv2.waitKey(1)
@@ -162,30 +146,16 @@ def detect_Custom(img):
                     vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
                 vid_writer.write(im0)
 
-    output_text = 'This is not a valid Passport'
-    text = pytesseract.image_to_string(Image.open('MRZ_1.png'))
-    text = text.replace(" ", "")
-    text=text[22:28]
-    today = date.today()
-    s = today.strftime('%Y%m%d')[2:]
-    if(text > s):
-        output_text = 'This is a Valid Passport'
-    #images = [keras_ocr.tools.read(img) for img in [boundedImage]]
-    #prediction_groups = pipeline.recognize(images)
-    #first=prediction_groups[0]
-    #for text,box in first:
-        #output_text += ' '+ text
-
     if save_txt or save_img:
         s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
 
     print(f'Done. ({time.time() - t0:.3f}s)')
 
-    return Image.fromarray(im0[:,:,::-1]), output_text
+    return Image.fromarray(im0[:,:,::-1])
 
 
-output = gr.Textbox(label="Validation",elem_id="opbox")
-Custom_description="<center>Custom Training Performed on Colab <a href='https://colab.research.google.com/github/roboflow-ai/notebooks/blob/main/notebooks/train-yolov7-object-detection-on-custom-data.ipynb?authuser=2#scrollTo=1iqOPKjr22mL' style='text-decoration: underline' target='_blank'>Link</a> </center><br> <center>Model trained with test dataset of 'aadhar-card', 'credit-card','prescription' and 'passport' </center>"
+
+Custom_description="<center>Custom Training Performed on Kaggle <a href='https://www.kaggle.com/code/owaiskhan9654/training-yolov7-on-kaggle-on-custom-dataset/notebook' style='text-decoration: underline' target='_blank'>Link</a> </center><br> <center>Trainable bag-of-freebies sets new state-of-the-art for real-time object detectors </center> <br> <b>1st</b> class is for Person Detected<br><b>2nd</b> class is for Car Detected"
 
 Footer = (
     "<center>Model Trained by: Owais Ahmad Data Scientist at <b> Thoucentric </b> <a href=\"https://www.linkedin.com/in/owaiskhan9654/\">Visit Profile</a> <br></center>"
@@ -199,7 +169,7 @@ Footer = (
 
 examples1=[["Image1.jpeg", "Yolo_v7_Custom_trained_By_Owais"],["Image2.jpeg", "Yolo_v7_Custom_trained_By_Owais"],["Image3.jpeg", "Yolo_v7_Custom_trained_By_Owais"],["Image4.jpeg", "Yolo_v7_Custom_trained_By_Owais"],["Image5.jpeg", "Yolo_v7_Custom_trained_By_Owais"],["Image6.jpeg", "Yolo_v7_Custom_trained_By_Owais"],["horses.jpeg", "yolov7"],["horses.jpeg", "yolov7-e6"]]
 
-Top_Title="<center>Intelligent Image to Text - IIT </center></a>"
+Top_Title="<center>Yolov7 🚀 Custom Trained by <a href='https://www.linkedin.com/in/owaiskhan9654/' style='text-decoration: underline' target='_blank'>Owais Ahmad </center></a>🚗Car and 👦Person Detection Class"
 
 css = ".output-image, .input-image, .image-preview {height: 300px !important}"
-gr.Interface(detect_Custom,gr.Image(type="pil"),[gr.Image(type="pil"),output],css=css,title=Top_Title,examples=examples1,description=Custom_description,article=Footer,cache_examples=False).launch()
+gr.Interface(detect_Custom,[gr.Image(type="pil"),gr.Dropdown(default="Yolo_v7_Custom_trained_By_Owais",choices=["Yolo_v7_Custom_trained_By_Owais","yolov7","yolov7-e6"])],gr.Image(type="pil"),css=css,title=Top_Title,examples=examples1,description=Custom_description,article=Footer,cache_examples=False).launch()
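
Note on the rewired UI: gr.Interface passes input components to the function positionally, so the image and dropdown land in detect_Custom(img, model), and the outputs shrink from an (image, validation textbox) pair to a single annotated image. A minimal runnable sketch of the same wiring with a stub in place of the YOLOv7 pipeline (the commit sets the Dropdown's initial choice with default=, which newer Gradio releases spell value=):

    import gradio as gr
    from PIL import Image

    def detect_Custom(img: Image.Image, model: str) -> Image.Image:
        # Stub: the real app runs YOLOv7 with the chosen weights and returns
        # the annotated frame via Image.fromarray(im0[:, :, ::-1]) (BGR -> RGB).
        return img

    gr.Interface(
        detect_Custom,
        [gr.Image(type="pil"),
         gr.Dropdown(choices=["Yolo_v7_Custom_trained_By_Owais", "yolov7", "yolov7-e6"])],
        gr.Image(type="pil"),
    ).launch()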
 