Upload 12 files
- __pycache__/utils.cpython-39.pyc +0 -0
- __pycache__/views.cpython-39.pyc +0 -0
- app.py +6 -0
- models/ocr_big_1.h5 +3 -0
- models/ocr_new_1.h5 +3 -0
- requirements.txt +0 -0
- static/images/dollar.gif +0 -0
- static/script.js +110 -0
- static/styles.css +149 -0
- templates/index.html +38 -0
- utils.py +112 -0
- views.py +56 -0
__pycache__/utils.cpython-39.pyc
ADDED
Binary file (3.38 kB)
__pycache__/views.cpython-39.pyc
ADDED
Binary file (1.58 kB)
app.py
ADDED
@@ -0,0 +1,6 @@
+from flask import Flask
+from views import views
+
+app = Flask(__name__)
+app.register_blueprint(views, url_prefix='/views')
+app.config['MAX_CONTENT_LENGTH'] = 128 * 1024 * 1024
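
Note: app.py never calls app.run(), so this upload relies on an external runner. A minimal local entry point (an addition for illustration only, not part of this commit) could look like the following; equivalently, `flask --app app run` serves the same app without modifying the file.

    if __name__ == '__main__':
        # Dev server only; the blueprint is mounted with url_prefix='/views',
        # so the drawing page is served at http://localhost:5000/views/.
        app.run(debug=True)
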
models/ocr_big_1.h5
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9eee9e8ab96c1073eb117ff8c40d9e24bf30cfadc8d6290c9cf76c3f0d02fe3f
+size 6394352
models/ocr_new_1.h5
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b99062efc07f1a03a59c5563e0359d2f9afb3d0a5110cdb84430603c32dae6a9
+size 6394984
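
Both model files are committed through Git LFS, so a clone without LFS only contains the three-line pointer stubs shown above (the real weights are roughly 6.4 MB each, per the recorded sizes). A small sanity check before loading them, written as a sketch that assumes it runs from the repository root:

    from pathlib import Path

    # Distinguish real HDF5 weight files from Git LFS pointer stubs.
    for path in ('models/ocr_big_1.h5', 'models/ocr_new_1.h5'):
        head = Path(path).read_bytes()[:40]
        if head.startswith(b'version https://git-lfs'):
            print(f'{path} is still an LFS pointer - run `git lfs pull` first')
        else:
            print(f'{path} looks like real model data ({Path(path).stat().st_size} bytes)')
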
requirements.txt
ADDED
Binary file (3 kB)
static/images/dollar.gif
ADDED
static/script.js
ADDED
@@ -0,0 +1,110 @@
+document.addEventListener('DOMContentLoaded', () => {
+  document.getElementById('outimg').style.display = 'none'
+  document.getElementById('loader').style.visibility = 'hidden'
+  const canvas = document.getElementById('drawing-area');
+  const canvasContext = canvas.getContext('2d');
+  const clearButton = document.getElementById('clear-button');
+  const saveButton = document.getElementById('save-button');
+  const state = {
+    mousedown: false
+  };
+
+  const baseLineWidth = 3;
+  const devicePixelRatio = window.devicePixelRatio || 1;
+  const lineWidth = baseLineWidth * devicePixelRatio * (7 / 9);
+  const strokeStyle = '#333';
+
+  canvas.addEventListener('mousedown', handleWritingStart);
+  canvas.addEventListener('mousemove', handleWritingInProgress);
+  canvas.addEventListener('mouseup', handleDrawingEnd);
+  canvas.addEventListener('mouseout', handleDrawingEnd);
+
+  canvas.addEventListener('touchstart', handleWritingStart);
+  canvas.addEventListener('touchmove', handleWritingInProgress);
+  canvas.addEventListener('touchend', handleDrawingEnd);
+
+  clearButton.addEventListener('click', handleClearButtonClick);
+  saveButton.addEventListener('click', handleSaveButtonClick);
+
+  function handleWritingStart(event) {
+    event.preventDefault();
+    state.mousedown = true;
+    const mousePos = getMousePositionOnCanvas(event);
+    canvasContext.beginPath();
+    canvasContext.moveTo(mousePos.x, mousePos.y);
+    canvasContext.lineWidth = lineWidth;
+    canvasContext.strokeStyle = strokeStyle;
+    canvasContext.shadowColor = null;
+    canvasContext.shadowBlur = 0;
+  }
+
+  function handleWritingInProgress(event) {
+    event.preventDefault();
+    if (state.mousedown) {
+      const mousePos = getMousePositionOnCanvas(event);
+      canvasContext.lineTo(mousePos.x, mousePos.y);
+      canvasContext.stroke();
+    }
+  }
+
+  function handleDrawingEnd(event) {
+    event.preventDefault();
+    if (state.mousedown) {
+      canvasContext.shadowColor = null;
+      canvasContext.shadowBlur = 0;
+      canvasContext.stroke();
+    }
+    state.mousedown = false;
+  }
+
+  function handleClearButtonClick(event) {
+    event.preventDefault();
+    document.getElementById('outimg').style.display = 'none'
+    document.getElementById('outtext').textContent = 'Your Output will be displayed here'
+    clearCanvas();
+  }
+
+  function handleSaveButtonClick(event) {
+    event.preventDefault();
+    const dataUrl = canvas.toDataURL();
+    sendDataToFlask(dataUrl);
+  }
+
+  function getMousePositionOnCanvas(event) {
+    const clientX = event.clientX || (event.touches && event.touches[0].clientX);
+    const clientY = event.clientY || (event.touches && event.touches[0].clientY);
+    const rect = canvas.getBoundingClientRect();
+    const scaleX = canvas.width / rect.width;
+    const scaleY = canvas.height / rect.height;
+    const canvasX = (clientX - rect.left) * scaleX;
+    const canvasY = (clientY - rect.top) * scaleY;
+    return { x: canvasX, y: canvasY };
+  }
+
+  function clearCanvas() {
+    canvasContext.clearRect(0, 0, canvas.width, canvas.height);
+  }
+
+  async function sendDataToFlask(dataUrl) {
+    document.getElementById('loader').style.visibility = 'visible'
+    const response = await fetch('/views', {
+      method: 'POST',
+      headers: {
+        'Content-Type': 'application/json'
+      },
+      body: JSON.stringify({ image: dataUrl })
+    })
+
+    const data = await response.json()
+    const output_image = `data:image/png;base64,${data.output_image}`
+    const output_string = data.output_string
+    if (output_string) {
+      document.getElementById('outimg').style.display = "block"
+      document.getElementById('outtext').textContent = output_string
+      document.getElementById('outimg').src = output_image;
+    }
+    document.getElementById('loader').style.visibility = 'hidden'
+  }
+
+});
static/styles.css
ADDED
@@ -0,0 +1,149 @@
+* {
+  margin: 0;
+  padding: 0;
+}
+.main {
+  width: 100%;
+  min-height: 100vh;
+  background: linear-gradient(
+    90deg,
+    #f51d88 0%,
+    #ff6200 50%,
+    black 50%,
+    black 100%
+  );
+  color: white;
+  display: flex;
+  flex-direction: column;
+  align-items: center;
+}
+
+.nav {
+  width: 100%;
+  min-height: 20vh;
+  display: flex;
+  justify-content: center;
+  text-align: center;
+  margin-top: 2rem;
+}
+
+.title {
+  font-weight: 800;
+  font-size: 3rem;
+  text-decoration: underline;
+  font-family: Arial, Helvetica, sans-serif;
+}
+
+.drawing-area {
+  background-color: white;
+  width: 100%;
+  height: 450px;
+  cursor: pointer;
+  border: 2px solid white;
+  border-radius: 12px;
+}
+
+.clear-button {
+  margin: 2em;
+  font-size: 16px;
+}
+.maincontainer {
+  background-color: transparent;
+  width: 65%;
+  margin-top: 1rem;
+  display: flex;
+  flex-direction: column;
+  border: 2px solid white;
+  border-radius: 12px 12px;
+  padding: 12px;
+  backdrop-filter: blur(25px);
+}
+
+.form-label {
+  font-weight: 600 !important;
+}
+
+.inputcont {
+  margin-bottom: 1rem;
+}
+
+.buttoncont {
+  display: flex;
+  margin-bottom: 0.2rem;
+  align-items: center;
+  justify-content: center;
+}
+
+.btn {
+  background-color: black;
+  width: 130px;
+  height: 50px;
+  color: white;
+  font-weight: 700;
+  font-size: 20px;
+  border: 2px solid white;
+  transition: 0.5s all;
+}
+.clear-button:hover {
+  background-color: black;
+  box-shadow: 10px 10px white;
+}
+.save-button {
+  background: linear-gradient(90deg, #f51d88 0%, #ff6200 50%, #ff6200 100%);
+}
+
+.save-button:hover {
+  background: linear-gradient(90deg, #f51d88 0%, #ff6200 50%, #ff6200 100%);
+  box-shadow: 10px 10px white;
+}
+.outimg {
+  width: 100%;
+  height: 450px;
+  border: 2px solid white;
+  border-radius: 12px;
+}
+
+.outtextcont {
+  display: flex;
+  justify-content: center;
+  min-height: 2rem;
+  border: 1px solid white;
+  border-radius: 10px;
+  margin-bottom: 1rem;
+  backdrop-filter: blur(30px);
+  padding: 5px;
+}
+
+.outtext {
+  font-weight: 700;
+  font-size: 1.5rem;
+}
+
+.loader {
+  width: 100%;
+  height: 100vh;
+  position: fixed;
+  background-color: black;
+  opacity: 0.7;
+  color: white;
+  display: flex;
+  justify-content: center;
+  align-items: center;
+  z-index: 2;
+}
+
+.gifdiv {
+  background-color: transparent;
+}
+.gif {
+  border-radius: 12px !important;
+}
+@media (max-width: 800px) {
+  .maincontainer {
+    width: 90%;
+  }
+
+  .clear-button {
+    margin-left: 0;
+  }
+}
templates/index.html
ADDED
@@ -0,0 +1,38 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <meta charset="UTF-8">
+    <meta name="viewport" content="width=device-width, initial-scale=1.0">
+    <title>OCR_RECOGNITION</title>
+    <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/[email protected]/dist/css/bootstrap.min.css">
+    <link rel="stylesheet" href="{{ url_for('static', filename='styles.css') }}">
+    <script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/js/bootstrap.bundle.min.js"></script>
+</head>
+<body>
+    <div id="loader" class="loader">
+        <div class="gifdiv">
+            <img class="gif" src="{{ url_for('static', filename='images/dollar.gif') }}" alt="gif">
+        </div>
+    </div>
+    <div class="main">
+        <nav class="nav"><div class="title">Optical Character Recognition</div></nav>
+        <div class="maincontainer">
+            <div class="outtextcont">
+                <div class="outtext" id="outtext">Your Output will be displayed here</div>
+            </div>
+            <div class="canvascontainer">
+                <canvas id="drawing-area" class="drawing-area"></canvas>
+                <div class="buttoncont">
+                    <button type="submit" class="btn clear-button" id="clear-button">Clear</button>
+                    <button id="save-button" class="save-button btn" type="button">Save</button>
+                </div>
+            </div>
+
+            <div class="outimgcont">
+                <img class="outimg" id="outimg" src="" alt="">
+            </div>
+        </div>
+    </div>
+    <script src="{{ url_for('static', filename='script.js') }}"></script>
+</body>
+</html>
utils.py
ADDED
@@ -0,0 +1,112 @@
+import cv2
+import numpy as np
+char_list = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C',
+             'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'P', 'Q',
+             'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
+
+
+char_list_sub = ['A', '9', 'G', 'Q', '8', '6']
+
+mappings = {i: char_list[i] for i in range(len(char_list))}
+
+font = cv2.FONT_HERSHEY_TRIPLEX
+font_scale = 0.5
+color = (255, 255, 0)
+thickness = 1
+
+def predict(model, sub_model, img, mappings):
+
+    kernel = np.array([[-1, -1, -1],
+                       [-1, 9, -1],
+                       [-1, -1, -1]])
+    img = cv2.resize(img, (32, 32))
+    img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
+
+    img = cv2.filter2D(img, -1, kernel)
+    _, img = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
+    img = cv2.dilate(img, (3, 3), iterations=1)
+    img = img / 255.
+    img = img.astype('float32')
+
+    predval = model.predict(np.expand_dims(img, axis=0))
+    argval = np.argmax(predval, axis=-1)
+    result = mappings[argval[0]]
+
+    if result in char_list_sub:
+        predval_new = sub_model.predict(np.expand_dims(img, axis=0))
+        argval_new = np.argmax(predval_new, axis=-1)
+        result_new = mappings[argval_new[0]]
+
+        if result == '6' and result_new == 'G':
+            return result
+        return result_new
+
+    return result
+
+
+def get_results(uploaded_image):
+    original_image = uploaded_image
+    image = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)
+    x, y = image.shape
+    maxH = min(1000, x)
+    maxW = min(1000, y)
+    original_image = cv2.resize(original_image, (maxW, maxH))
+
+
+    image = cv2.resize(image, (maxW, maxH))
+
+    _, black_white_image = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
+
+    if black_white_image[10, 10] == 255 or black_white_image[10, len(image[0]) - 10] == 255 or black_white_image[len(black_white_image) - 1, 10] == 255 or black_white_image[len(image) - 10, len(image[0]) - 10] == 255:
+        black_white_image = 255 - black_white_image
+
+    num_labels, _, stats, _ = cv2.connectedComponentsWithStats(black_white_image, connectivity=8)
+    output_image = original_image.copy()
+
+    sorted_indices = sorted(range(num_labels), key=lambda i: (stats[i, cv2.CC_STAT_TOP], stats[i, cv2.CC_STAT_LEFT]))
+
+    grouped_indices = []
+    current_group = [sorted_indices[0]]
+
+    for i in range(1, num_labels):
+        current_index = sorted_indices[i]
+        prev_index = current_group[-1]
+
+        if abs(stats[current_index, cv2.CC_STAT_TOP] - stats[prev_index, cv2.CC_STAT_TOP]) <= 20:
+            current_group.append(current_index)
+        else:
+            current_group.sort(key=lambda idx: stats[idx, cv2.CC_STAT_LEFT])
+            grouped_indices.extend(current_group)
+
+            current_group = [current_index]
+
+    current_group.sort(key=lambda idx: stats[idx, cv2.CC_STAT_LEFT])
+    grouped_indices.extend(current_group)
+    sorted_indices = grouped_indices
+
+
+    detected_contours = []
+    coords = []
+    for i in sorted_indices:
+        if i == 0:
+            continue
+        x, y, w, h, area = stats[i]
+
+        widthFlag = w > 2 and w < 500 and w < (maxW - 50)
+        heightFlag = h > 15 and h < 500 and h < (maxH - 10)
+        areaFlag = area > 15 and area < 100000
+
+        if widthFlag and heightFlag and areaFlag:
+            samp = image[y:y + h, x:x + w]
+            samp = cv2.resize(samp, (32, 32))
+            coords.append((x, y))
+            detected_contour = output_image[y:y + h, x:x + w]
+            bordered_contour = cv2.copyMakeBorder(detected_contour, 20, 20, 20, 20, cv2.BORDER_CONSTANT, value=[0, 0, 0])
+            detected_contours.append(bordered_contour)
+            cv2.rectangle(output_image, (x - 2, y - 2), (x + w + 2, y + h + 2), (0, 1, 0), 2)
+    output_image = output_image * 255
+    return detected_contours, output_image, coords
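
For reference, these helpers can be driven without the Flask layer; the following is a sketch, where `sample.png` is a hypothetical test image and the two models are loaded eagerly here rather than lazily as views.py does:

    import cv2
    import tensorflow as tf
    from utils import get_results, predict, mappings

    # Model paths as committed in this upload (weights fetched via Git LFS).
    ocr_model = tf.keras.models.load_model('models/ocr_big_1.h5')
    ocr_sub_model = tf.keras.models.load_model('models/ocr_new_1.h5')

    image = cv2.imread('sample.png')  # hypothetical input image
    crops, annotated, coords = get_results(image)

    # Each crop is classified independently; get_results already orders them
    # row by row, left to right.
    text = ''.join(predict(ocr_model, ocr_sub_model, crop, mappings) for crop in crops)
    print(text)
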
views.py
ADDED
@@ -0,0 +1,56 @@
+from utils import *
+from flask import Blueprint, render_template, request, jsonify
+import tensorflow as tf
+import base64
+import io
+from PIL import Image
+views = Blueprint("views", __name__)
+
+ocr_model = None
+ocr_sub_model = None
+
+
+MODEL_PATH = 'models/ocr_big_1.h5'
+SUB_MODEL_PATH = 'models/ocr_new_1.h5'
+
+@views.route('/', methods=['GET', 'POST'])
+def index():
+    global ocr_model
+    global ocr_sub_model
+
+    if request.method == 'POST':
+        data = request.json['image']
+        head, data = data.split(',', 1)
+        image_data = base64.b64decode(data)
+
+        image = Image.open(io.BytesIO(image_data)).convert('RGB')
+
+        processed_image = np.array(image)
+
+        if not ocr_model:
+            ocr_model = tf.keras.models.load_model(MODEL_PATH)
+        if not ocr_sub_model:
+            ocr_sub_model = tf.keras.models.load_model(SUB_MODEL_PATH)
+
+
+        detected_contours, output_image, coords = get_results(processed_image)
+        output_string = ""
+        for i in range(len(detected_contours)):
+            predicted_char = predict(ocr_model, ocr_sub_model, detected_contours[i], mappings)
+
+            if predicted_char == '0':
+                output_string += 'O'
+                cv2.putText(output_image, 'O', (coords[i][0] + 5, coords[i][1] - 5), font, font_scale, color, thickness)
+            else:
+                cv2.putText(output_image, predicted_char, (coords[i][0] + 5, coords[i][1] - 5), font, font_scale, color, thickness)
+                output_string += predicted_char
+
+
+        output_image = Image.fromarray(output_image, 'RGB')
+        data = io.BytesIO()
+        output_image.save(data, "JPEG")
+        output_image = base64.b64encode(data.getvalue()).decode('utf-8')
+        return jsonify({'output_image': output_image, 'output_string': output_string})
+    return render_template('index.html')
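
With the app running (for example via `flask --app app run`), the JSON contract that script.js depends on can be exercised directly. A sketch, assuming a local test image `digit.png` and the `requests` package (not necessarily pinned in requirements.txt):

    import base64
    import requests

    # Build the same payload the canvas produces: a data URL inside a JSON body.
    with open('digit.png', 'rb') as f:  # hypothetical test image
        data_url = 'data:image/png;base64,' + base64.b64encode(f.read()).decode()

    # Blueprint root: url_prefix='/views' plus the '/' route.
    resp = requests.post('http://localhost:5000/views/', json={'image': data_url})
    result = resp.json()

    print(result['output_string'])  # recognised text, with '0' already mapped to 'O'

    # The annotated image comes back base64-encoded as JPEG; save it to inspect
    # the drawn boxes and labels.
    with open('annotated.jpg', 'wb') as f:
        f.write(base64.b64decode(result['output_image']))
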