shengqiangShi committed
Commit 3643023 · verified · 1 Parent(s): 91af7f9

Upload 2 files

Files changed (2):
  1. app.py +199 -100
  2. requirements.txt +2 -1
app.py CHANGED
@@ -1,100 +1,199 @@
- import streamlit as st
-
- import tensorflow as tf
- from PIL import Image
- import numpy as np
- import cv2
- from huggingface_hub import from_pretrained_keras
-
-
- try:
-     model=from_pretrained_keras("SerdarHelli/Segmentation-of-Teeth-in-Panoramic-X-ray-Image-Using-U-Net")
- except:
-     model=tf.keras.models.load_model("dental_xray_seg.h5")
-     pass
-
- st.header("Segmentation of Teeth in Panoramic X-ray Image Using UNet")
-
- examples=["107.png","108.png","109.png"]
- link='Check Out Our Github Repo ! [link](https://github.com/SerdarHelli/Segmentation-of-Teeth-in-Panoramic-X-ray-Image-Using-U-Net)'
- st.markdown(link,unsafe_allow_html=True)
-
-
- def load_image(image_file):
-     img = Image.open(image_file)
-     return img
-
- def convert_one_channel(img):
-     #some images have 3 channels , although they are grayscale image
-     if len(img.shape)>2:
-         img= cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
-         return img
-     else:
-         return img
-
- def convert_rgb(img):
-     #some images have 3 channels , although they are grayscale image
-     if len(img.shape)==2:
-         img= cv2.cvtColor(img,cv2.COLOR_GRAY2RGB)
-         return img
-     else:
-         return img
-
-
- st.subheader("Upload Dental Panoramic X-ray Image Image")
- image_file = st.file_uploader("Upload Images", type=["png","jpg","jpeg"])
-
-
- col1, col2, col3 = st.columns(3)
- with col1:
-     ex=load_image(examples[0])
-     st.image(ex,width=200)
-     if st.button('Example 1'):
-         image_file=examples[0]
-
- with col2:
-     ex1=load_image(examples[1])
-     st.image(ex1,width=200)
-     if st.button('Example 2'):
-         image_file=examples[1]
-
-
- with col3:
-     ex2=load_image(examples[2])
-     st.image(ex2,width=200)
-     if st.button('Example 3'):
-         image_file=examples[2]
-
-
- if image_file is not None:
-
-     img=load_image(image_file)
-
-     st.text("Making A Prediction ....")
-     st.image(img,width=850)
-
-     img=np.asarray(img)
-
-     img_cv=convert_one_channel(img)
-     img_cv=cv2.resize(img_cv,(512,512), interpolation=cv2.INTER_LANCZOS4)
-     img_cv=np.float32(img_cv/255)
-
-     img_cv=np.reshape(img_cv,(1,512,512,1))
-     prediction=model.predict(img_cv)
-     predicted=prediction[0]
-     predicted = cv2.resize(predicted, (img.shape[1],img.shape[0]), interpolation=cv2.INTER_LANCZOS4)
-     mask=np.uint8(predicted*255)#
-     _, mask = cv2.threshold(mask, thresh=0, maxval=255, type=cv2.THRESH_BINARY+cv2.THRESH_OTSU)
-     kernel =( np.ones((5,5), dtype=np.float32))
-     mask=cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel,iterations=1 )
-     mask=cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel,iterations=1 )
-     cnts,hieararch=cv2.findContours(mask,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
-     output = cv2.drawContours(convert_rgb(img), cnts, -1, (255, 0, 0) , 3)
-
-
-     if output is not None :
-         st.subheader("Predicted Image")
-         st.write(output.shape)
-         st.image(output,width=850)
-
-     st.text("DONE ! ....")
+ import streamlit as st
+ import tensorflow as tf
+ from PIL import Image
+ import numpy as np
+ import cv2
+ import matplotlib.pyplot as plt
+ from imutils import perspective
+ from scipy.spatial import distance as dist
+
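+ # Load the pre-trained U-Net dental segmentation model from the bundled .h5 weights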
+ model=tf.keras.models.load_model("dental_xray_seg.h5")
+
+ st.header("Segmentation of Teeth in Panoramic X-ray Image")
+
+ examples=["teeth_01.png","teeth_02.png","teeth_03.png","teeth_04.png"]
+
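+ # Helper functions: read an image as a grayscale array and normalise channel counts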
+ def load_image(image_file):
+     img = Image.open(image_file)
+     img_gray = img.convert('L')
+     img_np = np.array(img_gray)
+     return img_np
+
+ def convert_one_channel(img):
+     if len(img.shape)>2:
+         img= cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+     return img
+
+ def convert_rgb(img):
+     if len(img.shape)==2:
+         img= cv2.cvtColor(img,cv2.COLOR_GRAY2RGB)
+     return img
+
+ def midpoint(ptA, ptB):
+     return ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5)
+
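+ # Connected component analysis: label each segmented region, draw its bounding box and
+ # pixel dimensions on the original image, and count components large enough to be teeth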
+ def CCA_Analysis(orig_image,predict_image,erode_iteration,open_iteration):
+     kernel1 =( np.ones((5,5), dtype=np.float32))
+     kernel_sharpening = np.array([[-1,-1,-1],
+                                   [-1,9,-1],
+                                   [-1,-1,-1]])
+     image = predict_image
+     image2 =orig_image
+     image=cv2.morphologyEx(image, cv2.MORPH_OPEN, kernel1,iterations=open_iteration )
+     image = cv2.filter2D(image, -1, kernel_sharpening)
+     image=cv2.erode(image,kernel1,iterations =erode_iteration)
+     image=cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+     thresh = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
+     labels=cv2.connectedComponents(thresh,connectivity=8)[1]
+     a=np.unique(labels)
+     count2=0
+     for label in a:
+         if label == 0:
+             continue
+
+         # Create a mask
+         mask = np.zeros(thresh.shape, dtype="uint8")
+         mask[labels == label] = 255
+         # Find contours and determine contour area
+         cnts,hierarchy = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+         cnts = cnts[0]
+         c_area = cv2.contourArea(cnts)
+         # threshold for tooth count
+         if c_area>2000:
+             count2+=1
+
+         (x,y),radius = cv2.minEnclosingCircle(cnts)
+         rect = cv2.minAreaRect(cnts)
+         box = cv2.boxPoints(rect)
+         box = np.array(box, dtype="int")
+         box = perspective.order_points(box)
+         color1 = (list(np.random.choice(range(150), size=3)))
+         color =[int(color1[0]), int(color1[1]), int(color1[2])]
+         cv2.drawContours(image2,[box.astype("int")],0,color,2)
+         (tl,tr,br,bl)=box
+
+         (tltrX,tltrY)=midpoint(tl,tr)
+         (blbrX,blbrY)=midpoint(bl,br)
+         # compute the midpoint between the top-left and top-right points,
+         # followed by the midpoint between the top-right and bottom-right
+         (tlblX,tlblY)=midpoint(tl,bl)
+         (trbrX,trbrY)=midpoint(tr,br)
+         # draw the midpoints on the image
+         cv2.circle(image2, (int(tltrX), int(tltrY)), 5, (255, 0, 0), -1)
+         cv2.circle(image2, (int(blbrX), int(blbrY)), 5, (255, 0, 0), -1)
+         cv2.circle(image2, (int(tlblX), int(tlblY)), 5, (255, 0, 0), -1)
+         cv2.circle(image2, (int(trbrX), int(trbrY)), 5, (255, 0, 0), -1)
+         cv2.line(image2, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)),color, 2)
+         cv2.line(image2, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)),color, 2)
+         dA = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))
+         dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))
+
+         pixelsPerMetric=1
+         dimA = dA * pixelsPerMetric
+         dimB = dB *pixelsPerMetric
+         cv2.putText(image2, "{:.1f}pixel".format(dimA),(int(tltrX - 15), int(tltrY - 10)), cv2.FONT_HERSHEY_SIMPLEX,0.65, color, 2)
+         cv2.putText(image2, "{:.1f}pixel".format(dimB),(int(trbrX + 10), int(trbrY)), cv2.FONT_HERSHEY_SIMPLEX,0.65, color, 2)
+         cv2.putText(image2, "{:.1f}".format(label),(int(tltrX - 35), int(tltrY - 5)), cv2.FONT_HERSHEY_SIMPLEX,0.65, color, 2)
+     teeth_count=count2
+     return image2,teeth_count
+
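+ # Mark candidate decay pixels: within each predicted tooth mask, flag pixels whose
+ # mask-weighted grayscale value exceeds threshold*255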
+ def detect_decays_static_th(images, dental_masks=None, threshold=0.9):
+     decay_masks = []
+     for image, dental_mask in zip(images, dental_masks):
+         decay_mask = np.zeros_like(dental_mask)
+         image_masked_with_dental_mask = image * dental_mask
+         decay_mask[image_masked_with_dental_mask > threshold*255] = 1
+         decay_masks.append(decay_mask)
+     decay_masks = np.array(decay_masks)
+     return decay_masks
+
+
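+ # Streamlit UI: file uploader plus four clickable example images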
+ st.subheader("Upload Dental Panoramic X-ray Image")
+ image_file = st.file_uploader("Upload Images", type=["png","jpg","jpeg"])
+
+ col1, col2, col3, col4 = st.columns(4)
+ with col1:
+     ex=load_image(examples[0])
+     st.image(ex,width=200)
+     if st.button('Example 1'):
+         image_file=examples[0]
+
+ with col2:
+     ex1=load_image(examples[1])
+     st.image(ex1,width=200)
+     if st.button('Example 2'):
+         image_file=examples[1]
+
+ with col3:
+     ex2=load_image(examples[2])
+     st.image(ex2,width=200)
+     if st.button('Example 3'):
+         image_file=examples[2]
+
+ with col4:
+     ex2=load_image(examples[3])
+     st.image(ex2,width=200)
+     if st.button('Example 4'):
+         image_file=examples[3]
+
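+ # Main pipeline: runs once an image is uploaded or an example is selected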
+ if image_file is not None:
+
+     image_original = Image.open(image_file)
+     image=np.asarray(image_original)
+     image = convert_rgb(image)
+     st.subheader("Original Image")
+     st.image(image,width=1100)
+
+     st.text("Making A Prediction ....")
+
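+     # Preprocess for the model: single channel, 512x512, scaled to [0, 1]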
+     img=np.asarray(image)
+
+     img_cv=convert_one_channel(img)
+     img_cv=cv2.resize(img_cv,(512,512), interpolation=cv2.INTER_LANCZOS4)
+     img_cv=np.float32(img_cv/255)
+
+     img_cv=np.reshape(img_cv,(1,512,512,1))
+     prediction=model.predict(img_cv)
+     predicted=prediction[0]
+     predicted_rgb = np.expand_dims(predicted, axis=-1)
+     plt.imsave("predict.png",predicted_rgb)
+
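+     # Resize the predicted mask back to the original resolution and flag bright
+     # in-mask pixels as potential decay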
+     predict1 = cv2.resize(predicted, (img.shape[1], img.shape[0]), interpolation=cv2.INTER_LANCZOS4)
+     img_dc=convert_one_channel(img)
+     decay_mask = detect_decays_static_th(img_dc, predict1)
+
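+     # Overlay results on the original-resolution image: outline the predicted tooth
+     # regions and fill the candidate decay regions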
+     mask = np.uint8(predict1 * 255)
+     _, mask = cv2.threshold(mask, thresh=255/2, maxval=255, type=cv2.THRESH_BINARY)
+     cnts, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
+     img = cv2.drawContours(img, cnts, -1, (0, 0, 255), 2)
+
+     mask = np.uint8(decay_mask * 255)
+     _, mask = cv2.threshold(mask, thresh=255/2, maxval=255, type=cv2.THRESH_BINARY)
+     cnts, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
+     img = cv2.fillPoly(img, cnts, (255, 0, 0))
+
+     if img is not None :
+         st.subheader("Predicted teeth shape + caries zones")
+         st.write(img.shape)
+         st.image(img,width=1100)
+
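+     # Separate and count individual teeth with connected component analysis on the
+     # saved prediction mask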
+     image=np.asarray(image_original)
+     image = convert_rgb(image)
+     if image.shape[1] < 3000:
+         image = cv2.resize(image,(3100,1150),interpolation=cv2.INTER_LANCZOS4)
+     predicted=cv2.imread("predict.png")
+     predicted = cv2.resize(predicted, (image.shape[1],image.shape[0]), interpolation=cv2.INTER_LANCZOS4)
+     cca_result,teeth_count=CCA_Analysis(image,predicted,3,2)
+
+     if cca_result is not None :
+         st.subheader("Separate predicted teeth")
+         st.write(cca_result.shape)
+         st.image(cca_result,width=1100)
+
+         st.text("Teeth Count = " + str(teeth_count))
+
+     st.text("DONE ! ....")
+
requirements.txt CHANGED
@@ -4,4 +4,5 @@ Pillow
  scipy
  streamlit
  tensorflow
- opencv-python-headless
+ opencv-python-headless
+ matplotlib