Commit 69e68df
Parent: 505f5b6
Initialization
0.png     ADDED
100.png   ADDED
134.png   ADDED
19.png    ADDED
36.png    ADDED
44.png    ADDED
602.png   ADDED
7316.png  ADDED
929.png   ADDED
app.py    ADDED
@@ -0,0 +1,260 @@
import os
import numpy as np
import cv2
import streamlit as st
import pandas as pd
import base64
from openai import OpenAI
import copy
import time
import io

# OpenAI client; the API key is read from the OPENAI_API_KEY environment variable.
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

st.set_page_config(
    page_title="Image to Analyze",
    page_icon="📝",
    layout="wide",
    initial_sidebar_state="expanded",
)

col1, col2 = st.columns(2)

wl1, wl2 = st.columns(2)


# Encode an uploaded file as a base64 data URL (used for previews and the vision API).
def to_base64(uploaded_file):
    file_buffer = uploaded_file.read()
    b64 = base64.b64encode(file_buffer).decode()
    return f"data:image/png;base64,{b64}"


# Build the working dataframe from the uploaded images and merge it with any
# results already stored in session state, keeping existing descriptions.
def generate_df(new_img_list):

    # if st.session_state.buttondo:
    #     for img in new_img_list:
    #         with open(img, mode="rb") as f:
    #             uploaded_file = f.read()
    #
    #         current_df = pd.DataFrame(
    #             {
    #                 "image_id": img,
    #                 "image": f"data:image/png;base64,{base64.b64encode(uploaded_file).decode()}",
    #                 "name": img,
    #                 "defect_type": "",
    #                 "description": "",
    #             }
    #         )
    #
    # else:
    current_df = pd.DataFrame(
        {
            "image_id": [img.file_id for img in st.session_state.images],
            "image": [to_base64(img) for img in st.session_state.images],
            "name": [img.name for img in st.session_state.images],
            "defect_type": [""] * len(st.session_state.images),
            "description": [""] * len(st.session_state.images),
        }
    )

    if "df" not in st.session_state:
        st.session_state.df = current_df
        return

    new_df = pd.merge(current_df, st.session_state.df, on=["image_id"], how="outer", indicator=True)
    new_df = new_df[new_df["_merge"] != "right_only"].drop(columns=["_merge", "name_y", "image_y", "description_x"])
    new_df = new_df.rename(columns={"name_x": "name", "image_x": "image", "description_y": "description"})
    new_df["defect_type"] = new_df["defect_type"].fillna("")
    new_df["description"] = new_df["description"].fillna("")

    st.session_state.df = new_df


# Display the results table with image previews in a read-only data editor.
def render_df():
    def highlight_col(x):
        r = 'background-color: red'
        df1 = pd.DataFrame('', index=x.index, columns=x.columns)
        df1.iloc[:, 2] = r
        return df1

    st.session_state.df.style.apply(highlight_col, axis=None)

    st.data_editor(
        st.session_state.df,
        column_config={
            "image": st.column_config.ImageColumn(
                "Preview Image", help="Image preview", width=100
            ),
            "name": st.column_config.Column("Name", help="Image name", width=200),
            "defect_type": st.column_config.Column(
                "Defect Type", help="Defect description", width=400
            ),
            "description": st.column_config.Column(
                "Description", help="Image description", width=800
            ),
        },
        disabled=True,
        hide_index=True,
        height=500,
        column_order=["image", "name", "defect_type", "description"],
        use_container_width=True,
    )


# Ask GPT-4 Vision for the defect type, a description, and a suggested solution.
def generate_description(image_base64):
    response = client.chat.completions.create(
        model="gpt-4-vision-preview",
        messages=[
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": """Please answer in the following format:
Wafer Defect Type: <put the defect type here only>
Description: <analyze how the defect type occurred>
Solution: <suggest a solution to prevent this in the future>"""},
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": image_base64,
                        },
                    },
                ],
            }
        ],
        max_tokens=50,
    )
    return response.choices[0].message.content


# Fill in defect type and description for rows that do not have one yet.
def update_df():
    indexes = st.session_state.df[st.session_state.df["description"] == ""].index
    for idx in indexes:
        description = generate_description(st.session_state.df.loc[idx, "image"])
        st.session_state.df.loc[idx, "defect_type"] = description.split("\n")[0]
        st.session_state.df.loc[idx, "description"] = description.split("\n")[-1]


# Title for the Streamlit app
st.title("Wafer Defect Detection with LLM Classification and Analysis")

# Embed two locally hosted dashboards side by side.
with col1:
    st.components.v1.iframe("http://localhost:7456/", width=None, height=500, scrolling=False)

with col2:
    st.components.v1.iframe("http://localhost:7457/", width=None, height=500, scrolling=False)

default_img = ""
st.session_state.buttondo = False

# Sidebar: image uploader, baseline selector, and reference images per defect class.
with st.sidebar:
    st.title("Upload Your Images")
    st.session_state.images = st.file_uploader(label=" ", accept_multiple_files=True)

    genre = st.radio(
        "Baseline",
        ["Propose Solution", "Baseline 1", "Baseline 0.2", "Baseline 0.1"])

    Center = st.checkbox('Center')
    if Center:
        st.image('44.png', width=100)

    Donut = st.checkbox('Donut')
    if Donut:
        st.image('7316.png', width=100)

    EdgeLoc = st.checkbox('Edge-Loc')
    if EdgeLoc:
        st.image('36.png', width=100)

    EdgeRing = st.checkbox('Edge-Ring')
    if EdgeRing:
        st.image('100.png', width=100)

    Loc = st.checkbox('Loc')
    if Loc:
        st.image('19.png', width=100)

    NearFull = st.checkbox('Near-Full')
    if NearFull:
        st.image('929.png', width=100)

    NoDefect = st.checkbox('No Defect')
    if NoDefect:
        st.image('0.png', width=100)

    Random = st.checkbox('Random')
    if Random:
        st.image('602.png', width=100)

    Scratch = st.checkbox('Scratch')
    if Scratch:
        st.image('134.png', width=100)

    if st.button("Detect", type="primary"):
        st.session_state.buttondo = True

# Uploading a file for processing
# uploaded_file = st.file_uploader("Choose an image", type=["jpg", "jpeg", "png"])

if st.session_state.images or st.session_state.buttondo:
    imgs = []
    new_img_list = copy.deepcopy(st.session_state.images)
    # if st.session_state.buttondo:
    #     new_img_list = [default_img]
    # else:
    #     new_img_list = copy.deepcopy(st.session_state.images)

    for img in new_img_list:
        image = cv2.imdecode(np.frombuffer(img.read(), np.uint8), 1)
        # if st.session_state.buttondo:
        #     image = cv2.imread(img)
        # else:
        #     image = cv2.imdecode(np.frombuffer(img.read(), np.uint8), 1)

        # Classical pipeline: grayscale -> blur -> denoise -> Otsu threshold -> morphology -> contours.
        img = image.copy()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        blur = cv2.blur(gray, (10, 10))

        dst = cv2.fastNlMeansDenoising(blur, None, 10, 7, 21)

        _, binary = cv2.threshold(dst, 127, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

        kernel = np.ones((5, 5), np.uint8)

        erosion = cv2.erode(binary, kernel, iterations=1)
        dilation = cv2.dilate(binary, kernel, iterations=1)

        if (dilation == 0).sum() > 1:
            contours, _ = cv2.findContours(dilation, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
            for i in contours:
                if cv2.contourArea(i) < 261121.0:  # 511 * 511: ignores a contour covering the whole frame
                    cv2.drawContours(img, i, -1, (0, 0, 255), 3)
                    cv2.putText(img, "", (30, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
        else:
            cv2.putText(img, "Good wafer", (30, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

        # st.image(image, caption="original image", channels="BGR")
        # st.image(blur, caption="blur")
        # st.image(binary, caption="binary")
        # st.image(erosion, caption="erosion")
        # st.image(dilation, caption="dilation")
        imgs.append(img)

    # st.image(imgs)

    with st.spinner('Wait for it...'):
        time.sleep(10)
        st.success('Done!')

    generate_df(new_img_list)
    # print(st.session_state.images)
    update_df()

    st.download_button(
        "Download descriptions as CSV",
        st.session_state.df.drop(["image", "image_id"], axis=1).to_csv(index=False),
        "descriptions.csv",
        "text/csv",
        use_container_width=True
    )

    render_df()

# cv2.waitKey(0)
# cv2.destroyAllWindows()
w3.jpg    ADDED
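For reference, the contour-based detection step in app.py can be exercised on a single wafer image outside Streamlit. The sketch below repeats the same OpenCV calls under the assumption that a local file named wafer.png exists; the input and output file names are illustrative and are not part of this commit.

import cv2
import numpy as np

# Same preprocessing as app.py: grayscale -> blur -> denoise -> Otsu threshold -> dilation.
image = cv2.imread("wafer.png")  # hypothetical input file
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.blur(gray, (10, 10))
dst = cv2.fastNlMeansDenoising(blur, None, 10, 7, 21)
_, binary = cv2.threshold(dst, 127, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
kernel = np.ones((5, 5), np.uint8)
dilation = cv2.dilate(binary, kernel, iterations=1)

# Dark pixels remaining after dilation are treated as candidate defects,
# mirroring the (dilation == 0).sum() > 1 check in app.py.
if (dilation == 0).sum() > 1:
    contours, _ = cv2.findContours(dilation, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    for c in contours:
        if cv2.contourArea(c) < 261121.0:  # same area cut-off as app.py (511 * 511)
            cv2.drawContours(image, c, -1, (0, 0, 255), 3)
    cv2.imwrite("wafer_annotated.png", image)  # hypothetical output file
else:
    print("Good wafer")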