Commit 236dfd7
Parent(s): 0679e0d

update: tracking

- app.py +148 -236
- requirements.txt +2 -51
app.py CHANGED

@@ -9,6 +9,12 @@ import time
 import io
 import uuid
 import html
+import time
+import os
+import urllib.request
+
+from skimage.metrics import structural_similarity as compare_ssim
+import imutils
 
 st.set_page_config(
     page_title="Image to Analyze",
@@ -17,191 +23,71 @@ st.set_page_config(
     initial_sidebar_state="expanded",
 )
 
-
-
-
-
-        f"""
-        <div style="display:none" id="{div_id}">
-            <iframe src="javascript: \
-                var script = document.createElement('script'); \
-                script.type = 'text/javascript'; \
-                script.text = {html.escape(repr(source))}; \
-                var div = window.parent.document.getElementById('{div_id}'); \
-                div.appendChild(script); \
-                div.parentElement.parentElement.parentElement.style.display = 'none'; \
-            "/>
-        </div>
-        """,
-        unsafe_allow_html=True,
-    )
-
-def screenshot_window() -> None:
-    # JS Code to be executed
-    source = """
-        // Function to detect if the current browser is Chrome
-        const isChrome = () => /Chrome/.test(navigator.userAgent) && /Google Inc/.test(navigator.vendor);
-
-        /*
-        const button = document.getElementById('reportButton');
-        button.addEventListener('click', function() {
-            // Alert and exit if the browser is Chrome
-            if (isChrome()) {
-                //alert("Currently this function is available only on Firefox!");
-                //button.style.display = 'none'; // Hides the button
-                //return;
-            }
-
-            // Load a script dynamically and execute a callback after loading
-            const loadScript = (url, isLoaded, callback) => {
-                if (!isLoaded()) {
-                    const script = document.createElement('script');
-                    script.type = 'text/javascript';
-                    script.onload = callback;
-                    script.src = url;
-                    document.head.appendChild(script);
-                } else {
-                    callback();
-                }
-            };
-
-            // Check if html2canvas library is loaded
-            const isHtml2CanvasLoaded = () => typeof html2canvas !== 'undefined';
-
-            // Capture an individual iframe and call a callback with the result
-            const captureIframe = (iframe, callback) => {
-                try {
-                    const iframeDoc = iframe.contentDocument || iframe.contentWindow.document;
-                    console.log(iframeDoc)
-                    html2canvas(iframeDoc.body, {
-                        scale: 1,
-                        logging: true,
-                        useCORS: true,
-                        allowTaint: true
-                    }).then(canvas => {
-                        callback(canvas ? canvas : null);
-                    }).catch(error => {
-                        console.error('Could not capture iframe:', error);
-                        callback(null);
-                    });
-                } catch (error) {
-                    console.error('Could not access iframe:', error);
-                    callback(null);
-                }
-            };
-
-            // Main function to capture all windows
-            const captureAllWindows = () => {
-                const streamlitDoc = window.parent.document;
-                const stApp = streamlitDoc.querySelector('.main > .block-container');
-                const iframes = Array.from(stApp.querySelectorAll('iframe'));
-                let capturedImages = [];
-
-                // Process each iframe sequentially
-                const processIframes = (index = 0) => {
-                    if (index < iframes.length) {
-                        captureIframe(iframes[index], function(canvas) {
-                            if (canvas) {
-                                const img = document.createElement('img');
-                                img.src = canvas.toDataURL('image/png');
-                                capturedImages.push({iframe: iframes[index], img: img});
-                            } else {
-                                console.error('Skipping an iframe due to capture failure.');
-                            }
-                            processIframes(index + 1);
-                        });
-                    } else {
-                        // Capture the main app window after processing all iframes
-                        html2canvas(stApp, {
-                            onclone: function(clonedDocument) {
-                                const clonedIframes = Array.from(clonedDocument.querySelectorAll('iframe'));
-                                capturedImages.forEach(({img}, index) => {
-                                    if (index < clonedIframes.length) {
-                                        const clonedIframe = clonedIframes[index];
-                                        clonedIframe.parentNode.replaceChild(img, clonedIframe);
-                                    }
-                                });
-                            },
-                            scale: 1,
-                            logging: true,
-                            useCORS: true,
-                            allowTaint: true,
-                            ignoreElements: () => {}
-                        }).then(finalCanvas => {
-                            // Create a download link for the captured image
-                            finalCanvas.toBlob(blob => {
-                                const url = window.URL.createObjectURL(blob);
-                                var link = document.createElement('a');
-                                link.style.display = 'none';
-                                link.href = url;
-                                link.download = 'screenshot.png';
-                                document.body.appendChild(link);
-                                link.click();
-                                document.body.removeChild(link);
-                                window.URL.revokeObjectURL(url);
-                            });
-                        }).catch(error => {
-                            console.error('Screenshot capture failed:', error);
-                        });
-                    }
-                };
-
-                processIframes();
-            };
-
-            loadScript(
-                'https://cdnjs.cloudflare.com/ajax/libs/html2canvas/1.3.2/html2canvas.min.js',
-                isHtml2CanvasLoaded,
-                captureAllWindows
-            );
-
-        });
-        */
-    """
-
-    inject_js_code(source=source)
-
-def add_reportgen_button():
-    st.markdown(
-        """
-        <!-- <button id="reportButton" class="st-style-button">Generate Page Report</button> -->
-
-        <style>
-        .st-style-button {
-            display: inline-flex;
-            -webkit-box-align: center;
-            align-items: center;
-            -webkit-box-pack: center;
-            justify-content: center;
-            font-weight: 400;
-            padding: 0.25rem 0.75rem;
-            border-radius: 0.5rem;
-            min-height: 38.4px;
-            margin: 0px;
-            line-height: 1.6;
-            color: inherit;
-            width: auto;
-            user-select: none;
-            background-color: white; /* Set a white background */
-            border: 1px solid rgba(49, 51, 63, 0.2);
-            outline: none; !important
-            box-shadow: none !important;
-        }
+default_img = ""
+st.session_state.buttondo = False
+st.session_state.buttontrack = False
+st.session_state.images = False
 
-        .st-style-button:hover {
-            background-color: white;
-            color: #0A04D2;
-            border: 1px solid #0A04D2;
-        }
+with st.sidebar:
 
-
-
-
-    )
-    screenshot_window()
+    input_source = st.sidebar.radio(
+        "Select input source",
+        ('Realtime', 'Presets', 'Upload'))
 
-
+    genre = st.radio(
+        "Baseline",
+        ["Propose Solution", "Baseline 1", "Baseline 2", "Baseline 3"])
+
+    if input_source == "Presets":
+
+        Center = st.checkbox('Center')
+        if Center:
+            st.image('44.png', width=100)
+
+        Donut = st.checkbox('Donut')
+        if Donut:
+            st.image('7316.png', width=100)
+
+        EdgeLoc = st.checkbox('Edge-Loc')
+        if EdgeLoc:
+            st.image('36.png', width=100)
+
+        EdgeRing = st.checkbox('Edge-Ring')
+        if EdgeRing:
+            st.image('100.png', width=100)
+
+        Loc = st.checkbox('Loc')
+        if Loc:
+            st.image('19.png', width=100)
+
+        NearFull = st.checkbox('Near-Full')
+        if NearFull:
+            st.image('929.png', width=100)
+
+        NoDefect = st.checkbox('No Defect')
+        if NoDefect:
+            st.image('0.png', width=100)
+
+        Random = st.checkbox('Random')
+        if Random:
+            st.image('602.png', width=100)
+
+        Scratch = st.checkbox('Scratch')
+        if Scratch:
+            st.image('134.png', width=100)
+
+        NoDefect = st.checkbox('No-Defect')
+        if NoDefect:
+            st.image('0.png', width=100)
+
+        if st.button("Detect", type="primary"):
+            st.session_state.buttondo = True
+    elif input_source == "Upload":
+        st.title("Upload Your Images")
+        st.session_state.images = st.file_uploader(label=" ", accept_multiple_files=True)
+    else:
+        if st.button("Start Tracking", type="primary"):
+            st.session_state.buttontrack = True
 
 client = OpenAI(api_key='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')
 
@@ -294,7 +180,7 @@ def generate_description(image_base64):
             "role": "user",
             "content": [
                 {"type": "text", "text": """Please answer at the following format:
-Defect
+Defect type: Center
 Description: <Analyze how the wafer defect type in the image>"""},
                 {
                     "type": "image_url",
@@ -305,7 +191,7 @@ def generate_description(image_base64):
                 ],
             }
         ],
-        max_tokens=50,
+        # max_tokens=50,
     )
     return response.choices[0].message.content
 
@@ -320,57 +206,71 @@ def update_df():
 #Creating title for Streamlit app
 st.title("Wafer Defect Detection with LLM Classification and Analyze")
 
-st.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+if st.session_state.buttontrack:
+    stframe = st.empty()
+
+    vcap = cv2.VideoCapture('https://whitewolf21.github.io/live/real-time-wafer-tracking.mp4')
+    #if not vcap.isOpened():
+    #    print "File Cannot be Opened"
+
+    col1, col2 = st.columns(2)
+
+    previous_img = None
+    count = 0
+    while True:
+        count += 1
+        # Capture frame-by-frame
+        ret, frame = vcap.read()
+        #print cap.isOpened(), ret
+        if frame is not None:
+            current_img = frame.copy()
+            draw_current_img = frame.copy()
+            if previous_img is None:
+                previous_img = frame.copy()
+                draw_previous_img = frame.copy()
+            else:
+                grayA = cv2.cvtColor(current_img, cv2.COLOR_BGR2GRAY)
+                grayB = cv2.cvtColor(previous_img, cv2.COLOR_BGR2GRAY)
+
+                (score, diff) = compare_ssim(grayA, grayB, full=True)
+                diff = (diff * 255).astype("uint8")
+
+                # print("SSIM: {}".format(score))
+                # if score < 0.15:
+                #     cv2.imwrite(f"test/A{count}.png", grayA)
+                #     cv2.imwrite(f"test/B{count}.png", grayB)
+                #     cv2.imwrite(f"test/{count}.png", diff)
+                #     print("SSIM: {}".format(score))
+
+                thresh = cv2.threshold(diff, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
+                cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+                cnts = imutils.grab_contours(cnts)
+
+                for c in cnts:
+                    (x, y, w, h) = cv2.boundingRect(c)
+                    # print((x, y, w, h))
+                    if w > 15 and h > 15 and w < 100 and h < 100:
+                        cv2.rectangle(draw_current_img, (x, y), (x + w, y + h), (0, 0, 255), 2)
+                        cv2.rectangle(draw_previous_img, (x, y), (x + w, y + h), (0, 0, 255), 2)
+                        if x > 130 and x < 150 and y > 270 and y < 300:
+                            # print((x, y, w, h))
+                            # cv2.rectangle(draw_current_img, (150, 200), (150 + 1, 200 + 1), (0, 0, 255), 2)
+                            cv2.imwrite(f"test/crop{(x, y, w, h)}.png", draw_current_img[y:y+h, x:x+w])
+
+                previous_img = current_img.copy()
+
+            with col1:
+                stframe.image(draw_current_img, channels="BGR")
+
+
+            # Display the resulting frame
+            # cv2.imshow('frame',frame)
+            # Press q to close the video windows before it ends if you want
+            # if cv2.waitKey(22) & 0xFF == ord('q'):
+            #     break
+        else:
+            print("Frame is None")
+            vcap = cv2.VideoCapture('https://whitewolf21.github.io/live/real-time-wafer-tracking.mp4')
 
 #uploading file for processing
 # uploaded_file = st.file_uploader("Choose an image", type=["jpg", "jpeg", "png"])
@@ -440,4 +340,16 @@ if st.session_state.images or st.session_state.buttondo:
 
 render_df()
 # cv2.waitKey(0)
-# cv2.destroyAllWindows()
+# cv2.destroyAllWindows()
+
+
+# while True:
+#     try:
+#         req = urllib.request.urlopen('http://localhost/current-realtime-tracking-wafer.png')
+#         arr = np.asarray(bytearray(req.read()), dtype=np.uint8)
+#         img = cv2.imdecode(arr, -1)
+
+#         if stframe is not None:
+#             stframe.image(img, channels="BGR")
+#     except:
+#         pass
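Note on the technique: the "tracking" this commit adds is not a dedicated object tracker; it flags motion by SSIM frame differencing (compare_ssim, then an Otsu threshold, then contour extraction), as the hunk above shows. Below is a minimal standalone sketch of the same pipeline outside Streamlit. The file names frame_prev.png, frame_curr.png, and changes.png are illustrative placeholders, not files from the commit, which reads consecutive frames from a video stream instead.

    import cv2
    import imutils
    from skimage.metrics import structural_similarity as compare_ssim

    # Placeholder inputs; the commit pulls consecutive frames from cv2.VideoCapture.
    frame_prev = cv2.imread("frame_prev.png")
    frame_curr = cv2.imread("frame_curr.png")

    gray_prev = cv2.cvtColor(frame_prev, cv2.COLOR_BGR2GRAY)
    gray_curr = cv2.cvtColor(frame_curr, cv2.COLOR_BGR2GRAY)

    # full=True returns the per-pixel similarity map alongside the scalar score;
    # scaled to 0-255, changed regions come out dark in the map.
    score, diff = compare_ssim(gray_curr, gray_prev, full=True)
    diff = (diff * 255).astype("uint8")

    # Inverted Otsu threshold turns low-similarity (changed) pixels into white
    # blobs, which findContours can then box.
    thresh = cv2.threshold(diff, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
    cnts = imutils.grab_contours(
        cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    )

    # Same size gate as the commit: skip pixel noise and near-full-frame changes.
    for c in cnts:
        x, y, w, h = cv2.boundingRect(c)
        if 15 < w < 100 and 15 < h < 100:
            cv2.rectangle(frame_curr, (x, y), (x + w, y + h), (0, 0, 255), 2)

    print(f"SSIM: {score:.3f}")
    cv2.imwrite("changes.png", frame_curr)

The 15-100 px gate mirrors the commit; its extra crop condition (x between 130 and 150, y between 270 and 300) is tuned to one spot in the demo video and would need re-tuning for any other stream.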
requirements.txt CHANGED

@@ -1,59 +1,10 @@
-altair==5.2.0
-annotated-types==0.6.0
-anyio==4.3.0
-attrs==23.2.0
-blinker==1.7.0
-cachetools==5.3.3
-certifi==2024.2.2
-charset-normalizer==3.3.2
-click==8.1.7
-colorama==0.4.6
-distro==1.9.0
-exceptiongroup==1.2.0
-gitdb==4.0.11
-GitPython==3.1.42
-h11==0.14.0
-httpcore==1.0.4
-httpx==0.27.0
-idna==3.6
-importlib_metadata==7.0.2
-Jinja2==3.1.3
-jsonschema==4.21.1
-jsonschema-specifications==2023.12.1
-markdown-it-py==3.0.0
-MarkupSafe==2.1.5
-mdurl==0.1.2
 numpy==1.26.4
 openai==1.12.0
 opencv-python==4.9.0.80
-packaging==23.2
 pandas==2.2.0
 pillow==10.2.0
 protobuf==4.25.3
-pyarrow==15.0.1
-pydantic==2.6.4
-pydantic_core==2.16.3
-pydeck==0.8.1b0
-Pygments==2.17.2
-python-dateutil==2.9.0.post0
-pytz==2024.1
-referencing==0.33.0
-requests==2.31.0
-rich==13.7.1
-rpds-py==0.18.0
-six==1.16.0
-smmap==5.0.1
-sniffio==1.3.1
 streamlit==1.31.0
-tenacity==8.2.3
-toml==0.10.2
-toolz==0.12.1
-tornado==6.4
-tqdm==4.66.2
-typing_extensions==4.10.0
-tzdata==2024.1
-tzlocal==5.2
 urllib3==2.2.1
-
-
-zipp==3.18.0
+scikit-image==0.22.0
+imutils==0.5.4
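The two new pins back the imports added at the top of app.py (skimage.metrics.structural_similarity and imutils). A quick sanity check for the trimmed environment, offered as a sketch rather than anything from the commit:

    import numpy as np
    from skimage.metrics import structural_similarity as compare_ssim
    import imutils  # noqa: F401 -- import check only; app.py uses imutils.grab_contours

    # SSIM of a frame against an identical copy should be (numerically) 1.0.
    frame = np.zeros((64, 64), dtype=np.uint8)
    score, _ = compare_ssim(frame, frame.copy(), full=True)
    assert abs(score - 1.0) < 1e-9
    print("scikit-image and imutils OK; SSIM(frame, frame) =", score)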