awacke1 committed (verified) · Commit 2578d93 · 1 Parent(s): e1bf9f9

Create backup5.app.py

Files changed (1): backup5.app.py (+263, -0)
backup5.app.py ADDED
@@ -0,0 +1,263 @@
#!/usr/bin/env python3
import os
import glob
import time
import streamlit as st
from PIL import Image
import torch
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration, TrOCRProcessor, VisionEncoderDecoderModel
from diffusers import StableDiffusionPipeline
import cv2
import numpy as np
import logging
import asyncio
import aiofiles

# Logging setup
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
logger = logging.getLogger(__name__)
log_records = []

class LogCaptureHandler(logging.Handler):
    def emit(self, record):
        log_records.append(record)

# Streamlit re-executes this script on every interaction while the logger
# object persists in-process, so guard against stacking a duplicate handler
# (and duplicate log lines) on each rerun.
if not any(isinstance(h, LogCaptureHandler) for h in logger.handlers):
    logger.addHandler(LogCaptureHandler())

# Page Configuration
st.set_page_config(
    page_title="AI Vision Titans 🚀",
    page_icon="🤖",
    layout="wide",
    initial_sidebar_state="expanded",
    menu_items={'About': "AI Vision Titans: OCR, Image Gen, Line Drawings on CPU! 🌌"}
)

# Initialize st.session_state
if 'captured_images' not in st.session_state:
    st.session_state['captured_images'] = []
if 'processing' not in st.session_state:
    st.session_state['processing'] = {}

# Utility Functions
def generate_filename(sequence, ext="png"):
    from datetime import datetime
    import pytz
    central = pytz.timezone('US/Central')
    timestamp = datetime.now(central).strftime("%d%m%Y%H%M%S%p")
    return f"{sequence}{timestamp}.{ext}"

def get_gallery_files(file_types):
    return sorted([f for ext in file_types for f in glob.glob(f"*.{ext}")])

def update_gallery():
    media_files = get_gallery_files(["png", "txt"])
    if media_files:
        cols = st.sidebar.columns(2)
        for idx, file in enumerate(media_files[:gallery_size * 2]):
            with cols[idx % 2]:
                if file.endswith(".png"):
                    st.image(Image.open(file), caption=file, use_container_width=True)
                elif file.endswith(".txt"):
                    # Read once: repeated f.read() calls return "" after the
                    # first one exhausts the file handle.
                    with open(file, "r") as f:
                        content = f.read()
                    st.text((content[:50] + "...") if len(content) > 50 else content, help=file)

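# --- Editor's sketch (not part of the committed file) ---
# get_gallery_files() above sorts lexicographically, and the day-first
# timestamp from generate_filename() does not sort chronologically across
# months. Sorting by modification time does; `get_gallery_files_by_mtime`
# is a hypothetical helper shown for illustration and is not called by the app.
def get_gallery_files_by_mtime(file_types):
    files = [f for ext in file_types for f in glob.glob(f"*.{ext}")]
    return sorted(files, key=os.path.getmtime)
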
# Model Loaders (Smaller, CPU-focused)
def load_ocr_qwen2vl():
    model_id = "prithivMLmods/Qwen2-VL-OCR-2B-Instruct"
    processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
    model = Qwen2VLForConditionalGeneration.from_pretrained(model_id, trust_remote_code=True, torch_dtype=torch.float32).to("cpu").eval()
    return processor, model

def load_ocr_trocr():
    model_id = "microsoft/trocr-small-handwritten"  # ~250 MB
    processor = TrOCRProcessor.from_pretrained(model_id)
    model = VisionEncoderDecoderModel.from_pretrained(model_id, torch_dtype=torch.float32).to("cpu").eval()
    return processor, model

def load_image_gen():
    model_id = "OFA-Sys/small-stable-diffusion-v0"  # ~300 MB
    pipeline = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float32).to("cpu")
    return pipeline

def load_line_drawer():
    # Simplified OpenCV-based edge detection (CPU-friendly substitute for Torch Space UNet)
    def edge_detection(image):
        img_np = np.array(image.convert("RGB"))
        gray = cv2.cvtColor(img_np, cv2.COLOR_RGB2GRAY)
        edges = cv2.Canny(gray, 100, 200)
        return Image.fromarray(edges)
    return edge_detection

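# --- Editor's sketch (not part of the committed file) ---
# Streamlit reruns the whole script on every widget interaction, so the
# loaders above re-instantiate their models each time they are called.
# Wrapping a loader in st.cache_resource (Streamlit's standard API for
# heavyweight objects) keeps one instance per process. The wrapper name is
# assumed for illustration and nothing in this app calls it.
@st.cache_resource
def load_ocr_trocr_cached():
    return load_ocr_trocr()
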
# Async Processing Functions
async def process_ocr(image, prompt, model_name, output_file):
    start_time = time.time()
    status = st.empty()
    status.text(f"Processing {model_name} OCR... (0s)")
    if model_name == "Qwen2-VL-OCR-2B":
        processor, model = load_ocr_qwen2vl()
        # Corrected input format: apply chat template
        messages = [{"role": "user", "content": [{"type": "image", "image": image}, {"type": "text", "text": prompt}]}]
        text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        inputs = processor(text=[text], images=[image], return_tensors="pt", padding=True).to("cpu")
        outputs = model.generate(**inputs, max_new_tokens=1024)
        result = processor.batch_decode(outputs, skip_special_tokens=True)[0]
    else:  # TrOCR
        processor, model = load_ocr_trocr()
        pixel_values = processor(images=image, return_tensors="pt").pixel_values.to("cpu")
        outputs = model.generate(pixel_values)
        result = processor.batch_decode(outputs, skip_special_tokens=True)[0]
    elapsed = int(time.time() - start_time)
    status.text(f"{model_name} OCR completed in {elapsed}s!")
    async with aiofiles.open(output_file, "w") as f:
        await f.write(result)
    st.session_state['captured_images'].append(output_file)
    return result

async def process_image_gen(prompt, output_file):
    start_time = time.time()
    status = st.empty()
    status.text("Processing Image Gen... (0s)")
    pipeline = load_image_gen()
    gen_image = pipeline(prompt, num_inference_steps=20).images[0]  # Reduced steps for speed
    elapsed = int(time.time() - start_time)
    status.text(f"Image Gen completed in {elapsed}s!")
    gen_image.save(output_file)
    st.session_state['captured_images'].append(output_file)
    return gen_image

async def process_line_drawing(image, output_file):
    start_time = time.time()
    status = st.empty()
    status.text("Processing Line Drawing... (0s)")
    edge_fn = load_line_drawer()
    line_drawing = edge_fn(image)
    elapsed = int(time.time() - start_time)
    status.text(f"Line Drawing completed in {elapsed}s!")
    line_drawing.save(output_file)
    st.session_state['captured_images'].append(output_file)
    return line_drawing

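# --- Editor's sketch (not part of the committed file) ---
# The coroutines above are async in name only: model.generate() and the
# diffusion pipeline are blocking calls, so asyncio.run() gains nothing and
# the event loop stalls during inference. One minimal way to keep the loop
# free is to push the blocking call onto a worker thread via asyncio.to_thread
# (Python >= 3.9). `_generate_off_thread` is a hypothetical helper, unused here.
async def _generate_off_thread(model, pixel_values):
    # Runs the blocking generate() in the default thread pool and awaits it.
    return await asyncio.to_thread(model.generate, pixel_values)
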
# Main App
st.title("AI Vision Titans 🚀 (OCR, Gen, Drawings!)")

# Sidebar Gallery
st.sidebar.header("Captured Images 🎨")
gallery_size = st.sidebar.slider("Gallery Size", 1, 10, 4)
update_gallery()

st.sidebar.subheader("Action Logs 📜")
log_container = st.sidebar.empty()
with log_container:
    for record in log_records:
        # Raw LogRecords have no pre-formatted `asctime`/`message` attributes
        # (a formatter sets those); derive them from `created`/getMessage().
        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(record.created))
        st.write(f"{timestamp} - {record.levelname} - {record.getMessage()}")

# Tabs
tab1, tab2, tab3, tab4 = st.tabs(["Camera Snap 📷", "Test OCR 🔍", "Test Image Gen 🎨", "Test Line Drawings ✏️"])

with tab1:
    st.header("Camera Snap 📷")
    st.subheader("Single Capture")
    cols = st.columns(2)
    with cols[0]:
        cam0_img = st.camera_input("Take a picture - Cam 0", key="cam0")
        if cam0_img:
            filename = generate_filename(0)
            if filename not in st.session_state['captured_images']:
                with open(filename, "wb") as f:
                    f.write(cam0_img.getvalue())
                st.image(Image.open(filename), caption=filename, use_container_width=True)
                logger.info(f"Saved snapshot from Camera 0: {filename}")
                st.session_state['captured_images'].append(filename)
                update_gallery()
    with cols[1]:
        cam1_img = st.camera_input("Take a picture - Cam 1", key="cam1")
        if cam1_img:
            filename = generate_filename(1)
            if filename not in st.session_state['captured_images']:
                with open(filename, "wb") as f:
                    f.write(cam1_img.getvalue())
                st.image(Image.open(filename), caption=filename, use_container_width=True)
                logger.info(f"Saved snapshot from Camera 1: {filename}")
                st.session_state['captured_images'].append(filename)
                update_gallery()

    st.subheader("Burst Capture")
    slice_count = st.number_input("Number of Frames", min_value=1, max_value=20, value=10, key="burst_count")
    if st.button("Start Burst Capture 📸"):
        st.session_state['burst_frames'] = []
        placeholder = st.empty()
        # Note: st.camera_input renders a widget that needs a user click per
        # frame, so this loop cannot capture automatically; burst mode is
        # best-effort.
        for i in range(slice_count):
            with placeholder.container():
                st.write(f"Capturing frame {i+1}/{slice_count}...")
                img = st.camera_input(f"Frame {i}", key=f"burst_{i}_{time.time()}")
                if img:
                    filename = generate_filename(f"burst_{i}")
                    if filename not in st.session_state['captured_images']:
                        with open(filename, "wb") as f:
                            f.write(img.getvalue())
                        st.session_state['burst_frames'].append(filename)
                        logger.info(f"Saved burst frame {i}: {filename}")
                        st.image(Image.open(filename), caption=filename, use_container_width=True)
                time.sleep(0.5)  # Small delay for visibility
        st.session_state['captured_images'].extend([f for f in st.session_state['burst_frames'] if f not in st.session_state['captured_images']])
        update_gallery()
        placeholder.success(f"Captured {len(st.session_state['burst_frames'])} frames!")

with tab2:
    st.header("Test OCR 🔍")
    captured_images = get_gallery_files(["png"])
    if captured_images:
        selected_image = st.selectbox("Select Image", captured_images, key="ocr_select")
        image = Image.open(selected_image)
        st.image(image, caption="Input Image", use_container_width=True)
        ocr_model = st.selectbox("Select OCR Model", ["Qwen2-VL-OCR-2B", "TrOCR-Small"], key="ocr_model_select")
        prompt = st.text_area("Prompt", "Extract text from the image", key="ocr_prompt")
        if st.button("Run OCR 🚀", key="ocr_run"):
            output_file = generate_filename("ocr_output", "txt")
            st.session_state['processing']['ocr'] = True
            result = asyncio.run(process_ocr(image, prompt, ocr_model, output_file))
            st.text_area("OCR Result", result, height=200, key="ocr_result")
            st.success(f"OCR output saved to {output_file}")
            st.session_state['processing']['ocr'] = False
    else:
        st.warning("No images captured yet. Use Camera Snap first!")

with tab3:
    st.header("Test Image Gen 🎨")
    captured_images = get_gallery_files(["png"])
    if captured_images:
        selected_image = st.selectbox("Select Image", captured_images, key="gen_select")
        image = Image.open(selected_image)
        st.image(image, caption="Reference Image", use_container_width=True)
        prompt = st.text_area("Prompt", "Generate a similar superhero image", key="gen_prompt")
        if st.button("Run Image Gen 🚀", key="gen_run"):
            output_file = generate_filename("gen_output", "png")
            st.session_state['processing']['gen'] = True
            result = asyncio.run(process_image_gen(prompt, output_file))
            st.image(result, caption="Generated Image", use_container_width=True)
            st.success(f"Image saved to {output_file}")
            st.session_state['processing']['gen'] = False
    else:
        st.warning("No images captured yet. Use Camera Snap first!")

with tab4:
    st.header("Test Line Drawings ✏️")
    captured_images = get_gallery_files(["png"])
    if captured_images:
        selected_image = st.selectbox("Select Image", captured_images, key="line_select")
        image = Image.open(selected_image)
        st.image(image, caption="Input Image", use_container_width=True)
        if st.button("Run Line Drawing 🚀", key="line_run"):
            output_file = generate_filename("line_output", "png")
            st.session_state['processing']['line'] = True
            result = asyncio.run(process_line_drawing(image, output_file))
            st.image(result, caption="Line Drawing", use_container_width=True)
            st.success(f"Line drawing saved to {output_file}")
            st.session_state['processing']['line'] = False
    else:
        st.warning("No images captured yet. Use Camera Snap first!")

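# --- Editor's sketch (not part of the committed file) ---
# cv2.Canny returns white edges on a black background; a sketch-style line
# drawing (dark lines on white) is one inversion away. `invert_edges` is a
# hypothetical helper shown for illustration only.
def invert_edges(edge_image):
    # A mode-"L" PIL image round-trips through NumPy and bitwise NOT cleanly.
    return Image.fromarray(cv2.bitwise_not(np.array(edge_image)))
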
# Final gallery refresh; note each update_gallery() call appends another
# block of thumbnails to the sidebar rather than replacing the previous one.
update_gallery()
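
# --- Editor's note (not part of the committed file) ---
# Launch with: streamlit run backup5.app.py
# Inferred from the imports above, the app needs: streamlit, torch,
# transformers, diffusers, opencv-python, numpy, pillow, aiofiles, pytz.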