import os
import glob
import time
import logging
from io import BytesIO

import streamlit as st
from PIL import Image
import torch
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration, AutoTokenizer, AutoModel
from diffusers import StableDiffusionPipeline
import cv2
import numpy as np
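
# In-memory log capture: a custom logging.Handler appends every record to a list
# so the sidebar can display the action log alongside the gallery.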

logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
logger = logging.getLogger(__name__)
log_records = []


class LogCaptureHandler(logging.Handler):
    def emit(self, record):
        log_records.append(record)


logger.addHandler(LogCaptureHandler())
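
# st.set_page_config must be the first Streamlit call in the script, so it comes
# right after the logging setup and before any other st.* usage.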

st.set_page_config(
    page_title="AI Vision Titans 🚀",
    page_icon="🤖",
    layout="wide",
    initial_sidebar_state="expanded",
    menu_items={'About': "AI Vision Titans: OCR, Image Gen, Line Drawings on CPU! 🌌"}
)
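
# st.session_state persists across Streamlit reruns; tracking saved filenames here
# prevents the same snapshot from being written to disk twice.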

if 'captured_images' not in st.session_state:
    st.session_state['captured_images'] = []
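
# Helpers: timestamped filenames, glob-based gallery discovery, and a two-column
# sidebar thumbnail grid.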

def generate_filename(sequence, ext="png"):
    from datetime import datetime
    import pytz
    central = pytz.timezone('US/Central')
    timestamp = datetime.now(central).strftime("%d%m%Y%H%M%S%p")
    return f"{sequence}{timestamp}.{ext}"


def get_gallery_files(file_types):
    return sorted([f for ext in file_types for f in glob.glob(f"*.{ext}")])

def update_gallery():
    media_files = get_gallery_files(["png"])
    if media_files:
        cols = st.sidebar.columns(2)
        for idx, file in enumerate(media_files[:gallery_size * 2]):
            with cols[idx % 2]:
                st.image(Image.open(file), caption=file, use_container_width=True)
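
# Model loaders. Everything is loaded in float32 and pinned to the CPU so the demo
# runs without a GPU (at the cost of slow inference).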

def load_ocr_qwen2vl():
    model_id = "prithivMLmods/Qwen2-VL-OCR-2B-Instruct"
    processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
    model = Qwen2VLForConditionalGeneration.from_pretrained(model_id, trust_remote_code=True, torch_dtype=torch.float32).to("cpu").eval()
    return processor, model


def load_ocr_got():
    model_id = "ucaslcl/GOT-OCR2_0"
    tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
    model = AutoModel.from_pretrained(model_id, trust_remote_code=True, torch_dtype=torch.float32).to("cpu").eval()
    return tokenizer, model


def load_image_gen():
    model_id = "OFA-Sys/small-stable-diffusion-v0"
    pipeline = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float32).to("cpu")
    return pipeline
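
# The loaders above reload their weights on every rerun that uses them. A minimal
# sketch of caching one of them with st.cache_resource (available in Streamlit
# 1.18+; the cached function name below is illustrative) would be:
#
#     @st.cache_resource
#     def load_image_gen_cached():
#         return StableDiffusionPipeline.from_pretrained(
#             "OFA-Sys/small-stable-diffusion-v0", torch_dtype=torch.float32
#         ).to("cpu")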

def load_line_drawer():
    # Classical Canny edge detection stands in for a learned line-drawing model.
    def edge_detection(image):
        img_np = np.array(image.convert("RGB"))
        gray = cv2.cvtColor(img_np, cv2.COLOR_RGB2GRAY)
        edges = cv2.Canny(gray, 100, 200)
        return Image.fromarray(edges)
    return edge_detection
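
# Page layout: title, sidebar (gallery thumbnails + action logs), and four feature tabs.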
st.title("AI Vision Titans 🚀 (OCR, Gen, Drawings!)") |
|
|
|
|
|
st.sidebar.header("Captured Images 🎨") |
|
gallery_size = st.sidebar.slider("Gallery Size", 1, 10, 4) |
|
update_gallery() |
|
|
|
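
# Mirror the records collected by LogCaptureHandler into the sidebar.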
st.sidebar.subheader("Action Logs 📜") |
|
log_container = st.sidebar.empty() |
|
with log_container: |
|
for record in log_records: |
|
st.write(f"{record.asctime} - {record.levelname} - {record.message}") |
|
|
|
|
|

tab1, tab2, tab3, tab4 = st.tabs(["Camera Snap 📷", "Test OCR 🔍", "Test Image Gen 🎨", "Test Line Drawings ✏️"])
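
# Tab 1: save snapshots from up to two cameras. Note that st.camera_input only
# returns an image after the user clicks its capture button, so the burst-capture
# loops below create new camera widgets (which start out empty) rather than
# grabbing frames automatically.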

with tab1:
    st.header("Camera Snap 📷")
    slice_count = st.number_input("Image Slice Count", min_value=1, max_value=20, value=10)
    cols = st.columns(2)
    with cols[0]:
        st.subheader("Camera 0")
        cam0_img = st.camera_input("Take a picture - Cam 0", key="cam0")
        if cam0_img:
            filename = generate_filename(0)
            if filename not in st.session_state['captured_images']:
                with open(filename, "wb") as f:
                    f.write(cam0_img.getvalue())
                st.image(Image.open(filename), caption=filename, use_container_width=True)
                logger.info(f"Saved snapshot from Camera 0: {filename}")
                st.session_state['captured_images'].append(filename)
                update_gallery()
        if st.button(f"Capture {slice_count} Frames - Cam 0 📸"):
            st.session_state['cam0_frames'] = []
            for i in range(slice_count):
                img = st.camera_input(f"Frame {i} - Cam 0", key=f"cam0_frame_{i}_{time.time()}")
                if img:
                    filename = generate_filename(f"0_{i}")
                    if filename not in st.session_state['captured_images']:
                        with open(filename, "wb") as f:
                            f.write(img.getvalue())
                        st.session_state['cam0_frames'].append(filename)
                        logger.info(f"Saved frame {i} from Camera 0: {filename}")
                time.sleep(1.0 / slice_count)
            st.session_state['captured_images'].extend([f for f in st.session_state['cam0_frames'] if f not in st.session_state['captured_images']])
            update_gallery()
            for frame in st.session_state['cam0_frames']:
                st.image(Image.open(frame), caption=frame, use_container_width=True)

    with cols[1]:
        st.subheader("Camera 1")
        cam1_img = st.camera_input("Take a picture - Cam 1", key="cam1")
        if cam1_img:
            filename = generate_filename(1)
            if filename not in st.session_state['captured_images']:
                with open(filename, "wb") as f:
                    f.write(cam1_img.getvalue())
                st.image(Image.open(filename), caption=filename, use_container_width=True)
                logger.info(f"Saved snapshot from Camera 1: {filename}")
                st.session_state['captured_images'].append(filename)
                update_gallery()
        if st.button(f"Capture {slice_count} Frames - Cam 1 📸"):
            st.session_state['cam1_frames'] = []
            for i in range(slice_count):
                img = st.camera_input(f"Frame {i} - Cam 1", key=f"cam1_frame_{i}_{time.time()}")
                if img:
                    filename = generate_filename(f"1_{i}")
                    if filename not in st.session_state['captured_images']:
                        with open(filename, "wb") as f:
                            f.write(img.getvalue())
                        st.session_state['cam1_frames'].append(filename)
                        logger.info(f"Saved frame {i} from Camera 1: {filename}")
                time.sleep(1.0 / slice_count)
            st.session_state['captured_images'].extend([f for f in st.session_state['cam1_frames'] if f not in st.session_state['captured_images']])
            update_gallery()
            for frame in st.session_state['cam1_frames']:
                st.image(Image.open(frame), caption=frame, use_container_width=True)
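
# Tab 2: OCR with either Qwen2-VL (processor + generate) or GOT-OCR2_0 (model.chat).
# Note: the Qwen2-VL docs build the prompt via processor.apply_chat_template so the
# image placeholder tokens are inserted, and the GOT-OCR2_0 model card passes an
# image file path to chat(); the simpler calls used below may need adjusting for
# those APIs.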

with tab2:
    st.header("Test OCR 🔍")
    captured_images = get_gallery_files(["png"])
    if captured_images:
        selected_image = st.selectbox("Select Image", captured_images, key="ocr_image_select")
        image = Image.open(selected_image)
        st.image(image, caption="Input Image", use_container_width=True)
        ocr_model = st.selectbox("Select OCR Model", ["Qwen2-VL-OCR-2B", "GOT-OCR2_0"])
        prompt = st.text_area("Prompt", "Extract text from the image")
        if st.button("Run OCR 🚀"):
            if ocr_model == "Qwen2-VL-OCR-2B":
                processor, model = load_ocr_qwen2vl()
                inputs = processor(text=[prompt], images=[image], return_tensors="pt").to("cpu")
                outputs = model.generate(**inputs, max_new_tokens=1024)
                text = processor.decode(outputs[0], skip_special_tokens=True)
            else:
                tokenizer, model = load_ocr_got()
                with open(selected_image, "rb") as f:
                    img_bytes = f.read()
                img = Image.open(BytesIO(img_bytes))
                text = model.chat(tokenizer, img, ocr_type='ocr')
            st.text_area("OCR Result", text, height=200)
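
# Tab 3: text-to-image with a small Stable Diffusion checkpoint; 50 denoising steps
# on CPU can take several minutes.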

with tab3:
    st.header("Test Image Gen 🎨")
    captured_images = get_gallery_files(["png"])
    if captured_images:
        selected_image = st.selectbox("Select Image", captured_images, key="gen_image_select")
        image = Image.open(selected_image)
        st.image(image, caption="Reference Image", use_container_width=True)
        prompt = st.text_area("Prompt", "Generate a similar superhero image")
        if st.button("Run Image Gen 🚀"):
            pipeline = load_image_gen()
            gen_image = pipeline(prompt, num_inference_steps=50).images[0]
            st.image(gen_image, caption="Generated Image", use_container_width=True)
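
# Tab 4: convert a captured photo to a line drawing with the Canny-based helper above.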

with tab4:
    st.header("Test Line Drawings ✏️")
    captured_images = get_gallery_files(["png"])
    if captured_images:
        selected_image = st.selectbox("Select Image", captured_images, key="line_image_select")
        image = Image.open(selected_image)
        st.image(image, caption="Input Image", use_container_width=True)
        if st.button("Run Line Drawing 🚀"):
            edge_fn = load_line_drawer()
            line_drawing = edge_fn(image)
            st.image(line_drawing, caption="Line Drawing", use_container_width=True)

update_gallery()