awacke1 committed
Commit f8b170f · verified · 1 Parent(s): c338af7

Update app.py

Files changed (1)
  1. app.py +595 -143
app.py CHANGED
@@ -3,48 +3,145 @@ import os
  import glob
  import base64
  import time
  import streamlit as st
  import fitz
  import requests
  from PIL import Image
- import asyncio
- import aiofiles
- from io import BytesIO
- import zipfile
- import random
- import re
- from openai import OpenAI
- import logging

  logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
  logger = logging.getLogger(__name__)

  st.set_page_config(
-     page_title="AI Document Processor 🚀",
      page_icon="🤖",
      layout="wide",
      initial_sidebar_state="expanded",
  )

- # Session state initialization
  if 'history' not in st.session_state:
      st.session_state['history'] = []
  if 'processing' not in st.session_state:
      st.session_state['processing'] = {}
  if 'asset_checkboxes' not in st.session_state:
      st.session_state['asset_checkboxes'] = {}
  if 'unique_counter' not in st.session_state:
      st.session_state['unique_counter'] = 0
- if 'messages' not in st.session_state:
-     st.session_state['messages'] = []

- # OpenAI setup
- openai_api_key = os.getenv('OPENAI_API_KEY')
- openai_org_id = os.getenv('OPENAI_ORG_ID')
- client = OpenAI(api_key=openai_api_key, organization=openai_org_id)
- GPT_MODEL = "gpt-4o-2024-05-13"
- GPT_MINI_MODEL = "o3-mini-high"  # Placeholder, adjust as per actual model name

  def generate_filename(sequence, ext="png"):
      timestamp = time.strftime("%d%m%Y%H%M%S")
      return f"{sequence}_{timestamp}.{ext}"
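
A quick note on the naming helper above: the %d%m%Y%H%M%S layout is day-first, so generated names do not sort chronologically across months. Illustrative output (timestamp value assumed):

    # generate_filename("cam0")           -> "cam0_27062025143015.png"   (DDMMYYYYHHMMSS)
    # generate_filename("single", "md")   -> "single_27062025143015.md"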
@@ -59,15 +156,23 @@ def get_download_link(file_path, mime_type="application/pdf", label="Download"):
      b64 = base64.b64encode(data).decode()
      return f'<a href="data:{mime_type};base64,{b64}" download="{os.path.basename(file_path)}">{label}</a>'

- def get_gallery_files(file_types=["png", "pdf", "md"]):
-     return sorted(list(set([f for ext in file_types for f in glob.glob(f"*.{ext}")])))

  def get_pdf_files():
      return sorted(glob.glob("*.pdf"))

- def get_md_files():
-     return sorted(glob.glob("*.md"))
-
  def download_pdf(url, output_path):
      try:
          response = requests.get(url, stream=True, timeout=10)
@@ -80,149 +185,496 @@ def download_pdf(url, output_path):
          logger.error(f"Failed to download {url}: {e}")
          return False

- async def process_pdf_to_images(pdf_path, mode="double"):
-     doc = fitz.open(pdf_path)
-     output_files = []
-     step = 2 if mode == "double" else 1
-     for i in range(0, len(doc), step):
-         if mode == "double" and i + 1 < len(doc):
-             # Combine two pages into one image
-             page1 = doc[i]
-             page2 = doc[i + 1]
-             pix1 = page1.get_pixmap(matrix=fitz.Matrix(2.0, 2.0))
-             pix2 = page2.get_pixmap(matrix=fitz.Matrix(2.0, 2.0))
-             combined_width = pix1.width + pix2.width
-             combined_height = max(pix1.height, pix2.height)
-             combined_pix = fitz.Pixmap(fitz.csRGB, combined_width, combined_height)
-             combined_pix.set_rect(fitz.IRect(0, 0, pix1.width, pix1.height), pix1)
-             combined_pix.set_rect(fitz.IRect(pix1.width, 0, combined_width, pix2.height), pix2)
-             output_file = generate_filename(f"double_page_{i}", "png")
-             combined_pix.save(output_file)
-             output_files.append(output_file)
-         else:
-             page = doc[i]
              pix = page.get_pixmap(matrix=fitz.Matrix(2.0, 2.0))
-             output_file = generate_filename(f"page_{i}", "png")
              pix.save(output_file)
              output_files.append(output_file)
-     doc.close()
-     return output_files
-
- async def extract_text_from_image(image_path):
-     with open(image_path, "rb") as image_file:
-         base64_image = base64.b64encode(image_file.read()).decode("utf-8")
-     response = client.chat.completions.create(
-         model=GPT_MODEL,
-         messages=[{"role": "user", "content": [
-             {"type": "text", "text": "Extract the electronic text from this image"},
-             {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{base64_image}"}}]}],
-         temperature=0.0
-     )
-     return response.choices[0].message.content

  def update_gallery():
      all_files = get_gallery_files()
      if all_files:
          st.sidebar.subheader("Asset Gallery 📸📖")
          cols = st.sidebar.columns(2)
-         for idx, file in enumerate(all_files[:4]):  # Limit to 4 for brevity
              with cols[idx % 2]:
                  st.session_state['unique_counter'] += 1
                  unique_id = st.session_state['unique_counter']
                  if file.endswith('.png'):
                      st.image(Image.open(file), caption=os.path.basename(file), use_container_width=True)
-                 elif file.endswith('.pdf'):
                      doc = fitz.open(file)
                      pix = doc[0].get_pixmap(matrix=fitz.Matrix(0.5, 0.5))
                      img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
                      st.image(img, caption=os.path.basename(file), use_container_width=True)
                      doc.close()
-                 else:  # .md files
-                     st.write(f"📜 {os.path.basename(file)}")
-                     st.markdown(get_download_link(file, "application/octet-stream", "Download"), unsafe_allow_html=True)
-
- st.title("AI Document Processor 🚀")
-
- # Sidebar
- st.sidebar.header("Captured Files 📜")
- if st.sidebar.button("Zap All! 🗑️"):
-     for file in get_gallery_files():
-         os.remove(file)
-     st.session_state['asset_checkboxes'].clear()
-     st.sidebar.success("All assets vaporized! 💨")
-     st.rerun()
  update_gallery()

- tab1, tab2, tab3 = st.tabs(["PDF Processing 📖", "Image Processing 🖼️", "Markdown Management 📝"])
-
- with tab1:
-     st.header("PDF Processing 📖")
-     pdf_files = st.file_uploader("Upload PDFs", type=["pdf"], accept_multiple_files=True)
-     if pdf_files and st.button("Process PDFs"):
-         for pdf_file in pdf_files:
-             pdf_path = f"uploaded_{pdf_file.name}"
-             with open(pdf_path, "wb") as f:
-                 f.write(pdf_file.getvalue())
-             images = asyncio.run(process_pdf_to_images(pdf_path, mode="double"))
-             full_text = ""
-             for img in images:
-                 text = asyncio.run(extract_text_from_image(img))
-                 full_text += f"# Page {images.index(img) + 1}\n\n{text}\n\n"
-             md_file = f"{os.path.splitext(pdf_path)[0]}.md"
-             with open(md_file, "w") as f:
-                 f.write(full_text)
-             st.image([Image.open(img) for img in images], caption=images, width=300)
-             st.markdown(get_download_link(md_file, "text/markdown", "Download Markdown"), unsafe_allow_html=True)
              update_gallery()

- with tab2:
-     st.header("Image Processing 🖼️")
-     prompt = st.text_area("Enter Prompt for Images", "Extract the electronic text from this image")
-     image_files = st.file_uploader("Upload Images", type=["png", "jpg", "jpeg"], accept_multiple_files=True)
-     if image_files and st.button("Process Images"):
-         full_text = ""
-         for img_file in image_files:
-             img_path = f"uploaded_{img_file.name}"
-             with open(img_path, "wb") as f:
-                 f.write(img_file.getvalue())
-             text = asyncio.run(extract_text_from_image(img_path))
-             full_text += f"# {img_file.name}\n\n{text}\n\n"
-             st.image(Image.open(img_path), caption=img_file.name, width=300)
-         md_file = generate_filename("image_ocr", "md")
-         with open(md_file, "w") as f:
-             f.write(full_text)
-         st.markdown(get_download_link(md_file, "text/markdown", "Download Markdown"), unsafe_allow_html=True)
          update_gallery()
-
- with tab3:
-     st.header("Markdown Management 📝")
-     md_files = get_md_files()
-     col1, col2 = st.columns(2)
-     with col1:
-         st.subheader("File Listing")
-         selected_files = []
-         for md_file in md_files:
-             if st.checkbox(md_file, key=f"md_{md_file}"):
-                 selected_files.append(md_file)
-     with col2:
-         st.subheader("Process Selected Files")
-         default_prompt = "Summarize this into markdown outline with emojis and number the topics 1..12"
-         prompt = st.text_area("Enter Prompt", default_prompt)
-         if st.button("Process with GPT") and selected_files:
-             combined_text = ""
-             for md_file in selected_files:
-                 with open(md_file, "r") as f:
-                     combined_text += f.read() + "\n\n"
-             response = client.chat.completions.create(
-                 model=GPT_MINI_MODEL,  # Replace with actual model if different
-                 messages=[{"role": "user", "content": f"{prompt}\n\n{combined_text}"}],
-                 temperature=0.0
-             )
-             output_md = generate_filename("gpt_output", "md")
-             with open(output_md, "w") as f:
-                 f.write(response.choices[0].message.content)
-             st.markdown(response.choices[0].message.content)
-             st.markdown(get_download_link(output_md, "text/markdown", "Download Output"), unsafe_allow_html=True)
              update_gallery()

- update_gallery()
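
The removed code above configured the OpenAI 1.x client (from openai import OpenAI, client.chat.completions.create) and sent vision OCR requests to gpt-4o-2024-05-13. The added side of the diff, which follows, switches to the module-level interface instead. For orientation, the two call styles look like this (sketch only, arguments elided):

    # 1.x client style (removed):
    # client = OpenAI(api_key=..., organization=...)
    # client.chat.completions.create(model="gpt-4o-2024-05-13", messages=[...])

    # module-level legacy style (added below):
    # openai.api_key = ...
    # openai.ChatCompletion.create(model=..., messages=[...])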
  import glob
  import base64
  import time
+ import shutil
+ import zipfile
+ import re
+ import logging
+ import asyncio
+ from io import BytesIO
+ from datetime import datetime
+ import pytz
+ from dataclasses import dataclass
+ from typing import Optional
+
  import streamlit as st
+ import pandas as pd
+ import torch
  import fitz
  import requests
  from PIL import Image
+ from diffusers import StableDiffusionPipeline
+ from transformers import AutoModelForCausalLM, AutoTokenizer, AutoModel
+
+ # --- OpenAI Setup (for GPT related features) ---
+ import openai
+ openai.api_key = os.getenv('OPENAI_API_KEY')
+ openai.organization = os.getenv('OPENAI_ORG_ID')

+ # --- Logging ---
  logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
  logger = logging.getLogger(__name__)
+ log_records = []
+ class LogCaptureHandler(logging.Handler):
+     def emit(self, record):
+         log_records.append(record)
+ logger.addHandler(LogCaptureHandler())

+ # --- Streamlit Page Config ---
  st.set_page_config(
+     page_title="AI Vision & SFT Titans 🚀",
      page_icon="🤖",
      layout="wide",
      initial_sidebar_state="expanded",
+     menu_items={
+         'Get Help': 'https://huggingface.co/awacke1',
+         'Report a Bug': 'https://huggingface.co/spaces/awacke1',
+         'About': "AI Vision & SFT Titans: PDFs, OCR, Image Gen, Line Drawings, Custom Diffusion, and SFT on CPU! 🌌"
+     }
  )

+ # --- Session State Defaults ---
  if 'history' not in st.session_state:
      st.session_state['history'] = []
+ if 'builder' not in st.session_state:
+     st.session_state['builder'] = None
+ if 'model_loaded' not in st.session_state:
+     st.session_state['model_loaded'] = False
  if 'processing' not in st.session_state:
      st.session_state['processing'] = {}
  if 'asset_checkboxes' not in st.session_state:
      st.session_state['asset_checkboxes'] = {}
+ if 'downloaded_pdfs' not in st.session_state:
+     st.session_state['downloaded_pdfs'] = {}
  if 'unique_counter' not in st.session_state:
      st.session_state['unique_counter'] = 0
+ if 'selected_model_type' not in st.session_state:
+     st.session_state['selected_model_type'] = "Causal LM"
+ if 'selected_model' not in st.session_state:
+     st.session_state['selected_model'] = "None"
+ if 'cam0_file' not in st.session_state:
+     st.session_state['cam0_file'] = None
+ if 'cam1_file' not in st.session_state:
+     st.session_state['cam1_file'] = None
+
+ # --- Model & Diffusion DataClasses ---
+ @dataclass
+ class ModelConfig:
+     name: str
+     base_model: str
+     size: str
+     domain: Optional[str] = None
+     model_type: str = "causal_lm"
+     @property
+     def model_path(self):
+         return f"models/{self.name}"

+ @dataclass
+ class DiffusionConfig:
+     name: str
+     base_model: str
+     size: str
+     domain: Optional[str] = None
+     @property
+     def model_path(self):
+         return f"diffusion_models/{self.name}"

+ # --- Model Builders ---
+ class ModelBuilder:
+     def __init__(self):
+         self.config = None
+         self.model = None
+         self.tokenizer = None
+         self.jokes = ["Why did the AI go to therapy? Too many layers to unpack! 😂",
+                       "Training complete! Time for a binary coffee break. ☕"]
+     def load_model(self, model_path: str, config: Optional[ModelConfig] = None):
+         with st.spinner(f"Loading {model_path}... ⏳"):
+             self.model = AutoModelForCausalLM.from_pretrained(model_path)
+             self.tokenizer = AutoTokenizer.from_pretrained(model_path)
+             if self.tokenizer.pad_token is None:
+                 self.tokenizer.pad_token = self.tokenizer.eos_token
+             if config:
+                 self.config = config
+             self.model.to("cuda" if torch.cuda.is_available() else "cpu")
+         st.success(f"Model loaded! 🎉 {random.choice(self.jokes)}")
+         return self
+     def save_model(self, path: str):
+         with st.spinner("Saving model... 💾"):
+             os.makedirs(os.path.dirname(path), exist_ok=True)
+             self.model.save_pretrained(path)
+             self.tokenizer.save_pretrained(path)
+         st.success(f"Model saved at {path}! ✅")
+
+ class DiffusionBuilder:
+     def __init__(self):
+         self.config = None
+         self.pipeline = None
+     def load_model(self, model_path: str, config: Optional[DiffusionConfig] = None):
+         with st.spinner(f"Loading diffusion model {model_path}... ⏳"):
+             self.pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=torch.float32).to("cpu")
+             if config:
+                 self.config = config
+         st.success("Diffusion model loaded! 🎨")
+         return self
+     def save_model(self, path: str):
+         with st.spinner("Saving diffusion model... 💾"):
+             os.makedirs(os.path.dirname(path), exist_ok=True)
+             self.pipeline.save_pretrained(path)
+         st.success(f"Diffusion model saved at {path}! ✅")
+     def generate(self, prompt: str):
+         return self.pipeline(prompt, num_inference_steps=20).images[0]
+
+ # --- Utility Functions ---
  def generate_filename(sequence, ext="png"):
      timestamp = time.strftime("%d%m%Y%H%M%S")
      return f"{sequence}_{timestamp}.{ext}"
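
The Build Titan tab further down wires these dataclasses and builders together; reduced to a sketch (the base model is one of the tab's own choices, the other values are illustrative):

    # Sketch of how ModelConfig / ModelBuilder are combined in the Build Titan tab
    config = ModelConfig(name="tiny-titan-demo", base_model="HuggingFaceTB/SmolLM-135M",
                         size="small", domain="general")
    builder = ModelBuilder().load_model(config.base_model, config)  # load_model returns self
    builder.save_model(config.model_path)                           # -> models/tiny-titan-demo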
 
      b64 = base64.b64encode(data).decode()
      return f'<a href="data:{mime_type};base64,{b64}" download="{os.path.basename(file_path)}">{label}</a>'

+ def zip_directory(directory_path, zip_path):
+     with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
+         for root, _, files in os.walk(directory_path):
+             for file in files:
+                 zipf.write(os.path.join(root, file), os.path.relpath(os.path.join(root, file), os.path.dirname(directory_path)))
+
+ def get_model_files(model_type="causal_lm"):
+     path = "models/*" if model_type == "causal_lm" else "diffusion_models/*"
+     dirs = [d for d in glob.glob(path) if os.path.isdir(d)]
+     return dirs if dirs else ["None"]
+
+ def get_gallery_files(file_types=["png", "pdf"]):
+     return sorted(list(set([f for ext in file_types for f in glob.glob(f"*.{ext}")])))  # Deduplicate files

  def get_pdf_files():
      return sorted(glob.glob("*.pdf"))

  def download_pdf(url, output_path):
      try:
          response = requests.get(url, stream=True, timeout=10)
 
          logger.error(f"Failed to download {url}: {e}")
          return False

+ # --- Original PDF Snapshot & OCR Functions ---
+ async def process_pdf_snapshot(pdf_path, mode="single"):
+     start_time = time.time()
+     status = st.empty()
+     status.text(f"Processing PDF Snapshot ({mode})... (0s)")
+     try:
+         doc = fitz.open(pdf_path)
+         output_files = []
+         if mode == "single":
+             page = doc[0]
              pix = page.get_pixmap(matrix=fitz.Matrix(2.0, 2.0))
+             output_file = generate_filename("single", "png")
              pix.save(output_file)
              output_files.append(output_file)
+         elif mode == "twopage":
+             for i in range(min(2, len(doc))):
+                 page = doc[i]
+                 pix = page.get_pixmap(matrix=fitz.Matrix(2.0, 2.0))
+                 output_file = generate_filename(f"twopage_{i}", "png")
+                 pix.save(output_file)
+                 output_files.append(output_file)
+         elif mode == "allpages":
+             for i in range(len(doc)):
+                 page = doc[i]
+                 pix = page.get_pixmap(matrix=fitz.Matrix(2.0, 2.0))
+                 output_file = generate_filename(f"page_{i}", "png")
+                 pix.save(output_file)
+                 output_files.append(output_file)
+         doc.close()
+         elapsed = int(time.time() - start_time)
+         status.text(f"PDF Snapshot ({mode}) completed in {elapsed}s!")
+         update_gallery()
+         return output_files
+     except Exception as e:
+         status.error(f"Failed to process PDF: {str(e)}")
+         return []

+ async def process_ocr(image, output_file):
+     start_time = time.time()
+     status = st.empty()
+     status.text("Processing GOT-OCR2_0... (0s)")
+     tokenizer = AutoTokenizer.from_pretrained("ucaslcl/GOT-OCR2_0", trust_remote_code=True)
+     model = AutoModel.from_pretrained("ucaslcl/GOT-OCR2_0", trust_remote_code=True, torch_dtype=torch.float32).to("cpu").eval()
+     temp_file = f"temp_{int(time.time())}.png"
+     image.save(temp_file)
+     result = model.chat(tokenizer, temp_file, ocr_type='ocr')
+     os.remove(temp_file)
+     elapsed = int(time.time() - start_time)
+     status.text(f"GOT-OCR2_0 completed in {elapsed}s!")
+     async with aiofiles.open(output_file, "w") as f:
+         await f.write(result)
+     update_gallery()
+     return result
+
+ async def process_image_gen(prompt, output_file):
+     start_time = time.time()
+     status = st.empty()
+     status.text("Processing Image Gen... (0s)")
+     if st.session_state['builder'] and isinstance(st.session_state['builder'], DiffusionBuilder) and st.session_state['builder'].pipeline:
+         pipeline = st.session_state['builder'].pipeline
+     else:
+         pipeline = StableDiffusionPipeline.from_pretrained("OFA-Sys/small-stable-diffusion-v0", torch_dtype=torch.float32).to("cpu")
+     gen_image = pipeline(prompt, num_inference_steps=20).images[0]
+     elapsed = int(time.time() - start_time)
+     status.text(f"Image Gen completed in {elapsed}s!")
+     gen_image.save(output_file)
+     update_gallery()
+     return gen_image
+
+ # --- New Function: Process an image (PIL) with a custom prompt using GPT ---
+ def process_image_with_prompt(image, prompt, model="o3-mini-high"):
+     buffered = BytesIO()
+     image.save(buffered, format="PNG")
+     img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
+     messages = [{
+         "role": "user",
+         "content": [
+             {"type": "text", "text": prompt},
+             {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{img_str}"}}
+         ]
+     }]
+     try:
+         response = openai.ChatCompletion.create(model=model, messages=messages)
+         return response.choices[0].message.content
+     except Exception as e:
+         return f"Error processing image with GPT: {str(e)}"
+
+ # --- Gallery Update ---
  def update_gallery():
      all_files = get_gallery_files()
      if all_files:
          st.sidebar.subheader("Asset Gallery 📸📖")
          cols = st.sidebar.columns(2)
+         for idx, file in enumerate(all_files[:st.sidebar.slider("Gallery Size", 1, 10, 2, key="gallery_size_update")]):
+
+         # for idx, file in enumerate(all_files[:st.sidebar.slider("Gallery Size", 1, 10, 2)]):
              with cols[idx % 2]:
                  st.session_state['unique_counter'] += 1
                  unique_id = st.session_state['unique_counter']
                  if file.endswith('.png'):
                      st.image(Image.open(file), caption=os.path.basename(file), use_container_width=True)
+                 else:
                      doc = fitz.open(file)
                      pix = doc[0].get_pixmap(matrix=fitz.Matrix(0.5, 0.5))
                      img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
                      st.image(img, caption=os.path.basename(file), use_container_width=True)
                      doc.close()
+                 checkbox_key = f"asset_{file}_{unique_id}"
+                 st.session_state['asset_checkboxes'][file] = st.checkbox("Use for SFT/Input", value=st.session_state['asset_checkboxes'].get(file, False), key=checkbox_key)
+                 mime_type = "image/png" if file.endswith('.png') else "application/pdf"
+                 st.markdown(get_download_link(file, mime_type, "Snag It! 📥"), unsafe_allow_html=True)
+                 if st.button("Zap It! 🗑️", key=f"delete_{file}_{unique_id}"):
+                     os.remove(file)
+                     st.session_state['asset_checkboxes'].pop(file, None)
+                     st.sidebar.success(f"Asset {os.path.basename(file)} vaporized! 💨")
+                     st.experimental_rerun()
  update_gallery()
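
Note that process_image_with_prompt defaults to model="o3-mini-high", the placeholder string carried over from the removed GPT_MINI_MODEL constant, while the removed extract_text_from_image sent the same image_url payload to gpt-4o-2024-05-13. A call that names a vision-capable model explicitly would look like this (illustrative only; img is a PIL image):

    # Hypothetical call: pass the vision model explicitly instead of relying on the default
    text = process_image_with_prompt(img, "Extract the electronic text from image",
                                     model="gpt-4o-2024-05-13")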

+ # --- Sidebar Logs & History ---
+ st.sidebar.subheader("Action Logs 📜")
+ with st.sidebar:
+     for record in log_records:
+         st.write(f"{record.asctime} - {record.levelname} - {record.message}")
+ st.sidebar.subheader("History 📜")
+ with st.sidebar:
+     for entry in st.session_state['history']:
+         st.write(entry)
+
+ # --- Create Tabs (Existing + New) ---
+ tabs = st.tabs([
+     "Camera Snap 📷",
+     "Download PDFs 📥",
+     "Test OCR 🔍",
+     "Build Titan 🌱",
+     "Test Image Gen 🎨",
+     "PDF Process 📄",
+     "Image Process 🖼️",
+     "MD Gallery 📚"
+ ])
+ (tab_camera, tab_download, tab_ocr, tab_build, tab_imggen, tab_pdf_process, tab_image_process, tab_md_gallery) = tabs
+
+ # === Tab: Camera Snap (existing) ===
+ with tab_camera:
+     st.header("Camera Snap 📷")
+     st.subheader("Single Capture")
+     cols = st.columns(2)
+     with cols[0]:
+         cam0_img = st.camera_input("Take a picture - Cam 0", key="cam0")
+         if cam0_img:
+             filename = generate_filename("cam0")
+             if st.session_state['cam0_file'] and os.path.exists(st.session_state['cam0_file']):
+                 os.remove(st.session_state['cam0_file'])
+             with open(filename, "wb") as f:
+                 f.write(cam0_img.getvalue())
+             st.session_state['cam0_file'] = filename
+             entry = f"Snapshot from Cam 0: {filename}"
+             if entry not in st.session_state['history']:
+                 st.session_state['history'] = [e for e in st.session_state['history'] if not e.startswith("Snapshot from Cam 0:")] + [entry]
+             st.image(Image.open(filename), caption="Camera 0", use_container_width=True)
+             logger.info(f"Saved snapshot from Camera 0: {filename}")
+             update_gallery()
+     with cols[1]:
+         cam1_img = st.camera_input("Take a picture - Cam 1", key="cam1")
+         if cam1_img:
+             filename = generate_filename("cam1")
+             if st.session_state['cam1_file'] and os.path.exists(st.session_state['cam1_file']):
+                 os.remove(st.session_state['cam1_file'])
+             with open(filename, "wb") as f:
+                 f.write(cam1_img.getvalue())
+             st.session_state['cam1_file'] = filename
+             entry = f"Snapshot from Cam 1: {filename}"
+             if entry not in st.session_state['history']:
+                 st.session_state['history'] = [e for e in st.session_state['history'] if not e.startswith("Snapshot from Cam 1:")] + [entry]
+             st.image(Image.open(filename), caption="Camera 1", use_container_width=True)
+             logger.info(f"Saved snapshot from Camera 1: {filename}")
              update_gallery()

+ # === Tab: Download PDFs (existing) ===
+ with tab_download:
+     st.header("Download PDFs 📥")
+     if st.button("Examples 📚"):
+         example_urls = [
+             "https://arxiv.org/pdf/2308.03892",
+             "https://arxiv.org/pdf/1912.01703",
+             "https://arxiv.org/pdf/2408.11039",
+             "https://arxiv.org/pdf/2109.10282",
+             "https://arxiv.org/pdf/2112.10752",
+             "https://arxiv.org/pdf/2308.11236",
+             "https://arxiv.org/pdf/1706.03762",
+             "https://arxiv.org/pdf/2006.11239",
+             "https://arxiv.org/pdf/2305.11207",
+             "https://arxiv.org/pdf/2106.09685",
+             "https://arxiv.org/pdf/2005.11401",
+             "https://arxiv.org/pdf/2106.10504"
+         ]
+         st.session_state['pdf_urls'] = "\n".join(example_urls)
+
+     url_input = st.text_area("Enter PDF URLs (one per line)", value=st.session_state.get('pdf_urls', ""), height=200)
+     if st.button("Robo-Download 🤖"):
+         urls = url_input.strip().split("\n")
+         progress_bar = st.progress(0)
+         status_text = st.empty()
+         total_urls = len(urls)
+         existing_pdfs = get_pdf_files()
+         for idx, url in enumerate(urls):
+             if url:
+                 output_path = pdf_url_to_filename(url)
+                 status_text.text(f"Fetching {idx + 1}/{total_urls}: {os.path.basename(output_path)}...")
+                 if output_path not in existing_pdfs:
+                     if download_pdf(url, output_path):
+                         st.session_state['downloaded_pdfs'][url] = output_path
+                         logger.info(f"Downloaded PDF from {url} to {output_path}")
+                         entry = f"Downloaded PDF: {output_path}"
+                         if entry not in st.session_state['history']:
+                             st.session_state['history'].append(entry)
+                         st.session_state['asset_checkboxes'][output_path] = True
+                     else:
+                         st.error(f"Failed to nab {url} 😿")
+                 else:
+                     st.info(f"Already got {os.path.basename(output_path)}! Skipping... 🐾")
+                     st.session_state['downloaded_pdfs'][url] = output_path
+             progress_bar.progress((idx + 1) / total_urls)
+         status_text.text("Robo-Download complete! 🚀")
          update_gallery()
+     mode = st.selectbox("Snapshot Mode", ["Single Page (High-Res)", "Two Pages (High-Res)", "All Pages (High-Res)"], key="download_mode")
+     if st.button("Snapshot Selected 📸"):
+         selected_pdfs = [path for path in get_gallery_files() if path.endswith('.pdf') and st.session_state['asset_checkboxes'].get(path, False)]
+         if selected_pdfs:
+             for pdf_path in selected_pdfs:
+                 mode_key = {"Single Page (High-Res)": "single", "Two Pages (High-Res)": "twopage", "All Pages (High-Res)": "allpages"}[mode]
+                 snapshots = asyncio.run(process_pdf_snapshot(pdf_path, mode_key))
+                 for snapshot in snapshots:
+                     st.image(Image.open(snapshot), caption=snapshot, use_container_width=True)
+                     st.session_state['asset_checkboxes'][snapshot] = True
              update_gallery()
+         else:
+             st.warning("No PDFs selected for snapshotting! Check some boxes in the sidebar.")
+
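The Robo-Download loop above calls pdf_url_to_filename, which is not part of the hunks shown in this diff. A minimal sketch of such a helper, purely as an assumption about its intent (derive a filesystem-safe local name from the URL):

    import re

    def pdf_url_to_filename(url):
        # hypothetical helper, not from this commit:
        # "https://arxiv.org/pdf/1706.03762" -> "arxiv.org_pdf_1706.03762.pdf"
        safe = re.sub(r'[^\w\-.]', '_', url.split("//")[-1])
        return f"{safe}.pdf"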
+ # === Tab: Test OCR (existing) ===
+ with tab_ocr:
+     st.header("Test OCR 🔍")
+     all_files = get_gallery_files()
+     if all_files:
+         if st.button("OCR All Assets 🚀"):
+             full_text = "# OCR Results\n\n"
+             for file in all_files:
+                 if file.endswith('.png'):
+                     image = Image.open(file)
+                 else:
+                     doc = fitz.open(file)
+                     pix = doc[0].get_pixmap(matrix=fitz.Matrix(2.0, 2.0))
+                     image = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
+                     doc.close()
+                 output_file = generate_filename(f"ocr_{os.path.basename(file)}", "txt")
+                 result = asyncio.run(process_ocr(image, output_file))
+                 full_text += f"## {os.path.basename(file)}\n\n{result}\n\n"
+                 entry = f"OCR Test: {file} -> {output_file}"
+                 if entry not in st.session_state['history']:
+                     st.session_state['history'].append(entry)
+             md_output_file = f"full_ocr_{int(time.time())}.md"
+             with open(md_output_file, "w") as f:
+                 f.write(full_text)
+             st.success(f"Full OCR saved to {md_output_file}")
+             st.markdown(get_download_link(md_output_file, "text/markdown", "Download Full OCR Markdown"), unsafe_allow_html=True)
+         selected_file = st.selectbox("Select Image or PDF", all_files, key="ocr_select")
+         if selected_file:
+             if selected_file.endswith('.png'):
+                 image = Image.open(selected_file)
+             else:
+                 doc = fitz.open(selected_file)
+                 pix = doc[0].get_pixmap(matrix=fitz.Matrix(2.0, 2.0))
+                 image = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
+                 doc.close()
+             st.image(image, caption="Input Image", use_container_width=True)
+             if st.button("Run OCR 🚀", key="ocr_run"):
+                 output_file = generate_filename("ocr_output", "txt")
+                 st.session_state['processing']['ocr'] = True
+                 result = asyncio.run(process_ocr(image, output_file))
+                 entry = f"OCR Test: {selected_file} -> {output_file}"
+                 if entry not in st.session_state['history']:
+                     st.session_state['history'].append(entry)
+                 st.text_area("OCR Result", result, height=200, key="ocr_result")
+                 st.success(f"OCR output saved to {output_file}")
+                 st.session_state['processing']['ocr'] = False
+             if selected_file.endswith('.pdf') and st.button("OCR All Pages 🚀", key="ocr_all_pages"):
+                 doc = fitz.open(selected_file)
+                 full_text = f"# OCR Results for {os.path.basename(selected_file)}\n\n"
+                 for i in range(len(doc)):
+                     pix = doc[i].get_pixmap(matrix=fitz.Matrix(2.0, 2.0))
+                     image = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
+                     output_file = generate_filename(f"ocr_page_{i}", "txt")
+                     result = asyncio.run(process_ocr(image, output_file))
+                     full_text += f"## Page {i + 1}\n\n{result}\n\n"
+                     entry = f"OCR Test: {selected_file} Page {i + 1} -> {output_file}"
+                     if entry not in st.session_state['history']:
+                         st.session_state['history'].append(entry)
+                 md_output_file = f"full_ocr_{os.path.basename(selected_file)}_{int(time.time())}.md"
+                 with open(md_output_file, "w") as f:
+                     f.write(full_text)
+                 st.success(f"Full OCR saved to {md_output_file}")
+                 st.markdown(get_download_link(md_output_file, "text/markdown", "Download Full OCR Markdown"), unsafe_allow_html=True)
+     else:
+         st.warning("No assets in gallery yet. Use Camera Snap or Download PDFs!")
+
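Two observations on the OCR path this tab relies on: the diff removes import aiofiles yet process_ocr still writes its result through aiofiles.open, and the GOT-OCR2_0 tokenizer and model are re-instantiated on every call, which makes "OCR All Assets" expensive. One possible mitigation, assuming Streamlit's st.cache_resource is acceptable here (sketch, not part of the commit):

    @st.cache_resource
    def load_got_ocr():
        # cache the OCR model so repeated runs reuse a single instance
        tok = AutoTokenizer.from_pretrained("ucaslcl/GOT-OCR2_0", trust_remote_code=True)
        mdl = AutoModel.from_pretrained("ucaslcl/GOT-OCR2_0", trust_remote_code=True,
                                        torch_dtype=torch.float32).to("cpu").eval()
        return tok, mdl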
+ # === Tab: Build Titan (existing) ===
+ with tab_build:
+     st.header("Build Titan 🌱")
+     model_type = st.selectbox("Model Type", ["Causal LM", "Diffusion"], key="build_type")
+     base_model = st.selectbox("Select Tiny Model",
+                               ["HuggingFaceTB/SmolLM-135M", "Qwen/Qwen1.5-0.5B-Chat"] if model_type == "Causal LM" else
+                               ["OFA-Sys/small-stable-diffusion-v0", "stabilityai/stable-diffusion-2-base"])
+     model_name = st.text_input("Model Name", f"tiny-titan-{int(time.time())}")
+     domain = st.text_input("Target Domain", "general")
+     if st.button("Download Model ⬇️"):
+         config = (ModelConfig if model_type == "Causal LM" else DiffusionConfig)(name=model_name, base_model=base_model, size="small", domain=domain)
+         builder = ModelBuilder() if model_type == "Causal LM" else DiffusionBuilder()
+         builder.load_model(base_model, config)
+         builder.save_model(config.model_path)
+         st.session_state['builder'] = builder
+         st.session_state['model_loaded'] = True
+         st.session_state['selected_model_type'] = model_type
+         st.session_state['selected_model'] = config.model_path
+         entry = f"Built {model_type} model: {model_name}"
+         if entry not in st.session_state['history']:
+             st.session_state['history'].append(entry)
+         st.success(f"Model downloaded and saved to {config.model_path}! 🎉")
+         st.experimental_rerun()
+
+ # === Tab: Test Image Gen (existing) ===
+ with tab_imggen:
+     st.header("Test Image Gen 🎨")
+     all_files = get_gallery_files()
+     if all_files:
+         selected_file = st.selectbox("Select Image or PDF", all_files, key="gen_select")
+         if selected_file:
+             if selected_file.endswith('.png'):
+                 image = Image.open(selected_file)
+             else:
+                 doc = fitz.open(selected_file)
+                 pix = doc[0].get_pixmap(matrix=fitz.Matrix(2.0, 2.0))
+                 image = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
+                 doc.close()
+             st.image(image, caption="Reference Image", use_container_width=True)
+             prompt = st.text_area("Prompt", "Generate a neon superhero version of this image", key="gen_prompt")
+             if st.button("Run Image Gen 🚀", key="gen_run"):
+                 output_file = generate_filename("gen_output", "png")
+                 st.session_state['processing']['gen'] = True
+                 result = asyncio.run(process_image_gen(prompt, output_file))
+                 entry = f"Image Gen Test: {prompt} -> {output_file}"
+                 if entry not in st.session_state['history']:
+                     st.session_state['history'].append(entry)
+                 st.image(result, caption="Generated Image", use_container_width=True)
+                 st.success(f"Image saved to {output_file}")
+                 st.session_state['processing']['gen'] = False
+     else:
+         st.warning("No images or PDFs in gallery yet. Use Camera Snap or Download PDFs!")
+     update_gallery()
+
+ # === New Tab: PDF Process ===
+ with tab_pdf_process:
+     st.header("PDF Process")
+     st.subheader("Upload PDFs for GPT-based text extraction")
+     uploaded_pdfs = st.file_uploader("Upload PDF files", type=["pdf"], accept_multiple_files=True, key="pdf_process_uploader")
+     view_mode = st.selectbox("View Mode", ["Single Page", "Double Page"], key="pdf_view_mode")
+     if st.button("Process Uploaded PDFs", key="process_pdfs"):
+         combined_text = ""
+         for pdf_file in uploaded_pdfs:
+             pdf_bytes = pdf_file.read()
+             temp_pdf_path = f"temp_{pdf_file.name}"
+             with open(temp_pdf_path, "wb") as f:
+                 f.write(pdf_bytes)
+             try:
+                 doc = fitz.open(temp_pdf_path)
+                 st.write(f"Processing {pdf_file.name} with {len(doc)} pages")
+                 if view_mode == "Single Page":
+                     for i, page in enumerate(doc):
+                         pix = page.get_pixmap(matrix=fitz.Matrix(2.0, 2.0))
+                         img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
+                         st.image(img, caption=f"{pdf_file.name} Page {i+1}")
+                         gpt_text = process_image_with_prompt(img, "Extract the electronic text from image")
+                         combined_text += f"\n## {pdf_file.name} - Page {i+1}\n\n{gpt_text}\n"
+                 else:  # Double Page: combine two consecutive pages
+                     pages = list(doc)
+                     for i in range(0, len(pages), 2):
+                         if i+1 < len(pages):
+                             pix1 = pages[i].get_pixmap(matrix=fitz.Matrix(2.0, 2.0))
+                             img1 = Image.frombytes("RGB", [pix1.width, pix1.height], pix1.samples)
+                             pix2 = pages[i+1].get_pixmap(matrix=fitz.Matrix(2.0, 2.0))
+                             img2 = Image.frombytes("RGB", [pix2.width, pix2.height], pix2.samples)
+                             total_width = img1.width + img2.width
+                             max_height = max(img1.height, img2.height)
+                             combined_img = Image.new("RGB", (total_width, max_height))
+                             combined_img.paste(img1, (0, 0))
+                             combined_img.paste(img2, (img1.width, 0))
+                             st.image(combined_img, caption=f"{pdf_file.name} Pages {i+1}-{i+2}")
+                             gpt_text = process_image_with_prompt(combined_img, "Extract the electronic text from image")
+                             combined_text += f"\n## {pdf_file.name} - Pages {i+1}-{i+2}\n\n{gpt_text}\n"
+                         else:
+                             pix = pages[i].get_pixmap(matrix=fitz.Matrix(2.0, 2.0))
+                             img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
+                             st.image(img, caption=f"{pdf_file.name} Page {i+1}")
+                             gpt_text = process_image_with_prompt(img, "Extract the electronic text from image")
+                             combined_text += f"\n## {pdf_file.name} - Page {i+1}\n\n{gpt_text}\n"
+                 doc.close()
+             except Exception as e:
+                 st.error(f"Error processing {pdf_file.name}: {str(e)}")
+             finally:
+                 os.remove(temp_pdf_path)
+         output_filename = generate_filename("processed_pdf", "md")
+         with open(output_filename, "w", encoding="utf-8") as f:
+             f.write(combined_text)
+         st.success(f"PDF processing complete. MD file saved as {output_filename}")
+         st.markdown(get_download_link(output_filename, "text/markdown", "Download Processed PDF MD"), unsafe_allow_html=True)

+ # === New Tab: Image Process ===
+ with tab_image_process:
+     st.header("Image Process")
+     st.subheader("Upload Images for GPT-based OCR")
+     prompt_img = st.text_input("Enter prompt for image processing", "Extract the electronic text from image", key="img_process_prompt")
+     uploaded_images = st.file_uploader("Upload image files", type=["png", "jpg", "jpeg"], accept_multiple_files=True, key="image_process_uploader")
+     if st.button("Process Uploaded Images", key="process_images"):
+         combined_text = ""
+         for img_file in uploaded_images:
+             try:
+                 img = Image.open(img_file)
+                 st.image(img, caption=img_file.name)
+                 gpt_text = process_image_with_prompt(img, prompt_img)
+                 combined_text += f"\n## {img_file.name}\n\n{gpt_text}\n"
+             except Exception as e:
+                 st.error(f"Error processing image {img_file.name}: {str(e)}")
+         output_filename = generate_filename("processed_image", "md")
+         with open(output_filename, "w", encoding="utf-8") as f:
+             f.write(combined_text)
+         st.success(f"Image processing complete. MD file saved as {output_filename}")
+         st.markdown(get_download_link(output_filename, "text/markdown", "Download Processed Image MD"), unsafe_allow_html=True)
+
+ # === New Tab: MD Gallery ===
+ with tab_md_gallery:
+     st.header("MD Gallery and GPT Processing")
+     md_files = sorted(glob.glob("*.md"))
+     if md_files:
+         st.subheader("Individual File Processing")
+         cols = st.columns(2)
+         for idx, md_file in enumerate(md_files):
+             with cols[idx % 2]:
+                 st.write(md_file)
+                 if st.button(f"Process {md_file}", key=f"process_md_{md_file}"):
+                     try:
+                         with open(md_file, "r", encoding="utf-8") as f:
+                             content = f.read()
+                         prompt_md = "Summarize this into markdown outline with emojis and number the topics 1..12"
+                         messages = [{"role": "user", "content": prompt_md + "\n\n" + content}]
+                         response = openai.ChatCompletion.create(model="o3-mini-high", messages=messages)
+                         result_text = response.choices[0].message.content
+                         st.markdown(result_text)
+                         output_filename = generate_filename(f"processed_{os.path.splitext(md_file)[0]}", "md")
+                         with open(output_filename, "w", encoding="utf-8") as f:
+                             f.write(result_text)
+                         st.markdown(get_download_link(output_filename, "text/markdown", f"Download {output_filename}"), unsafe_allow_html=True)
+                     except Exception as e:
+                         st.error(f"Error processing {md_file}: {str(e)}")
+         st.subheader("Batch Processing")
+         st.write("Select MD files to combine and process:")
+         selected_md = {}
+         for md_file in md_files:
+             selected_md[md_file] = st.checkbox(md_file, key=f"checkbox_md_{md_file}")
+         batch_prompt = st.text_input("Enter batch processing prompt", "Summarize this into markdown outline with emojis and number the topics 1..12", key="batch_prompt")
+         if st.button("Process Selected MD Files", key="process_batch_md"):
+             combined_content = ""
+             for md_file, selected in selected_md.items():
+                 if selected:
+                     try:
+                         with open(md_file, "r", encoding="utf-8") as f:
+                             combined_content += f"\n## {md_file}\n" + f.read() + "\n"
+                     except Exception as e:
+                         st.error(f"Error reading {md_file}: {str(e)}")
+             if combined_content:
+                 messages = [{"role": "user", "content": batch_prompt + "\n\n" + combined_content}]
+                 try:
+                     response = openai.ChatCompletion.create(model="o3-mini-high", messages=messages)
+                     result_text = response.choices[0].message.content
+                     st.markdown(result_text)
+                     output_filename = generate_filename("batch_processed_md", "md")
+                     with open(output_filename, "w", encoding="utf-8") as f:
+                         f.write(result_text)
+                     st.success(f"Batch processing complete. MD file saved as {output_filename}")
+                     st.markdown(get_download_link(output_filename, "text/markdown", "Download Batch Processed MD"), unsafe_allow_html=True)
+                 except Exception as e:
+                     st.error(f"Error processing batch: {str(e)}")
+             else:
+                 st.warning("No MD files selected.")
+     else:
+         st.warning("No MD files found.")
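
One last compatibility note: the removed version called st.rerun(), while the new gallery and Build Titan code call st.experimental_rerun(), which recent Streamlit releases deprecate in favor of st.rerun(). A version-tolerant sketch (an assumption about the installed Streamlit, not part of the commit):

    import streamlit as st

    # prefer the current API, fall back to the experimental name on older releases
    rerun = getattr(st, "rerun", None) or getattr(st, "experimental_rerun", None)
    if rerun is not None:
        rerun()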