awacke1 committed
Commit 457db8f · verified · 1 Parent(s): eead881

Update app.py

Files changed (1)
  1. app.py +500 -360
app.py CHANGED
@@ -53,20 +53,24 @@ st.set_page_config(
53
  }
54
  )
55
 
56
- st.session_state.setdefault('history', []) # 🌱 History: starting fresh if empty!
57
- st.session_state.setdefault('builder', None) # 🛠️ Builder: set up the builder if it's missing!
58
- st.session_state.setdefault('model_loaded', False) # 🚦 Model Loaded: mark as not loaded by default!
59
- st.session_state.setdefault('processing', {}) # Processing: initialize processing state as an empty dict!
60
- st.session_state.setdefault('asset_checkboxes', {}) # ✅ Asset Checkboxes: default to an empty dictionary!
61
- st.session_state.setdefault('downloaded_pdfs', {}) # 📄 Downloaded PDFs: start with no PDFs downloaded!
62
- st.session_state.setdefault('unique_counter', 0) # 🔢 Unique Counter: initialize the counter to zero!
63
- st.session_state.setdefault('selected_model_type', "Causal LM") # 🧠 Selected Model Type: default to "Causal LM"!
64
- st.session_state.setdefault('selected_model', "None") # 🤖 Selected Model: set to "None" if not already set!
65
- st.session_state.setdefault('cam0_file', None) # 📸 Cam0 File: no file loaded by default!
66
- st.session_state.setdefault('cam1_file', None) # 📸 Cam1 File: no file loaded by default!
67
-
68
-
69
- @dataclass # 🎨 ModelConfig: A blueprint for model configurations!
70
  class ModelConfig:
71
  name: str
72
  base_model: str
@@ -74,43 +78,48 @@ class ModelConfig:
74
  domain: Optional[str] = None
75
  model_type: str = "causal_lm"
76
  @property
77
- def model_path(self): return f"models/{self.name}" # 🚀 Model Path: Home base for brilliance!
 
78
 
79
- @dataclass # 🎨 DiffusionConfig: Where diffusion magic takes shape!
80
  class DiffusionConfig:
81
  name: str
82
  base_model: str
83
  size: str
84
  domain: Optional[str] = None
85
  @property
86
- def model_path(self): return f"diffusion_models/{self.name}" # 🚀 Diffusion Path: Let the diffusion begin!
87
-
88
- class ModelBuilder: # 🔧 ModelBuilder: Crafting AI wonders with wit!
89
- def __init__(self): # 🚀 Initialize: Setting up the AI factory!
90
- self.config = None # No config yet—waiting for genius!
91
- self.model = None # Model not built until the magic happens!
92
- self.tokenizer = None # Tokenizer: Ready to speak in AI!
93
- self.jokes = [ # 🤣 Jokes to keep the circuits laughing!
 
94
  "Why did the AI go to therapy? Too many layers to unpack! 😂",
95
  "Training complete! Time for a binary coffee break. ☕",
96
  "I told my neural network a joke; it couldn't stop dropping bits! 🤖",
97
  "I asked the AI for a pun, and it said, 'I'm punning on parallel processing!' 😄",
98
  "Debugging my code is like a stand-up routine—always a series of exceptions! 😆"
99
  ]
100
- def load_model(self, model_path: str, config: Optional[ModelConfig] = None): # 🔄 load_model: Booting up genius!
101
- with st.spinner(f"Loading {model_path}... ⏳"): # ⏳ Spinner: Genius loading...
102
  self.model = AutoModelForCausalLM.from_pretrained(model_path)
103
  self.tokenizer = AutoTokenizer.from_pretrained(model_path)
104
- if self.tokenizer.pad_token is None: self.tokenizer.pad_token = self.tokenizer.eos_token # 🔧 Fix pad token if missing!
105
- if config: self.config = config # 🛠️ Config loaded—setting the stage!
106
- self.model.to("cuda" if torch.cuda.is_available() else "cpu") # 💻 Deploying the model to its device!
107
- st.success(f"Model loaded! 🎉 {random.choice(self.jokes)}") # 🎉 Success: Model is now in orbit!
 
 
108
  return self
109
- def save_model(self, path: str): # 💾 save_model: Securing your masterpiece!
110
- with st.spinner("Saving model... 💾"): # ⏳ Spinner: Saving brilliance...
111
- os.makedirs(os.path.dirname(path), exist_ok=True); self.model.save_pretrained(path); self.tokenizer.save_pretrained(path) # 📂 Directory magic: Creating and saving!
112
- st.success(f"Model saved at {path}! ✅") # ✅ Success: Your model is safely stored!
113
-
 
114
 
115
  class DiffusionBuilder:
116
  def __init__(self):
@@ -131,144 +140,205 @@ class DiffusionBuilder:
131
  def generate(self, prompt: str):
132
  return self.pipeline(prompt, num_inference_steps=20).images[0]
133
 
134
- def generate_filename(sequence, ext="png"): return f"{sequence}_{time.strftime('%d%m%Y%H%M%S')}.{ext}" # ⏳ Generate filename with timestamp magic!
 
 
135
  def pdf_url_to_filename(url):
136
- return re.sub(r'[<>:"/\\|?*]', '_', url) + ".pdf" # 📄 Convert URL to a safe PDF filename – no hackers allowed!
137
- def get_download_link(file_path, mime_type="application/pdf", label="Download"): return f'<a href="data:{mime_type};base64,{base64.b64encode(open(file_path, "rb").read()).decode()}" download="{os.path.basename(file_path)}">{label}</a>' # 🔗 Create a download link – click it like it's hot!
138
- def zip_directory(directory_path, zip_path):
139
- with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf: [zipf.write(os.path.join(root, file), os.path.relpath(os.path.join(root, file), os.path.dirname(directory_path))) for root, _, files in os.walk(directory_path) for file in files] # 🎁 Zip directory: Packing files faster than Santa on Christmas Eve!
140
- def get_model_files(model_type="causal_lm"): return [d for d in glob.glob("models/*" if model_type == "causal_lm" else "diffusion_models/*") if os.path.isdir(d)] or ["None"] # 📂 Get model files: Hunting directories like a pro!
141
- def get_gallery_files(file_types=["png", "pdf"]): return sorted(list({f for ext in file_types for f in glob.glob(f"*.{ext}")})) # 🖼️ Get gallery files: Finding art in a digital haystack!
142
- def get_pdf_files(): return sorted(glob.glob("*.pdf")) # 📄 Get PDF files: Sorted and served – no paper cuts here!
143
-
144
- # 📥 Download PDF: Delivering docs faster than a caffeinated courier!
145
  def download_pdf(url, output_path):
146
- try:
147
- response = requests.get(url, stream=True, timeout=10); [open(output_path, "wb").write(chunk) for chunk in response.iter_content(chunk_size=8192)] if response.status_code == 200 else None; ret = True if response.status_code == 200 else False
148
- except requests.RequestException as e:
149
- logger.error(f"Failed to download {url}: {e}"); ret = False
150
- return ret
151
-
152
- # 📚 Async PDF Snapshot: Snap your PDF pages without blocking—juggle pages like a ninja! 🥷
153
- async def process_pdf_snapshot(pdf_path, mode="single"):
154
- start_time = time.time(); status = st.empty(); status.text(f"Processing PDF Snapshot ({mode})... (0s)")
155
  try:
156
- doc = fitz.open(pdf_path); output_files = []
157
- if mode == "single": page = doc[0]; pix = page.get_pixmap(matrix=fitz.Matrix(2.0, 2.0)); output_file = generate_filename("single", "png"); pix.save(output_file); output_files.append(output_file)
158
- elif mode == "twopage":
159
- for i in range(min(2, len(doc))): page = doc[i]; pix = page.get_pixmap(matrix=fitz.Matrix(2.0, 2.0)); output_file = generate_filename(f"twopage_{i}", "png"); pix.save(output_file); output_files.append(output_file)
160
- elif mode == "allpages":
161
- for i in range(len(doc)): page = doc[i]; pix = page.get_pixmap(matrix=fitz.Matrix(2.0, 2.0)); output_file = generate_filename(f"page_{i}", "png"); pix.save(output_file); output_files.append(output_file)
162
- doc.close(); elapsed = int(time.time() - start_time); status.text(f"PDF Snapshot ({mode}) completed in {elapsed}s!"); update_gallery(); return output_files
163
- except Exception as e: status.error(f"Failed to process PDF: {str(e)}"); return []
164
-
165
- # 😎 Async OCR: Convert images to text while your app keeps on groovin'—no blocking, just rocking! 🎸
166
- async def process_ocr(image, output_file):
167
- start_time = time.time(); status = st.empty(); status.text("Processing GOT-OCR2_0... (0s)")
168
- tokenizer = AutoTokenizer.from_pretrained("ucaslcl/GOT-OCR2_0", trust_remote_code=True); model = AutoModel.from_pretrained("ucaslcl/GOT-OCR2_0", trust_remote_code=True, torch_dtype=torch.float32).to("cpu").eval()
169
- temp_file = f"temp_{int(time.time())}.png"; image.save(temp_file)
170
- result = model.chat(tokenizer, temp_file, ocr_type='ocr'); os.remove(temp_file)
171
- elapsed = int(time.time() - start_time); status.text(f"GOT-OCR2_0 completed in {elapsed}s!")
172
- async with aiofiles.open(output_file, "w") as f: await f.write(result)
173
- update_gallery(); return result
174
-
175
- # 🧞 Async Image Gen: Your image genie—wishing up pictures while the event loop keeps the party going! 🎉
176
- async def process_image_gen(prompt, output_file):
177
- start_time = time.time(); status = st.empty(); status.text("Processing Image Gen... (0s)")
178
- pipeline = st.session_state['builder'].pipeline if st.session_state.get('builder') and isinstance(st.session_state['builder'], DiffusionBuilder) and st.session_state['builder'].pipeline else StableDiffusionPipeline.from_pretrained("OFA-Sys/small-stable-diffusion-v0", torch_dtype=torch.float32).to("cpu")
179
- gen_image = pipeline(prompt, num_inference_steps=20).images[0]; elapsed = int(time.time() - start_time)
180
- status.text(f"Image Gen completed in {elapsed}s!"); gen_image.save(output_file); update_gallery(); return gen_image
181
-
182
- # 🖼️ GPT-Image Interpreter: Turning pixels into prose!
183
- def process_image_with_prompt(image, prompt, model="gpt-4o-mini", detail="auto"):
184
- buffered = BytesIO(); image.save(buffered, format="PNG") # 💾 Save the image in-memory as PNG—no hard drives harmed!
185
- img_str = base64.b64encode(buffered.getvalue()).decode("utf-8") # 🔐 Encode image data in Base64 for secure, inline transmission!
186
- messages = [{"role": "user", "content": [{"type": "text", "text": prompt}, {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{img_str}", "detail": detail}}]}] # 💬 Build the GPT conversation with your prompt and image!
187
  try:
188
- response = client.chat.completions.create(model=model, messages=messages, max_tokens=300); return response.choices[0].message.content # 🤖 Invoke GPT’s magic and return its dazzling output!
189
- except Exception as e: return f"Error processing image with GPT: {str(e)}" # ⚠️ Oops—GPT encountered a snag, so we catch and report the error!
190
-
191
- # 📝 GPT-Text Alchemist: Merging your prompt and text into digital gold!
192
- def process_text_with_prompt(text, prompt, model="gpt-4o-mini"):
193
- messages = [{"role": "user", "content": f"{prompt}\n\n{text}"}] # 🛠️ Constructing the conversation input like a master wordsmith!
194
- try:
195
- response = client.chat.completions.create(model=model, messages=messages, max_tokens=300); return response.choices[0].message.content # 🤖 Summon GPT’s wisdom and return its brilliant answer!
196
- except Exception as e: return f"Error processing text with GPT: {str(e)}" # ⚠️ Oops, GPT stumbled—catching and reporting the error!
197
-
198
- st.sidebar.subheader("Gallery Settings") # 🎨 Sidebar Gallery: Customize your creative space!
199
- st.session_state.setdefault('gallery_size', 2) # 🔧 Setting default gallery size to 2 if it's missing!
200
- st.session_state['gallery_size'] = st.sidebar.slider("Gallery Size", 1, 10, st.session_state['gallery_size'], key="gallery_size_slider") # 🎚️ Slide to adjust your gallery size and bring balance to your art!
201
-
202
- # 📸 Gallery Updater: Making your assets dazzle and disappear faster than a magician's rabbit! 🐇✨
203
- def update_gallery():
204
- all_files = get_gallery_files() # 🔍 Grab all gallery files like a digital treasure hunt!
205
- if all_files: # ✅ If assets are found, let the show begin!
206
- st.sidebar.subheader("Asset Gallery 📸📖"); cols = st.sidebar.columns(2) # 🎨 Set up a stylish 2-column layout in the sidebar!
207
- for idx, file in enumerate(all_files[:st.session_state['gallery_size']]): # 🖼️ Loop through your favorite files, limited by gallery size!
208
- with cols[idx % 2]: # 🔄 Alternate columns—because balance is key (and funny)!
209
- st.session_state['unique_counter'] += 1; unique_id = st.session_state['unique_counter'] # 🚀 Increment your asset counter—every asset gets its moment in the spotlight!
210
- if file.endswith('.png'): st.image(Image.open(file), caption=os.path.basename(file), use_container_width=True) # 🖼️ Display the image like a masterpiece!
211
- else: # 📄 For PDFs, we snap their first page like a paparazzo!
212
- doc = fitz.open(file); pix = doc[0].get_pixmap(matrix=fitz.Matrix(0.5, 0.5)); img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples); st.image(img, caption=os.path.basename(file), use_container_width=True); doc.close()
213
- checkbox_key = f"asset_{file}_{unique_id}" # 🔑 Create a unique key—because every asset deserves VIP treatment!
214
- st.session_state['asset_checkboxes'][file] = st.checkbox("Use for SFT/Input", value=st.session_state['asset_checkboxes'].get(file, False), key=checkbox_key) # ✅ Checkbox: Pick your asset for magic (or SFT)!
215
- mime_type = "image/png" if file.endswith('.png') else "application/pdf" # 📎 Determine MIME type—like sorting your socks, but cooler!
216
- st.markdown(get_download_link(file, mime_type, "Snag It! 📥"), unsafe_allow_html=True) # 🔗 Provide a download link—grab your asset faster than a flash sale!
217
- if st.button("Zap It! 🗑️", key=f"delete_{file}_{unique_id}"): # ⚡ "Zap It!" button: Because sometimes you just gotta make stuff disappear!
218
- os.remove(file); st.session_state['asset_checkboxes'].pop(file, None); st.sidebar.success(f"Asset {os.path.basename(file)} vaporized! 💨"); st.rerun() # 💥 Delete the file and refresh the gallery—poof, it's gone!
219
- #update_gallery() # 🎉 Launch the gallery update—let the art party commence! (Joke: Why did the asset cross the road? To get zapped on the other side! 😆)
220
-
221
- st.sidebar.subheader("Action Logs 📜") # 📝 Action Logs: Where our system whispers its secrets!
222
- with st.sidebar: [st.write(f"{record.asctime} - {record.levelname} - {record.message}") for record in log_records] # 📚 Loop through log records and display them like diary entries!
223
-
224
- st.sidebar.subheader("History 📜") # 🕰️ History: A walk down memory lane, one log at a time!
225
- with st.sidebar: [st.write(entry) for entry in st.session_state['history']] # ⏳ Display every historic moment with style!
226
-
227
- tabs = st.tabs(["Camera Snap 📷", "Download PDFs 📥", "Test OCR 🔍", "Build Titan 🌱", "Test Image Gen 🎨", "PDF Process 📄", "Image Process 🖼️", "MD Gallery 📚"]) # 🎭 Tabs: Navigate your AI universe like a boss!
228
- (tab_camera, tab_download, tab_ocr, tab_build, tab_imggen, tab_pdf_process, tab_image_process, tab_md_gallery) = tabs # 🚀 Unpack the tabs and get ready to explore—because even tabs need to party!
229
-
230
  with tab_camera:
231
- st.header("Camera Snap 📷") # 🎥 Header: Let’s capture those Kodak moments!
232
- st.subheader("Single Capture") # 📸 Subheader: One snap at a time, no double exposure!
233
- cols = st.columns(2) # 🧩 Creating two columns for double-camera action!
234
-
235
  with cols[0]:
236
- cam0_img = st.camera_input("Take a picture - Cam 0", key="cam0") # 📷 Cam 0: Say cheese!
237
  if cam0_img:
238
- filename = generate_filename("cam0") # 🏷️ Filename for Cam 0 snapshot generated!
239
- if st.session_state['cam0_file'] and os.path.exists(st.session_state['cam0_file']): os.remove(st.session_state['cam0_file']) # 🗑️ Out with the old Cam 0 snap!
240
- with open(filename, "wb") as f: f.write(cam0_img.getvalue()) # 💾 Saving Cam 0 image like a boss!
241
- st.session_state['cam0_file'] = filename # 🔄 Updating session state for Cam 0 file!
242
- entry = f"Snapshot from Cam 0: {filename}" # 📝 History entry: Cam 0 snapshot recorded!
243
- if entry not in st.session_state['history']:
244
- st.session_state['history'] = [e for e in st.session_state['history'] if not e.startswith("Snapshot from Cam 0:")] + [entry] # 🧹 Cleaning and updating history!
245
- st.image(Image.open(filename), caption="Camera 0", use_container_width=True) # 🖼️ Displaying the fresh Cam 0 image!
246
- logger.info(f"Saved snapshot from Camera 0: {filename}") # 🔍 Logging: Cam 0 snapshot saved!
247
- update_gallery() # 🔄 Refreshing gallery to show the new snap!
248
-
249
  with cols[1]:
250
- cam1_img = st.camera_input("Take a picture - Cam 1", key="cam1") # 📷 Cam 1: Capture your best side!
251
  if cam1_img:
252
- filename = generate_filename("cam1") # 🏷️ Filename for Cam 1 snapshot generated!
253
- if st.session_state['cam1_file'] and os.path.exists(st.session_state['cam1_file']): os.remove(st.session_state['cam1_file']) # 🗑️ Out with the old Cam 1 snap!
254
- with open(filename, "wb") as f: f.write(cam1_img.getvalue()) # 💾 Saving Cam 1 image like a pro!
255
- st.session_state['cam1_file'] = filename # 🔄 Updating session state for Cam 1 file!
256
- entry = f"Snapshot from Cam 1: {filename}" # 📝 History entry: Cam 1 snapshot recorded!
257
- if entry not in st.session_state['history']:
258
- st.session_state['history'] = [e for e in st.session_state['history'] if not e.startswith("Snapshot from Cam 1:")] + [entry] # 🧹 Cleaning and updating history!
259
- st.image(Image.open(filename), caption="Camera 1", use_container_width=True) # 🖼️ Displaying the fresh Cam 1 image!
260
- logger.info(f"Saved snapshot from Camera 1: {filename}") # 🔍 Logging: Cam 1 snapshot saved!
261
- update_gallery() # 🔄 Refreshing gallery to show the new snap!
262
-
263
- # === Tab: Download PDFs ===
264
  with tab_download:
265
- st.header("Download PDFs 📥") # 📥 Header: Ready to snag PDFs like a digital ninja!
266
- if st.button("Examples 📚"): # 📚 Button: Load up some scholarly URLs for instant fun!
267
- example_urls = ["https://arxiv.org/pdf/2308.03892", "https://arxiv.org/pdf/1912.01703", "https://arxiv.org/pdf/2408.11039", "https://arxiv.org/pdf/2109.10282", "https://arxiv.org/pdf/2112.10752", "https://arxiv.org/pdf/2308.11236", "https://arxiv.org/pdf/1706.03762", "https://arxiv.org/pdf/2006.11239", "https://arxiv.org/pdf/2305.11207", "https://arxiv.org/pdf/2106.09685", "https://arxiv.org/pdf/2005.11401", "https://arxiv.org/pdf/2106.10504"]; st.session_state['pdf_urls'] = "\n".join(example_urls) # 📚 Examples loaded into session!
268
-
269
- url_input = st.text_area("Enter PDF URLs (one per line)", value=st.session_state.get('pdf_urls', ""), height=200) # 📝 Text area: Paste your PDF URLs here—no commas needed!
270
-
271
- # --- Download PDFs Tab (modified section) ---
272
  if st.button("Robo-Download 🤖"):
273
  urls = url_input.strip().split("\n")
274
  progress_bar = st.progress(0)
@@ -284,8 +354,7 @@ with tab_download:
284
  st.session_state['downloaded_pdfs'][url] = output_path
285
  logger.info(f"Downloaded PDF from {url} to {output_path}")
286
  entry = f"Downloaded PDF: {output_path}"
287
- if entry not in st.session_state['history']:
288
- st.session_state['history'].append(entry)
289
  st.session_state['asset_checkboxes'][output_path] = True
290
  else:
291
  st.error(f"Failed to nab {url} 😿")
@@ -294,249 +363,320 @@ with tab_download:
294
  st.session_state['downloaded_pdfs'][url] = output_path
295
  progress_bar.progress((idx + 1) / total_urls)
296
  status_text.text("Robo-Download complete! 🚀")
297
- update_gallery()
298
-
299
-
300
- mode = st.selectbox("Snapshot Mode", ["Single Page (High-Res)", "Two Pages (High-Res)", "All Pages (High-Res)"], key="download_mode") # 🎛️ Selectbox: Choose your snapshot resolution!
301
  if st.button("Snapshot Selected 📸"):
302
- selected_pdfs = [path for path in get_gallery_files()
303
- if path.endswith('.pdf') and st.session_state['asset_checkboxes'].get(path, False)]
304
  if selected_pdfs:
305
  for pdf_path in selected_pdfs:
306
  if not os.path.exists(pdf_path):
307
  st.warning(f"File not found: {pdf_path}. Skipping.")
308
  continue
309
- mode_key = {"Single Page (High-Res)": "single",
310
- "Two Pages (High-Res)": "twopage",
311
  "All Pages (High-Res)": "allpages"}[mode]
312
  snapshots = asyncio.run(process_pdf_snapshot(pdf_path, mode_key))
313
  for snapshot in snapshots:
314
  st.image(Image.open(snapshot), caption=snapshot, use_container_width=True)
315
  st.session_state['asset_checkboxes'][snapshot] = True
316
- update_gallery()
317
  else:
318
  st.warning("No PDFs selected for snapshotting! Check some boxes in the sidebar.")
319
 
320
-
321
- # === Tab: Test OCR ===
322
  with tab_ocr:
323
- st.header("Test OCR 🔍") # 🔍 Header: Time to turn images into text—magic for your eyeballs!
324
- all_files = get_gallery_files(); # 📂 Gathering all assets from the gallery!
325
  if all_files:
326
- if st.button("OCR All Assets 🚀"): # 🚀 Button: Blast OCR on every asset in one go!
327
- full_text = "# OCR Results\n\n"; # 📝 Starting a full OCR report!
328
  for file in all_files:
329
- if file.endswith('.png'): image = Image.open(file) # 🖼️ PNG? Open image directly!
330
- else:
331
- doc = fitz.open(file); pix = doc[0].get_pixmap(matrix=fitz.Matrix(2.0, 2.0)); image = Image.frombytes("RGB", [pix.width, pix.height], pix.samples); doc.close() # 📄 PDF? Grab a snapshot of the first page!
332
- output_file = generate_filename(f"ocr_{os.path.basename(file)}", "txt"); # 💾 Create a unique filename for the OCR text!
333
- result = asyncio.run(process_ocr(image, output_file)); # 🤖 Run OCR asynchronously—non-blocking wizardry!
334
- full_text += f"## {os.path.basename(file)}\n\n{result}\n\n"; # 📝 Append the OCR result to the full report!
335
- entry = f"OCR Test: {file} -> {output_file}"; # 📝 Log this OCR operation!
336
- if entry not in st.session_state['history']: st.session_state['history'].append(entry) # ✅ Update history if this entry is new!
337
- md_output_file = f"full_ocr_{int(time.time())}.md"; # 📝 Generate a markdown filename for the full OCR report!
338
- with open(md_output_file, "w") as f: f.write(full_text); # 💾 Write the full OCR report to disk!
339
- st.success(f"Full OCR saved to {md_output_file}"); # 🎉 Success: Full OCR report is saved!
340
- st.markdown(get_download_link(md_output_file, "text/markdown", "Download Full OCR Markdown"), unsafe_allow_html=True) # 🔗 Provide a download link for your OCR masterpiece!
341
- selected_file = st.selectbox("Select Image or PDF", all_files, key="ocr_select"); # 🔍 Selectbox: Pick an asset for individual OCR!
342
  if selected_file:
343
- if selected_file.endswith('.png'): image = Image.open(selected_file) # 🖼️ Open the selected PNG image!
344
- else:
345
- doc = fitz.open(selected_file); pix = doc[0].get_pixmap(matrix=fitz.Matrix(2.0, 2.0)); image = Image.frombytes("RGB", [pix.width, pix.height], pix.samples); doc.close() # 📄 For PDFs, extract a snapshot from the first page!
346
- st.image(image, caption="Input Image", use_container_width=True); # 🖼️ Display the selected asset for OCR review!
347
- if st.button("Run OCR 🚀", key="ocr_run"): # 🚀 Button: Run OCR on the selected asset!
348
- output_file = generate_filename("ocr_output", "txt"); st.session_state['processing']['ocr'] = True; # 💾 Generate output filename and flag processing!
349
- result = asyncio.run(process_ocr(image, output_file)); # 🤖 Execute OCR asynchronously!
350
- entry = f"OCR Test: {selected_file} -> {output_file}"; # 📝 Create a log entry for this OCR run!
351
- if entry not in st.session_state['history']: st.session_state['history'].append(entry); # Update history if new!
352
- st.text_area("OCR Result", result, height=200, key="ocr_result"); # 📄 Show the OCR result in a text area!
353
- st.success(f"OCR output saved to {output_file}"); st.session_state['processing']['ocr'] = False # 🎉 Success: OCR result saved and processing flag reset!
354
- if selected_file.endswith('.pdf') and st.button("OCR All Pages 🚀", key="ocr_all_pages"): # 📄 Button: Run OCR on every page of a PDF!
355
- doc = fitz.open(selected_file); full_text = f"# OCR Results for {os.path.basename(selected_file)}\n\n"; # 📝 Start a report for multi-page PDF OCR!
356
  for i in range(len(doc)):
357
- pix = doc[i].get_pixmap(matrix=fitz.Matrix(2.0, 2.0)); image = Image.frombytes("RGB", [pix.width, pix.height], pix.samples); # 🖼️ Capture each page as an image!
358
- output_file = generate_filename(f"ocr_page_{i}", "txt"); result = asyncio.run(process_ocr(image, output_file)); # 💾 Generate filename and process OCR for the page!
359
- full_text += f"## Page {i + 1}\n\n{result}\n\n"; # 📝 Append the page's OCR result to the report!
360
- entry = f"OCR Test: {selected_file} Page {i + 1} -> {output_file}"; # 📝 Log this page's OCR operation!
361
- if entry not in st.session_state['history']: st.session_state['history'].append(entry) # ✅ Update history if this entry is new!
362
- md_output_file = f"full_ocr_{os.path.basename(selected_file)}_{int(time.time())}.md"; # 📝 Create a markdown filename for the full multi-page OCR report!
363
- with open(md_output_file, "w") as f: f.write(full_text); # 💾 Write the full multi-page OCR report to disk!
364
- st.success(f"Full OCR saved to {md_output_file}"); # 🎉 Success: Multi-page OCR report is saved!
365
- st.markdown(get_download_link(md_output_file, "text/markdown", "Download Full OCR Markdown"), unsafe_allow_html=True) # 🔗 Provide a download link for the multi-page OCR report!
366
  else:
367
- st.warning("No assets in gallery yet. Use Camera Snap or Download PDFs!") # ⚠️ Warning: Your gallery is empty—capture or download some assets first!
368
 
369
- # === Tab: Build Titan ===
370
  with tab_build:
371
- st.header("Build Titan 🌱") # 🌱 Header: Build your own Titan—tiny models, huge ambitions!
372
- model_type = st.selectbox("Model Type", ["Causal LM", "Diffusion"], key="build_type") # 🔍 Choose your model flavor!
373
  base_model = st.selectbox(
374
  "Select Tiny Model",
375
- ["HuggingFaceTB/SmolLM-135M", "Qwen/Qwen1.5-0.5B-Chat"] if model_type == "Causal LM"
376
  else ["OFA-Sys/small-stable-diffusion-v0", "stabilityai/stable-diffusion-2-base"]
377
- ) # 🤖 Pick a tiny model based on your choice!
378
- model_name = st.text_input("Model Name", f"tiny-titan-{int(time.time())}") # 🏷️ Auto-generate a cool model name with a timestamp!
379
- domain = st.text_input("Target Domain", "general") # 🎯 Specify your target domain (default: general)!
380
- if st.button("Download Model ⬇️"): # ⬇️ Button: Download your model and get ready to unleash the Titan!
381
  config = (ModelConfig if model_type == "Causal LM" else DiffusionConfig)(
382
  name=model_name, base_model=base_model, size="small", domain=domain
383
- ) # 📝 Create model configuration on the fly!
384
- builder = ModelBuilder() if model_type == "Causal LM" else DiffusionBuilder() # 🔧 Instantiate the builder for your model type!
385
- builder.load_model(base_model, config); builder.save_model(config.model_path) # 🚀 Load and save the model—instant Titan assembly!
386
- st.session_state['builder'] = builder; st.session_state['model_loaded'] = True # ⚙️ Update session state: model is now loaded!
387
- st.session_state['selected_model_type'] = model_type; st.session_state['selected_model'] = config.model_path # 🔑 Store your selection for posterity!
388
- entry = f"Built {model_type} model: {model_name}" # 📝 Log the build event in history!
389
- if entry not in st.session_state['history']: st.session_state['history'].append(entry)
390
- st.success(f"Model downloaded and saved to {config.model_path}! 🎉"); st.rerun() # 🎉 Success: Titan built, now re-run to refresh the interface!
391
-
392
- # === Tab: Test Image Gen ===
393
  with tab_imggen:
394
- st.header("Test Image Gen 🎨") # 🎨 Header: Time to get creative with AI image generation!
395
- all_files = get_gallery_files() # 📂 Retrieve all gallery assets for selection.
396
  if all_files:
397
- selected_file = st.selectbox("Select Image or PDF", all_files, key="gen_select") # 🔍 Select an asset to spark creativity!
398
  if selected_file:
399
- if selected_file.endswith('.png'):
400
- image = Image.open(selected_file) # 🖼️ Directly open PNG images!
401
  else:
402
- doc = fitz.open(selected_file); pix = doc[0].get_pixmap(matrix=fitz.Matrix(2.0, 2.0));
403
- image = Image.frombytes("RGB", [pix.width, pix.height], pix.samples); doc.close() # 📄 For PDFs, extract the first page as an image!
404
- st.image(image, caption="Reference Image", use_container_width=True) # 🖼️ Display the chosen asset as reference.
405
- prompt = st.text_area("Prompt", "Generate a neon superhero version of this image", key="gen_prompt") # ✍️ Enter a creative prompt to transform the image!
406
- if st.button("Run Image Gen 🚀", key="gen_run"): # 🚀 Button: Ignite the image generator!
407
- output_file = generate_filename("gen_output", "png"); st.session_state['processing']['gen'] = True # 💾 Create output filename and flag processing status.
408
- result = asyncio.run(process_image_gen(prompt, output_file)) # 🤖 Run the async image generation—non-blocking magic in action!
409
- entry = f"Image Gen Test: {prompt} -> {output_file}" # 📝 Log the image generation event!
410
- if entry not in st.session_state['history']: st.session_state['history'].append(entry)
411
- st.image(result, caption="Generated Image", use_container_width=True) # 🖼️ Showcase the newly generated image!
412
- st.success(f"Image saved to {output_file}"); st.session_state['processing']['gen'] = False # 🎉 Success: Your masterpiece is saved and processing is complete!
413
  else:
414
- st.warning("No images or PDFs in gallery yet. Use Camera Snap or Download PDFs!") # ⚠️ Warning: No assets available—capture or download some first!
415
- update_gallery() # 🔄 Refresh the gallery to display any updates!
416
 
417
- # === Updated Tab: PDF Process ===
418
  with tab_pdf_process:
419
- st.header("PDF Process") # 📄 Header: Ready to transform your PDFs into text with GPT magic!
420
- st.subheader("Upload PDFs for GPT-based text extraction") # 🚀 Subheader: Upload your PDFs and let the AI do the reading!
421
- gpt_models = ["gpt-4o", "gpt-4o-mini"] # 🤖 GPT Models: Pick your AI wizard—more vision-capable models may join the party!
422
- selected_gpt_model = st.selectbox("Select GPT Model", gpt_models, key="pdf_gpt_model") # 🔍 Select your GPT model and let it work its charm!
423
- detail_level = st.selectbox("Detail Level", ["auto", "low", "high"], key="pdf_detail_level") # 🎚️ Detail Level: Fine-tune your extraction’s precision!
424
- uploaded_pdfs = st.file_uploader("Upload PDF files", type=["pdf"], accept_multiple_files=True, key="pdf_process_uploader") # 📤 Uploader: Drag & drop your PDFs for processing!
425
- view_mode = st.selectbox("View Mode", ["Single Page", "Double Page"], key="pdf_view_mode") # 👀 View Mode: Choose single or double page snapshots!
426
-
427
- if st.button("Process Uploaded PDFs", key="process_pdfs"): # ⚙️ Button: Kick off the PDF processing extravaganza!
428
- combined_text = "" # 📝 Initialize a blank slate for the GPT output!
429
- for pdf_file in uploaded_pdfs: # 🔄 Loop through each uploaded PDF file!
430
- pdf_bytes = pdf_file.read() # 📥 Read the PDF bytes into memory!
431
- temp_pdf_path = f"temp_{pdf_file.name}" # 🏷️ Create a temporary filename for processing!
432
- with open(temp_pdf_path, "wb") as f: f.write(pdf_bytes) # 💾 Write the PDF to a temporary file!
433
  try:
434
- doc = fitz.open(temp_pdf_path) # 📄 Open the temporary PDF document!
435
- st.write(f"Processing {pdf_file.name} with {len(doc)} pages") # 🔍 Log: Display file name and page count!
436
- if view_mode == "Single Page": # 📑 Single Page Mode: Process each page separately!
437
  for i, page in enumerate(doc):
438
- pix = page.get_pixmap(matrix=fitz.Matrix(2.0, 2.0)); # 🎞️ Create a high-res pixmap of the page!
439
- img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples); # 🖼️ Convert the pixmap to an image!
440
- st.image(img, caption=f"{pdf_file.name} Page {i+1}"); # 🖼️ Display the page image!
441
- gpt_text = process_image_with_prompt(
442
- img, "Extract the electronic text from image", model=selected_gpt_model, detail=detail_level
443
- ); # 🤖 Run GPT to extract text from the image!
444
- combined_text += f"\n## {pdf_file.name} - Page {i+1}\n\n{gpt_text}\n"; # 📝 Append the result to the combined text!
445
- else: # 📄 Double Page Mode: Process pages in pairs!
446
- pages = list(doc); # 🔢 Convert document pages to a list!
447
  for i in range(0, len(pages), 2):
448
- if i+1 < len(pages): # 👯 Process two pages if available!
449
- pix1 = pages[i].get_pixmap(matrix=fitz.Matrix(2.0, 2.0)); img1 = Image.frombytes("RGB", [pix1.width, pix1.height], pix1.samples); # 🖼️ Process first page!
450
- pix2 = pages[i+1].get_pixmap(matrix=fitz.Matrix(2.0, 2.0)); img2 = Image.frombytes("RGB", [pix2.width, pix2.height], pix2.samples); # 🖼️ Process second page!
451
- total_width = img1.width + img2.width; max_height = max(img1.height, img2.height); # 📏 Calculate dimensions for the combined image!
452
- combined_img = Image.new("RGB", (total_width, max_height)); # 🖼️ Create a blank canvas for the two pages!
453
- combined_img.paste(img1, (0, 0)); combined_img.paste(img2, (img1.width, 0)); # 🎨 Paste the images side by side!
454
- st.image(combined_img, caption=f"{pdf_file.name} Pages {i+1}-{i+2}"); # 🖼️ Display the combined image!
455
- gpt_text = process_image_with_prompt(
456
- combined_img, "Extract the electronic text from image", model=selected_gpt_model, detail=detail_level
457
- ); # 🤖 Extract text from the combined image!
458
- combined_text += f"\n## {pdf_file.name} - Pages {i+1}-{i+2}\n\n{gpt_text}\n"; # 📝 Append the result to the combined text!
459
- else: # 🔹 If there's an odd page out, process it solo!
460
- pix = pages[i].get_pixmap(matrix=fitz.Matrix(2.0, 2.0)); img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples); # 🖼️ Process the single remaining page!
461
- st.image(img, caption=f"{pdf_file.name} Page {i+1}"); # 🖼️ Display the solo page image!
462
- gpt_text = process_image_with_prompt(
463
- img, "Extract the electronic text from image", model=selected_gpt_model, detail=detail_level
464
- ); # 🤖 Run GPT extraction on the solo page!
465
- combined_text += f"\n## {pdf_file.name} - Page {i+1}\n\n{gpt_text}\n"; # 📝 Append the result!
466
- doc.close(); # Close the PDF document to free up resources!
467
- except Exception as e:
468
- st.error(f"Error processing {pdf_file.name}: {str(e)}"); # ⚠️ Error: Report any issues during processing!
469
- finally:
470
- os.remove(temp_pdf_path); # 🧹 Cleanup: Remove the temporary PDF file!
471
- output_filename = generate_filename("processed_pdf", "md"); # 🏷️ Generate a unique filename for the Markdown output!
472
- with open(output_filename, "w", encoding="utf-8") as f: f.write(combined_text); # 💾 Write the combined GPT text to the Markdown file!
473
- st.success(f"PDF processing complete. MD file saved as {output_filename}"); # 🎉 Success: Notify the user of completion!
474
- st.markdown(get_download_link(output_filename, "text/markdown", "Download Processed PDF MD"), unsafe_allow_html=True); # 🔗 Provide a download link for your processed file!
475
-
476
- # === Updated Tab: Image Process ===
 
 
477
  with tab_image_process:
478
- st.header("Image Process") # 🖼️ Header: Transform images into text with GPT magic!
479
- st.subheader("Upload Images for GPT-based OCR") # 🚀 Subheader: Let your images speak for themselves!
480
- gpt_models = ["gpt-4o", "gpt-4o-mini"] # 🤖 GPT Models: Choose your image wizard!
481
- selected_gpt_model = st.selectbox("Select GPT Model", gpt_models, key="img_gpt_model") # 🔍 Pick your GPT model for image processing!
482
- detail_level = st.selectbox("Detail Level", ["auto", "low", "high"], key="img_detail_level") # 🎚️ Detail Level: Set your extraction precision!
483
- prompt_img = st.text_input("Enter prompt for image processing", "Extract the electronic text from image", key="img_process_prompt") # ✍️ Prompt: Tell GPT what to extract!
484
- uploaded_images = st.file_uploader("Upload image files", type=["png", "jpg", "jpeg"], accept_multiple_files=True, key="image_process_uploader") # 📤 Uploader: Drag & drop your images here!
485
- if st.button("Process Uploaded Images", key="process_images"): # 🚀 Button: Fire up the image processing!
486
- combined_text = "" # 📝 Initialize combined text output!
487
  for img_file in uploaded_images:
488
  try:
489
- img = Image.open(img_file); st.image(img, caption=img_file.name) # 📸 Display each uploaded image!
490
- gpt_text = process_image_with_prompt(img, prompt_img, model=selected_gpt_model, detail=detail_level) # 🤖 Process image with GPT magic!
491
- combined_text += f"\n## {img_file.name}\n\n{gpt_text}\n" # 📝 Append GPT output with file header!
492
- except Exception as e: st.error(f"Error processing image {img_file.name}: {str(e)}") # ⚠️ Oops: Report errors if any!
493
- output_filename = generate_filename("processed_image", "md") # 💾 Generate a unique filename for the Markdown output!
494
- with open(output_filename, "w", encoding="utf-8") as f: f.write(combined_text) # 📝 Save the combined GPT output!
495
- st.success(f"Image processing complete. MD file saved as {output_filename}") # 🎉 Success: Notify the user!
496
- st.markdown(get_download_link(output_filename, "text/markdown", "Download Processed Image MD"), unsafe_allow_html=True) # 🔗 Provide a download link!
497
-
498
- # === Updated Tab: MD Gallery ===
499
  with tab_md_gallery:
500
- st.header("MD Gallery and GPT Processing") # 📚 Header: Where markdown meets GPT wizardry!
501
- gpt_models = ["gpt-4o", "gpt-4o-mini"] # 🤖 GPT Models: Pick your processing partner!
502
- selected_gpt_model = st.selectbox("Select GPT Model", gpt_models, key="md_gpt_model") # 🔍 Select a GPT model for MD processing!
503
- md_files = sorted(glob.glob("*.md")) # 📂 Gather all Markdown files in the directory!
504
  if md_files:
505
- st.subheader("Individual File Processing") # 🔍 Subheader: Process files one at a time!
506
- cols = st.columns(2) # 🧩 Set up two columns for a balanced view!
507
  for idx, md_file in enumerate(md_files):
508
  with cols[idx % 2]:
509
- st.write(md_file) # 📄 Show the filename!
510
- if st.button(f"Process {md_file}", key=f"process_md_{md_file}"): # 🚀 Button: Process this file!
511
  try:
512
- with open(md_file, "r", encoding="utf-8") as f: content = f.read() # 📖 Read file content!
513
- prompt_md = "Summarize this into markdown outline with emojis and number the topics 1..12" # ✍️ Prompt: Summarize with style!
514
- result_text = process_text_with_prompt(content, prompt_md, model=selected_gpt_model) # 🤖 Let GPT work its magic!
515
- st.markdown(result_text) # 🎨 Display the GPT output!
516
- output_filename = generate_filename(f"processed_{os.path.splitext(md_file)[0]}", "md") # 💾 Create a unique output filename!
517
- with open(output_filename, "w", encoding="utf-8") as f: f.write(result_text) # 📝 Save the processed content!
518
- st.markdown(get_download_link(output_filename, "text/markdown", f"Download {output_filename}"), unsafe_allow_html=True) # 🔗 Provide a download link!
519
- except Exception as e: st.error(f"Error processing {md_file}: {str(e)}") # ⚠️ Report errors if processing fails!
520
- st.subheader("Batch Processing") # 📚 Subheader: Combine and process multiple files at once!
521
- st.write("Select MD files to combine and process:") # 🔍 Instruction: Choose files for batch processing!
522
- selected_md = {} # 🗂️ Initialize selection dictionary!
523
- for md_file in md_files: selected_md[md_file] = st.checkbox(md_file, key=f"checkbox_md_{md_file}") # ✅ Create checkboxes for each file!
524
- batch_prompt = st.text_input("Enter batch processing prompt", "Summarize this into markdown outline with emojis and number the topics 1..12", key="batch_prompt") # ✍️ Batch prompt: Set your summarization style!
525
- if st.button("Process Selected MD Files", key="process_batch_md"): # 🚀 Button: Process the selected files!
526
- combined_content = "" # 📝 Initialize combined content string!
527
  for md_file, selected in selected_md.items():
528
  if selected:
529
  try:
530
- with open(md_file, "r", encoding="utf-8") as f: combined_content += f"\n## {md_file}\n" + f.read() + "\n" # 📄 Append each selected file's content!
531
- except Exception as e: st.error(f"Error reading {md_file}: {str(e)}") # ⚠️ Report errors if file reading fails!
 
 
532
  if combined_content:
533
- result_text = process_text_with_prompt(combined_content, batch_prompt, model=selected_gpt_model) # 🤖 Process the batch with GPT!
534
- st.markdown(result_text) # 🎨 Display the combined GPT output!
535
- output_filename = generate_filename("batch_processed_md", "md") # 💾 Generate a unique filename for the batch output!
536
- with open(output_filename, "w", encoding="utf-8") as f: f.write(result_text) # 📝 Save the batch processed text!
537
- st.success(f"Batch processing complete. MD file saved as {output_filename}") # 🎉 Notify success!
538
- st.markdown(get_download_link(output_filename, "text/markdown", "Download Batch Processed MD"), unsafe_allow_html=True) # 🔗 Provide a download link!
 
539
  else:
540
- st.warning("No MD files selected.") # ⚠️ Warning: No files were chosen for batch processing!
541
  else:
542
- st.warning("No MD files found.") # ⚠️ Warning: Your gallery is empty—no markdown files available!
53
  }
54
  )
55
 
56
+ # Set up default session state values.
57
+ st.session_state.setdefault('history', []) # History: starting fresh if empty!
58
+ st.session_state.setdefault('builder', None) # Builder: set up if missing.
59
+ st.session_state.setdefault('model_loaded', False) # Model Loaded: not loaded by default.
60
+ st.session_state.setdefault('processing', {}) # Processing: initialize as an empty dict.
61
+ st.session_state.setdefault('asset_checkboxes', {}) # Asset Checkboxes: default to an empty dict.
62
+ st.session_state.setdefault('downloaded_pdfs', {}) # Downloaded PDFs: start with none.
63
+ st.session_state.setdefault('unique_counter', 0) # Unique Counter: initialize to zero.
64
+ st.session_state.setdefault('selected_model_type', "Causal LM")
65
+ st.session_state.setdefault('selected_model', "None")
66
+ st.session_state.setdefault('cam0_file', None)
67
+ st.session_state.setdefault('cam1_file', None)
68
+
69
+ # Create a single container for the asset gallery in the sidebar.
70
+ if 'asset_gallery_container' not in st.session_state:
71
+ st.session_state['asset_gallery_container'] = st.sidebar.empty()
72
+
73
+ @dataclass # ModelConfig: A blueprint for model configurations.
74
  class ModelConfig:
75
  name: str
76
  base_model: str
 
78
  domain: Optional[str] = None
79
  model_type: str = "causal_lm"
80
  @property
81
+ def model_path(self):
82
+ return f"models/{self.name}"
83
 
84
+ @dataclass # DiffusionConfig: Where diffusion magic takes shape.
85
  class DiffusionConfig:
86
  name: str
87
  base_model: str
88
  size: str
89
  domain: Optional[str] = None
90
  @property
91
+ def model_path(self):
92
+ return f"diffusion_models/{self.name}"
93
+
94
+ class ModelBuilder:
95
+ def __init__(self):
96
+ self.config = None
97
+ self.model = None
98
+ self.tokenizer = None
99
+ self.jokes = [
100
  "Why did the AI go to therapy? Too many layers to unpack! 😂",
101
  "Training complete! Time for a binary coffee break. ☕",
102
  "I told my neural network a joke; it couldn't stop dropping bits! 🤖",
103
  "I asked the AI for a pun, and it said, 'I'm punning on parallel processing!' 😄",
104
  "Debugging my code is like a stand-up routine—always a series of exceptions! 😆"
105
  ]
106
+ def load_model(self, model_path: str, config: Optional[ModelConfig] = None):
107
+ with st.spinner(f"Loading {model_path}... ⏳"):
108
  self.model = AutoModelForCausalLM.from_pretrained(model_path)
109
  self.tokenizer = AutoTokenizer.from_pretrained(model_path)
110
+ if self.tokenizer.pad_token is None:
111
+ self.tokenizer.pad_token = self.tokenizer.eos_token
112
+ if config:
113
+ self.config = config
114
+ self.model.to("cuda" if torch.cuda.is_available() else "cpu")
115
+ st.success(f"Model loaded! 🎉 {random.choice(self.jokes)}")
116
  return self
117
+ def save_model(self, path: str):
118
+ with st.spinner("Saving model... 💾"):
119
+ os.makedirs(os.path.dirname(path), exist_ok=True)
120
+ self.model.save_pretrained(path)
121
+ self.tokenizer.save_pretrained(path)
122
+ st.success(f"Model saved at {path}! ✅")
123
 
124
  class DiffusionBuilder:
125
  def __init__(self):
 
140
  def generate(self, prompt: str):
141
  return self.pipeline(prompt, num_inference_steps=20).images[0]
142
 
143
+ def generate_filename(sequence, ext="png"):
144
+ return f"{sequence}_{time.strftime('%d%m%Y%H%M%S')}.{ext}"
145
+
146
  def pdf_url_to_filename(url):
147
+ return re.sub(r'[<>:"/\\|?*]', '_', url) + ".pdf"
148
+
149
+ def get_download_link(file_path, mime_type="application/pdf", label="Download"):
150
+ return f'<a href="data:{mime_type};base64,{base64.b64encode(open(file_path, "rb").read()).decode()}" download="{os.path.basename(file_path)}">{label}</a>'
151
+
152
+ def zip_directory(directory_path, zip_path):
153
+ with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
154
+ [zipf.write(os.path.join(root, file), os.path.relpath(os.path.join(root, file), os.path.dirname(directory_path)))
155
+ for root, _, files in os.walk(directory_path) for file in files]
156
+
157
+ def get_model_files(model_type="causal_lm"):
158
+ return [d for d in glob.glob("models/*" if model_type == "causal_lm" else "diffusion_models/*") if os.path.isdir(d)] or ["None"]
159
+
160
+ def get_gallery_files(file_types=["png", "pdf"]):
161
+ return sorted(list({f for ext in file_types for f in glob.glob(f"*.{ext}")}))
162
+
163
+ def get_pdf_files():
164
+ return sorted(glob.glob("*.pdf"))
165
+
166
  def download_pdf(url, output_path):
167
  try:
168
+ response = requests.get(url, stream=True, timeout=10)
169
+ if response.status_code == 200:
170
+ with open(output_path, "wb") as f:
171
+ for chunk in response.iter_content(chunk_size=8192):
172
+ f.write(chunk)
173
+ ret = True
174
+ else:
175
+ ret = False
176
+ except requests.RequestException as e:
177
+ logger.error(f"Failed to download {url}: {e}")
178
+ ret = False
179
+ return ret
180
+
181
+ # Async PDF Snapshot: Snap your PDF pages without blocking.
182
+ async def process_pdf_snapshot(pdf_path, mode="single"):
183
+ start_time = time.time()
184
+ status = st.empty()
185
+ status.text(f"Processing PDF Snapshot ({mode})... (0s)")
186
  try:
187
+ doc = fitz.open(pdf_path)
188
+ output_files = []
189
+ if mode == "single":
190
+ page = doc[0]
191
+ pix = page.get_pixmap(matrix=fitz.Matrix(2.0, 2.0))
192
+ output_file = generate_filename("single", "png")
193
+ pix.save(output_file)
194
+ output_files.append(output_file)
195
+ elif mode == "twopage":
196
+ for i in range(min(2, len(doc))):
197
+ page = doc[i]
198
+ pix = page.get_pixmap(matrix=fitz.Matrix(2.0, 2.0))
199
+ output_file = generate_filename(f"twopage_{i}", "png")
200
+ pix.save(output_file)
201
+ output_files.append(output_file)
202
+ elif mode == "allpages":
203
+ for i in range(len(doc)):
204
+ page = doc[i]
205
+ pix = page.get_pixmap(matrix=fitz.Matrix(2.0, 2.0))
206
+ output_file = generate_filename(f"page_{i}", "png")
207
+ pix.save(output_file)
208
+ output_files.append(output_file)
209
+ doc.close()
210
+ elapsed = int(time.time() - start_time)
211
+ status.text(f"PDF Snapshot ({mode}) completed in {elapsed}s!")
212
+ return output_files
213
+ except Exception as e:
214
+ status.error(f"Failed to process PDF: {str(e)}")
215
+ return []
216
+
217
+ # Async OCR: Convert images to text.
218
+ async def process_ocr(image, output_file):
219
+ start_time = time.time()
220
+ status = st.empty()
221
+ status.text("Processing GOT-OCR2_0... (0s)")
222
+ tokenizer = AutoTokenizer.from_pretrained("ucaslcl/GOT-OCR2_0", trust_remote_code=True)
223
+ model = AutoModel.from_pretrained("ucaslcl/GOT-OCR2_0", trust_remote_code=True, torch_dtype=torch.float32).to("cpu").eval()
224
+ temp_file = f"temp_{int(time.time())}.png"
225
+ image.save(temp_file)
226
+ result = model.chat(tokenizer, temp_file, ocr_type='ocr')
227
+ os.remove(temp_file)
228
+ elapsed = int(time.time() - start_time)
229
+ status.text(f"GOT-OCR2_0 completed in {elapsed}s!")
230
+ async with aiofiles.open(output_file, "w") as f:
231
+ await f.write(result)
232
+ return result
233
+
234
+ # Async Image Gen: Your image genie.
235
+ async def process_image_gen(prompt, output_file):
236
+ start_time = time.time()
237
+ status = st.empty()
238
+ status.text("Processing Image Gen... (0s)")
239
+ pipeline = (st.session_state['builder'].pipeline
240
+ if st.session_state.get('builder') and isinstance(st.session_state['builder'], DiffusionBuilder)
241
+ and st.session_state['builder'].pipeline
242
+ else StableDiffusionPipeline.from_pretrained("OFA-Sys/small-stable-diffusion-v0", torch_dtype=torch.float32).to("cpu"))
243
+ gen_image = pipeline(prompt, num_inference_steps=20).images[0]
244
+ elapsed = int(time.time() - start_time)
245
+ status.text(f"Image Gen completed in {elapsed}s!")
246
+ gen_image.save(output_file)
247
+ return gen_image
248
+
249
+ # GPT-Image Interpreter: Turning pixels into prose!
250
+ def process_image_with_prompt(image, prompt, model="gpt-4o-mini", detail="auto"):
251
+ buffered = BytesIO()
252
+ image.save(buffered, format="PNG")
253
+ img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
254
+ messages = [{
255
+ "role": "user",
256
+ "content": [
257
+ {"type": "text", "text": prompt},
258
+ {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{img_str}", "detail": detail}}
259
+ ]
260
+ }]
261
+ try:
262
+ response = client.chat.completions.create(model=model, messages=messages, max_tokens=300)
263
+ return response.choices[0].message.content
264
+ except Exception as e:
265
+ return f"Error processing image with GPT: {str(e)}"
266
+
267
+ # GPT-Text Alchemist: Merging prompt and text.
268
+ def process_text_with_prompt(text, prompt, model="gpt-4o-mini"):
269
+ messages = [{"role": "user", "content": f"{prompt}\n\n{text}"}]
270
+ try:
271
+ response = client.chat.completions.create(model=model, messages=messages, max_tokens=300)
272
+ return response.choices[0].message.content
273
+ except Exception as e:
274
+ return f"Error processing text with GPT: {str(e)}"
275
+
276
+ # ----------------- SIDEBAR UPDATES -----------------
277
+
278
+ # Sidebar: Gallery Settings
279
+ st.sidebar.subheader("Gallery Settings")
280
+ st.session_state.setdefault('gallery_size', 2)
281
+ st.session_state['gallery_size'] = st.sidebar.slider("Gallery Size", 1, 10, st.session_state['gallery_size'], key="gallery_size_slider")
282
+
283
+ # ----------------- TAB SETUP -----------------
284
+ tabs = st.tabs([
285
+ "Camera Snap 📷", "Download PDFs 📥", "Test OCR 🔍", "Build Titan 🌱",
286
+ "Test Image Gen 🎨", "PDF Process 📄", "Image Process 🖼️", "MD Gallery 📚"
287
+ ])
288
+ (tab_camera, tab_download, tab_ocr, tab_build, tab_imggen, tab_pdf_process, tab_image_process, tab_md_gallery) = tabs
289
+
290
+ # ----------------- TAB: Camera Snap -----------------
291
  with tab_camera:
292
+ st.header("Camera Snap 📷")
293
+ st.subheader("Single Capture")
294
+ cols = st.columns(2)
 
295
  with cols[0]:
296
+ cam0_img = st.camera_input("Take a picture - Cam 0", key="cam0")
297
  if cam0_img:
298
+ filename = generate_filename("cam0")
299
+ if st.session_state['cam0_file'] and os.path.exists(st.session_state['cam0_file']):
300
+ os.remove(st.session_state['cam0_file'])
301
+ with open(filename, "wb") as f:
302
+ f.write(cam0_img.getvalue())
303
+ st.session_state['cam0_file'] = filename
304
+ entry = f"Snapshot from Cam 0: {filename}"
305
+ st.session_state['history'].append(entry)
306
+ st.image(Image.open(filename), caption="Camera 0", use_container_width=True)
307
+ logger.info(f"Saved snapshot from Camera 0: {filename}")
 
308
  with cols[1]:
309
+ cam1_img = st.camera_input("Take a picture - Cam 1", key="cam1")
310
  if cam1_img:
311
+ filename = generate_filename("cam1")
312
+ if st.session_state['cam1_file'] and os.path.exists(st.session_state['cam1_file']):
313
+ os.remove(st.session_state['cam1_file'])
314
+ with open(filename, "wb") as f:
315
+ f.write(cam1_img.getvalue())
316
+ st.session_state['cam1_file'] = filename
317
+ entry = f"Snapshot from Cam 1: {filename}"
318
+ st.session_state['history'].append(entry)
319
+ st.image(Image.open(filename), caption="Camera 1", use_container_width=True)
320
+ logger.info(f"Saved snapshot from Camera 1: {filename}")
321
+
322
+ # ----------------- TAB: Download PDFs -----------------
323
  with tab_download:
324
+ st.header("Download PDFs 📥")
325
+ if st.button("Examples 📚"):
326
+ example_urls = [
327
+ "https://arxiv.org/pdf/2308.03892",
328
+ "https://arxiv.org/pdf/1912.01703",
329
+ "https://arxiv.org/pdf/2408.11039",
330
+ "https://arxiv.org/pdf/2109.10282",
331
+ "https://arxiv.org/pdf/2112.10752",
332
+ "https://arxiv.org/pdf/2308.11236",
333
+ "https://arxiv.org/pdf/1706.03762",
334
+ "https://arxiv.org/pdf/2006.11239",
335
+ "https://arxiv.org/pdf/2305.11207",
336
+ "https://arxiv.org/pdf/2106.09685",
337
+ "https://arxiv.org/pdf/2005.11401",
338
+ "https://arxiv.org/pdf/2106.10504"
339
+ ]
340
+ st.session_state['pdf_urls'] = "\n".join(example_urls)
341
+ url_input = st.text_area("Enter PDF URLs (one per line)", value=st.session_state.get('pdf_urls', ""), height=200)
342
  if st.button("Robo-Download 🤖"):
343
  urls = url_input.strip().split("\n")
344
  progress_bar = st.progress(0)
 
354
  st.session_state['downloaded_pdfs'][url] = output_path
355
  logger.info(f"Downloaded PDF from {url} to {output_path}")
356
  entry = f"Downloaded PDF: {output_path}"
357
+ st.session_state['history'].append(entry)
 
358
  st.session_state['asset_checkboxes'][output_path] = True
359
  else:
360
  st.error(f"Failed to nab {url} 😿")
 
363
  st.session_state['downloaded_pdfs'][url] = output_path
364
  progress_bar.progress((idx + 1) / total_urls)
365
  status_text.text("Robo-Download complete! 🚀")
366
+ mode = st.selectbox("Snapshot Mode", ["Single Page (High-Res)", "Two Pages (High-Res)", "All Pages (High-Res)"], key="download_mode")
367
  if st.button("Snapshot Selected 📸"):
368
+ selected_pdfs = [path for path in get_gallery_files() if path.endswith('.pdf') and st.session_state['asset_checkboxes'].get(path, False)]
 
369
  if selected_pdfs:
370
  for pdf_path in selected_pdfs:
371
  if not os.path.exists(pdf_path):
372
  st.warning(f"File not found: {pdf_path}. Skipping.")
373
  continue
374
+ mode_key = {"Single Page (High-Res)": "single",
375
+ "Two Pages (High-Res)": "twopage",
376
  "All Pages (High-Res)": "allpages"}[mode]
377
  snapshots = asyncio.run(process_pdf_snapshot(pdf_path, mode_key))
378
  for snapshot in snapshots:
379
  st.image(Image.open(snapshot), caption=snapshot, use_container_width=True)
380
  st.session_state['asset_checkboxes'][snapshot] = True
381
+ # No update_gallery() call here; will update once later.
382
  else:
383
  st.warning("No PDFs selected for snapshotting! Check some boxes in the sidebar.")
384
 
385
+ # ----------------- TAB: Test OCR -----------------
 
386
  with tab_ocr:
387
+ st.header("Test OCR 🔍")
388
+ all_files = get_gallery_files()
389
  if all_files:
390
+ if st.button("OCR All Assets 🚀"):
391
+ full_text = "# OCR Results\n\n"
392
  for file in all_files:
393
+ if file.endswith('.png'):
394
+ image = Image.open(file)
395
+ else:
396
+ doc = fitz.open(file)
397
+ pix = doc[0].get_pixmap(matrix=fitz.Matrix(2.0, 2.0))
398
+ image = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
399
+ doc.close()
400
+ output_file = generate_filename(f"ocr_{os.path.basename(file)}", "txt")
401
+ result = asyncio.run(process_ocr(image, output_file))
402
+ full_text += f"## {os.path.basename(file)}\n\n{result}\n\n"
403
+ entry = f"OCR Test: {file} -> {output_file}"
404
+ st.session_state['history'].append(entry)
405
+ md_output_file = f"full_ocr_{int(time.time())}.md"
406
+ with open(md_output_file, "w") as f:
407
+ f.write(full_text)
408
+ st.success(f"Full OCR saved to {md_output_file}")
409
+ st.markdown(get_download_link(md_output_file, "text/markdown", "Download Full OCR Markdown"), unsafe_allow_html=True)
+         selected_file = st.selectbox("Select Image or PDF", all_files, key="ocr_select")
          if selected_file:
+             if selected_file.endswith('.png'):
+                 image = Image.open(selected_file)
+             else:
+                 doc = fitz.open(selected_file)
+                 pix = doc[0].get_pixmap(matrix=fitz.Matrix(2.0, 2.0))
+                 image = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
+                 doc.close()
+             st.image(image, caption="Input Image", use_container_width=True)
+             if st.button("Run OCR 🚀", key="ocr_run"):
+                 output_file = generate_filename("ocr_output", "txt")
+                 st.session_state['processing']['ocr'] = True
+                 result = asyncio.run(process_ocr(image, output_file))
+                 entry = f"OCR Test: {selected_file} -> {output_file}"
+                 st.session_state['history'].append(entry)
+                 st.text_area("OCR Result", result, height=200, key="ocr_result")
+                 st.success(f"OCR output saved to {output_file}")
+                 st.session_state['processing']['ocr'] = False
+             if selected_file.endswith('.pdf') and st.button("OCR All Pages 🚀", key="ocr_all_pages"):
+                 doc = fitz.open(selected_file)
+                 full_text = f"# OCR Results for {os.path.basename(selected_file)}\n\n"
                  for i in range(len(doc)):
+                     pix = doc[i].get_pixmap(matrix=fitz.Matrix(2.0, 2.0))
+                     image = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
+                     output_file = generate_filename(f"ocr_page_{i}", "txt")
+                     result = asyncio.run(process_ocr(image, output_file))
+                     full_text += f"## Page {i + 1}\n\n{result}\n\n"
+                     entry = f"OCR Test: {selected_file} Page {i + 1} -> {output_file}"
+                     st.session_state['history'].append(entry)
+                 md_output_file = f"full_ocr_{os.path.basename(selected_file)}_{int(time.time())}.md"
+                 with open(md_output_file, "w") as f:
+                     f.write(full_text)
+                 st.success(f"Full OCR saved to {md_output_file}")
+                 st.markdown(get_download_link(md_output_file, "text/markdown", "Download Full OCR Markdown"), unsafe_allow_html=True)
      else:
+         st.warning("No assets in gallery yet. Use Camera Snap or Download PDFs!")

+ # ----------------- TAB: Build Titan -----------------
  with tab_build:
+     st.header("Build Titan 🌱")
+     model_type = st.selectbox("Model Type", ["Causal LM", "Diffusion"], key="build_type")
      base_model = st.selectbox(
          "Select Tiny Model",
+         ["HuggingFaceTB/SmolLM-135M", "Qwen/Qwen1.5-0.5B-Chat"] if model_type == "Causal LM"
          else ["OFA-Sys/small-stable-diffusion-v0", "stabilityai/stable-diffusion-2-base"]
+     )
+     model_name = st.text_input("Model Name", f"tiny-titan-{int(time.time())}")
+     domain = st.text_input("Target Domain", "general")
+     if st.button("Download Model ⬇️"):
          config = (ModelConfig if model_type == "Causal LM" else DiffusionConfig)(
              name=model_name, base_model=base_model, size="small", domain=domain
+         )
+         builder = ModelBuilder() if model_type == "Causal LM" else DiffusionBuilder()
+         builder.load_model(base_model, config)
+         builder.save_model(config.model_path)
+         st.session_state['builder'] = builder
+         st.session_state['model_loaded'] = True
+         st.session_state['selected_model_type'] = model_type
+         st.session_state['selected_model'] = config.model_path
+         entry = f"Built {model_type} model: {model_name}"
+         st.session_state['history'].append(entry)
+         st.success(f"Model downloaded and saved to {config.model_path}! 🎉")
+         st.experimental_rerun()
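        # st.experimental_rerun() has been deprecated in newer Streamlit releases in favor of
        # st.rerun(); on current versions this call may need to be swapped accordingly.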

+ # ----------------- TAB: Test Image Gen -----------------
  with tab_imggen:
+     st.header("Test Image Gen 🎨")
+     all_files = get_gallery_files()
      if all_files:
+         selected_file = st.selectbox("Select Image or PDF", all_files, key="gen_select")
          if selected_file:
+             if selected_file.endswith('.png'):
+                 image = Image.open(selected_file)
              else:
+                 doc = fitz.open(selected_file)
+                 pix = doc[0].get_pixmap(matrix=fitz.Matrix(2.0, 2.0))
+                 image = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
+                 doc.close()
+             st.image(image, caption="Reference Image", use_container_width=True)
+             prompt = st.text_area("Prompt", "Generate a neon superhero version of this image", key="gen_prompt")
+             if st.button("Run Image Gen 🚀", key="gen_run"):
+                 output_file = generate_filename("gen_output", "png")
+                 st.session_state['processing']['gen'] = True
+                 result = asyncio.run(process_image_gen(prompt, output_file))
+                 entry = f"Image Gen Test: {prompt} -> {output_file}"
+                 st.session_state['history'].append(entry)
+                 st.image(result, caption="Generated Image", use_container_width=True)
+                 st.success(f"Image saved to {output_file}")
+                 st.session_state['processing']['gen'] = False
      else:
+         st.warning("No images or PDFs in gallery yet. Use Camera Snap or Download PDFs!")
 
+ # ----------------- TAB: PDF Process -----------------
  with tab_pdf_process:
+     st.header("PDF Process")
+     st.subheader("Upload PDFs for GPT-based text extraction")
+     gpt_models = ["gpt-4o", "gpt-4o-mini"]
+     selected_gpt_model = st.selectbox("Select GPT Model", gpt_models, key="pdf_gpt_model")
+     detail_level = st.selectbox("Detail Level", ["auto", "low", "high"], key="pdf_detail_level")
+     uploaded_pdfs = st.file_uploader("Upload PDF files", type=["pdf"], accept_multiple_files=True, key="pdf_process_uploader")
+     view_mode = st.selectbox("View Mode", ["Single Page", "Double Page"], key="pdf_view_mode")
+     if st.button("Process Uploaded PDFs", key="process_pdfs"):
+         combined_text = ""
+         for pdf_file in uploaded_pdfs:
+             pdf_bytes = pdf_file.read()
+             temp_pdf_path = f"temp_{pdf_file.name}"
+             with open(temp_pdf_path, "wb") as f:
+                 f.write(pdf_bytes)
              try:
+                 doc = fitz.open(temp_pdf_path)
+                 st.write(f"Processing {pdf_file.name} with {len(doc)} pages")
+                 if view_mode == "Single Page":
                      for i, page in enumerate(doc):
+                         pix = page.get_pixmap(matrix=fitz.Matrix(2.0, 2.0))
+                         img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
+                         st.image(img, caption=f"{pdf_file.name} Page {i+1}")
+                         gpt_text = process_image_with_prompt(img, "Extract the electronic text from image", model=selected_gpt_model, detail=detail_level)
+                         combined_text += f"\n## {pdf_file.name} - Page {i+1}\n\n{gpt_text}\n"
+                 else:
+                     pages = list(doc)
                      for i in range(0, len(pages), 2):
+                         if i+1 < len(pages):
+                             pix1 = pages[i].get_pixmap(matrix=fitz.Matrix(2.0, 2.0))
+                             img1 = Image.frombytes("RGB", [pix1.width, pix1.height], pix1.samples)
+                             pix2 = pages[i+1].get_pixmap(matrix=fitz.Matrix(2.0, 2.0))
+                             img2 = Image.frombytes("RGB", [pix2.width, pix2.height], pix2.samples)
+                             total_width = img1.width + img2.width
+                             max_height = max(img1.height, img2.height)
+                             combined_img = Image.new("RGB", (total_width, max_height))
+                             combined_img.paste(img1, (0, 0))
+                             combined_img.paste(img2, (img1.width, 0))
+                             st.image(combined_img, caption=f"{pdf_file.name} Pages {i+1}-{i+2}")
+                             gpt_text = process_image_with_prompt(combined_img, "Extract the electronic text from image", model=selected_gpt_model, detail=detail_level)
+                             combined_text += f"\n## {pdf_file.name} - Pages {i+1}-{i+2}\n\n{gpt_text}\n"
+                         else:
+                             pix = pages[i].get_pixmap(matrix=fitz.Matrix(2.0, 2.0))
+                             img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
+                             st.image(img, caption=f"{pdf_file.name} Page {i+1}")
+                             gpt_text = process_image_with_prompt(img, "Extract the electronic text from image", model=selected_gpt_model, detail=detail_level)
+                             combined_text += f"\n## {pdf_file.name} - Page {i+1}\n\n{gpt_text}\n"
+                 doc.close()
+             except Exception as e:
+                 st.error(f"Error processing {pdf_file.name}: {str(e)}")
+             finally:
+                 os.remove(temp_pdf_path)
+         output_filename = generate_filename("processed_pdf", "md")
+         with open(output_filename, "w", encoding="utf-8") as f:
+             f.write(combined_text)
+         st.success(f"PDF processing complete. MD file saved as {output_filename}")
+         st.markdown(get_download_link(output_filename, "text/markdown", "Download Processed PDF MD"), unsafe_allow_html=True)
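# process_image_with_prompt() is defined earlier in app.py. From its use above, it is assumed to
# send the PIL image plus the prompt to the selected GPT vision model through the OpenAI
# chat-completions API, roughly (assumes `client = openai.OpenAI()` and `import io, base64`):
#
#   def process_image_with_prompt(image, prompt, model="gpt-4o-mini", detail="auto"):
#       buf = io.BytesIO()
#       image.save(buf, format="PNG")
#       b64 = base64.b64encode(buf.getvalue()).decode("utf-8")
#       messages = [{"role": "user", "content": [
#           {"type": "text", "text": prompt},
#           {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{b64}", "detail": detail}},
#       ]}]
#       response = client.chat.completions.create(model=model, messages=messages)
#       return response.choices[0].message.content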

+ # ----------------- TAB: Image Process -----------------
  with tab_image_process:
+     st.header("Image Process")
+     st.subheader("Upload Images for GPT-based OCR")
+     gpt_models = ["gpt-4o", "gpt-4o-mini"]
+     selected_gpt_model = st.selectbox("Select GPT Model", gpt_models, key="img_gpt_model")
+     detail_level = st.selectbox("Detail Level", ["auto", "low", "high"], key="img_detail_level")
+     prompt_img = st.text_input("Enter prompt for image processing", "Extract the electronic text from image", key="img_process_prompt")
+     uploaded_images = st.file_uploader("Upload image files", type=["png", "jpg", "jpeg"], accept_multiple_files=True, key="image_process_uploader")
+     if st.button("Process Uploaded Images", key="process_images"):
+         combined_text = ""
          for img_file in uploaded_images:
              try:
+                 img = Image.open(img_file)
+                 st.image(img, caption=img_file.name)
+                 gpt_text = process_image_with_prompt(img, prompt_img, model=selected_gpt_model, detail=detail_level)
+                 combined_text += f"\n## {img_file.name}\n\n{gpt_text}\n"
+             except Exception as e:
+                 st.error(f"Error processing image {img_file.name}: {str(e)}")
+         output_filename = generate_filename("processed_image", "md")
+         with open(output_filename, "w", encoding="utf-8") as f:
+             f.write(combined_text)
+         st.success(f"Image processing complete. MD file saved as {output_filename}")
+         st.markdown(get_download_link(output_filename, "text/markdown", "Download Processed Image MD"), unsafe_allow_html=True)
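# get_download_link() is defined earlier in app.py. It is assumed to wrap the file in a base64
# data URI anchor so st.markdown can offer it as a download, for example:
#
#   def get_download_link(file_path, mime_type, label):
#       with open(file_path, "rb") as f:
#           b64 = base64.b64encode(f.read()).decode()
#       return f'<a href="data:{mime_type};base64,{b64}" download="{os.path.basename(file_path)}">{label}</a>'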

+ # ----------------- TAB: MD Gallery -----------------
  with tab_md_gallery:
+     st.header("MD Gallery and GPT Processing")
+     gpt_models = ["gpt-4o", "gpt-4o-mini"]
+     selected_gpt_model = st.selectbox("Select GPT Model", gpt_models, key="md_gpt_model")
+     md_files = sorted(glob.glob("*.md"))
      if md_files:
+         st.subheader("Individual File Processing")
+         cols = st.columns(2)
          for idx, md_file in enumerate(md_files):
              with cols[idx % 2]:
+                 st.write(md_file)
+                 if st.button(f"Process {md_file}", key=f"process_md_{md_file}"):
                      try:
+                         with open(md_file, "r", encoding="utf-8") as f:
+                             content = f.read()
+                         prompt_md = "Summarize this into markdown outline with emojis and number the topics 1..12"
+                         result_text = process_text_with_prompt(content, prompt_md, model=selected_gpt_model)
+                         st.markdown(result_text)
+                         output_filename = generate_filename(f"processed_{os.path.splitext(md_file)[0]}", "md")
+                         with open(output_filename, "w", encoding="utf-8") as f:
+                             f.write(result_text)
+                         st.markdown(get_download_link(output_filename, "text/markdown", f"Download {output_filename}"), unsafe_allow_html=True)
+                     except Exception as e:
+                         st.error(f"Error processing {md_file}: {str(e)}")
+         st.subheader("Batch Processing")
+         st.write("Select MD files to combine and process:")
+         selected_md = {}
+         for md_file in md_files:
+             selected_md[md_file] = st.checkbox(md_file, key=f"checkbox_md_{md_file}")
+         batch_prompt = st.text_input("Enter batch processing prompt", "Summarize this into markdown outline with emojis and number the topics 1..12", key="batch_prompt")
+         if st.button("Process Selected MD Files", key="process_batch_md"):
+             combined_content = ""
              for md_file, selected in selected_md.items():
                  if selected:
                      try:
+                         with open(md_file, "r", encoding="utf-8") as f:
+                             combined_content += f"\n## {md_file}\n" + f.read() + "\n"
+                     except Exception as e:
+                         st.error(f"Error reading {md_file}: {str(e)}")
              if combined_content:
+                 result_text = process_text_with_prompt(combined_content, batch_prompt, model=selected_gpt_model)
+                 st.markdown(result_text)
+                 output_filename = generate_filename("batch_processed_md", "md")
+                 with open(output_filename, "w", encoding="utf-8") as f:
+                     f.write(result_text)
+                 st.success(f"Batch processing complete. MD file saved as {output_filename}")
+                 st.markdown(get_download_link(output_filename, "text/markdown", "Download Batch Processed MD"), unsafe_allow_html=True)
              else:
+                 st.warning("No MD files selected.")
      else:
+         st.warning("No MD files found.")

+ # ----------------- FINAL SIDEBAR UPDATE -----------------
+ # Update the asset gallery once (using its container).
+ def update_gallery():
+     container = st.session_state['asset_gallery_container']
+     container.empty()  # Clear previous gallery content.
+     all_files = get_gallery_files()
+     if all_files:
+         container.markdown("### Asset Gallery 📸📖")
+         cols = container.columns(2)
+         for idx, file in enumerate(all_files[:st.session_state['gallery_size']]):
+             with cols[idx % 2]:
+                 st.session_state['unique_counter'] += 1
+                 unique_id = st.session_state['unique_counter']
+                 if file.endswith('.png'):
+                     st.image(Image.open(file), caption=os.path.basename(file), use_container_width=True)
+                 else:
+                     doc = fitz.open(file)
+                     pix = doc[0].get_pixmap(matrix=fitz.Matrix(0.5, 0.5))
+                     img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
+                     st.image(img, caption=os.path.basename(file), use_container_width=True)
+                     doc.close()
+                 checkbox_key = f"asset_{file}_{unique_id}"
+                 st.session_state['asset_checkboxes'][file] = st.checkbox("Use for SFT/Input", value=st.session_state['asset_checkboxes'].get(file, False), key=checkbox_key)
+                 mime_type = "image/png" if file.endswith('.png') else "application/pdf"
+                 st.markdown(get_download_link(file, mime_type, "Snag It! 📥"), unsafe_allow_html=True)
+                 if st.button("Zap It! 🗑️", key=f"delete_{file}_{unique_id}"):
+                     os.remove(file)
+                     st.session_state['asset_checkboxes'].pop(file, None)
+                     st.success(f"Asset {os.path.basename(file)} vaporized! 💨")
+                     st.experimental_rerun()

+ # Call the gallery update once after all tabs have been processed.
+ update_gallery()
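# Rendering the gallery once per script run (rather than from every tab) avoids redrawing the
# sidebar repeatedly, and folding the incrementing unique_counter into each checkbox/delete key
# keeps widget keys unique within a run.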

+ # Finally, update the Action Logs and History in the sidebar.
+ st.sidebar.subheader("Action Logs 📜")
+ for record in log_records:
+     st.sidebar.write(f"{record.asctime} - {record.levelname} - {record.message}")

+ st.sidebar.subheader("History 📜")
+ for entry in st.session_state.get("history", []):
+     if entry is not None:
+         st.sidebar.write(entry)