mgbam commited on
Commit
f025bf0
Β·
verified Β·
1 Parent(s): bc80edf

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +364 -497
app.py CHANGED
@@ -1,516 +1,383 @@
1
- # advanced_archsketch_app.py
2
- import os
3
  import streamlit as st
4
- from streamlit_drawable_canvas import st_canvas
5
- from PIL import Image, ImageDraw, ImageFont, UnidentifiedImageError
6
- import requests # For potential real API calls later
7
- import openai # Used notionally
8
- from io import BytesIO
9
  import json
10
- import uuid
11
- import time
12
- import random
13
- import base64 # For potential image encoding if needed
14
-
15
# ─── 1. Configuration & Secrets ─────────────────────────────────────────────

# Pull the OpenAI key from Streamlit's secrets store.
# NOTE(review): on failure the app only shows an error and keeps running with
# no key configured — any later OpenAI call would fail; consider st.stop() here.
try:
    openai.api_key = st.secrets["OPENAI_API_KEY"]
except Exception:
    st.error("OpenAI API Key not found. Please set it in Streamlit secrets.")
    # openai.api_key = "YOUR_FALLBACK_KEY_FOR_LOCAL_TESTING" # Or load from env

st.set_page_config(page_title="ArchSketch AI [Advanced]", layout="wide", page_icon="🏗️")

# --- Simulated Backend API Endpoints ---
# Replace with your actual endpoints if building a real backend.
# These are never actually called in this demo; the "backend" functions below
# only print the URLs and simulate responses.
API_SUBMIT_URL = "http://your-backend.com/api/v1/submit_arch_job"
API_STATUS_URL = "http://your-backend.com/api/v1/job_status/{job_id}"
API_RESULT_URL = "http://your-backend.com/api/v1/job_result/{job_id}"  # Might return data directly or a URL
30
# ─── 2. State Initialization & Authentication ───────────────────────────────

def initialize_state():
    """Seed st.session_state with every key the app expects.

    Streamlit re-executes the whole script on each interaction, so existing
    values are never overwritten — only missing keys get their default.
    """
    session_defaults = {
        'logged_in': False,
        'username': None,
        'current_job_id': None,
        'job_status': 'IDLE',  # IDLE, SUBMITTED, PENDING, PROCESSING, COMPLETED, FAILED
        'job_progress': {},    # progress fraction per job_id
        'job_errors': {},      # error message per job_id
        'job_results': {},     # {job_id: {'type': ..., 'data': ..., 'params': {...}, 'prompt': '...'}}
        'selected_history_job_id': None,
        'annotations': {},     # {job_id: [annotation_objects]}
        # Input-specific state
        'input_prompt': "",
        'input_staging_image_bytes': None,
        'input_staging_image_preview': None,
        'input_filename': None,  # filename of the uploaded staging image
    }
    for state_key, default_value in session_defaults.items():
        st.session_state.setdefault(state_key, default_value)

initialize_state()
55
-
56
def show_login_form():
    """Render the demo login form and process a submit, if any."""
    st.warning("Login Required")
    with st.form("login_form"):
        username = st.text_input("Username", key="login_user")
        password = st.text_input("Password", type="password", key="login_pass")
        submitted = st.form_submit_button("Login")
        if not submitted:
            return
        # --- !!! INSECURE - DEMO ONLY !!! --- hard-coded credentials.
        if username == "arch_user" and password == "pass123":
            st.session_state.logged_in = True
            st.session_state.username = username
            st.success("Login successful!")
            time.sleep(1)
            st.rerun()
        else:
            st.error("Invalid credentials.")

# --- Authentication Gate ---
# Halt the script entirely until the user has logged in.
if not st.session_state.logged_in:
    show_login_form()
    st.stop()
78
 
79
# ─── 3. Simulated Backend Interaction Functions ───────────────────────────────

def submit_job_to_backend(payload: dict) -> tuple[str | None, str | None]:
    """Simulate submitting a job to the backend.

    Returns (job_id, None) on success or (None, error_message) on failure
    (~5% simulated failure rate).
    """
    st.info("Submitting job to backend simulation...")
    print(f"SIMULATING API SUBMIT to {API_SUBMIT_URL}")
    # In reality: response = requests.post(API_SUBMIT_URL, json=payload, headers=auth_headers)
    time.sleep(1.5)  # Simulate network + queue time

    # Failure path first (same 5% draw as the original success check).
    if random.random() >= 0.95:
        error_msg = "Simulated API Error: Failed to submit (server busy/invalid payload)."
        print(f"API Submit FAILED: {error_msg}")
        return None, error_msg

    job_id = f"archjob_{uuid.uuid4().hex[:12]}"
    print(f"API Submit SUCCESS: Job ID = {job_id}")
    st.session_state.job_progress[job_id] = 0
    st.session_state.job_errors[job_id] = None
    # Record the request parameters immediately so history/results can show
    # them even before the job completes.
    st.session_state.job_results[job_id] = {
        'type': None, 'data': None,  # Will be filled on completion
        'params': payload.get('parameters', {}),
        'prompt': payload.get('prompt', '')
    }
    return job_id, None
103
 
104
def check_job_status_backend(job_id: str) -> tuple[str, dict | None]:
    """Simulates checking job status, returns (status, result_info | None).

    Drives a fake state machine off st.session_state.job_progress[job_id]:
    progress < 0.1 -> PENDING, < 0.9 -> PROCESSING (3% chance of FAILED per
    poll), >= 0.9 -> COMPLETED. Each call also bumps the stored progress, so
    repeated polling eventually completes the job. result_info is only
    non-None for COMPLETED.
    """
    status_url = API_STATUS_URL.format(job_id=job_id)
    print(f"SIMULATING API STATUS CHECK: {status_url}")
    # In reality: response = requests.get(status_url, headers=auth_headers)
    time.sleep(0.7) # Simulate network latency

    # Defensive: a job submitted in another session/rerun may not have an entry yet.
    if job_id not in st.session_state.job_progress:
        st.session_state.job_progress[job_id] = 0

    current_progress = st.session_state.job_progress[job_id]
    status = "UNKNOWN"
    result_info = None

    # Simulate progress and potential states
    if current_progress < 0.1:
        status = "PENDING"
        st.session_state.job_progress[job_id] += random.uniform(0.05, 0.15)
    elif current_progress < 0.9:
        status = "PROCESSING"
        st.session_state.job_progress[job_id] += random.uniform(0.1, 0.3)
        # Simulate potential failure during processing
        if random.random() < 0.03: # 3% chance of failure mid-run
            status = "FAILED"
            st.session_state.job_errors[job_id] = "Simulated AI failure during processing."
            print(f"API Status SIMULATION: Job {job_id} FAILED processing.")
    elif current_progress >= 0.9: # Consider it done
        status = "COMPLETED"
        print(f"API Status SIMULATION: Job {job_id} COMPLETED.")
        # Determine simulated result type based on original request stored in job_results
        job_mode = st.session_state.job_results.get(job_id, {}).get('params', {}).get('mode', 'Unknown')

        if job_mode == "Floor Plan":
            # Simulate returning path to an SVG or structured JSON data
            placeholder_path = "assets/placeholder_floorplan.svg" # Need this file
            if not os.path.exists(placeholder_path): placeholder_path = "assets/placeholder_floorplan.json" # Fallback - need JSON too
            result_info = {'type': 'svg' if '.svg' in placeholder_path else 'json', 'data_path': placeholder_path}
        else: # Virtual Staging
            placeholder_path = "assets/placeholder_image.png" # Need this file
            result_info = {'type': 'image', 'data_path': placeholder_path}

    print(f"API Status SIMULATION: Job {job_id} Status={status}, Progress={st.session_state.job_progress.get(job_id, 0):.2f}")
    return status, result_info
147
-
148
def fetch_result_data(result_info: dict):
    """Simulate fetching/loading result data described by a status check.

    result_info must carry 'type' ('image' | 'svg' | 'json') and 'data_path'.
    Returns a PIL image, the raw SVG string, or the parsed JSON respectively.
    Raises FileNotFoundError for a missing file, ValueError for an unknown
    type, and re-raises any load error after logging it.
    """
    result_type = result_info['type']
    data_path = result_info['data_path']  # In real app, might be URL
    print(f"SIMULATING Fetching {result_type} result from: {data_path}")
    # In reality: if URL, use requests.get(data_path).content

    if not os.path.exists(data_path):
        print(f"ERROR: Result placeholder not found at {data_path}")
        raise FileNotFoundError(f"Result file missing: {data_path}")

    try:
        # Dispatch on the declared result type; each branch returns directly.
        if result_type == 'image':
            return Image.open(data_path).convert("RGB")
        if result_type == 'svg':
            with open(data_path, 'r', encoding='utf-8') as handle:
                return handle.read()  # raw SVG markup
        if result_type == 'json':
            with open(data_path, 'r', encoding='utf-8') as handle:
                return json.load(handle)  # parsed JSON
        # Unknown type: raised inside the try so it is logged like any
        # other load failure before propagating (matches original flow).
        raise ValueError(f"Unsupported result type: {result_type}")
    except Exception as e:
        print(f"ERROR loading result from {data_path}: {e}")
        raise
176
-
177
# ─── 4. Sidebar UI ───────────────────────────────────────────────────────────
# Renders logout, mode selection, the staging-image uploader, and AI
# parameters. Defines module-level names used by the main area:
# ui_disabled, mode, model_hint, style, resolution, project_id, location,
# client_notes.
with st.sidebar:
    st.header(f"🏗️ ArchSketch AI")
    st.caption(f"User: {st.session_state.username}")
    if st.button("Logout", key="logout_btn"):
        # Clear sensitive parts of state, re-initialize others
        keys_to_clear = list(st.session_state.keys())
        for key in keys_to_clear:
            del st.session_state[key]
        initialize_state()
        st.rerun()
    st.markdown("---")

    st.header("⚙️ Project Configuration")

    # Disable controls while a job is in flight so the user can't change
    # inputs mid-run.
    ui_disabled = st.session_state.job_status in ["SUBMITTED", "PENDING", "PROCESSING"]

    mode = st.radio("Mode", ["Floor Plan", "Virtual Staging"], key="mode_radio", disabled=ui_disabled)

    # --- Conditional Input for Staging ---
    if mode == "Virtual Staging":
        staging_image_file = st.file_uploader(
            "Upload Empty Room Image:",
            type=["png", "jpg", "jpeg", "webp"],
            key="staging_uploader",
            disabled=ui_disabled,
            help="Required for Virtual Staging mode."
        )
        if staging_image_file:
            if staging_image_file.name != st.session_state.input_filename: # Detect new upload
                st.info("Processing staging image...")
                try:
                    img_bytes = staging_image_file.getvalue()
                    # BUG FIX: this module does `from io import BytesIO`, so the
                    # previous `io.BytesIO(...)` raised NameError on upload.
                    image = Image.open(BytesIO(img_bytes)).convert("RGB")
                    image.thumbnail((1024, 1024), Image.Resampling.LANCZOS) # Resize preview
                    st.session_state.input_staging_image_bytes = img_bytes # Store bytes for API
                    st.session_state.input_staging_image_preview = image
                    st.session_state.input_filename = staging_image_file.name
                    st.success("Staging image loaded.")
                    # Don't rerun here, let user configure other options
                except UnidentifiedImageError:
                    st.error("Invalid image file.")
                    st.session_state.input_staging_image_bytes = None
                    st.session_state.input_staging_image_preview = None
                    st.session_state.input_filename = None
                except Exception as e:
                    st.error(f"Error loading image: {e}")
                    st.session_state.input_staging_image_bytes = None
                    st.session_state.input_staging_image_preview = None
                    st.session_state.input_filename = None

        elif st.session_state.input_filename: # User cleared the uploader
            st.session_state.input_staging_image_bytes = None
            st.session_state.input_staging_image_preview = None
            st.session_state.input_filename = None

    st.markdown("---")
    st.header("✨ AI Parameters")
    # Note: Different models might be chosen by the backend based on mode/style
    model_hint = st.selectbox("Model Preference (Hint for Backend)", ["Auto", "GPT‑4o (Text/Layout)", "Stable Diffusion (Image Gen)", "ControlNet (Editing)"], key="model_select", disabled=ui_disabled)
    style = st.selectbox("Style Preset", ["Modern", "Minimalist", "Rustic", "Industrial", "Coastal", "Custom"], key="style_select", disabled=ui_disabled)
    resolution = st.select_slider("Target Resolution (Approx.)", options=[512, 768, 1024], value=768, key="res_slider", disabled=ui_disabled)

    with st.expander("Optional Metadata"):
        project_id = st.text_input("Project ID", key="proj_id_input", disabled=ui_disabled)
        location = st.text_input("Location / Address", key="loc_input", disabled=ui_disabled)
        client_notes = st.text_area("Client Notes", key="notes_area", disabled=ui_disabled)
246
-
247
# ─── 5. Main Area UI ─────────────────────────────────────────────────────────
# Prompt entry, submit gating, and the job-submission handler. Relies on
# mode / model_hint / style / resolution / project_id / location /
# client_notes defined in the sidebar block above.

st.title("Advanced AI Architectural Visualizer")

# --- Prompt Input Area ---
st.subheader("📝 Describe Your Request")
prompt_text = st.text_area(
    "Enter detailed prompt:",
    placeholder=(
        "Floor Plan Example: 'Generate a detailed 2D floor plan SVG for a 4-bedroom modern farmhouse, approx 2500 sq ft, main floor master suite, large open concept kitchen/living area, separate office, mudroom entrance.'\n"
        "Staging Example: 'Virtually stage the uploaded living room image in a minimalist Scandinavian style. Include a light grey sectional sofa, a geometric rug, light wood coffee table, and several potted plants. Ensure bright, natural lighting.'"
    ),
    height=150,
    key="prompt_input",
    disabled=ui_disabled # Disable if job running
)
st.session_state.input_prompt = prompt_text # Keep state updated

# --- Submit Button ---
# A prompt is always required; staging mode additionally needs an uploaded image.
can_submit = bool(st.session_state.input_prompt.strip())
if mode == "Virtual Staging":
    can_submit = can_submit and (st.session_state.input_staging_image_bytes is not None)

submit_button = st.button(
    "🚀 Submit Visualization Job",
    key="submit_btn",
    use_container_width=True,
    disabled=ui_disabled or not can_submit,
    help="Requires a prompt. Staging mode also requires an uploaded image."
)

# Explain why the button is disabled (only when no job is running).
if not can_submit and not ui_disabled:
    if mode == "Virtual Staging" and not st.session_state.input_staging_image_bytes:
        st.warning("Please upload an image for Virtual Staging mode.")
    elif not st.session_state.input_prompt.strip():
        st.warning("Please enter a prompt describing your request.")


# --- Job Submission Logic ---
if submit_button:
    st.session_state.job_status = "SUBMITTED"
    st.session_state.current_job_id = None # Clear old ID before new submission attempt
    st.session_state.ai_result_image = None # Clear old result display

    # Prepare Payload
    api_payload = {
        "prompt": st.session_state.input_prompt,
        "parameters": {
            "mode": mode,
            "model_preference": model_hint,
            "style": style,
            "resolution": resolution,
            "project_id": project_id,
            "location": location,
            "client_notes": client_notes,
        },
        "user_id": st.session_state.username,
    }

    # Add image data for staging mode (handle carefully in production!)
    if mode == "Virtual Staging" and st.session_state.input_staging_image_bytes:
        # Option 1: Send as base64 (simpler for demo, BAD for large files)
        api_payload["base_image_b64"] = base64.b64encode(st.session_state.input_staging_image_bytes).decode('utf-8')
        api_payload["base_image_filename"] = st.session_state.input_filename
        # Option 2 (Production): Upload to S3/GCS first, send URL/key
        # api_payload["base_image_url"] = "s3://bucket/path/to/uploaded_image.jpg"

    job_id, error = submit_job_to_backend(api_payload)

    if job_id:
        st.session_state.current_job_id = job_id
        st.session_state.job_status = "PENDING" # Move to pending after successful submit
        st.session_state.selected_history_job_id = job_id # Auto-select the new job
        # Store params with result structure immediately
        if job_id in st.session_state.job_results:
            st.session_state.job_results[job_id]['params'] = api_payload['parameters']
            st.session_state.job_results[job_id]['prompt'] = api_payload['prompt']

        st.success(f"Job submitted! ID: {job_id}. Status will update below.")
        st.rerun() # Start the polling loop
    else:
        st.error(f"Job submission failed: {error}")
        st.session_state.job_status = "FAILED"
 
 
 
331
 
332
# --- Status & Result Display Area ---
st.markdown("---")
st.subheader("📊 Job Status & Result")

current_job_id = st.session_state.current_job_id
status = st.session_state.job_status

if not current_job_id:
    st.info("Submit a job using the controls above.")
else:
    # Display status updates.
    # NOTE(review): the SUBMITTED/PENDING/PROCESSING branches call st.rerun()
    # immediately after sleeping, which stops the script — the "Status Update
    # Logic" block further down appears unreachable for those states. Verify
    # the intended polling order before relying on this flow.
    if status == "SUBMITTED":
        st.warning(f"Job Status: Submitted... Waiting for confirmation (ID: {current_job_id})")
        time.sleep(2) # Short delay before first poll
        st.rerun()
    elif status == "PENDING":
        st.info(f"Job Status: Pending in queue... (ID: {current_job_id})")
        time.sleep(5) # Poll interval
        st.rerun()
    elif status == "PROCESSING":
        progress = st.session_state.job_progress.get(current_job_id, 0)
        st.progress(min(progress, 1.0), text=f"Job Status: Processing... ({int(min(progress,1.0)*100)}%) (ID: {current_job_id})")
        time.sleep(3) # Poll interval during processing
        st.rerun()
    elif status == "COMPLETED":
        st.success(f"Job Status: Completed! (ID: {current_job_id})")
        # Result display handled below in results/history section
    elif status == "FAILED":
        error_msg = st.session_state.job_errors.get(current_job_id, "Unknown error")
        st.error(f"Job Status: Failed! (ID: {current_job_id}) - Error: {error_msg}")
    elif status == "IDLE":
        st.info("Submit a job to see status.")
    else: # Should not happen
        st.error(f"Unknown Job Status: {status}")

    # --- Status Update Logic (if job is active) ---
    if status in ["SUBMITTED", "PENDING", "PROCESSING"]:
        new_status, result_info = check_job_status_backend(current_job_id)
        st.session_state.job_status = new_status

        if new_status == "COMPLETED" and result_info:
            try:
                result_data = fetch_result_data(result_info)
                # Store result data associated with job_id
                st.session_state.job_results[current_job_id]['type'] = result_info['type']
                st.session_state.job_results[current_job_id]['data'] = result_data
                st.session_state.selected_history_job_id = current_job_id # Ensure completed job is selected
                st.rerun() # Rerun to display result
            except Exception as e:
                # Loading the result failed: mark the job FAILED and record why.
                st.error(f"Failed to load result data: {e}")
                st.session_state.job_status = "FAILED"
                st.session_state.job_errors[current_job_id] = f"Failed to load result: {e}"
                st.rerun()
        elif new_status == "FAILED":
            if not st.session_state.job_errors.get(current_job_id):
                st.session_state.job_errors[current_job_id] = "Job failed during processing (unknown reason)."
            st.rerun() # Rerun to show failed status
389
-
390
-
391
# --- Result Display / History / Annotation Area ---
# Left column: selected result + annotation canvas. Right column: per-session
# job history with an export button.
st.markdown("---")
col_results, col_history = st.columns([3, 1]) # Main area for result, smaller sidebar for history

with col_history:
    st.subheader("📚 History")
    if not st.session_state.job_results:
        st.caption("No jobs run yet in this session.")
    else:
        # Display history items (most recent first)
        sorted_job_ids = sorted(st.session_state.job_results.keys(), reverse=True)
        for job_id in sorted_job_ids:
            job_info = st.session_state.job_results[job_id]
            prompt_short = job_info.get('prompt', 'No Prompt')[:40] + "..." if len(job_info.get('prompt', '')) > 40 else job_info.get('prompt', 'No Prompt')
            mode_display = job_info.get('params',{}).get('mode', '?')
            item_label = f"[{mode_display}] {prompt_short}"

            # Use button to select history item
            if st.button(item_label, key=f"history_{job_id}", use_container_width=True,
                         help=f"View result for Job ID: {job_id}\nPrompt: {job_info.get('prompt', '')}"):
                st.session_state.selected_history_job_id = job_id
                st.rerun() # Rerun to update the main display

    if st.session_state.job_results:
        st.download_button(
            "⬇️ Export History (JSON)",
            data=json.dumps(st.session_state.job_results, indent=2, default=str), # default=str for non-serializable
            file_name="archsketch_history.json",
            mime="application/json"
        )


with col_results:
    selected_job_id = st.session_state.selected_history_job_id
    if not selected_job_id or selected_job_id not in st.session_state.job_results:
        st.info("Select a job from the history panel to view details and annotate.")
    else:
        result_info = st.session_state.job_results[selected_job_id]
        result_type = result_info.get('type')
        result_data = result_info.get('data')
        result_params = result_info.get('params', {})
        result_prompt = result_info.get('prompt', 'N/A')

        st.subheader(f"🔍 Viewing Result: {selected_job_id}")
        st.caption(f"**Mode:** {result_params.get('mode', 'N/A')} | **Style:** {result_params.get('style', 'N/A')}")
        st.markdown(f"**Prompt:** *{result_prompt}*")

        display_image = None # Image to use for canvas background

        if result_type == 'image' and isinstance(result_data, Image.Image):
            st.image(result_data, caption="Generated Visualization", use_column_width=True)
            display_image = result_data
            # Add image download button
            buf = BytesIO(); result_data.save(buf, format="PNG")
            st.download_button("⬇️ Download Image (PNG)", buf.getvalue(), f"{selected_job_id}_result.png", "image/png")

        elif result_type == 'svg' and isinstance(result_data, str):
            st.image(result_data, caption="Generated Floor Plan (SVG)", use_column_width=True)
            # SVG Download
            st.download_button("⬇️ Download SVG", result_data, f"{selected_job_id}_floorplan.svg", "image/svg+xml")
            st.warning("Annotation on SVG is not directly supported in this demo. Showing base image if available.")
            # If staging mode produced SVG somehow (unlikely), use the input image for annotation context
            if result_params.get('mode') == 'Virtual Staging' and st.session_state.input_staging_image_preview:
                display_image = st.session_state.input_staging_image_preview

        elif result_type == 'json' and isinstance(result_data, dict):
            st.json(result_data, expanded=False)
            st.caption("Generated Structured Data (JSON)")
            # JSON Download
            st.download_button("⬇️ Download JSON", json.dumps(result_data, indent=2), f"{selected_job_id}_data.json", "application/json")
            st.warning("Annotation not applicable for JSON results. Showing base image if available.")
            if result_params.get('mode') == 'Virtual Staging' and st.session_state.input_staging_image_preview:
                display_image = st.session_state.input_staging_image_preview
        elif result_data is None:
            st.warning("Result data is not available for this job (may still be processing or failed).")
        else:
            st.error("Result type or data is invalid.")


        # --- Annotation Canvas ---
        if display_image:
            st.markdown("---")
            st.subheader("✏️ Annotate / Edit")
            # (Removed the unused `initial_drawing` local; pass it to st_canvas
            # via the `initial_drawing` parameter if re-loading saved
            # annotations is ever needed.)
            canvas = st_canvas(
                fill_color="rgba(255, 0, 0, 0.2)", # Red annotation
                stroke_width=3,
                stroke_color="#FF0000",
                background_image=display_image,
                # BUG FIX: update_streamlit expects a bool; the previous value
                # was a corrupted list and would not sync drawings back.
                update_streamlit=True,
                height=500, # Adjust height as needed
                width=700, # Adjust width as needed
                drawing_mode=st.selectbox("Drawing tool:", ("freedraw", "line", "rect", "circle", "transform"), key=f"draw_mode_{selected_job_id}"),
                key=f"canvas_{selected_job_id}" # Key tied to job ID
            )

            # Save annotations when canvas updates
            if canvas.json_data is not None and canvas.json_data["objects"]:
                st.session_state.annotations[selected_job_id] = canvas.json_data["objects"]

            # Display current annotations (optional) & Export
            current_annotations = st.session_state.annotations.get(selected_job_id)
            if current_annotations:
                with st.expander("View/Export Current Annotations (JSON)"):
                    st.json(current_annotations)
                    st.download_button(
                        "⬇️ Export Annotations",
                        data=json.dumps({selected_job_id: current_annotations}, indent=2),
                        file_name=f"{selected_job_id}_annotations.json",
                        mime="application/json"
                    )
        else:
            st.caption("Annotation requires a viewable image result.")
508
 
 
 
509
 
510
# ─── Footer & Disclaimer ─────────────────────────────────────────────────────
# Always-visible disclaimer: every backend interaction in this app is simulated.
st.markdown("---")
st.warning("""
**Disclaimer:** This is an **advanced conceptual blueprint**. User authentication is **not secure**.
Backend API calls, asynchronous job handling, status polling, AI model execution (image generation, floor plan logic, staging),
and result data fetching are **simulated**. Building the real backend requires substantial AI and infrastructure expertise.
""")
 
 
 
1
  import streamlit as st
2
+ import google.generativeai as genai
3
+ import zipfile
4
+ import io
 
 
5
  import json
6
+ import os
7
+ from pathlib import Path
8
+
9
# --- Configuration ---
# Gemini model used for every analysis request.
GEMINI_MODEL_NAME = "gemini-2.5-pro-preview-03-25"
# Maximum estimated tokens to try fitting into a single prompt
# Adjust based on typical file sizes and Gemini limits/performance
# 1M tokens is roughly 4MB-5MB of text, but structure matters. Start lower.
MAX_PROMPT_TOKENS_ESTIMATE = 800000 # Be conservative initially

# Define the types of analysis available
# Maps internal analysis key -> human-readable label; keys are also used to
# select which sections appear in the requested JSON output structure.
AVAILABLE_ANALYSES = {
    "generate_docs": "Generate Missing Docstrings/Comments",
    "find_bugs": "Identify Potential Bugs & Anti-patterns",
    "check_style": "Check Style Guide Compliance (General)",
    "summarize_modules": "Summarize Complex Modules/Files",
    "suggest_refactoring": "Suggest Refactoring Opportunities"
}

# Define common code file extensions to include when scanning the ZIP.
CODE_EXTENSIONS = {'.py', '.js', '.java', '.c', '.cpp', '.h', '.cs', '.go', '.rb', '.php', '.swift', '.kt', '.ts', '.html', '.css', '.scss', '.sql'}

# --- Gemini API Setup ---
# Fail fast at import time: without a key or a working SDK the app is unusable.
try:
    if 'GEMINI_API_KEY' not in st.secrets:
        st.error("🚨 Gemini API Key not found. Add it to `.streamlit/secrets.toml`.")
        st.stop()
    genai.configure(api_key=st.secrets["GEMINI_API_KEY"])
    model = genai.GenerativeModel(GEMINI_MODEL_NAME)
    print("Gemini Model Initialized.")
except Exception as e:
    st.error(f"🚨 Error initializing Gemini SDK: {e}")
    st.stop()
39
 
40
+ # --- Helper Functions ---
41
+
42
def estimate_token_count(text):
    """Crudely approximate the token count of ``text``.

    Divides character count by 3 — deliberately more generous than the
    common 4-chars-per-token rule of thumb, so the prompt builder
    under-fills the context window rather than overflowing it.
    """
    return len(text) // 3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
45
 
46
def process_zip_file(uploaded_file):
    """Extract code files and their content from an uploaded ZIP archive.

    Args:
        uploaded_file: Streamlit UploadedFile-like object exposing getvalue().

    Returns:
        (code_files, total_chars, file_count, ignored_files) where code_files
        maps archive path -> decoded text; returns (None, 0, 0, []) if the
        ZIP cannot be opened at all. ignored_files records skipped entries
        with the reason.
    """
    code_files = {}
    total_chars = 0
    file_count = 0
    ignored_files = []

    try:
        with zipfile.ZipFile(io.BytesIO(uploaded_file.getvalue()), 'r') as zip_ref:
            for member in zip_ref.infolist():
                # Skip directories and files in hidden folders like .git, __pycache__
                if member.is_dir() or member.filename.startswith('.') or '__' in member.filename:
                    continue

                file_path = Path(member.filename)
                # Only keep recognized code extensions; record why others were skipped.
                if file_path.suffix.lower() not in CODE_EXTENSIONS:
                    ignored_files.append(f"{member.filename} (Skipped Extension: {file_path.suffix})")
                    continue

                try:
                    with zip_ref.open(member) as file:
                        # BUG FIX: read the raw bytes exactly once. The previous
                        # code called file.read() again inside the decode
                        # fallback, but the stream was already exhausted, so the
                        # latin-1 path silently stored an empty string.
                        raw_bytes = file.read()
                    try:
                        content = raw_bytes.decode('utf-8')
                    except UnicodeDecodeError:
                        try:
                            content = raw_bytes.decode('latin-1')
                        except Exception as decode_err:
                            ignored_files.append(f"{member.filename} (Decode Error: {decode_err})")
                            continue # Skip if undecodable

                    code_files[member.filename] = content
                    total_chars += len(content)
                    file_count += 1
                except Exception as read_err:
                    ignored_files.append(f"{member.filename} (Read Error: {read_err})")

    except zipfile.BadZipFile:
        st.error("🚨 Invalid or corrupted ZIP file.")
        return None, 0, 0, []
    except Exception as e:
        st.error(f"🚨 Error processing ZIP file: {e}")
        return None, 0, 0, []

    return code_files, total_chars, file_count, ignored_files
91
+
92
def construct_analysis_prompt(code_files_dict, requested_analyses):
    """Construct the Gemini prompt: concatenated code plus a JSON-format request.

    Args:
        code_files_dict: mapping of archive path -> file content
            (as produced by process_zip_file).
        requested_analyses: list of keys from AVAILABLE_ANALYSES chosen by the user.

    Returns:
        (full_prompt, included_files); (None, []) when not even one file fits
        within MAX_PROMPT_TOKENS_ESTIMATE.
    """
    prompt_content = "Analyze the following codebase provided as a collection of file paths and their content.\n\n"
    current_token_estimate = estimate_token_count(prompt_content)

    # Concatenate file content with markers, stopping at the token budget.
    included_files = []
    concatenated_code = ""
    for filename, content in code_files_dict.items():
        # BUG FIX: interpolate the actual file path into the markers. The
        # previous f-strings had no placeholder, so every file carried an
        # identical label and the model could not attribute findings to files.
        file_marker = f"--- START FILE: {filename} ---\n"
        file_content = f"{content}\n"
        file_end_marker = f"--- END FILE: {filename} ---\n\n"
        segment = file_marker + file_content + file_end_marker

        segment_token_estimate = estimate_token_count(segment)

        if current_token_estimate + segment_token_estimate <= MAX_PROMPT_TOKENS_ESTIMATE:
            concatenated_code += segment
            current_token_estimate += segment_token_estimate
            included_files.append(filename)
        else:
            st.warning(f"⚠️ Codebase likely exceeds context window estimate ({MAX_PROMPT_TOKENS_ESTIMATE} tokens). Analysis will be performed only on the first {len(included_files)} files ({current_token_estimate} tokens). Consider analyzing smaller parts separately.")
            break # Stop adding files if limit reached

    if not included_files:
        st.error("🚨 No code files could be included within the estimated token limit.")
        return None, []

    prompt_content += concatenated_code

    # Define the requested JSON structure based on selections
    json_structure_description = "{\n"
    if "generate_docs" in requested_analyses:
        json_structure_description += '  "documentation_suggestions": [{"file": "path/to/file", "line": number, "suggestion": "Suggested docstring/comment"}],\n'
    if "find_bugs" in requested_analyses:
        json_structure_description += '  "potential_bugs": [{"file": "path/to/file", "line": number, "description": "Description of potential bug/anti-pattern", "severity": "High/Medium/Low"}],\n'
    if "check_style" in requested_analyses:
        json_structure_description += '  "style_issues": [{"file": "path/to/file", "line": number, "description": "Description of style deviation"}],\n'
    if "summarize_modules" in requested_analyses:
        json_structure_description += '  "module_summaries": [{"file": "path/to/file", "summary": "One-paragraph summary of the file purpose/functionality"}],\n'
    if "suggest_refactoring" in requested_analyses:
        json_structure_description += '  "refactoring_suggestions": [{"file": "path/to/file", "line": number, "area": "e.g., function name, class name", "suggestion": "Description of refactoring suggestion"}],\n'

    # Remove trailing comma and add closing brace
    if json_structure_description.endswith(',\n'):
        json_structure_description = json_structure_description[:-2] + "\n}"
    else:
        json_structure_description += "}" # Handle case where no sections selected (though UI should prevent)


    prompt_footer = f"""
**Analysis Task:**
Perform the analyses corresponding to the keys present in the JSON structure below, based *only* on the provided code files ({', '.join(included_files)}).

**Output Format:**
Respond ONLY with a single, valid JSON object adhering strictly to the following structure. If no issues/suggestions are found for a category, provide an empty list `[]`. Do not include explanations outside the JSON structure.

{json_structure_description}

**JSON Output Only:**
"""
    full_prompt = prompt_content + prompt_footer
    # print(f"--- PROMPT (First 500 chars): ---\n{full_prompt[:500]}\n--------------------------") # Debug: Print start of prompt
    return full_prompt, included_files
156
+
157
+
158
def call_gemini_api(prompt):
    """Send *prompt* to the Gemini model and parse the JSON reply.

    Parameters
    ----------
    prompt : str | None
        Fully constructed analysis prompt; falsy values short-circuit
        with an error message and no API call.

    Returns
    -------
    tuple[dict | None, str | None]
        ``(insights, error_message)``. On success ``insights`` is the
        parsed JSON dict and the error is ``None``. If the reply is not
        valid JSON, a ``{"raw_response": ...}`` fallback dict is returned
        together with an explanatory message. On failure ``insights`` is
        ``None`` and the message describes the error.
    """
    if not prompt:
        return None, "Prompt generation failed."

    try:
        st.write(f"📡 Sending request to {GEMINI_MODEL_NAME}...")  # progress update
        response = model.generate_content(
            prompt,
            generation_config=genai.types.GenerationConfig(
                # Low temperature: code analysis should be deterministic.
                temperature=0.2
            ),
            # Safety settings may need loosening: code snippets and the
            # analysis of them can trip content filters.
            safety_settings=[
                {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
                {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
                {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
                {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
            ],
        )
        st.write("✅ Response received from AI.")

        # Attempt to extract the JSON object embedded in the response text.
        try:
            json_start = response.text.find('{')
            json_end = response.text.rfind('}')
            # BUGFIX: str.rfind() returns -1 on failure, so the previous
            # check of `rfind(...) + 1 != -1` was always true and the
            # "no JSON found" fallback below was unreachable. Compare the
            # raw rfind() result and also require the closing brace to
            # come after the opening one before slicing.
            if json_start != -1 and json_end != -1 and json_end > json_start:
                json_response_text = response.text[json_start:json_end + 1]
                insights = json.loads(json_response_text)
                return insights, None
            # Fallback if no {...} block found - maybe a plain-text reply.
            st.warning("⚠️ Could not find JSON structure in response. Displaying raw text.")
            return {"raw_response": response.text}, "AI response was not valid JSON, showing raw text."

        except json.JSONDecodeError as json_err:
            st.error(f"🚨 Error parsing JSON response from AI: {json_err}")
            st.error("Raw AI Response:")
            st.code(response.text, language='text')
            return None, f"AI response was not valid JSON: {json_err}"
        except Exception as e:
            st.error(f"🚨 Unexpected issue processing AI response: {e}")
            try:
                st.code(f"Response object: {response}", language='text')
            except Exception:
                # Best-effort debug dump only; never mask the real error.
                pass
            return None, f"Unexpected response structure: {e}"

    except Exception as e:
        st.error(f"🚨 An error occurred during API call: {e}")
        # Map a couple of well-known failure modes to friendlier messages.
        error_msg = f"API call failed: {e}"
        if "block_reason: SAFETY" in str(e):
            error_msg = "Content blocked due to safety settings. Code or AI response may have triggered filters."
        elif "429" in str(e):  # crude check for quota / rate-limit errors
            error_msg = "API Quota Exceeded or Rate Limit hit. Check your Google AI Studio dashboard."
        return None, error_msg
220
+
221
+
222
def _render_doc_suggestion(item):
    """Render one documentation suggestion: location plus suggested text."""
    st.markdown(f"- **File:** `{item.get('file', 'N/A')}` (Line: {item.get('line', 'N/A')})")
    st.code(item.get('suggestion', ''), language='text')


def _render_bug(item):
    """Render one potential bug: location, severity, and description."""
    st.markdown(f"- **File:** `{item.get('file', 'N/A')}` (Line: {item.get('line', 'N/A')}) - **Severity:** {item.get('severity', 'Unknown')}")
    st.markdown(f"  Description: {item.get('description', 'N/A')}")


def _render_style_issue(item):
    """Render one style issue: location and description."""
    st.markdown(f"- **File:** `{item.get('file', 'N/A')}` (Line: {item.get('line', 'N/A')})")
    st.markdown(f"  Issue: {item.get('description', 'N/A')}")


def _render_module_summary(item):
    """Render one module summary as a blockquote under its file name."""
    st.markdown(f"**File:** `{item.get('file', 'N/A')}`")
    st.markdown(f"> {item.get('summary', 'N/A')}")


def _render_refactoring(item):
    """Render one refactoring suggestion: location, area, and suggestion."""
    st.markdown(f"- **File:** `{item.get('file', 'N/A')}` (Line: {item.get('line', 'N/A')}) - **Area:** {item.get('area', 'N/A')}")
    st.markdown(f"  Suggestion: {item.get('suggestion', 'N/A')}")


# One row per analysis type: (analysis key, JSON result key,
# per-item renderer, message shown when the list is empty).
_REPORT_SECTIONS = [
    ("generate_docs", "documentation_suggestions", _render_doc_suggestion,
     "_No documentation suggestions provided._"),
    ("find_bugs", "potential_bugs", _render_bug,
     "_No potential bugs identified._"),
    ("check_style", "style_issues", _render_style_issue,
     "_No style issues identified._"),
    ("summarize_modules", "module_summaries", _render_module_summary,
     "_No module summaries provided._"),
    ("suggest_refactoring", "refactoring_suggestions", _render_refactoring,
     "_No refactoring suggestions provided._"),
]


def display_results(results_json, requested_analyses):
    """Render the analysis report in Streamlit.

    Parameters
    ----------
    results_json : dict
        Parsed JSON returned by the AI, or a ``{"raw_response": ...}``
        fallback when JSON parsing failed upstream.
    requested_analyses : list[str]
        Keys of AVAILABLE_ANALYSES the user selected; only those
        sections are rendered, in the fixed section order.
    """
    st.header("📊 Analysis Report")

    if not isinstance(results_json, dict):
        st.error("Invalid results format received.")
        st.json(results_json)  # show what was received for debugging
        return

    # Fallback path: the AI reply could not be parsed as JSON upstream.
    if "raw_response" in results_json:
        st.subheader("Raw AI Response (JSON Parsing Failed)")
        st.code(results_json["raw_response"], language='text')
        return

    # Each requested section follows the same shape: header, item list
    # (or an "empty" note), then a divider.
    for key, json_key, render_item, empty_msg in _REPORT_SECTIONS:
        if key not in requested_analyses:
            continue
        st.subheader(AVAILABLE_ANALYSES[key])
        items = results_json.get(json_key, [])
        if items:
            for item in items:
                render_item(item)
        else:
            st.markdown(empty_msg)
        st.divider()

    # Option to download the raw JSON results.
    st.download_button(
        label="Download Full Report (JSON)",
        data=json.dumps(results_json, indent=4),
        file_name="code_audit_report.json",
        mime="application/json"
    )
300
+
301
+
302
# --- Streamlit App Main Interface ---
st.set_page_config(page_title="Codebase Audit Assistant", layout="wide")

st.title("🤖 Codebase Audit & Documentation Assistant")
st.markdown(f"Upload your codebase (`.zip`) for analysis using **{GEMINI_MODEL_NAME}**.")
st.warning("⚠️ **Privacy Notice:** Your code content will be sent to the Google Gemini API for analysis. Do not upload highly sensitive or proprietary code if you are not comfortable with this.")

# Sidebar: analysis selection and usage instructions.
st.sidebar.header("🛠️ Analysis Options")
selected_analyses = [
    key
    for key, name in AVAILABLE_ANALYSES.items()
    if st.sidebar.checkbox(name, value=True, key=f"cb_{key}")
]

st.sidebar.header("📄 How To Use")
st.sidebar.info(
    "1. Ensure `GEMINI_API_KEY` is in `.streamlit/secrets.toml`.\n"
    "2. Select desired analyses in the sidebar.\n"
    "3. Create a **ZIP archive** of your codebase.\n"
    "4. Upload the `.zip` file below.\n"
    "5. Click 'Analyze Codebase'.\n"
    "6. Review the report generated."
)
st.sidebar.info(f"**Note:** Only files with common code extensions ({', '.join(CODE_EXTENSIONS)}) within the ZIP will be processed. Analysis might be limited by token estimates (~{MAX_PROMPT_TOKENS_ESTIMATE} tokens).")

# Main content area.
uploaded_file = st.file_uploader("📁 Upload Codebase ZIP File", type=['zip'])

if uploaded_file:
    st.success(f"✅ File '{uploaded_file.name}' uploaded successfully.")

    # Inspect the zip immediately so the user gets feedback before analyzing.
    with st.spinner("Inspecting ZIP file..."):
        code_files, total_chars, file_count, ignored_files = process_zip_file(uploaded_file)

    if code_files is not None:
        st.info(f"Found **{file_count}** relevant code files ({total_chars:,} characters). Estimated tokens: ~{estimate_token_count(total_chars):,}")
        if ignored_files:
            with st.expander(f"View {len(ignored_files)} Skipped/Ignored Files"):
                st.json(ignored_files)

        # The button is disabled whenever no analyses are selected or no
        # code files were found, so a click implies both preconditions
        # hold. (The previous post-click warnings for those states were
        # unreachable — a disabled button never reports a click — and
        # have been removed.)
        analyze_button = st.button(
            "Analyze Codebase",
            type="primary",
            disabled=(not selected_analyses or file_count == 0),
        )

        if analyze_button and selected_analyses and file_count > 0:
            st.divider()
            with st.spinner(f"🚀 Preparing prompt and contacting {GEMINI_MODEL_NAME}... This may take several minutes for large codebases."):
                # 1. Construct the prompt (may drop files to fit the token budget).
                analysis_prompt, included_files_in_prompt = construct_analysis_prompt(code_files, selected_analyses)

                if analysis_prompt and included_files_in_prompt:
                    st.write(f"Analyzing {len(included_files_in_prompt)} files...")
                    # 2. Call the API.
                    results_json, error_message = call_gemini_api(analysis_prompt)

                    # 3. Display results (or the failure reason).
                    if error_message:
                        st.error(f"Analysis Failed: {error_message}")
                    elif results_json:
                        display_results(results_json, selected_analyses)
                    else:
                        st.error("Analysis did not return results or an unknown error occurred.")
                elif not included_files_in_prompt:
                    st.error("Could not proceed: No files were included in the prompt (likely due to token limits or processing errors).")
    # else: process_zip_file already reported its error; nothing more to do.
else:
    st.info("Upload a ZIP file containing your source code to begin.")

st.divider()
st.markdown("_Assistant powered by Google Gemini._")