Update app.py
app.py (CHANGED)
@@ -13,54 +13,45 @@ from pygments import highlight
 from pygments.lexers import PythonLexer
 from pygments.formatters import HtmlFormatter
 import base64
-from transformers import pipeline
 import torch
 import re
 import shutil
 import time
-from datetime import datetime
 import streamlit.components.v1 as components
 import uuid
-import platform
 import pandas as pd
 import plotly.express as px
-import markdown
 import zipfile
-import contextlib
-import threading
 import traceback
-from io import StringIO, BytesIO
 
 # Set up enhanced logging
 logging.basicConfig(
     level=logging.INFO,
     format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
-    handlers=[
-        logging.StreamHandler()
-    ]
 )
 logger = logging.getLogger(__name__)
 
-# Model configuration mapping
 MODEL_CONFIGS = {
-    "DeepSeek-V3-0324": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "DeepSeek"},
-    "DeepSeek-R1": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "DeepSeek"},
-    "Llama-4-Scout-17B-16E-Instruct": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "Meta"},
-    "Llama-4-Maverick-17B-128E-Instruct-FP8": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "Meta"},
-    "gpt-4o-mini": {"max_tokens": 15000, "param_name": "max_tokens", "api_version": None, "category": "OpenAI"},
-    "gpt-4o": {"max_tokens": 16000, "param_name": "max_tokens", "api_version": None, "category": "OpenAI"},
-    "gpt-4.1": {"max_tokens": 32768, "param_name": "max_tokens", "api_version": None, "category": "OpenAI"},
-    "gpt-4.1-mini": {"max_tokens": 32768, "param_name": "max_tokens", "api_version": None, "category": "OpenAI"},
-    "gpt-4.1-nano": {"max_tokens": 32768, "param_name": "max_tokens", "api_version": None, "category": "OpenAI"},
-    "
-    "o1": {"max_completion_tokens": 100000, "param_name": "max_completion_tokens", "api_version": "2024-12-01-preview", "category": "OpenAI"},
-    "o1-mini": {"max_completion_tokens": 66000, "param_name": "max_completion_tokens", "api_version": "2024-12-01-preview", "category": "OpenAI"},
-    "o1-preview": {"max_tokens": 33000, "param_name": "max_tokens", "api_version": None, "category": "OpenAI"},
-    "Phi-4-multimodal-instruct": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "Microsoft"},
-    "Mistral-large-2407": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "Mistral"},
-    "Codestral-2501": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "Mistral"},
-
-    "default": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "Other", "warning": None}
 }
 
 # Try to import Streamlit Ace
@@ -69,28 +60,21 @@ try:
     ACE_EDITOR_AVAILABLE = True
 except ImportError:
     ACE_EDITOR_AVAILABLE = False
-    logger.warning("streamlit-ace not available, falling back to
 
 def prepare_api_params(messages, model_name):
-    """Create appropriate API parameters based on model configuration"""
     config = MODEL_CONFIGS.get(model_name, MODEL_CONFIGS["default"])
-
-
-
-    }
-    token_param = config["param_name"]
-    api_params[token_param] = config.get(token_param)
-    return api_params, config
 
 def get_secret(env_var):
-    """Retrieve a secret from environment variables"""
     val = os.environ.get(env_var)
     if not val:
         logger.warning(f"Secret '{env_var}' not found")
     return val
 
 def check_password():
-    """Verify password entered against secret"""
     correct = get_secret("password")
     if not correct:
         st.error("Admin password not configured")
@@ -111,23 +95,12 @@ def check_password():
 
 def ensure_packages():
     required = {
-        'manim': '0.17.3',
-        '
-        '
-        '
-        '
-        '
-        'streamlit-ace': '0.1.1',
-        'pydub': '0.25.1',
-        'plotly': '5.14.0',
-        'pandas': '2.0.0',
-        'python-pptx': '0.6.21',
-        'markdown': '3.4.3',
-        'fpdf': '1.7.2',
-        'matplotlib': '3.5.0',
-        'seaborn': '0.11.2',
-        'scipy': '1.7.3',
-        'huggingface_hub': '0.16.0',
     }
     missing = {}
     for pkg, ver in required.items():
@@ -158,8 +131,7 @@ def install_custom_packages(pkgs):
         return True, "No valid packages"
     sidebar_txt = st.sidebar.empty()
     bar = st.sidebar.progress(0)
-    results = []
-    success = True
    for i, p in enumerate(parts):
        bar.progress(i / len(parts))
        sidebar_txt.text(f"Installing {p}...")
@@ -193,97 +165,88 @@ def init_ai_models_direct():
|
|
193 |
logger.error(str(e))
|
194 |
return None
|
195 |
|
196 |
-
def generate_manim_preview(
|
197 |
-
|
198 |
-
if "Circle" in
|
199 |
-
if "Square" in
|
200 |
-
if "MathTex" in
|
201 |
-
if "Text" in
|
202 |
-
if "Axes" in
|
203 |
-
icons =
|
204 |
-
|
205 |
-
html = f"""
|
206 |
<div style="background:#000;color:#fff;padding:1rem;border-radius:10px;text-align:center;">
|
207 |
<h3>Animation Preview</h3>
|
208 |
-
<div>{
|
209 |
-
<p>
|
210 |
-
<p style="opacity:0.7;">Full rendering required for accurate preview</p>
|
211 |
</div>
|
212 |
"""
|
213 |
-
return html
|
214 |
|
215 |
-
def extract_scene_class_name(
|
216 |
-
|
217 |
-
return
|
218 |
|
219 |
-
def mp4_to_gif(
|
220 |
cmd = [
|
221 |
-
"ffmpeg","-i",
|
222 |
-
"-vf",f"fps={fps},scale=640:-1:flags=lanczos,split[s0][s1];[s0]palettegen[p];[s1][p]paletteuse",
|
223 |
-
"-loop","0",
|
224 |
]
|
225 |
-
|
226 |
-
return
|
227 |
|
228 |
-
def generate_manim_video(code,
|
229 |
temp_dir = tempfile.mkdtemp(prefix="manim_")
|
230 |
-
|
231 |
-
|
232 |
-
with open(
|
233 |
f.write(code)
|
234 |
-
|
235 |
-
qf =
|
236 |
-
|
237 |
-
cmd = ["manim", file_py, scene_class, qf, fmt_arg]
|
238 |
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
|
239 |
-
output = []
|
240 |
-
out_path = None
|
241 |
-
mp4_path = None
|
242 |
-
bar = st.empty()
|
243 |
log = st.empty()
|
244 |
-
|
245 |
-
|
246 |
-
|
247 |
-
|
248 |
-
|
249 |
-
|
250 |
-
|
251 |
-
|
252 |
-
|
253 |
-
if m:
|
254 |
-
out_path = m.group(2)
|
255 |
-
if out_path.endswith(".mp4"):
|
256 |
-
mp4_path = out_path
|
257 |
proc.wait()
|
258 |
time.sleep(1)
|
259 |
-
|
260 |
-
|
261 |
-
gif = os.path.join(temp_dir, scene_class+"_conv.gif")
|
262 |
conv = mp4_to_gif(mp4_path, gif)
|
263 |
if conv and os.path.exists(conv):
|
264 |
out_path = conv
|
|
|
265 |
if out_path and os.path.exists(out_path):
|
266 |
-
with open(out_path,"rb") as f:
|
|
|
267 |
shutil.rmtree(temp_dir)
|
268 |
if data:
|
269 |
-
|
|
|
270 |
else:
|
271 |
-
return None, "❌ No output generated.
|
272 |
|
273 |
def detect_input_calls(code):
|
274 |
-
calls
|
275 |
-
for i,
|
276 |
if "input(" in line and not line.strip().startswith("#"):
|
277 |
-
m
|
278 |
-
prompt
|
279 |
calls.append({"line":i,"prompt":prompt})
|
280 |
return calls
|
281 |
|
282 |
def run_python_script(code, inputs=None, timeout=60):
|
283 |
-
|
284 |
-
mod
|
285 |
if inputs:
|
286 |
-
mod
|
287 |
__INPUTS={inputs}
|
288 |
__IDX=0
|
289 |
def input(prompt=''):
|
@@ -296,152 +259,172 @@ def input(prompt=''):
|
|
296 |
print()
|
297 |
return ''
|
298 |
"""
|
299 |
-
|
300 |
with tempfile.TemporaryDirectory() as td:
|
301 |
-
|
302 |
-
with open(
|
303 |
-
outf = os.path.join(td,"out.txt")
|
304 |
-
errf = os.path.join(td,"err.txt")
|
305 |
start=time.time()
|
306 |
try:
|
307 |
with open(outf,"w") as o, open(errf,"w") as e:
|
308 |
-
proc=subprocess.Popen([sys.executable,
|
309 |
proc.wait(timeout=timeout)
|
310 |
except subprocess.TimeoutExpired:
|
311 |
proc.kill()
|
312 |
-
|
313 |
-
|
314 |
-
|
315 |
-
|
316 |
-
|
317 |
-
return
|
318 |
|
319 |
-
def display_python_script_results(
|
320 |
-
st.info(f"Completed in {
|
321 |
-
if
|
322 |
-
st.error(f"Exception: {
|
323 |
-
if
|
324 |
st.error("Errors:")
|
325 |
-
st.code(
|
326 |
-
if
|
327 |
st.markdown("### Plots")
|
328 |
-
cols
|
329 |
-
for i,p in enumerate(
|
330 |
cols[i%len(cols)].image(p,use_column_width=True)
|
331 |
-
if
|
332 |
st.markdown("### DataFrames")
|
333 |
-
for df in
|
334 |
-
with st.expander(f"{df['name']}
|
335 |
st.markdown(df["preview_html"], unsafe_allow_html=True)
|
336 |
-
if
|
337 |
st.markdown("### Output")
|
338 |
-
st.code(
|
339 |
|
340 |
-
# Main app
|
341 |
def main():
|
342 |
if 'init' not in st.session_state:
|
343 |
st.session_state.update({
|
344 |
'init':True, 'video_data':None, 'status':None, 'ai_models':None,
|
345 |
'generated_code':"", 'code':"", 'temp_code':"", 'editor_key':str(uuid.uuid4()),
|
346 |
-
'packages_checked':False, '
|
347 |
-
'
|
348 |
-
'
|
349 |
-
'
|
350 |
-
'password_entered':False, 'custom_model':"gpt-4o", 'first_load_complete':False,
|
351 |
-
'pending_tab_switch':None
|
352 |
})
|
353 |
st.set_page_config(page_title="Manim Animation Studio", page_icon="🎬", layout="wide")
|
|
|
354 |
if not st.session_state.packages_checked:
|
355 |
if ensure_packages():
|
356 |
st.session_state.packages_checked=True
|
357 |
else:
|
358 |
-
st.error("
|
359 |
return
|
360 |
|
361 |
-
tab_names=[
|
|
|
|
|
|
|
362 |
tabs = st.tabs(tab_names)
|
363 |
|
364 |
-
# Editor
|
365 |
with tabs[0]:
|
366 |
col1,col2 = st.columns([3,2])
|
367 |
with col1:
|
368 |
st.markdown("### 📝 Animation Editor")
|
369 |
-
mode = st.radio("Code Input",["Type Code","Upload File"], key="editor_mode")
|
370 |
if mode=="Upload File":
|
371 |
-
up=st.file_uploader("Upload .py
|
372 |
if up:
|
373 |
txt=up.getvalue().decode()
|
374 |
if txt.strip():
|
375 |
st.session_state.code=txt
|
376 |
st.session_state.temp_code=txt
|
377 |
if ACE_EDITOR_AVAILABLE:
|
378 |
-
st.session_state.temp_code = st_ace(
|
|
|
|
|
|
|
|
|
379 |
else:
|
380 |
-
st.session_state.temp_code = st.text_area(
|
|
|
|
|
|
|
381 |
if st.session_state.temp_code!=st.session_state.code:
|
382 |
st.session_state.code=st.session_state.temp_code
|
383 |
if st.button("🚀 Generate Animation"):
|
384 |
if not st.session_state.code:
|
385 |
st.error("Enter code first")
|
386 |
else:
|
387 |
-
|
388 |
st.session_state.code,
|
389 |
st.session_state.settings["format_type"],
|
390 |
st.session_state.settings["quality"],
|
391 |
{"Slow":0.5,"Normal":1.0,"Fast":2.0,"Very Fast":3.0}[st.session_state.settings["animation_speed"]],
|
392 |
st.session_state.audio_path
|
393 |
)
|
394 |
-
st.session_state.video_data=
|
395 |
-
st.session_state.status=
|
396 |
with col2:
|
397 |
if st.session_state.code:
|
398 |
-
|
399 |
-
|
400 |
-
|
|
|
401 |
if st.session_state.video_data:
|
402 |
fmt=st.session_state.settings["format_type"]
|
403 |
if fmt=="png_sequence":
|
404 |
-
st.download_button(
|
|
|
|
|
|
|
|
|
405 |
elif fmt=="svg":
|
406 |
try:
|
407 |
svg=st.session_state.video_data.decode('utf-8')
|
408 |
components.html(svg, height=400)
|
409 |
except:
|
410 |
st.error("Cannot display SVG")
|
411 |
-
st.download_button(
|
|
|
|
|
|
|
412 |
else:
|
413 |
st.video(st.session_state.video_data, format=fmt)
|
414 |
-
st.download_button(
|
|
|
|
|
|
|
415 |
if st.session_state.status:
|
416 |
-
if "
|
417 |
st.error(st.session_state.status)
|
418 |
else:
|
419 |
st.success(st.session_state.status)
|
420 |
|
421 |
-
# AI Assistant
|
422 |
with tabs[1]:
|
423 |
st.markdown("### 🤖 AI Animation Assistant")
|
424 |
if check_password():
|
425 |
client_data = init_ai_models_direct()
|
426 |
if client_data:
|
427 |
if st.button("Test API Connection"):
|
428 |
-
|
429 |
-
|
430 |
-
|
431 |
-
|
432 |
-
|
433 |
-
|
434 |
-
|
435 |
-
|
436 |
-
st.error("❌ No response")
|
437 |
if st.session_state.ai_models:
|
438 |
st.info(f"Using model {st.session_state.ai_models['model_name']}")
|
439 |
prompt = st.text_area("Describe animation or paste partial code", height=150)
|
440 |
if st.button("Generate Animation Code"):
|
441 |
if prompt.strip():
|
442 |
from azure.ai.inference.models import UserMessage
|
443 |
-
|
444 |
-
|
|
|
|
|
|
|
445 |
if resp.choices:
|
446 |
code = resp.choices[0].message.content
|
447 |
if "```python" in code:
|
@@ -459,102 +442,47 @@ def main():
|
|
459 |
st.session_state.pending_tab_switch=0
|
460 |
st.rerun()
|
461 |
else:
|
462 |
-
st.info("Enter password to access")
|
463 |
|
464 |
-
#
|
465 |
with tabs[2]:
|
466 |
-
st.markdown("### 📚 LaTeX Formula Builder")
|
467 |
-
col1,col2=st.columns([3,2])
|
468 |
-
with col1:
|
469 |
-
latex_input = st.text_area("LaTeX Formula", value=st.session_state.latex_formula, height=100, placeholder=r"e^{i\pi}+1=0")
|
470 |
-
st.session_state.latex_formula=latex_input
|
471 |
-
if latex_input:
|
472 |
-
manim_latex_code = f"""
|
473 |
-
# LaTeX formula
|
474 |
-
formula = MathTex(r"{latex_input}")
|
475 |
-
self.play(Write(formula))
|
476 |
-
self.wait(2)
|
477 |
-
"""
|
478 |
-
st.code(manim_latex_code, language="python")
|
479 |
-
if st.button("Insert into Editor"):
|
480 |
-
if st.session_state.code:
|
481 |
-
if "def construct(self):" in st.session_state.code:
|
482 |
-
lines=st.session_state.code.split("\n")
|
483 |
-
idx=-1
|
484 |
-
for i,l in enumerate(lines):
|
485 |
-
if "def construct(self):" in l:
|
486 |
-
idx=i; break
|
487 |
-
if idx>=0:
|
488 |
-
for j in range(idx+1,len(lines)):
|
489 |
-
if lines[j].strip() and not lines[j].strip().startswith("#"):
|
490 |
-
indent=re.match(r"(\s*)",lines[j]).group(1)
|
491 |
-
new_block="\n".join(indent+ln for ln in manim_latex_code.strip().split("\n"))
|
492 |
-
lines.insert(j,new_block)
|
493 |
-
break
|
494 |
-
else:
|
495 |
-
lines.append(" "+ "\n ".join(manim_latex_code.strip().split("\n")))
|
496 |
-
st.session_state.code="\n".join(lines)
|
497 |
-
st.session_state.temp_code=st.session_state.code
|
498 |
-
st.success("Inserted LaTeX into editor")
|
499 |
-
st.session_state.pending_tab_switch=0
|
500 |
-
st.rerun()
|
501 |
-
else:
|
502 |
-
st.warning("No construct() found")
|
503 |
-
else:
|
504 |
-
basic_scene = f"""from manim import *
|
505 |
-
|
506 |
-
class LatexScene(Scene):
|
507 |
-
def construct(self):
|
508 |
-
# LaTeX formula
|
509 |
-
formula = MathTex(r"{latex_input}")
|
510 |
-
self.play(Write(formula))
|
511 |
-
self.wait(2)
|
512 |
-
"""
|
513 |
-
st.session_state.code=basic_scene
|
514 |
-
st.session_state.temp_code=basic_scene
|
515 |
-
st.success("Created new scene with LaTeX")
|
516 |
-
st.session_state.pending_tab_switch=0
|
517 |
-
st.rerun()
|
518 |
-
with col2:
|
519 |
-
components.html(render_latex_preview(latex_input), height=300)
|
520 |
-
|
521 |
-
# Assets Tab
|
522 |
-
with tabs[3]:
|
523 |
st.markdown("### 🎨 Asset Management")
|
524 |
c1,c2 = st.columns(2)
|
525 |
with c1:
|
526 |
-
imgs=st.file_uploader(
|
|
|
|
|
|
|
527 |
if imgs:
|
528 |
-
|
529 |
-
os.makedirs(
|
530 |
for up in imgs:
|
531 |
ext=up.name.split(".")[-1]
|
532 |
fname=f"img_{int(time.time())}_{uuid.uuid4().hex[:6]}.{ext}"
|
533 |
-
path=os.path.join(
|
534 |
with open(path,"wb") as f: f.write(up.getvalue())
|
535 |
st.session_state.image_paths.append({"name":up.name,"path":path})
|
536 |
-
|
537 |
-
|
538 |
-
|
539 |
-
|
540 |
-
|
541 |
-
code_snippet=f"""
|
542 |
# Image asset
|
543 |
image = ImageMobject(r"{info['path']}")
|
544 |
image.scale(2)
|
545 |
self.play(FadeIn(image))
|
546 |
self.wait(1)
|
547 |
"""
|
548 |
-
|
549 |
-
|
550 |
-
|
551 |
-
|
552 |
-
|
553 |
with c2:
|
554 |
-
aud=st.file_uploader("Upload Audio", type=["mp3","wav","ogg"])
|
555 |
if aud:
|
556 |
-
adir=os.path.join(os.getcwd(),"manim_assets","audio")
|
557 |
-
os.makedirs(adir,exist_ok=True)
|
558 |
ext=aud.name.split(".")[-1]
|
559 |
aname=f"audio_{int(time.time())}.{ext}"
|
560 |
ap=os.path.join(adir,aname)
|
@@ -563,60 +491,56 @@ self.wait(1)
|
|
563 |
st.audio(aud)
|
564 |
st.success("Audio uploaded")
|
565 |
|
566 |
-
# Timeline
|
567 |
-
with tabs[
|
568 |
st.markdown("### 🎞️ Timeline Editor")
|
569 |
-
st.info("
|
570 |
|
571 |
-
# Educational Export
|
572 |
-
with tabs[
|
573 |
st.markdown("### 🎓 Educational Export")
|
574 |
if not st.session_state.video_data:
|
575 |
-
st.warning("Generate
|
576 |
else:
|
577 |
-
title
|
578 |
-
expl
|
579 |
-
fmt
|
580 |
if st.button("Export"):
|
581 |
-
|
582 |
-
st.success(f"{fmt} export not yet implemented.")
|
583 |
|
584 |
-
# Python Runner
|
585 |
-
with tabs[
|
586 |
st.markdown("### 🐍 Python Script Runner")
|
587 |
-
examples
|
588 |
-
"Select...":"",
|
589 |
-
"Sine Plot":"""import matplotlib.pyplot as plt
|
590 |
import numpy as np
|
591 |
x=np.linspace(0,10,100)
|
592 |
y=np.sin(x)
|
593 |
plt.plot(x,y)
|
594 |
-
print("Done
|
595 |
-
|
596 |
-
sel=st.selectbox("Example", list(examples.keys()))
|
597 |
code = examples.get(sel, st.session_state.python_script)
|
598 |
if ACE_EDITOR_AVAILABLE:
|
599 |
-
code
|
600 |
else:
|
601 |
-
code
|
602 |
st.session_state.python_script=code
|
603 |
-
inputs
|
604 |
vals=[]
|
605 |
if inputs:
|
606 |
st.info(f"{len(inputs)} input() calls detected")
|
607 |
for i,c in enumerate(inputs):
|
608 |
-
vals.append(st.text_input(f"{c['prompt']} (line {c['line']})", key=f"
|
609 |
-
timeout
|
610 |
if st.button("▶️ Run"):
|
611 |
res=run_python_script(code, inputs=vals, timeout=timeout)
|
612 |
st.session_state.python_result=res
|
613 |
if st.session_state.python_result:
|
614 |
display_python_script_results(st.session_state.python_result)
|
615 |
|
616 |
-
# Handle tab switch
|
617 |
if st.session_state.pending_tab_switch is not None:
|
618 |
st.session_state.active_tab = st.session_state.pending_tab_switch
|
619 |
-
st.session_state.pending_tab_switch=None
|
620 |
|
621 |
if __name__ == "__main__":
|
622 |
main()
|
|
|
13 |
from pygments.lexers import PythonLexer
|
14 |
from pygments.formatters import HtmlFormatter
|
15 |
import base64
|
|
|
16 |
import torch
|
17 |
import re
|
18 |
import shutil
|
19 |
import time
|
20 |
+
from datetime import datetime
|
21 |
import streamlit.components.v1 as components
|
22 |
import uuid
|
|
|
23 |
import pandas as pd
|
24 |
import plotly.express as px
|
|
|
25 |
import zipfile
|
|
|
|
|
26 |
import traceback
|
|
|
27 |
|
28 |
# Set up enhanced logging
|
29 |
logging.basicConfig(
|
30 |
level=logging.INFO,
|
31 |
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
|
32 |
+
handlers=[logging.StreamHandler()]
|
|
|
|
|
33 |
)
|
34 |
logger = logging.getLogger(__name__)
|
35 |
|
36 |
+
# Model configuration mapping
|
37 |
MODEL_CONFIGS = {
|
38 |
+
"DeepSeek-V3-0324": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "DeepSeek"},
|
39 |
+
"DeepSeek-R1": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "DeepSeek"},
|
40 |
+
"Llama-4-Scout-17B-16E-Instruct": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "Meta"},
|
41 |
+
"Llama-4-Maverick-17B-128E-Instruct-FP8": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "Meta"},
|
42 |
+
"gpt-4o-mini": {"max_tokens": 15000, "param_name": "max_tokens", "api_version": None, "category": "OpenAI"},
|
43 |
+
"gpt-4o": {"max_tokens": 16000, "param_name": "max_tokens", "api_version": None, "category": "OpenAI"},
|
44 |
+
"gpt-4.1": {"max_tokens": 32768, "param_name": "max_tokens", "api_version": None, "category": "OpenAI"},
|
45 |
+
"gpt-4.1-mini": {"max_tokens": 32768, "param_name": "max_tokens", "api_version": None, "category": "OpenAI"},
|
46 |
+
"gpt-4.1-nano": {"max_tokens": 32768, "param_name": "max_tokens", "api_version": None, "category": "OpenAI"},
|
47 |
+
"o3-mini": {"max_completion_tokens": 100000, "param_name": "max_completion_tokens", "api_version": "2024-12-01-preview", "category": "OpenAI"},
|
48 |
+
"o1": {"max_completion_tokens": 100000, "param_name": "max_completion_tokens", "api_version": "2024-12-01-preview", "category": "OpenAI"},
|
49 |
+
"o1-mini": {"max_completion_tokens": 66000, "param_name": "max_completion_tokens", "api_version": "2024-12-01-preview", "category": "OpenAI"},
|
50 |
+
"o1-preview": {"max_tokens": 33000, "param_name": "max_tokens", "api_version": None, "category": "OpenAI"},
|
51 |
+
"Phi-4-multimodal-instruct": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "Microsoft"},
|
52 |
+
"Mistral-large-2407": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "Mistral"},
|
53 |
+
"Codestral-2501": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "Mistral"},
|
54 |
+
"default": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "Other"}
|
|
|
55 |
}
|
56 |
|
57 |
# Try to import Streamlit Ace
|
|
|
60 |
ACE_EDITOR_AVAILABLE = True
|
61 |
except ImportError:
|
62 |
ACE_EDITOR_AVAILABLE = False
|
63 |
+
logger.warning("streamlit-ace not available, falling back to text area")
|
64 |
|
65 |
def prepare_api_params(messages, model_name):
|
|
|
66 |
config = MODEL_CONFIGS.get(model_name, MODEL_CONFIGS["default"])
|
67 |
+
params = {"messages": messages, "model": model_name}
|
68 |
+
params[config["param_name"]] = config.get(config["param_name"])
|
69 |
+
return params, config
|
|
|
|
|
|
|
|
|
70 |
|
71 |
def get_secret(env_var):
|
|
|
72 |
val = os.environ.get(env_var)
|
73 |
if not val:
|
74 |
logger.warning(f"Secret '{env_var}' not found")
|
75 |
return val
|
76 |
|
77 |
def check_password():
|
|
|
78 |
correct = get_secret("password")
|
79 |
if not correct:
|
80 |
st.error("Admin password not configured")
|
|
|
95 |
|
96 |
def ensure_packages():
|
97 |
required = {
|
98 |
+
'manim': '0.17.3', 'Pillow': '9.0.0', 'numpy': '1.22.0',
|
99 |
+
'transformers': '4.30.0', 'torch': '2.0.0', 'pygments': '2.15.1',
|
100 |
+
'streamlit-ace': '0.1.1', 'pydub': '0.25.1', 'plotly': '5.14.0',
|
101 |
+
'pandas': '2.0.0', 'python-pptx': '0.6.21', 'fpdf': '1.7.2',
|
102 |
+
'matplotlib': '3.5.0', 'seaborn': '0.11.2', 'scipy': '1.7.3',
|
103 |
+
'huggingface_hub': '0.16.0'
|
|
|
|
|
104 |
}
|
105 |
missing = {}
|
106 |
for pkg, ver in required.items():
|
|
|
131 |
return True, "No valid packages"
|
132 |
sidebar_txt = st.sidebar.empty()
|
133 |
bar = st.sidebar.progress(0)
|
134 |
+
results, success = [], True
|
|
|
135 |
for i, p in enumerate(parts):
|
136 |
bar.progress(i / len(parts))
|
137 |
sidebar_txt.text(f"Installing {p}...")
|
|
|
165 |
logger.error(str(e))
|
166 |
return None
|
167 |
|
168 |
+
def generate_manim_preview(code):
|
169 |
+
objects = []
|
170 |
+
if "Circle" in code: objects.append("⭕")
|
171 |
+
if "Square" in code: objects.append("🔲")
|
172 |
+
if "MathTex" in code or "Tex" in code: objects.append("📊")
|
173 |
+
if "Text" in code: objects.append("📝")
|
174 |
+
if "Axes" in code: objects.append("📈")
|
175 |
+
icons = "".join(objects) or "🎬"
|
176 |
+
return f"""
|
|
|
177 |
<div style="background:#000;color:#fff;padding:1rem;border-radius:10px;text-align:center;">
|
178 |
<h3>Animation Preview</h3>
|
179 |
+
<div style="font-size:2rem;">{icons}</div>
|
180 |
+
<p>Full rendering required for accurate preview</p>
|
|
|
181 |
</div>
|
182 |
"""
|
|
|
183 |
|
184 |
+
def extract_scene_class_name(code):
|
185 |
+
m = re.findall(r'class\s+(\w+)\s*\([^)]*Scene', code)
|
186 |
+
return m[0] if m else "MyScene"
|
187 |
|
188 |
+
def mp4_to_gif(mp4_path, gif_path, fps=15):
|
189 |
cmd = [
|
190 |
+
"ffmpeg", "-i", mp4_path,
|
191 |
+
"-vf", f"fps={fps},scale=640:-1:flags=lanczos,split[s0][s1];[s0]palettegen[p];[s1][p]paletteuse",
|
192 |
+
"-loop", "0", gif_path
|
193 |
]
|
194 |
+
res = subprocess.run(cmd, capture_output=True, text=True)
|
195 |
+
return gif_path if res.returncode == 0 else None
|
196 |
|
197 |
+
def generate_manim_video(code, fmt, quality, speed=1.0, audio_path=None):
|
198 |
temp_dir = tempfile.mkdtemp(prefix="manim_")
|
199 |
+
scene = extract_scene_class_name(code)
|
200 |
+
scene_file = os.path.join(temp_dir, "scene.py")
|
201 |
+
with open(scene_file, "w") as f:
|
202 |
f.write(code)
|
203 |
+
qflags = {"480p":"-ql","720p":"-qm","1080p":"-qh","4K":"-qk","8K":"-qp"}
|
204 |
+
qf = qflags.get(quality, "-qm")
|
205 |
+
cmd = ["manim", scene_file, scene, qf, f"--format={fmt}"]
|
|
|
206 |
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
|
207 |
+
output, out_path, mp4_path = [], None, None
|
|
|
|
|
|
|
208 |
log = st.empty()
|
209 |
+
for line in proc.stdout:
|
210 |
+
output.append(line)
|
211 |
+
log.code("".join(output[-10:]))
|
212 |
+
if "File ready at" in line:
|
213 |
+
m = re.search(r'["\'](.+?\.(?:mp4|gif|webm|svg))["\']', line)
|
214 |
+
if m:
|
215 |
+
out_path = m.group(1)
|
216 |
+
if out_path.endswith(".mp4"):
|
217 |
+
mp4_path = out_path
|
|
|
|
|
|
|
|
|
218 |
proc.wait()
|
219 |
time.sleep(1)
|
220 |
+
if fmt=="gif" and (not out_path or not os.path.exists(out_path)) and mp4_path:
|
221 |
+
gif = os.path.join(temp_dir, "converted.gif")
|
|
|
222 |
conv = mp4_to_gif(mp4_path, gif)
|
223 |
if conv and os.path.exists(conv):
|
224 |
out_path = conv
|
225 |
+
data = None
|
226 |
if out_path and os.path.exists(out_path):
|
227 |
+
with open(out_path, "rb") as f:
|
228 |
+
data = f.read()
|
229 |
shutil.rmtree(temp_dir)
|
230 |
if data:
|
231 |
+
size_mb = len(data)/(1024*1024)
|
232 |
+
return data, f"✅ Generated ({size_mb:.1f} MB)"
|
233 |
else:
|
234 |
+
return None, "❌ No output generated. See logs."
|
235 |
|
236 |
def detect_input_calls(code):
|
237 |
+
calls=[]
|
238 |
+
for i,line in enumerate(code.split("\n"),1):
|
239 |
if "input(" in line and not line.strip().startswith("#"):
|
240 |
+
m=re.search(r'input\(["\'](.+?)["\']\)', line)
|
241 |
+
prompt=m.group(1) if m else f"Input at line {i}"
|
242 |
calls.append({"line":i,"prompt":prompt})
|
243 |
return calls
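# Example (illustrative): for a script containing
#   name = input("Your name: ")
# detect_input_calls returns [{"line": 1, "prompt": "Your name: "}]; the values typed
# into the Streamlit text inputs are then passed to run_python_script as `inputs`.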
|
244 |
|
245 |
def run_python_script(code, inputs=None, timeout=60):
|
246 |
+
res={"stdout":"","stderr":"","exception":None,"plots":[],"dataframes":[],"execution_time":0}
|
247 |
+
mod=""
|
248 |
if inputs:
|
249 |
+
mod=f"""
|
250 |
__INPUTS={inputs}
|
251 |
__IDX=0
|
252 |
def input(prompt=''):
|
|
|
259 |
print()
|
260 |
return ''
|
261 |
"""
|
262 |
+
full_code=mod+code
|
263 |
with tempfile.TemporaryDirectory() as td:
|
264 |
+
path=os.path.join(td,"script.py")
|
265 |
+
with open(path,"w") as f: f.write(full_code)
|
266 |
+
outf, errf = os.path.join(td,"out.txt"), os.path.join(td,"err.txt")
|
|
|
267 |
start=time.time()
|
268 |
try:
|
269 |
with open(outf,"w") as o, open(errf,"w") as e:
|
270 |
+
proc=subprocess.Popen([sys.executable, path], stdout=o, stderr=e, cwd=td)
|
271 |
proc.wait(timeout=timeout)
|
272 |
except subprocess.TimeoutExpired:
|
273 |
proc.kill()
|
274 |
+
res["stderr"]+="\nTimed out"
|
275 |
+
res["exception"]="Timeout"
|
276 |
+
res["execution_time"]=time.time()-start
|
277 |
+
res["stdout"]=open(outf).read()
|
278 |
+
res["stderr"]+=open(errf).read()
|
279 |
+
return res
|
280 |
|
281 |
+
def display_python_script_results(r):
|
282 |
+
st.info(f"Completed in {r['execution_time']:.2f}s")
|
283 |
+
if r["exception"]:
|
284 |
+
st.error(f"Exception: {r['exception']}")
|
285 |
+
if r["stderr"]:
|
286 |
st.error("Errors:")
|
287 |
+
st.code(r["stderr"], language="bash")
|
288 |
+
if r["plots"]:
|
289 |
st.markdown("### Plots")
|
290 |
+
cols=st.columns(min(3,len(r["plots"])))
|
291 |
+
for i,p in enumerate(r["plots"]):
|
292 |
cols[i%len(cols)].image(p,use_column_width=True)
|
293 |
+
if r["dataframes"]:
|
294 |
st.markdown("### DataFrames")
|
295 |
+
for df in r["dataframes"]:
|
296 |
+
with st.expander(f"{df['name']} {df['shape']}"):
|
297 |
st.markdown(df["preview_html"], unsafe_allow_html=True)
|
298 |
+
if r["stdout"]:
|
299 |
st.markdown("### Output")
|
300 |
+
st.code(r["stdout"], language="bash")
|
301 |
|
|
|
302 |
def main():
|
303 |
if 'init' not in st.session_state:
|
304 |
st.session_state.update({
|
305 |
'init':True, 'video_data':None, 'status':None, 'ai_models':None,
|
306 |
'generated_code':"", 'code':"", 'temp_code':"", 'editor_key':str(uuid.uuid4()),
|
307 |
+
'packages_checked':False, 'audio_path':None, 'image_paths':[],
|
308 |
+
'custom_library_result':"", 'python_script':"", 'python_result':None,
|
309 |
+
'active_tab':0, 'settings':{"quality":"720p","format_type":"mp4","animation_speed":"Normal"},
|
310 |
+
'password_entered':False, 'custom_model':"gpt-4o", 'pending_tab_switch':None
|
|
|
|
|
311 |
})
|
312 |
st.set_page_config(page_title="Manim Animation Studio", page_icon="🎬", layout="wide")
|
313 |
+
|
314 |
if not st.session_state.packages_checked:
|
315 |
if ensure_packages():
|
316 |
st.session_state.packages_checked=True
|
317 |
else:
|
318 |
+
st.error("Package installation failed")
|
319 |
return
|
320 |
|
321 |
+
tab_names=[
|
322 |
+
"✨ Editor","🤖 AI Assistant","🎨 Assets",
|
323 |
+
"🎞️ Timeline","🎓 Educational Export","🐍 Python Runner"
|
324 |
+
]
|
325 |
tabs = st.tabs(tab_names)
|
326 |
|
327 |
+
# Editor
|
328 |
with tabs[0]:
|
329 |
col1,col2 = st.columns([3,2])
|
330 |
with col1:
|
331 |
st.markdown("### 📝 Animation Editor")
|
332 |
+
mode = st.radio("Code Input", ["Type Code","Upload File"], key="editor_mode")
|
333 |
if mode=="Upload File":
|
334 |
+
up = st.file_uploader("Upload .py", type=["py"])
|
335 |
if up:
|
336 |
txt=up.getvalue().decode()
|
337 |
if txt.strip():
|
338 |
st.session_state.code=txt
|
339 |
st.session_state.temp_code=txt
|
340 |
if ACE_EDITOR_AVAILABLE:
|
341 |
+
st.session_state.temp_code = st_ace(
|
342 |
+
value=st.session_state.code, language="python",
|
343 |
+
theme="monokai", min_lines=20,
|
344 |
+
key=f"ace_{st.session_state.editor_key}"
|
345 |
+
)
|
346 |
else:
|
347 |
+
st.session_state.temp_code = st.text_area(
|
348 |
+
"Code", st.session_state.code, height=400,
|
349 |
+
key=f"ta_{st.session_state.editor_key}"
|
350 |
+
)
|
351 |
if st.session_state.temp_code!=st.session_state.code:
|
352 |
st.session_state.code=st.session_state.temp_code
|
353 |
if st.button("🚀 Generate Animation"):
|
354 |
if not st.session_state.code:
|
355 |
st.error("Enter code first")
|
356 |
else:
|
357 |
+
data, msg = generate_manim_video(
|
358 |
st.session_state.code,
|
359 |
st.session_state.settings["format_type"],
|
360 |
st.session_state.settings["quality"],
|
361 |
{"Slow":0.5,"Normal":1.0,"Fast":2.0,"Very Fast":3.0}[st.session_state.settings["animation_speed"]],
|
362 |
st.session_state.audio_path
|
363 |
)
|
364 |
+
st.session_state.video_data=data
|
365 |
+
st.session_state.status=msg
|
366 |
with col2:
|
367 |
if st.session_state.code:
|
368 |
+
components.html(
|
369 |
+
generate_manim_preview(st.session_state.code),
|
370 |
+
height=250
|
371 |
+
)
|
372 |
if st.session_state.video_data:
|
373 |
fmt=st.session_state.settings["format_type"]
|
374 |
if fmt=="png_sequence":
|
375 |
+
st.download_button(
|
376 |
+
"⬇️ Download PNG ZIP", data=st.session_state.video_data,
|
377 |
+
file_name=f"manim_{datetime.now().strftime('%Y%m%d_%H%M%S')}.zip",
|
378 |
+
mime="application/zip"
|
379 |
+
)
|
380 |
elif fmt=="svg":
|
381 |
try:
|
382 |
svg=st.session_state.video_data.decode('utf-8')
|
383 |
components.html(svg, height=400)
|
384 |
except:
|
385 |
st.error("Cannot display SVG")
|
386 |
+
st.download_button(
|
387 |
+
"⬇️ Download SVG", data=st.session_state.video_data,
|
388 |
+
file_name="animation.svg", mime="image/svg+xml"
|
389 |
+
)
|
390 |
else:
|
391 |
st.video(st.session_state.video_data, format=fmt)
|
392 |
+
st.download_button(
|
393 |
+
f"⬇️ Download {fmt.upper()}", st.session_state.video_data,
|
394 |
+
file_name=f"animation.{fmt}", mime=f"video/{fmt}" if fmt!="gif" else "image/gif"
|
395 |
+
)
|
396 |
if st.session_state.status:
|
397 |
+
if "❌" in st.session_state.status:
|
398 |
st.error(st.session_state.status)
|
399 |
else:
|
400 |
st.success(st.session_state.status)
|
401 |
|
402 |
+
# AI Assistant
|
403 |
with tabs[1]:
|
404 |
st.markdown("### 🤖 AI Animation Assistant")
|
405 |
if check_password():
|
406 |
client_data = init_ai_models_direct()
|
407 |
if client_data:
|
408 |
if st.button("Test API Connection"):
|
409 |
+
from azure.ai.inference.models import UserMessage
|
410 |
+
params,_=prepare_api_params([UserMessage("Hello")], client_data["model_name"])
|
411 |
+
resp=client_data["client"].complete(**params)
|
412 |
+
if resp.choices:
|
413 |
+
st.success("✅ Connection successful!")
|
414 |
+
st.session_state.ai_models=client_data
|
415 |
+
else:
|
416 |
+
st.error("❌ No response")
|
|
|
417 |
if st.session_state.ai_models:
|
418 |
st.info(f"Using model {st.session_state.ai_models['model_name']}")
|
419 |
prompt = st.text_area("Describe animation or paste partial code", height=150)
|
420 |
if st.button("Generate Animation Code"):
|
421 |
if prompt.strip():
|
422 |
from azure.ai.inference.models import UserMessage
|
423 |
+
params,_=prepare_api_params(
|
424 |
+
[UserMessage(f"Write a complete Manim scene for:\n{prompt}")],
|
425 |
+
st.session_state.ai_models["model_name"]
|
426 |
+
)
|
427 |
+
resp=st.session_state.ai_models["client"].complete(**params)
|
428 |
if resp.choices:
|
429 |
code = resp.choices[0].message.content
|
430 |
if "```python" in code:
|
|
|
442 |
st.session_state.pending_tab_switch=0
|
443 |
st.rerun()
|
444 |
else:
|
445 |
+
st.info("Enter password to access AI")
|
446 |
|
447 |
+
# Assets
|
448 |
with tabs[2]:
|
|
|
449 |
st.markdown("### 🎨 Asset Management")
|
450 |
c1,c2 = st.columns(2)
|
451 |
with c1:
|
452 |
+
imgs = st.file_uploader(
|
453 |
+
"Upload Images", type=["png","jpg","jpeg","svg"],
|
454 |
+
accept_multiple_files=True
|
455 |
+
)
|
456 |
if imgs:
|
457 |
+
idir = os.path.join(os.getcwd(),"manim_assets","images")
|
458 |
+
os.makedirs(idir, exist_ok=True)
|
459 |
for up in imgs:
|
460 |
ext=up.name.split(".")[-1]
|
461 |
fname=f"img_{int(time.time())}_{uuid.uuid4().hex[:6]}.{ext}"
|
462 |
+
path=os.path.join(idir,fname)
|
463 |
with open(path,"wb") as f: f.write(up.getvalue())
|
464 |
st.session_state.image_paths.append({"name":up.name,"path":path})
|
465 |
+
for info in st.session_state.image_paths:
|
466 |
+
img=Image.open(info["path"])
|
467 |
+
st.image(img, caption=info["name"], width=100)
|
468 |
+
if st.button(f"Use {info['name']}"):
|
469 |
+
snippet=f"""
|
|
|
470 |
# Image asset
|
471 |
image = ImageMobject(r"{info['path']}")
|
472 |
image.scale(2)
|
473 |
self.play(FadeIn(image))
|
474 |
self.wait(1)
|
475 |
"""
|
476 |
+
st.session_state.code+=snippet
|
477 |
+
st.session_state.temp_code=st.session_state.code
|
478 |
+
st.success(f"Added {info['name']}")
|
479 |
+
st.session_state.pending_tab_switch=0
|
480 |
+
st.rerun()
|
481 |
with c2:
|
482 |
+
aud = st.file_uploader("Upload Audio", type=["mp3","wav","ogg"])
|
483 |
if aud:
|
484 |
+
adir = os.path.join(os.getcwd(),"manim_assets","audio")
|
485 |
+
os.makedirs(adir, exist_ok=True)
|
486 |
ext=aud.name.split(".")[-1]
|
487 |
aname=f"audio_{int(time.time())}.{ext}"
|
488 |
ap=os.path.join(adir,aname)
|
|
|
491 |
st.audio(aud)
|
492 |
st.success("Audio uploaded")
|
493 |
|
494 |
+
# Timeline
|
495 |
+
with tabs[3]:
|
496 |
st.markdown("### 🎞️ Timeline Editor")
|
497 |
+
st.info("Use code editor to adjust timing of self.play and self.wait calls.")
|
498 |
|
499 |
+
# Educational Export
|
500 |
+
with tabs[4]:
|
501 |
st.markdown("### 🎓 Educational Export")
|
502 |
if not st.session_state.video_data:
|
503 |
+
st.warning("Generate animation first")
|
504 |
else:
|
505 |
+
title=st.text_input("Title","Manim Animation")
|
506 |
+
expl=st.text_area("Explanation (use ## to separate steps)",height=150)
|
507 |
+
fmt=st.selectbox("Format",["PowerPoint","HTML","PDF Sequence"])
|
508 |
if st.button("Export"):
|
509 |
+
st.success(f"{fmt} export not implemented yet")
|
|
|
510 |
|
511 |
+
# Python Runner
|
512 |
+
with tabs[5]:
|
513 |
st.markdown("### 🐍 Python Script Runner")
|
514 |
+
examples={"Select...":"","Sine Plot":"""import matplotlib.pyplot as plt
|
|
|
|
|
515 |
import numpy as np
|
516 |
x=np.linspace(0,10,100)
|
517 |
y=np.sin(x)
|
518 |
plt.plot(x,y)
|
519 |
+
print("Done")"""}
|
520 |
+
sel=st.selectbox("Example",list(examples.keys()))
|
|
|
521 |
code = examples.get(sel, st.session_state.python_script)
|
522 |
if ACE_EDITOR_AVAILABLE:
|
523 |
+
code=st_ace(value=code, language="python", theme="monokai", min_lines=15, key="pyace")
|
524 |
else:
|
525 |
+
code=st.text_area("Code", code, height=300, key="pyta")
|
526 |
st.session_state.python_script=code
|
527 |
+
inputs=detect_input_calls(code)
|
528 |
vals=[]
|
529 |
if inputs:
|
530 |
st.info(f"{len(inputs)} input() calls detected")
|
531 |
for i,c in enumerate(inputs):
|
532 |
+
vals.append(st.text_input(f"{c['prompt']} (line {c['line']})", key=f"in{i}"))
|
533 |
+
timeout=st.slider("Timeout",5,300,30)
|
534 |
if st.button("▶️ Run"):
|
535 |
res=run_python_script(code, inputs=vals, timeout=timeout)
|
536 |
st.session_state.python_result=res
|
537 |
if st.session_state.python_result:
|
538 |
display_python_script_results(st.session_state.python_result)
|
539 |
|
540 |
+
# Handle pending tab switch
|
541 |
if st.session_state.pending_tab_switch is not None:
|
542 |
st.session_state.active_tab = st.session_state.pending_tab_switch
|
543 |
+
st.session_state.pending_tab_switch = None
|
544 |
|
545 |
if __name__ == "__main__":
|
546 |
main()
|