import streamlit as st
import tempfile
import os
import logging
from pathlib import Path
from PIL import Image
import io
import numpy as np
import sys
import subprocess
import json
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter
import base64
from transformers import pipeline
import torch
import re
import shutil
import time
from datetime import datetime, timedelta
import streamlit.components.v1 as components
import uuid
import platform
import pandas as pd
import plotly.express as px
import markdown
import zipfile
import contextlib
import threading
import traceback
from io import StringIO, BytesIO

# Set up enhanced logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

# Model configuration mapping for different API requirements and limits
MODEL_CONFIGS = {
    "DeepSeek-V3-0324": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "DeepSeek", "warning": None},
    "DeepSeek-R1": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "DeepSeek", "warning": None},
    "Llama-4-Scout-17B-16E-Instruct": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "Meta", "warning": None},
    "Llama-4-Maverick-17B-128E-Instruct-FP8": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "Meta", "warning": None},
    "gpt-4o-mini": {"max_tokens": 15000, "param_name": "max_tokens", "api_version": None, "category": "OpenAI", "warning": None},
    "gpt-4o": {"max_tokens": 16000, "param_name": "max_tokens", "api_version": None, "category": "OpenAI", "warning": None},
    "gpt-4.1": {"max_tokens": 32768, "param_name": "max_tokens", "api_version": None, "category": "OpenAI", "warning": None},
    "gpt-4.1-mini": {"max_tokens": 32768, "param_name": "max_tokens", "api_version": None, "category": "OpenAI", "warning": None},
    "gpt-4.1-nano": {"max_tokens": 32768, "param_name": "max_tokens", "api_version": None, "category": "OpenAI", "warning": None},
    "o4-mini": {"max_completion_tokens": 100000, "param_name": "max_completion_tokens", "api_version": "2024-12-01-preview", "category": "OpenAI", "warning": None},
    "o1": {"max_completion_tokens": 100000, "param_name": "max_completion_tokens", "api_version": "2024-12-01-preview", "category": "OpenAI", "warning": None},
    "o1-mini": {"max_completion_tokens": 66000, "param_name": "max_completion_tokens", "api_version": "2024-12-01-preview", "category": "OpenAI", "warning": None},
    "o1-preview": {"max_tokens": 33000, "param_name": "max_tokens", "api_version": None, "category": "OpenAI", "warning": None},
    "Phi-4-multimodal-instruct": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "Microsoft", "warning": None},
    "Mistral-large-2407": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "Mistral", "warning": None},
    "Codestral-2501": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "Mistral", "warning": None},
    # Default configuration for other models
    "default": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "Other", "warning": None}
}

# Try to import Streamlit Ace
try:
    from streamlit_ace import st_ace
    ACE_EDITOR_AVAILABLE = True
except ImportError:
    ACE_EDITOR_AVAILABLE = False
    logger.warning("streamlit-ace not available, falling back to standard text editor")

def prepare_api_params(messages, model_name):
    """Create appropriate API parameters based on model configuration."""
    config = MODEL_CONFIGS.get(model_name, MODEL_CONFIGS["default"])
    api_params = {
        "messages": messages,
        "model": model_name
    }
    token_param = config["param_name"]
    api_params[token_param] = config.get(token_param)
    return api_params, config
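
# Illustrative examples (values taken from MODEL_CONFIGS above, not an extra code path):
#   prepare_api_params([UserMessage("hi")], "gpt-4o")
#       -> ({"messages": [...], "model": "gpt-4o", "max_tokens": 16000}, config)
#   prepare_api_params([UserMessage("hi")], "o1")
#       -> ({"messages": [...], "model": "o1", "max_completion_tokens": 100000}, config)
# i.e. the reasoning-style models use "max_completion_tokens" while the others use "max_tokens".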

def get_secret(env_var):
    """Retrieve a secret from environment variables."""
    val = os.environ.get(env_var)
    if not val:
        logger.warning(f"Secret '{env_var}' not found")
    return val

def check_password():
    """Verify the entered password against the configured secret."""
    correct = get_secret("password")
    if not correct:
        st.error("Admin password not configured")
        return False
    if "password_entered" not in st.session_state:
        st.session_state.password_entered = False
    if not st.session_state.password_entered:
        pwd = st.text_input("Enter password to access AI features", type="password")
        if pwd:
            if pwd == correct:
                st.session_state.password_entered = True
                return True
            else:
                st.error("Incorrect password")
                return False
        return False
    return True

def ensure_packages():
    required = {
        'manim': '0.17.3',
        'Pillow': '9.0.0',
        'numpy': '1.22.0',
        'transformers': '4.30.0',
        'torch': '2.0.0',
        'pygments': '2.15.1',
        'streamlit-ace': '0.1.1',
        'pydub': '0.25.1',
        'plotly': '5.14.0',
        'pandas': '2.0.0',
        'python-pptx': '0.6.21',
        'markdown': '3.4.3',
        'fpdf': '1.7.2',
        'matplotlib': '3.5.0',
        'seaborn': '0.11.2',
        'scipy': '1.7.3',
        'huggingface_hub': '0.16.0',
    }
    # Some distributions are imported under a different name than they are installed as.
    import_names = {'Pillow': 'PIL', 'streamlit-ace': 'streamlit_ace', 'python-pptx': 'pptx'}
    missing = {}
    for pkg, ver in required.items():
        try:
            __import__(import_names.get(pkg, pkg))
        except ImportError:
            missing[pkg] = ver
    if not missing:
        return True
    bar = st.progress(0)
    txt = st.empty()
    for i, (pkg, ver) in enumerate(missing.items()):
        bar.progress(i / len(missing))
        txt.text(f"Installing {pkg}...")
        res = subprocess.run([sys.executable, "-m", "pip", "install", f"{pkg}>={ver}"], capture_output=True, text=True)
        if res.returncode != 0:
            st.error(f"Failed to install {pkg}")
            return False
    bar.progress(1.0)
    txt.empty()
    return True

def install_custom_packages(pkgs):
    if not pkgs.strip():
        return True, "No packages specified"
    parts = [p.strip() for p in pkgs.split(",") if p.strip()]
    if not parts:
        return True, "No valid packages"
    sidebar_txt = st.sidebar.empty()
    bar = st.sidebar.progress(0)
    results = []
    success = True
    for i, p in enumerate(parts):
        bar.progress(i / len(parts))
        sidebar_txt.text(f"Installing {p}...")
        res = subprocess.run([sys.executable, "-m", "pip", "install", p], capture_output=True, text=True)
        if res.returncode != 0:
            results.append(f"Failed {p}: {res.stderr}")
            success = False
        else:
            results.append(f"Installed {p}")
    bar.progress(1.0)
    sidebar_txt.empty()
    return success, "\n".join(results)

def init_ai_models_direct():
    token = get_secret("github_token_api")
    if not token:
        st.error("API token not configured")
        return None
    try:
        from azure.ai.inference import ChatCompletionsClient
        from azure.ai.inference.models import UserMessage
        from azure.core.credentials import AzureKeyCredential
        client = ChatCompletionsClient(
            endpoint="https://models.inference.ai.azure.com",
            credential=AzureKeyCredential(token)
        )
        return {"client": client, "model_name": "gpt-4o", "last_loaded": datetime.now().isoformat()}
    except ImportError as e:
        st.error("Azure AI SDK not installed")
        logger.error(str(e))
        return None
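
# Note: the returned client is used below as client.complete(**api_params), with api_params
# built by prepare_api_params(). Given the "github_token_api" secret name, the endpoint
# appears to be the GitHub Models inference endpoint hosted on Azure AI (an assumption
# based on this excerpt, not stated explicitly in the code).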

def generate_manim_preview(python_code):
    scene_objects = []
    if "Circle" in python_code: scene_objects.append("circle")
    if "Square" in python_code: scene_objects.append("square")
    if "MathTex" in python_code or "Tex" in python_code: scene_objects.append("equation")
    if "Text" in python_code: scene_objects.append("text")
    if "Axes" in python_code: scene_objects.append("graph")
    icons = {"circle": "⭕", "square": "🔲", "equation": "📊", "text": "📝", "graph": "📈"}
    icon_html = "".join(f'<span style="font-size:2rem;margin:0.3rem;">{icons[o]}</span>' for o in scene_objects if o in icons)
    html = f"""
    <div style="background:#000;color:#fff;padding:1rem;border-radius:10px;text-align:center;">
        <h3>Animation Preview</h3>
        <div>{icon_html or '🎬'}</div>
        <p>Contains: {', '.join(scene_objects) or 'none'}</p>
        <p style="opacity:0.7;">Full rendering required for accurate preview</p>
    </div>
    """
    return html

def extract_scene_class_name(python_code):
    names = re.findall(r'class\s+(\w+)\s*\([^)]*Scene', python_code)
    return names[0] if names else "MyScene"
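
# Example: extract_scene_class_name("class WaveDemo(Scene):\n    ...") returns "WaveDemo";
# if no Scene subclass is found, the fallback name "MyScene" is used.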

def mp4_to_gif(mp4, out, fps=15):
    cmd = [
        "ffmpeg", "-i", mp4,
        "-vf", f"fps={fps},scale=640:-1:flags=lanczos,split[s0][s1];[s0]palettegen[p];[s1][p]paletteuse",
        "-loop", "0", out
    ]
    r = subprocess.run(cmd, capture_output=True, text=True)
    return out if r.returncode == 0 else None
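
# The filter graph above uses ffmpeg's two-pass palette technique (split + palettegen +
# paletteuse), which generally yields much better GIF colors than a direct one-pass
# conversion; scale=640:-1 keeps the aspect ratio while capping the width at 640 px.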

def generate_manim_video(code, format_type, quality_preset, speed=1.0, audio_path=None):
    temp_dir = tempfile.mkdtemp(prefix="manim_")
    scene_class = extract_scene_class_name(code)
    file_py = os.path.join(temp_dir, "scene.py")
    with open(file_py, "w", encoding="utf-8") as f:
        f.write(code)
    quality_flags = {"480p": "-ql", "720p": "-qm", "1080p": "-qh", "4K": "-qk", "8K": "-qp"}
    qf = quality_flags.get(quality_preset, "-qm")
    fmt_arg = f"--format={format_type}"
    cmd = ["manim", file_py, scene_class, qf, fmt_arg]
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
    output = []
    out_path = None
    mp4_path = None
    bar = st.empty()
    log = st.empty()
    while True:
        line = proc.stdout.readline()
        if not line and proc.poll() is not None:
            break
        if line:
            output.append(line)
            log.code("".join(output[-10:]))
            if "File ready at" in line:
                m = re.search(r'([\'"])?(.+?\.(?:mp4|gif|webm|svg))\1', line)
                if m:
                    out_path = m.group(2)
                    if out_path.endswith(".mp4"):
                        mp4_path = out_path
    proc.wait()
    time.sleep(1)
    data = None
    if format_type == "gif" and (not out_path or not os.path.exists(out_path)) and mp4_path and os.path.exists(mp4_path):
        gif = os.path.join(temp_dir, scene_class + "_conv.gif")
        conv = mp4_to_gif(mp4_path, gif)
        if conv and os.path.exists(conv):
            out_path = conv
    if out_path and os.path.exists(out_path):
        with open(out_path, "rb") as f:
            data = f.read()
    shutil.rmtree(temp_dir)
    if data:
        return data, f"✅ Generated successfully ({len(data)/(1024*1024):.1f} MB)"
    else:
        return None, "❌ No output generated. Check logs."

def detect_input_calls(code):
    calls = []
    for i, line in enumerate(code.split("\n"), 1):
        if "input(" in line and not line.strip().startswith("#"):
            m = re.search(r'input\(["\'](.+?)["\']\)', line)
            prompt = m.group(1) if m else f"Input at line {i}"
            calls.append({"line": i, "prompt": prompt})
    return calls
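
# Example: for a script containing the single line `name = input("Your name: ")` this
# returns [{"line": 1, "prompt": "Your name: "}], so the UI can collect the value up front
# instead of letting the subprocess block on stdin.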

def run_python_script(code, inputs=None, timeout=60):
    result = {"stdout": "", "stderr": "", "exception": None, "plots": [], "dataframes": [], "execution_time": 0}
    mod = ""
    if inputs:
        mod = f"""
__INPUTS = {inputs}
__IDX = 0
def input(prompt=''):
    global __IDX
    print(prompt, end='')
    if __IDX < len(__INPUTS):
        val = __INPUTS[__IDX]
        __IDX += 1
        print(val)
        return val
    print()
    return ''
"""
    code_full = mod + code
    with tempfile.TemporaryDirectory() as td:
        script = os.path.join(td, "script.py")
        with open(script, "w") as f:
            f.write(code_full)
        outf = os.path.join(td, "out.txt")
        errf = os.path.join(td, "err.txt")
        start = time.time()
        try:
            with open(outf, "w") as o, open(errf, "w") as e:
                proc = subprocess.Popen([sys.executable, script], stdout=o, stderr=e, cwd=td)
                proc.wait(timeout=timeout)
        except subprocess.TimeoutExpired:
            proc.kill()
            result["stderr"] += f"\nTimed out after {timeout}s"
            result["exception"] = "Timeout"
        result["execution_time"] = time.time() - start
        result["stdout"] = open(outf).read()
        result["stderr"] += open(errf).read()
    return result
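
# Illustrative usage (not executed here):
#   res = run_python_script("name = input('Name: ')\nprint('Hi', name)", inputs=["Ada"], timeout=10)
#   res["stdout"] would then be "Name: Ada\nHi Ada\n", because the injected input() shim
#   echoes each queued value instead of waiting on stdin.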

def display_python_script_results(res):
    st.info(f"Completed in {res['execution_time']:.2f}s")
    if res["exception"]:
        st.error(f"Exception: {res['exception']}")
    if res["stderr"]:
        st.error("Errors:")
        st.code(res["stderr"], language="bash")
    if res["plots"]:
        st.markdown("### Plots")
        cols = st.columns(min(3, len(res["plots"])))
        for i, p in enumerate(res["plots"]):
            cols[i % len(cols)].image(p, use_column_width=True)
    if res["dataframes"]:
        st.markdown("### DataFrames")
        for df in res["dataframes"]:
            with st.expander(f"{df['name']} ({df['shape'][0]}×{df['shape'][1]})"):
                st.markdown(df["preview_html"], unsafe_allow_html=True)
    if res["stdout"]:
        st.markdown("### Output")
        st.code(res["stdout"], language="bash")
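
# render_latex_preview() is called in the LaTeX tab below, but its definition is not part
# of this excerpt. A minimal sketch so the script runs, assuming a MathJax-based HTML
# preview is acceptable:
def render_latex_preview(latex_formula):
    """Return an HTML snippet that renders the given LaTeX formula with MathJax (placeholder helper)."""
    if not latex_formula:
        return "<p style='text-align:center;'>Enter a formula to see a preview</p>"
    return f"""
    <script src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"></script>
    <div style="background:#fff;color:#000;padding:1rem;border-radius:8px;text-align:center;font-size:1.5rem;">
        $$ {latex_formula} $$
    </div>
    """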

# Main app
def main():
    if 'init' not in st.session_state:
        st.session_state.update({
            'init': True, 'video_data': None, 'status': None, 'ai_models': None,
            'generated_code': "", 'code': "", 'temp_code': "", 'editor_key': str(uuid.uuid4()),
            'packages_checked': False, 'latex_formula': "", 'audio_path': None,
            'image_paths': [], 'custom_library_result': "", 'python_script': "",
            'python_result': None, 'active_tab': 0,
            'settings': {"quality": "720p", "format_type": "mp4", "animation_speed": "Normal"},
            'password_entered': False, 'custom_model': "gpt-4o", 'first_load_complete': False,
            'pending_tab_switch': None
        })
    st.set_page_config(page_title="Manim Animation Studio", page_icon="🎬", layout="wide")
    if not st.session_state.packages_checked:
        if ensure_packages():
            st.session_state.packages_checked = True
        else:
            st.error("Failed to install packages")
            return
    tab_names = ["✨ Editor", "🤖 AI Assistant", "📐 LaTeX Formulas", "🎨 Assets", "🎞️ Timeline", "🎓 Educational Export", "🐍 Python Runner"]
    tabs = st.tabs(tab_names)
    # Editor Tab
    with tabs[0]:
        col1, col2 = st.columns([3, 2])
        with col1:
            st.markdown("### 📝 Animation Editor")
            mode = st.radio("Code Input", ["Type Code", "Upload File"], key="editor_mode")
            if mode == "Upload File":
                up = st.file_uploader("Upload .py file", type=["py"])
                if up:
                    txt = up.getvalue().decode()
                    if txt.strip():
                        st.session_state.code = txt
                        st.session_state.temp_code = txt
            if ACE_EDITOR_AVAILABLE:
                st.session_state.temp_code = st_ace(value=st.session_state.code, language="python", theme="monokai", min_lines=20, key=f"ace_{st.session_state.editor_key}")
            else:
                st.session_state.temp_code = st.text_area("Code", st.session_state.code, height=400, key=f"ta_{st.session_state.editor_key}")
            if st.session_state.temp_code != st.session_state.code:
                st.session_state.code = st.session_state.temp_code
            if st.button("🚀 Generate Animation"):
                if not st.session_state.code:
                    st.error("Enter code first")
                else:
                    vc, stt = generate_manim_video(
                        st.session_state.code,
                        st.session_state.settings["format_type"],
                        st.session_state.settings["quality"],
                        {"Slow": 0.5, "Normal": 1.0, "Fast": 2.0, "Very Fast": 3.0}[st.session_state.settings["animation_speed"]],
                        st.session_state.audio_path
                    )
                    st.session_state.video_data = vc
                    st.session_state.status = stt
        with col2:
            if st.session_state.code:
                st.markdown("<div style='border:1px solid #ccc;padding:1rem;border-radius:8px;'>", unsafe_allow_html=True)
                components.html(generate_manim_preview(st.session_state.code), height=250)
                st.markdown("</div>", unsafe_allow_html=True)
            if st.session_state.video_data:
                fmt = st.session_state.settings["format_type"]
                if fmt == "png_sequence":
                    st.download_button("⬇️ Download PNG ZIP", data=st.session_state.video_data, file_name=f"manim_pngs_{datetime.now().strftime('%Y%m%d_%H%M%S')}.zip", mime="application/zip")
                elif fmt == "svg":
                    try:
                        svg = st.session_state.video_data.decode('utf-8')
                        components.html(svg, height=400)
                    except Exception:
                        st.error("Cannot display SVG")
                    st.download_button("⬇️ Download SVG", data=st.session_state.video_data, file_name="animation.svg", mime="image/svg+xml")
                else:
                    st.video(st.session_state.video_data, format=fmt)
                    st.download_button(f"⬇️ Download {fmt.upper()}", st.session_state.video_data, file_name=f"animation.{fmt}", mime=f"video/{fmt}" if fmt != "gif" else "image/gif")
            if st.session_state.status:
                # Status strings start with ✅ / ❌, so check the marker rather than the word "Error"
                if "❌" in st.session_state.status:
                    st.error(st.session_state.status)
                else:
                    st.success(st.session_state.status)
    # AI Assistant Tab
    with tabs[1]:
        st.markdown("### 🤖 AI Animation Assistant")
        if check_password():
            client_data = init_ai_models_direct()
            if client_data:
                if st.button("Test API Connection"):
                    with st.spinner("Testing..."):
                        from azure.ai.inference.models import UserMessage
                        api_params, _ = prepare_api_params([UserMessage("Hello")], client_data["model_name"])
                        resp = client_data["client"].complete(**api_params)
                        if resp.choices:
                            st.success("✅ Connection successful!")
                            st.session_state.ai_models = client_data
                        else:
                            st.error("❌ No response")
            if st.session_state.ai_models:
                st.info(f"Using model {st.session_state.ai_models['model_name']}")
                prompt = st.text_area("Describe animation or paste partial code", height=150)
                if st.button("Generate Animation Code"):
                    if prompt.strip():
                        from azure.ai.inference.models import UserMessage
                        api_params, _ = prepare_api_params([UserMessage(f"Write a complete Manim scene for:\n{prompt}")], st.session_state.ai_models["model_name"])
                        resp = st.session_state.ai_models["client"].complete(**api_params)
                        if resp.choices:
                            code = resp.choices[0].message.content
                            if "```python" in code:
                                code = code.split("```python")[1].split("```")[0]
                            st.session_state.generated_code = code
                        else:
                            st.error("No code generated")
                    else:
                        st.warning("Enter prompt first")
                if st.session_state.generated_code:
                    st.code(st.session_state.generated_code, language="python")
                    if st.button("Use This Code"):
                        st.session_state.code = st.session_state.generated_code
                        st.session_state.temp_code = st.session_state.generated_code
                        st.session_state.pending_tab_switch = 0
                        st.rerun()
        else:
            st.info("Enter password to access")
    # LaTeX Formulas Tab
    with tabs[2]:
        st.markdown("### 📐 LaTeX Formula Builder")
        col1, col2 = st.columns([3, 2])
        with col1:
            latex_input = st.text_area("LaTeX Formula", value=st.session_state.latex_formula, height=100, placeholder=r"e^{i\pi}+1=0")
            st.session_state.latex_formula = latex_input
            if latex_input:
                manim_latex_code = f"""
# LaTeX formula
formula = MathTex(r"{latex_input}")
self.play(Write(formula))
self.wait(2)
"""
                st.code(manim_latex_code, language="python")
                if st.button("Insert into Editor"):
                    if st.session_state.code:
                        if "def construct(self):" in st.session_state.code:
                            lines = st.session_state.code.split("\n")
                            idx = -1
                            for i, l in enumerate(lines):
                                if "def construct(self):" in l:
                                    idx = i
                                    break
                            if idx >= 0:
                                for j in range(idx + 1, len(lines)):
                                    if lines[j].strip() and not lines[j].strip().startswith("#"):
                                        indent = re.match(r"(\s*)", lines[j]).group(1)
                                        new_block = "\n".join(indent + ln for ln in manim_latex_code.strip().split("\n"))
                                        lines.insert(j, new_block)
                                        break
                                else:
                                    lines.append("        " + "\n        ".join(manim_latex_code.strip().split("\n")))
                                st.session_state.code = "\n".join(lines)
                                st.session_state.temp_code = st.session_state.code
                                st.success("Inserted LaTeX into editor")
                                st.session_state.pending_tab_switch = 0
                                st.rerun()
                        else:
                            st.warning("No construct() found")
                    else:
                        basic_scene = f"""from manim import *
class LatexScene(Scene):
    def construct(self):
        # LaTeX formula
        formula = MathTex(r"{latex_input}")
        self.play(Write(formula))
        self.wait(2)
"""
                        st.session_state.code = basic_scene
                        st.session_state.temp_code = basic_scene
                        st.success("Created new scene with LaTeX")
                        st.session_state.pending_tab_switch = 0
                        st.rerun()
        with col2:
            components.html(render_latex_preview(latex_input), height=300)
    # Assets Tab
    with tabs[3]:
        st.markdown("### 🎨 Asset Management")
        c1, c2 = st.columns(2)
        with c1:
            imgs = st.file_uploader("Upload Images", type=["png", "jpg", "jpeg", "svg"], accept_multiple_files=True)
            if imgs:
                img_dir = os.path.join(os.getcwd(), "manim_assets", "images")
                os.makedirs(img_dir, exist_ok=True)
                for up in imgs:
                    ext = up.name.split(".")[-1]
                    fname = f"img_{int(time.time())}_{uuid.uuid4().hex[:6]}.{ext}"
                    path = os.path.join(img_dir, fname)
                    with open(path, "wb") as f:
                        f.write(up.getvalue())
                    st.session_state.image_paths.append({"name": up.name, "path": path})
            if st.session_state.image_paths:
                for info in st.session_state.image_paths:
                    img = Image.open(info["path"])
                    st.image(img, caption=info["name"], width=100)
                    if st.button(f"Use {info['name']}"):
                        code_snippet = f"""
# Image asset
image = ImageMobject(r"{info['path']}")
image.scale(2)
self.play(FadeIn(image))
self.wait(1)
"""
                        st.session_state.code += code_snippet
                        st.session_state.temp_code = st.session_state.code
                        st.success(f"Added {info['name']} to code")
                        st.session_state.pending_tab_switch = 0
                        st.rerun()
        with c2:
            aud = st.file_uploader("Upload Audio", type=["mp3", "wav", "ogg"])
            if aud:
                adir = os.path.join(os.getcwd(), "manim_assets", "audio")
                os.makedirs(adir, exist_ok=True)
                ext = aud.name.split(".")[-1]
                aname = f"audio_{int(time.time())}.{ext}"
                ap = os.path.join(adir, aname)
                with open(ap, "wb") as f:
                    f.write(aud.getvalue())
                st.session_state.audio_path = ap
                st.audio(aud)
                st.success("Audio uploaded")
    # Timeline Tab
    with tabs[4]:
        st.markdown("### 🎞️ Timeline Editor")
        st.info("Timeline editing is not implemented yet; adjust the animation steps directly in the code for now.")
    # Educational Export Tab
    with tabs[5]:
        st.markdown("### 🎓 Educational Export")
        if not st.session_state.video_data:
            st.warning("Generate an animation first")
        else:
            title = st.text_input("Title", "Manim Animation")
            expl = st.text_area("Explanation (use ## to separate steps)", height=150)
            fmt = st.selectbox("Format", ["PowerPoint", "HTML", "PDF Sequence"])
            if st.button("Export"):
                # Placeholder: the export formats are not wired up yet
                st.info(f"{fmt} export not yet implemented.")
    # Python Runner Tab
    with tabs[6]:
        st.markdown("### 🐍 Python Script Runner")
        examples = {
            "Select...": "",
            "Sine Plot": """import matplotlib.pyplot as plt
import numpy as np
x = np.linspace(0, 10, 100)
y = np.sin(x)
plt.plot(x, y)
print("Done plotting")"""
        }
        sel = st.selectbox("Example", list(examples.keys()))
        # Fall back to the previously saved script when no example is selected
        code = examples.get(sel) or st.session_state.python_script
        if ACE_EDITOR_AVAILABLE:
            code = st_ace(value=code, language="python", theme="monokai", min_lines=15, key="pyace")
        else:
            code = st.text_area("Code", code, height=300, key="pyta")
        st.session_state.python_script = code
        inputs = detect_input_calls(code)
        vals = []
        if inputs:
            st.info(f"{len(inputs)} input() calls detected")
            for i, c in enumerate(inputs):
                vals.append(st.text_input(f"{c['prompt']} (line {c['line']})", key=f"inp{i}"))
        timeout = st.slider("Timeout (seconds)", 5, 300, 30)
        if st.button("▶️ Run"):
            res = run_python_script(code, inputs=vals, timeout=timeout)
            st.session_state.python_result = res
        if st.session_state.python_result:
            display_python_script_results(st.session_state.python_result)
    # Handle tab switch after actions
    if st.session_state.pending_tab_switch is not None:
        st.session_state.active_tab = st.session_state.pending_tab_switch
        st.session_state.pending_tab_switch = None

if __name__ == "__main__":
    main()