import streamlit as st
import tempfile
import os
import logging
from pathlib import Path
from PIL import Image
import io
import numpy as np
import sys
import subprocess
import json
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter
import base64
import re
import shutil
import time
from datetime import datetime, timedelta
import streamlit.components.v1 as components
import uuid
import pandas as pd
import plotly.express as px
import markdown
import zipfile
from azure.ai.inference import ChatCompletionsClient
from azure.ai.inference.models import SystemMessage, UserMessage
from azure.core.credentials import AzureKeyCredential
from openai import OpenAI
from transformers import pipeline
import torch
import traceback
# ──────────────────────────────────────────────────────────────────────────────
# Logging
# ──────────────────────────────────────────────────────────────────────────────
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s • %(name)s • %(levelname)s • %(message)s",
    handlers=[logging.StreamHandler()]
)
logger = logging.getLogger(__name__)
# ──────────────────────────────────────────────────────────────────────────────
# Model & Render Configuration
# ──────────────────────────────────────────────────────────────────────────────
MODEL_CONFIGS = {
    "DeepSeek-V3-0324": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "DeepSeek", "warning": None},
    "DeepSeek-R1": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "DeepSeek", "warning": None},
    "Llama-4-Scout-17B-16E-Instruct": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "Meta", "warning": None},
    "Llama-4-Maverick-17B-128E-Instruct-FP8": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "Meta", "warning": None},
    "gpt-4o-mini": {"max_tokens": 15000, "param_name": "max_tokens", "api_version": None, "category": "OpenAI", "warning": None},
    "gpt-4o": {"max_tokens": 16000, "param_name": "max_tokens", "api_version": None, "category": "OpenAI", "warning": None},
    "gpt-4.1": {"max_tokens": 32768, "param_name": "max_tokens", "api_version": None, "category": "OpenAI", "warning": None},
    "gpt-4.1-mini": {"max_tokens": 32768, "param_name": "max_tokens", "api_version": None, "category": "OpenAI", "warning": None},
    "gpt-4.1-nano": {"max_tokens": 32768, "param_name": "max_tokens", "api_version": None, "category": "OpenAI", "warning": None},
    "o3-mini": {"max_completion_tokens": 100000, "param_name": "max_completion_tokens", "api_version": "2024-12-01-preview", "category": "OpenAI", "warning": None},
    "o1": {"max_completion_tokens": 100000, "param_name": "max_completion_tokens", "api_version": "2024-12-01-preview", "category": "OpenAI", "warning": None},
    "o1-mini": {"max_completion_tokens": 66000, "param_name": "max_completion_tokens", "api_version": "2024-12-01-preview", "category": "OpenAI", "warning": None},
    "o1-preview": {"max_tokens": 33000, "param_name": "max_tokens", "api_version": None, "category": "OpenAI", "warning": None},
    "Phi-4-multimodal-instruct": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "Microsoft", "warning": None},
    "Mistral-large-2407": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "Mistral", "warning": None},
    "Codestral-2501": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "Mistral", "warning": None},
    "default": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "Other", "warning": None}
}
QUALITY_PRESETS = {
    "480p": {"flag": "-ql", "fps": 15},
    "720p": {"flag": "-qm", "fps": 30},
    "1080p": {"flag": "-qh", "fps": 60},
    "1440p (2K)": {"flag": "-qp", "fps": 60},
    "4K": {"flag": "-qk", "fps": 60},
}
ANIMATION_SPEEDS = {
    "Slow": 0.5,
    "Normal": 1.0,
    "Fast": 2.0,
    "Very Fast": 3.0
}
EXPORT_FORMATS = {
    "MP4 Video": "mp4",
    "GIF Animation": "gif",
    "WebM Video": "webm",
    "PNG Sequence": "png_sequence",
    "SVG": "svg"
}
# ──────────────────────────────────────────────────────────────────────────────
# 1. prepare_api_params
# ──────────────────────────────────────────────────────────────────────────────
def prepare_api_params(messages, model_name):
    """Lookup MODEL_CONFIGS and build API call parameters."""
    config = MODEL_CONFIGS.get(model_name, MODEL_CONFIGS["default"])
    params = {
        "messages": messages,
        "model": model_name,
        config["param_name"]: config.get(config["param_name"])
    }
    return params, config
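# Illustrative usage sketch (comments only, not executed): the token-limit key comes
# from MODEL_CONFIGS above, so reasoning models resolve to "max_completion_tokens"
# while most other models keep "max_tokens".
#
#   params, cfg = prepare_api_params([UserMessage("Draw a circle")], "o3-mini")
#   # params == {"messages": [...], "model": "o3-mini", "max_completion_tokens": 100000}
#
#   params, cfg = prepare_api_params([UserMessage("Draw a circle")], "gpt-4o")
#   # params == {"messages": [...], "model": "gpt-4o", "max_tokens": 16000}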
# ──────────────────────────────────────────────────────────────────────────────
# 2. get_secret
# ──────────────────────────────────────────────────────────────────────────────
def get_secret(key):
    """Read an environment variable (e.g. password, API token)."""
    val = os.environ.get(key)
    if not val:
        logger.warning(f"Secret '{key}' not found")
    return val or ""
# ──────────────────────────────────────────────────────────────────────────────
# 3. check_password
# ──────────────────────────────────────────────────────────────────────────────
def check_password():
    """Prompt for admin password and gate AI features."""
    correct = get_secret("password")
    if not correct:
        st.error("Admin password not configured in secrets")
        return False
    if "auth_ok" not in st.session_state:
        st.session_state.auth_ok = False
    if not st.session_state.auth_ok:
        pwd = st.text_input("🔒 Enter admin password", type="password", help="Protects AI assistant")
        if pwd:
            if pwd == correct:
                st.session_state.auth_ok = True
                st.success("Access granted")
                return True
            else:
                st.error("Incorrect password")
        return False
    return True
# ──────────────────────────────────────────────────────────────────────────────
# 4. ensure_packages
# ──────────────────────────────────────────────────────────────────────────────
def ensure_packages():
    """Check & install core dependencies on first run."""
    required = {
        'streamlit': '1.25.0', 'manim': '0.17.3', 'numpy': '1.22.0', 'Pillow': '9.0.0',
        'transformers': '4.30.0', 'torch': '2.0.0', 'plotly': '5.14.0', 'pandas': '2.0.0',
        'python-pptx': '0.6.21', 'markdown': '3.4.3', 'fpdf': '1.7.2', 'matplotlib': '3.5.0',
        'seaborn': '0.11.2', 'scipy': '1.7.3', 'huggingface_hub': '0.16.0',
        'azure-ai-inference': '1.0.0b9', 'azure-core': '1.33.0', 'openai': ''
    }
    # Map pip distribution names to import names where they differ.
    import_names = {
        'Pillow': 'PIL', 'python-pptx': 'pptx',
        'azure-ai-inference': 'azure.ai.inference', 'azure-core': 'azure.core',
    }
    missing = []
    for pkg, ver in required.items():
        try:
            __import__(import_names.get(pkg, pkg))
        except ImportError:
            missing.append(f"{pkg}>={ver}" if ver else pkg)
    if missing:
        st.sidebar.info("Installing required packages...")
        prog = st.sidebar.progress(0)
        for i, pkg in enumerate(missing, 1):
            subprocess.run([sys.executable, "-m", "pip", "install", pkg], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
            prog.progress(i / len(missing))
        st.sidebar.success("All packages installed")
# ──────────────────────────────────────────────────────────────────────────────
# 5. install_custom_packages
# ──────────────────────────────────────────────────────────────────────────────
def install_custom_packages(package_list):
    """Install user-specified pip packages on the fly."""
    packages = [p.strip() for p in package_list.split(",") if p.strip()]
    if not packages:
        return True, "No packages specified"
    results = []
    success = True
    for pkg in packages:
        res = subprocess.run([sys.executable, "-m", "pip", "install", pkg], capture_output=True, text=True)
        ok = (res.returncode == 0)
        results.append(f"{pkg}: {'✅' if ok else '❌'}")
        if not ok:
            success = False
    return success, "\n".join(results)
# ──────────────────────────────────────────────────────────────────────────────
# 6. init_ai_models_direct
# ──────────────────────────────────────────────────────────────────────────────
def init_ai_models_direct():
    """Initialize Azure ChatCompletionsClient for AI code generation."""
    token = get_secret("github_token_api")
    if not token:
        st.error("GitHub token not found in secrets")
        return None
    endpoint = "https://models.inference.ai.azure.com"
    client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(token))
    return {"client": client, "model_name": "gpt-4o", "endpoint": endpoint}
# ──────────────────────────────────────────────────────────────────────────────
# 7. suggest_code_completion
# ──────────────────────────────────────────────────────────────────────────────
def suggest_code_completion(code_snippet, models):
    """Use the initialized AI model to generate complete Manim code."""
    if not models:
        st.error("AI models not initialized")
        return None
    prompt = f"""Write a complete Manim animation scene based on this code or idea:
{code_snippet}
The code should include:
- A Scene subclass
- self.play() animations
- wait times
Return only valid Python code.
"""
    config = MODEL_CONFIGS.get(models["model_name"].split("/")[-1], MODEL_CONFIGS["default"])
    if config["category"] == "OpenAI":
        client = models.get("openai_client") or OpenAI(base_url="https://models.github.ai/inference", api_key=get_secret("github_token_api"))
        models["openai_client"] = client
        messages = [{"role": "developer", "content": "Expert in Manim."}, {"role": "user", "content": prompt}]
        params = {"messages": messages, "model": models["model_name"], config["param_name"]: config.get(config["param_name"])}
        resp = client.chat.completions.create(**params)
        content = resp.choices[0].message.content
    else:
        client = models["client"]
        msgs = [UserMessage(prompt)]
        params, _ = prepare_api_params(msgs, models["model_name"])
        resp = client.complete(**params)
        content = resp.choices[0].message.content
    # extract the code block from the response
    if "```python" in content:
        content = content.split("```python")[1].split("```")[0]
    elif "```" in content:
        content = content.split("```")[1].split("```")[0]
    # wrap bare snippets in a minimal Scene if no class was returned
    if "class" not in content:
        body = "\n".join("        " + line for line in content.strip().splitlines())
        content = f"from manim import *\n\nclass MyScene(Scene):\n    def construct(self):\n{body}"
    return content
# ──────────────────────────────────────────────────────────────────────────────
# 8. check_model_freshness
# ──────────────────────────────────────────────────────────────────────────────
def check_model_freshness():
    """Return True if AI client was loaded within the past hour."""
    if not st.session_state.get("ai_models"):
        return False
    last = st.session_state.ai_models.get("last_loaded")
    if not last:
        return False
    return datetime.fromisoformat(last) + timedelta(hours=1) > datetime.now()
# ──────────────────────────────────────────────────────────────────────────────
# 9. extract_scene_class_name
# ──────────────────────────────────────────────────────────────────────────────
def extract_scene_class_name(python_code):
    """Regex for the first class inheriting from Scene."""
    m = re.findall(r"class\s+(\w+)\s*\([^)]*Scene[^)]*\)", python_code)
    return m[0] if m else "MyScene"
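# Illustrative example (comments only, not executed): the pattern matches the first
# class whose base list mentions "Scene", including subclasses such as ThreeDScene.
#
#   extract_scene_class_name("class WaveDemo(ThreeDScene):\n    ...")  # -> "WaveDemo"
#   extract_scene_class_name("x = 1")                                  # -> "MyScene" (fallback)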
# ──────────────────────────────────────────────────────────────────────────────
# 10. highlight_code
# ──────────────────────────────────────────────────────────────────────────────
def highlight_code(code):
    """Return HTML+CSS highlighted Python code."""
    formatter = HtmlFormatter(style="monokai", full=True, noclasses=True)
    return highlight(code, PythonLexer(), formatter)
# ──────────────────────────────────────────────────────────────────────────────
# 11. generate_manim_preview
# ──────────────────────────────────────────────────────────────────────────────
def generate_manim_preview(python_code):
    """Show icons for detected Manim objects in code."""
    icons = []
    mapping = {
        "Circle": "⭕", "Square": "🔲", "MathTex": "📐", "Tex": "📜",
        "Text": "📝", "Axes": "📈", "ThreeDScene": "🧊", "Sphere": "🌐", "Cube": "🧊"
    }
    for key, icon in mapping.items():
        if key in python_code:
            icons.append(icon)
    icons = icons or ["🎬"]
    html = f"""
    <div style="background:#000;color:#fff;padding:1rem;border-radius:8px;text-align:center;">
        <h4>Animation Preview</h4>
        <div style="font-size:2.5rem">{''.join(icons)}</div>
        <p style="opacity:0.7">Accurate preview requires full render</p>
    </div>
    """
    return html
# ──────────────────────────────────────────────────────────────────────────────
# 12. render_latex_preview
# ──────────────────────────────────────────────────────────────────────────────
def render_latex_preview(latex_formula):
    """Return HTML snippet with MathJax preview for LaTeX."""
    if not latex_formula:
        return """
        <div style="background:#f8f9fa;padding:1rem;border-radius:6px;text-align:center;color:#777;">
            Enter a LaTeX formula above.
        </div>"""
    return f"""
    <div style="background:#202124;color:#fff;padding:1rem;border-radius:6px;text-align:center;">
        <script src="https://polyfill.io/v3/polyfill.min.js?features=es6"></script>
        <script async id="MathJax-script" src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"></script>
        <h4>LaTeX Preview</h4>
        <div>$$ {latex_formula} $$</div>
    </div>"""
# ──────────────────────────────────────────────────────────────────────────────
# 13. prepare_audio_for_manim
# ──────────────────────────────────────────────────────────────────────────────
def prepare_audio_for_manim(audio_file, target_dir):
    """Save uploaded audio and return filesystem path."""
    os.makedirs(target_dir, exist_ok=True)
    filename = f"audio_{int(time.time())}.mp3"
    out = os.path.join(target_dir, filename)
    with open(out, "wb") as f:
        f.write(audio_file.getvalue())
    return out
# ──────────────────────────────────────────────────────────────────────────────
# 14. mp4_to_gif
# ──────────────────────────────────────────────────────────────────────────────
def mp4_to_gif(mp4_path, output_path, fps=15):
    """Use ffmpeg to convert an MP4 to a looping GIF."""
    cmd = [
        "ffmpeg", "-i", mp4_path,
        "-vf", f"fps={fps},scale=640:-1:flags=lanczos,split[s0][s1];[s0]palettegen[p];[s1][p]paletteuse",
        "-loop", "0", output_path
    ]
    res = subprocess.run(cmd, capture_output=True, text=True)
    return output_path if res.returncode == 0 else None
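# Illustrative usage sketch (comments only; assumes ffmpeg is on PATH and the paths
# below are hypothetical):
#
#   gif_path = mp4_to_gif("/tmp/scene.mp4", "/tmp/scene.gif", fps=15)
#   # -> "/tmp/scene.gif" on success, None if ffmpeg exits non-zero.
#
# The split/palettegen/paletteuse filter chain builds a per-clip palette first, which
# keeps GIF colors much closer to the source video than a direct one-pass conversion.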
# ──────────────────────────────────────────────────────────────────────────────
# 15. generate_manim_video
# ──────────────────────────────────────────────────────────────────────────────
def generate_manim_video(python_code, format_type, quality_preset, animation_speed=1.0, audio_path=None):
    """Render code via Manim CLI; fallback for GIF via ffmpeg."""
    temp_dir = tempfile.mkdtemp(prefix="manim_")
    try:
        scene = extract_scene_class_name(python_code)
        scene_file = os.path.join(temp_dir, "scene.py")
        with open(scene_file, "w", encoding="utf-8") as f:
            f.write(python_code)
        flag = QUALITY_PRESETS[quality_preset]["flag"]
        cmd = ["manim", scene_file, scene, flag, f"--format={format_type}"]
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
        output = []
        while True:
            line = proc.stdout.readline()
            if not line and proc.poll() is not None:
                break
            output.append(line)
        proc.wait()
        # find the rendered output file
        matches = list(Path(temp_dir).rglob(f"*.{format_type}"))
        if format_type == "gif" and not matches:
            # try the ffmpeg fallback on the rendered MP4
            mp4s = list(Path(temp_dir).rglob("*.mp4"))
            if mp4s:
                gif = os.path.join(temp_dir, f"{scene}.gif")
                if mp4_to_gif(str(mp4s[-1]), gif):
                    matches = [Path(gif)]
        if not matches:
            return None, "❌ No output file found"
        data = matches[-1].read_bytes()
        return data, f"✅ Generated ({len(data) / (1024 * 1024):.1f} MB)"
    finally:
        shutil.rmtree(temp_dir, ignore_errors=True)
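# Illustrative sketch of the command this builds (comments only; the temp path is
# hypothetical): for quality "1080p" and format "mp4" the subprocess call is roughly
#
#   manim /tmp/manim_xxxx/scene.py MyScene -qh --format=mp4
#
# where the scene name comes from extract_scene_class_name() and -qh from QUALITY_PRESETS.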
# ──────────────────────────────────────────────────────────────────────────────
# 16. detect_input_calls
# ──────────────────────────────────────────────────────────────────────────────
def detect_input_calls(code):
    """Scan for input() calls and extract prompts."""
    calls = []
    for i, line in enumerate(code.splitlines(), 1):
        if "input(" in line and not line.strip().startswith("#"):
            m = re.search(r'input\(["\'](.+?)["\']\)', line)
            prompt = m.group(1) if m else f"Input at line {i}"
            calls.append({"line": i, "prompt": prompt})
    return calls
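# Illustrative example (comments only, not executed):
#
#   detect_input_calls('name = input("Your name: ")\nprint(name)')
#   # -> [{"line": 1, "prompt": "Your name: "}]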
# ──────────────────────────────────────────────────────────────────────────────
# 17. run_python_script
# ──────────────────────────────────────────────────────────────────────────────
def run_python_script(code, inputs=None, timeout=60):
    """Execute arbitrary Python code, capturing stdout/stderr, plots, DataFrames."""
    tmp = tempfile.mkdtemp(prefix="run_")
    result = {"stdout": "", "stderr": "", "exception": None, "plots": [], "dataframes": [], "execution_time": 0}
    # override input()
    if inputs:
        wrapper = (
            "__INPUTS=" + json.dumps(inputs) + "\n"
            "__IDX=0\n"
            "def input(prompt=''):\n"
            "    global __IDX\n"
            "    val = __INPUTS[__IDX] if __IDX < len(__INPUTS) else ''\n"
            "    __IDX += 1\n"
            "    print(prompt + val)\n"
            "    return val\n\n"
        )
        code = wrapper + code
    # ensure matplotlib & pandas imports
    if "plt" in code and "import matplotlib" not in code:
        code = "import matplotlib.pyplot as plt\n" + code
    if "pd." in code and "import pandas" not in code:
        code = "import pandas as pd\n" + code
    script_path = os.path.join(tmp, "script.py")
    with open(script_path, "w") as f:
        f.write(code)
    start = time.time()
    try:
        proc = subprocess.Popen([sys.executable, script_path],
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                cwd=tmp, text=True)
        out, err = proc.communicate(timeout=timeout)
        result["stdout"] = out
        result["stderr"] = err
    except subprocess.TimeoutExpired:
        proc.kill()
        result["stderr"] += f"\n⏱️ Execution timed out after {timeout}s"
    finally:
        result["execution_time"] = time.time() - start
        # plots & dataframes capture omitted for brevity
        shutil.rmtree(tmp, ignore_errors=True)
    return result
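# Illustrative example (comments only, not executed): with the input() override in
# place, scripted answers are echoed roughly as if typed interactively.
#
#   res = run_python_script('name = input("Name: ")\nprint("Hi", name)', inputs=["Ada"])
#   # res["stdout"] -> "Name: Ada\nHi Ada\n"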
# ──────────────────────────────────────────────────────────────────────────────
# 18. display_python_script_results
# ──────────────────────────────────────────────────────────────────────────────
def display_python_script_results(res):
    """Render the result dict from run_python_script() in Streamlit."""
    if res["exception"]:
        st.error(f"Exception: {res['exception']}")
    if res["stderr"]:
        st.error("Errors:")
        st.code(res["stderr"])
    if res["stdout"]:
        st.markdown("### Output:")
        st.code(res["stdout"])
    st.info(f"Execution time: {res['execution_time']:.2f}s")
    # plots & dataframes display omitted for brevity
# ──────────────────────────────────────────────────────────────────────────────
# 19. parse_animation_steps
# ──────────────────────────────────────────────────────────────────────────────
def parse_animation_steps(python_code):
    """Extract self.play() and self.wait() steps into a list of dicts."""
    # allow one level of nested parentheses inside self.play(...) arguments
    plays = re.findall(r"self\.play\(((?:[^()]|\([^()]*\))*)\)", python_code, re.DOTALL)
    waits = re.findall(r"self\.wait\(([^)]*)\)", python_code, re.DOTALL)
    steps = []
    current = 0.0
    for i, play in enumerate(plays):
        anims = [a.strip() for a in play.split(",")]
        # use the leading numeric literal of the matching wait(), default to 1 second
        m = re.match(r"\s*([\d.]+)", waits[i]) if i < len(waits) else None
        dur = float(m.group(1)) if m else 1.0
        steps.append({"id": i + 1, "animations": anims, "duration": dur, "start_time": current, "code": f"self.play({play})"})
        current += dur
    return steps
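# Illustrative example (comments only, not executed): for a construct() body containing
#
#   self.play(Create(circle))
#   self.wait(2)
#
# the parser yields roughly:
#
#   [{"id": 1, "animations": ["Create(circle)"], "duration": 2.0,
#     "start_time": 0.0, "code": "self.play(Create(circle))"}]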
# ──────────────────────────────────────────────────────────────────────────────
# 20. generate_code_from_timeline
# ──────────────────────────────────────────────────────────────────────────────
def generate_code_from_timeline(animation_steps, original_code):
    """Regenerate the construct() method body from timeline steps."""
    class_match = re.search(r"(class\s+\w+\s*\([^)]*\)\s*:\s*.*?def\s+construct\s*\(self\)\s*:)", original_code, re.DOTALL)
    if not class_match:
        return original_code
    header = class_match.group(1)
    # indent the body two levels below the class definition (class body + method body)
    base_indent = len(header) - len(header.lstrip())
    indent = " " * (base_indent + 8)
    body = [header]
    for step in animation_steps:
        body.append(f"{indent}{step['code']}")
        body.append(f"{indent}self.wait({step['duration']})")
    return "\n".join(body)
# ──────────────────────────────────────────────────────────────────────────────
# 21. create_timeline_editor
# ──────────────────────────────────────────────────────────────────────────────
def create_timeline_editor(code):
    """Interactive timeline editor tab to reorder/update animation steps."""
    st.markdown("### 🎞️ Animation Timeline")
    steps = parse_animation_steps(code)
    if not steps:
        st.warning("No animation steps detected.")
        return code
    df = pd.DataFrame(steps)
    fig = px.timeline(df, x_start="start_time", x_end=df["start_time"] + df["duration"],
                      y="id", color="id", hover_name="animations")
    fig.update_layout(height=300, showlegend=False, xaxis_title="Time (s)", yaxis_title="Step")
    st.plotly_chart(fig, use_container_width=True)
    cols = st.columns(3)
    step_id = cols[0].selectbox("Select Step", df["id"])
    new_dur = cols[1].number_input("New Duration (s)", min_value=0.1, step=0.1,
                                   value=float(df.loc[df["id"] == step_id, "duration"].iloc[0]))
    action = cols[2].selectbox("Action", ["Update Duration", "Delete Step", "Move Up", "Move Down"])
    if st.button("Apply"):
        idx = df[df["id"] == step_id].index[0]
        if action == "Update Duration":
            df.at[idx, "duration"] = new_dur
        elif action == "Delete Step":
            df = df[df["id"] != step_id]
        elif action == "Move Up" and step_id > 1:
            other = df[df["id"] == step_id - 1].index[0]
            df.at[idx, "id"], df.at[other, "id"] = df.at[other, "id"], df.at[idx, "id"]
        elif action == "Move Down" and step_id < len(df):
            other = df[df["id"] == step_id + 1].index[0]
            df.at[idx, "id"], df.at[other, "id"] = df.at[other, "id"], df.at[idx, "id"]
        df = df.sort_values("id").reset_index(drop=True)
        # recompute start times after the edit
        current = 0.0
        for i, row in df.iterrows():
            df.at[i, "start_time"] = current
            current += row["duration"]
        new_code = generate_code_from_timeline(df.to_dict("records"), code)
        st.success("Timeline updated!")
        return new_code
    return code
# ──────────────────────────────────────────────────────────────────────────────
# 22. export_to_educational_format
# ──────────────────────────────────────────────────────────────────────────────
def export_to_educational_format(video_data, format_type, animation_title, explanation_text, temp_dir):
    """Export the existing video_data to PPTX, HTML, or a PDF frame sequence."""
    if format_type == "powerpoint":
        from pptx import Presentation
        from pptx.util import Inches
        prs = Presentation()
        slide = prs.slides.add_slide(prs.slide_layouts[0])
        slide.shapes.title.text = animation_title
        video_path = os.path.join(temp_dir, "video.mp4")
        with open(video_path, "wb") as f:
            f.write(video_data)
        slide2 = prs.slides.add_slide(prs.slide_layouts[5])
        slide2.shapes.title.text = "Animation"
        slide2.shapes.add_movie(video_path, Inches(1), Inches(1.5), Inches(8), Inches(4.5))
        if explanation_text:
            txt_sl = prs.slides.add_slide(prs.slide_layouts[1])
            txt_sl.shapes.title.text = "Explanation"
            txt_sl.placeholders[1].text = explanation_text
        out = os.path.join(temp_dir, f"{animation_title}.pptx")
        prs.save(out)
        return Path(out).read_bytes(), "pptx"
    elif format_type == "html":
        html_template = """<!DOCTYPE html><html><head><meta charset="utf-8"><title>{title}</title></head><body>
<h1>{title}</h1><video controls width="100%"><source src="data:video/mp4;base64,{b64}"></video>
<div>{explanation}</div></body></html>"""
        b64 = base64.b64encode(video_data).decode()
        expl = markdown.markdown(explanation_text or "")
        content = html_template.format(title=animation_title, b64=b64, explanation=expl)
        out = os.path.join(temp_dir, f"{animation_title}.html")
        with open(out, "w", encoding="utf-8") as f:
            f.write(content)
        return Path(out).read_bytes(), "html"
    elif format_type == "sequence":
        from fpdf import FPDF
        video_path = os.path.join(temp_dir, "video.mp4")
        with open(video_path, "wb") as f:
            f.write(video_data)
        frames_dir = os.path.join(temp_dir, "frames")
        os.makedirs(frames_dir, exist_ok=True)
        # extract every 10th frame as key frames
        subprocess.run(["ffmpeg", "-i", video_path, "-vf", "select=not(mod(n\\,10))", "-vsync", "vfr",
                        os.path.join(frames_dir, "frame_%03d.png")], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        pdf = FPDF()
        segments = explanation_text.split("##") if explanation_text else []
        for i, img in enumerate(sorted(os.listdir(frames_dir))):
            pdf.add_page()
            pdf.image(os.path.join(frames_dir, img), x=10, y=10, w=190)
            pdf.ln(100)
            txt = segments[i] if i < len(segments) else ""
            pdf.multi_cell(0, 5, txt)
        out = os.path.join(temp_dir, f"{animation_title}.pdf")
        pdf.output(out)
        return Path(out).read_bytes(), "pdf"
    return None, None
# ──────────────────────────────────────────────────────────────────────────────
# 23. main
# ──────────────────────────────────────────────────────────────────────────────
def main():
    st.set_page_config(page_title="🎬 Manim Animation Studio", layout="wide")
    # Custom CSS
    st.markdown("""
    <style>
    .main-header { font-size:2.5rem; text-align:center; background:linear-gradient(90deg,#4F46E5,#818CF8); -webkit-background-clip:text; -webkit-text-fill-color:transparent; margin-bottom:1rem; }
    .card { background:#fff; padding:1rem; border-radius:8px; box-shadow:0 2px 6px rgba(0,0,0,0.1); margin-bottom:1rem; }
    </style>
    """, unsafe_allow_html=True)
    # Ensure packages installed once
    if 'packages_checked' not in st.session_state:
        ensure_packages()
        st.session_state.packages_checked = True
    # Sidebar
    with st.sidebar:
        st.header("⚙️ Settings")
        with st.expander("Render Settings", True):
            st.selectbox("Quality", list(QUALITY_PRESETS.keys()), key="quality")
            st.selectbox("Format", list(EXPORT_FORMATS.keys()), key="format")
            st.selectbox("Speed", list(ANIMATION_SPEEDS.keys()), key="speed")
        with st.expander("Custom Libraries"):
            txt = st.text_area("pip install …", help="e.g. scipy,networkx")
            if st.button("Install"):
                ok, msg = install_custom_packages(txt)
                st.code(msg)
        st.markdown("---")
        st.markdown("Manim Studio • Powered by Streamlit")
    # Tabs
    tabs = st.tabs(["✨ Editor", "🤖 AI", "📐 LaTeX", "🎨 Assets", "🎞️ Timeline", "🎓 Export", "🐍 Python"])
    # --- Editor Tab ---
    with tabs[0]:
        st.markdown("<div class='main-header'>✨ Animation Editor</div>", unsafe_allow_html=True)
        code = st.text_area("Python code", height=300, key="editor_code")
        st.markdown(generate_manim_preview(code), unsafe_allow_html=True)
        if st.button("🚀 Generate Animation"):
            data, status = generate_manim_video(
                code,
                EXPORT_FORMATS[st.session_state.format],
                st.session_state.quality,
                ANIMATION_SPEEDS[st.session_state.speed]
            )
            if data:
                st.video(data)
                st.success(status)
                st.session_state.last_video = data
            else:
                st.error(status)
        if st.session_state.get("last_video"):
            st.download_button("⬇️ Download Animation", st.session_state.last_video,
                               f"manim_animation.{EXPORT_FORMATS[st.session_state.format]}", use_container_width=True)
    # --- AI Tab ---
    with tabs[1]:
        st.markdown("<div class='main-header'>🤖 AI Animation Assistant</div>", unsafe_allow_html=True)
        if not check_password():
            return
        if "ai_models" not in st.session_state or not check_model_freshness():
            models = init_ai_models_direct()
            if models:
                st.session_state.ai_models = {**models, "last_loaded": datetime.now().isoformat()}
        st.markdown("### Describe your animation or paste a code stub")
        prompt = st.text_area("Prompt / stub", height=150)
        if st.button("✨ Generate Code"):
            models = st.session_state.ai_models
            gen = suggest_code_completion(prompt, models)
            if gen:
                st.code(gen, language="python")
                if st.button("Use This Code"):
                    st.session_state.editor_code = gen
                    st.experimental_rerun()
    # --- LaTeX Tab ---
    with tabs[2]:
        st.markdown("<div class='main-header'>📐 LaTeX Formula Builder</div>", unsafe_allow_html=True)
        latex_input = st.text_input("LaTeX", key="latex_input", help="Raw string, e.g. r\"e^{i\\pi}+1=0\"")
        st.markdown(render_latex_preview(latex_input), unsafe_allow_html=True)
        if latex_input and st.button("Insert into Editor"):
            snippet = f"""formula = MathTex(r"{latex_input}")\nself.play(Write(formula))\nself.wait(2)"""
            st.session_state.editor_code += "\n " + snippet
            st.success("Inserted into editor")
            st.experimental_rerun()
    # --- Assets Tab ---
    with tabs[3]:
        st.markdown("<div class='main-header'>🎨 Asset Management</div>", unsafe_allow_html=True)
        imgs = st.file_uploader("Upload images", accept_multiple_files=True)
        for img in imgs or []:
            st.image(img, width=150, caption=img.name)
            if st.button(f"Use {img.name}"):
                code_snip = f"""from manim import ImageMobject\nimg = ImageMobject(r"{img.name}")\nself.play(FadeIn(img))"""
                st.session_state.editor_code += "\n " + code_snip
                st.success(f"Added {img.name} to code")
                st.experimental_rerun()
        audio = st.file_uploader("Upload audio", type=["mp3", "wav"])
        if audio:
            path = prepare_audio_for_manim(audio, "manim_assets/audio")
            st.audio(audio)
            st.code(f"@with_sound(r\"{path}\")\nclass YourScene(Scene):\n ...")
    # --- Timeline Tab ---
    with tabs[4]:
        st.markdown("<div class='main-header'>🎞️ Timeline Editor</div>", unsafe_allow_html=True)
        new_code = create_timeline_editor(st.session_state.get("editor_code", ""))
        if new_code != st.session_state.get("editor_code", ""):
            st.session_state.editor_code = new_code
    # --- Export Tab ---
    with tabs[5]:
        st.markdown("<div class='main-header'>🎓 Educational Export</div>", unsafe_allow_html=True)
        if not st.session_state.get("last_video"):
            st.warning("Generate an animation first")
        else:
            title = st.text_input("Animation Title", "My Animation")
            expl = st.text_area("Explanation (use ## for steps)")
            fmt = st.selectbox("Export Format", ["PowerPoint", "HTML", "PDF Sequence"])
            if st.button("📤 Export"):
                fmt_key = {"PowerPoint": "powerpoint", "HTML": "html", "PDF Sequence": "sequence"}[fmt]
                data, ft = export_to_educational_format(
                    st.session_state.last_video, fmt_key, title, expl, tempfile.mkdtemp()
                )
                if data:
                    ext = {"pptx": "pptx", "html": "html", "pdf": "pdf"}[ft]
                    st.success(f"{fmt} created")
                    st.download_button(f"⬇️ Download {fmt}", data, f"{title}.{ext}")
    # --- Python Tab ---
    with tabs[6]:
        st.markdown("<div class='main-header'>🐍 Python Script Runner</div>", unsafe_allow_html=True)
        script = st.text_area("Script", height=200, key="python_script")
        calls = detect_input_calls(script)
        inputs = []
        if calls:
            st.info("Detected input() calls; please provide values:")
            for c in calls:
                v = st.text_input(f"{c['prompt']} (line {c['line']})")
                inputs.append(v)
        if st.button("▶️ Run Script"):
            res = run_python_script(script, inputs)
            display_python_script_results(res)
if __name__ == "__main__":
    main()