# voice_to_app.py - Core Creator Logic
#
# NOTE(review): this file arrived collapsed onto a single `#`-prefixed line,
# which turned the entire module into one comment (imports, class, and entry
# point were all dead text). Reconstructed below with standard formatting.

from core_creator.intent_parser import classify_robot_idea
from core_creator.app_blueprint import generate_app_blueprint
from core_creator.code_generator import generate_app_code
from core_creator.assets_manager import fetch_visual_assets


class VoiceToAppCreator:
    """Drive the voice-transcript -> robot-app creation pipeline.

    Given a raw voice transcript describing a robot idea, the pipeline
    classifies the intent, builds an app blueprint, generates code from
    that blueprint, and fetches matching visual/audio assets.
    """

    def __init__(self, voice_transcript: str) -> None:
        """Store the transcript and initialize all pipeline outputs to None.

        Args:
            voice_transcript: Raw user utterance describing the robot idea.
        """
        self.voice_input: str = voice_transcript
        # Populated step-by-step by run_pipeline(); None until then.
        self.intent = None
        self.blueprint = None
        self.generated_code = None
        self.assets = None

    def run_pipeline(self) -> dict:
        """Run all pipeline stages in order and return the assembled package.

        Each stage's result is stored on the instance and echoed to stdout
        as a progress message.

        Returns:
            dict with keys "intent", "blueprint", "code", and "assets".
        """
        print("\n[🔍] Classifying robot intent...")
        self.intent = classify_robot_idea(self.voice_input)
        print(f"[🧠] Detected intent: {self.intent}")

        # Blueprint generation uses both the raw transcript and the intent.
        self.blueprint = generate_app_blueprint(self.voice_input, self.intent)

        print("[⚙️] Generating code from blueprint...")
        self.generated_code = generate_app_code(self.blueprint)

        print("[🎨] Fetching visual/audio assets...")
        self.assets = fetch_visual_assets(self.intent)

        print("[✅] Robot App creation complete.")
        return {
            "intent": self.intent,
            "blueprint": self.blueprint,
            "code": self.generated_code,
            "assets": self.assets,
        }


# Example usage
if __name__ == "__main__":
    user_idea = "Build a robot that teaches kids to brush their teeth with fun animations."
    creator = VoiceToAppCreator(user_idea)
    app_package = creator.run_pipeline()
    print("\n--- Final App Package ---")
    print(app_package["code"][:500])  # preview generated code