mgbam committed on
Commit b0450d8 · verified · 1 Parent(s): 181f321

Update deployer/gradio_generator.py

Files changed (1)
  1. deployer/gradio_generator.py +35 -48
deployer/gradio_generator.py CHANGED
@@ -1,24 +1,22 @@
-# gradio_generator.py - CPU & Spaces-compatible Gradio interface with full monetization & artifact management for RoboSage
+# gradio_generator.py - CPU & Spaces-compatible Gradio interface with HF Inference API for STT & Monetization
 
 import os
 import json
+import requests
 import gradio as gr
-from transformers import pipeline
 from deployer.simulator_interface import VirtualRobot
 from deployer.revenue_tracker import get_revenue_stats, package_artifacts
 from core_creator.voice_to_app import VoiceToAppCreator
 
-# Initialize Hugging Face ASR pipeline
+# Hugging Face Inference API settings
+HF_STT_URL = "https://api-inference.huggingface.co/models/openai/whisper-small"
 HK_API_KEY = os.getenv("HK_API_KEY")
 if not HK_API_KEY:
-    raise EnvironmentError("Please set the HK_API_KEY environment variable for audio transcription via HuggingFace API.")
-asr = pipeline(
-    task="automatic-speech-recognition",
-    model="openai/whisper-small",
-    use_auth_token=HK_API_KEY
-)
+    raise EnvironmentError(
+        "Please set the HK_API_KEY environment variable for HuggingFace Inference API.")
+HEADERS = {"Authorization": f"Bearer {HK_API_KEY}"}
 
-# Core actions
+# Core robot logic
 def robot_behavior(user_input: str) -> str:
     bot = VirtualRobot()
     text = user_input.strip().lower()
@@ -28,27 +26,36 @@ def robot_behavior(user_input: str) -> str:
     return bot.perform_action(text)
     return bot.perform_action("say I'm sorry, I didn't understand that.")
 
-# Transcription helper
+# Transcribe via Hugging Face Inference API
 def transcribe_audio(audio_file: str) -> str:
-    result = asr(audio_file)
+    with open(audio_file, "rb") as f:
+        data = f.read()
+    response = requests.post(HF_STT_URL, headers=HEADERS, data=data)
+    if response.status_code != 200:
+        return f"❗ Transcription error: {response.status_code}"
+    result = response.json()
+    # The inference API returns {'text': '...'}
     return result.get("text", "")
 
+# Combined flow
 def transcribe_and_respond(audio_file: str) -> str:
     text = transcribe_audio(audio_file)
     return robot_behavior(text)
 
-# Generation & packaging pipeline
-def generate_and_package_app(idea: str):
-    creator = VoiceToAppCreator(idea)
+# Package generation pipeline
+def on_generate(i: str):
+    creator = VoiceToAppCreator(i)
     assets = creator.run_pipeline()
-    # Package code & config into zip and return path
     zip_path = package_artifacts(assets)
-    # Prepare blueprint and code previews
     blueprint = assets.get("blueprint", {})
-    code_files = assets.get("code", {})  # dict of {filename: code_str}
-    # Concatenate code files for preview
+    code_files = assets.get("code", {})
     code_preview = "\n\n".join([f"# {fname}\n{content}" for fname, content in code_files.items()])
-    return zip_path, blueprint, code_preview
+    return (
+        "✅ App generated successfully!",
+        zip_path,
+        blueprint,
+        code_preview
+    )
 
 # Build full-featured Gradio app
 def launch_gradio_app(
@@ -58,7 +65,7 @@ def launch_gradio_app(
     with gr.Blocks() as demo:
         gr.Markdown(f"# 🚀 {title}\n\n{description}")
 
-        # 1️⃣ Generate & Download Artifacts
+        # 1. Generate & Download Artifacts
         with gr.Accordion("🎨 Generate App & Download Artifacts", open=True):
             idea = gr.Textbox(label="Robot Idea", placeholder="e.g. A friendly greeting robot.")
             gen_btn = gr.Button("Generate & Package", key="gen-app-btn")
@@ -66,54 +73,34 @@
             download_zip = gr.File(label="Download Artifacts (.zip)")
             blueprint_view = gr.JSON(label="App Blueprint")
             code_view = gr.Code(label="Generated Code Preview", language="python")
-
-            def on_generate(i):
-                zip_path, blueprint, code_preview = generate_and_package_app(i)
-                return (
-                    "✅ App generated successfully!",
-                    zip_path,
-                    blueprint,
-                    code_preview
-                )
-
             gen_btn.click(
                 fn=on_generate,
                 inputs=[idea],
                 outputs=[status, download_zip, blueprint_view, code_view]
             )
 
-        # 2️⃣ Robot Simulation
-        with gr.Accordion("🤖 Test Your Robot (Text & Voice)", open=False):
-            # Text command
-            text_input = gr.Textbox(label="Text Command", placeholder="Type 'hello' or 'say Great job!'" )
+        # 2. Robot Simulation (Text & Voice)
+        with gr.Accordion("🤖 Test Your Robot", open=False):
+            text_input = gr.Textbox(label="Text Command", placeholder="Type 'hello' or 'say Good job!'" )
             text_btn = gr.Button("Send Text", key="send-text-btn")
             text_output = gr.Textbox(label="Robot Response", lines=3, interactive=False)
            text_btn.click(fn=robot_behavior, inputs=[text_input], outputs=[text_output])
 
             gr.Markdown("---")
-            # Voice command
+
             audio_input = gr.Audio(source="microphone", type="filepath", label="Record Command")
             audio_btn = gr.Button("Send Audio", key="send-audio-btn")
             audio_output = gr.Textbox(label="Robot Response (via voice)", lines=3, interactive=False)
             audio_btn.click(fn=transcribe_and_respond, inputs=[audio_input], outputs=[audio_output])
 
-        # 3️⃣ Monetization & Revenue
+        # 3. Monetization & Revenue Dashboard
         with gr.Accordion("💰 Monetization Dashboard", open=False):
             subscribe_btn = gr.Button("Subscribe to Pro Plan", key="subscribe-btn")
             subscribe_msg = gr.Textbox(label="Subscription Status", interactive=False)
             rev_btn = gr.Button("View Revenue Stats", key="rev-stats-btn")
             rev_table = gr.Dataframe(label="Revenue & Usage Metrics")
-
-            def on_subscribe():
-                # Stubbed subscription activation
-                return "✅ Subscribed! You now have access to premium features."
-
-            def on_view_revenue():
-                df = get_revenue_stats()
-                return df
-
-            subscribe_btn.click(fn=on_subscribe, inputs=None, outputs=[subscribe_msg])
-            rev_btn.click(fn=on_view_revenue, inputs=None, outputs=[rev_table])
+            subscribe_btn.click(fn=lambda: "✅ Subscribed!", inputs=None, outputs=[subscribe_msg])
+            rev_btn.click(fn=get_revenue_stats, inputs=None, outputs=[rev_table])
 
     return demo
 
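
Note: the new transcribe_audio surfaces any non-200 reply from the hosted endpoint as an error string. On the Inference API the most common non-200 is a 503 returned while openai/whisper-small is cold-loading, so a caller may want to retry. A minimal standalone sketch of that pattern (the file name, retry count, and wait time are illustrative and not part of this commit):

# sketch: retrying the same Inference API endpoint while the model warms up
import os
import time
import requests

HF_STT_URL = "https://api-inference.huggingface.co/models/openai/whisper-small"
HEADERS = {"Authorization": f"Bearer {os.environ['HK_API_KEY']}"}

def transcribe_with_retry(audio_path: str, retries: int = 3, wait_s: float = 20.0) -> str:
    """POST raw audio bytes and retry while the hosted model is still loading."""
    with open(audio_path, "rb") as f:
        data = f.read()
    for _ in range(retries):
        response = requests.post(HF_STT_URL, headers=HEADERS, data=data, timeout=60)
        if response.status_code == 503:  # cold start; wait and try again
            time.sleep(wait_s)
            continue
        response.raise_for_status()
        return response.json().get("text", "")
    raise TimeoutError("Model did not become available in time.")

if __name__ == "__main__":
    print(transcribe_with_retry("sample.wav"))  # hypothetical local audio file

The same retry loop could be folded into transcribe_audio itself if cold starts turn out to be frequent on Spaces.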