richard-su committed
Commit 17dac9a · verified · 1 Parent(s): 6c2f516

Upload folder using huggingface_hub

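The commit message above is the default message that huggingface_hub attaches when a local folder is pushed with its upload API. Below is a minimal sketch of the kind of call that produces such a commit; the folder path and repo id are placeholders (not taken from this commit), and the target is assumed to be a Gradio Space.

    from huggingface_hub import HfApi

    api = HfApi()  # picks up the token from `huggingface-cli login` or HF_TOKEN
    api.upload_folder(
        folder_path=".",                    # local folder to upload (placeholder)
        repo_id="<username>/<space-name>",  # hypothetical Space repo id
        repo_type="space",                  # assumption: the target repo is a Space
        commit_message="Upload folder using huggingface_hub",
    )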
src/__pycache__/app.cpython-310.pyc CHANGED
Binary files a/src/__pycache__/app.cpython-310.pyc and b/src/__pycache__/app.cpython-310.pyc differ
 
src/app.py CHANGED
@@ -28,95 +28,80 @@ def create_app():
 
     print("🚀 Starting Gradio + FastMCP server")
 
-    # Check if this is HF Spaces environment
-    is_hf_spaces = "SPACE_ID" in os.environ
-
-    if is_hf_spaces:
-        # For HF Spaces: return pure Gradio app (simpler approach)
-        print("🤗 Creating HF Spaces compatible app")
-        ui_app = create_gradio_interface()
-
-        print("✅ Server startup completed")
-        print("🎨 Gradio UI: /")
-
-        return ui_app
-
-    else:
-        # For other environments: full FastAPI + MCP + Gradio setup
-        # Create FastMCP server with new tools
-        mcp = FastMCP("Podcast MCP")
-
-        # Register tools using the new service architecture
-        @mcp.tool(description="Transcribe audio files to text using Whisper model with speaker diarization support")
-        async def transcribe_audio_file_tool(
-            audio_file_path: str,
-            model_size: str = "turbo",
-            language: str = None,
-            output_format: str = "srt",
-            enable_speaker_diarization: bool = False
-        ):
-            return await mcp_tools.transcribe_audio_file(
-                audio_file_path, model_size, language, output_format, enable_speaker_diarization
-            )
-
-        @mcp.tool(description="Download Apple Podcast audio files")
-        async def download_apple_podcast_tool(url: str):
-            return await mcp_tools.download_apple_podcast(url)
-
-        @mcp.tool(description="Download XiaoYuZhou podcast audio files")
-        async def download_xyz_podcast_tool(url: str):
-            return await mcp_tools.download_xyz_podcast(url)
-
-        @mcp.tool(description="Scan directory for MP3 audio files")
-        async def get_mp3_files_tool(directory: str):
-            return await mcp_tools.get_mp3_files(directory)
-
-        @mcp.tool(description="Get basic file information")
-        async def get_file_info_tool(file_path: str):
-            return await mcp_tools.get_file_info(file_path)
-
-        @mcp.tool(description="Read text file content in segments")
-        async def read_text_file_segments_tool(
-            file_path: str,
-            chunk_size: int = 65536,
-            start_position: int = 0
-        ):
-            return await mcp_tools.read_text_file_segments(file_path, chunk_size, start_position)
-
-        # Create FastAPI wrapper
-        fastapi_wrapper = FastAPI(
-            title="Modal AudioTranscriber MCP",
-            description="Gradio UI + FastMCP Tool + Modal Integration AudioTranscriber MCP",
-            version="1.0.0",
-            lifespan=lambda app: mcp.session_manager.run()
-        )
-
-        # Get FastMCP's streamable HTTP app
-        mcp_app = mcp.streamable_http_app()
-
-        # Mount FastMCP application to /api path
-        fastapi_wrapper.mount("/api", mcp_app)
-
-        # Create Gradio interface
-        ui_app = create_gradio_interface()
-
-        # Use Gradio's standard mounting approach
-        final_app = mount_gradio_app(
-            app=fastapi_wrapper,
-            blocks=ui_app,
-            path="/",
-            app_kwargs={
-                "docs_url": "/docs",
-                "redoc_url": "/redoc",
-            }
+    # Create FastMCP server with new tools
+    mcp = FastMCP("Podcast MCP")
+
+    # Register tools using the new service architecture
+    @mcp.tool(description="Transcribe audio files to text using Whisper model with speaker diarization support")
+    async def transcribe_audio_file_tool(
+        audio_file_path: str,
+        model_size: str = "turbo",
+        language: str = None,
+        output_format: str = "srt",
+        enable_speaker_diarization: bool = False
+    ):
+        return await mcp_tools.transcribe_audio_file(
+            audio_file_path, model_size, language, output_format, enable_speaker_diarization
         )
-
-        print("✅ Server startup completed")
-        print("🎨 Gradio UI: /")
-        print("🔧 MCP Streamable HTTP: /api/mcp")
-        print(f"📝 Server name: {mcp.name}")
-
-        return final_app
+
+    @mcp.tool(description="Download Apple Podcast audio files")
+    async def download_apple_podcast_tool(url: str):
+        return await mcp_tools.download_apple_podcast(url)
+
+    @mcp.tool(description="Download XiaoYuZhou podcast audio files")
+    async def download_xyz_podcast_tool(url: str):
+        return await mcp_tools.download_xyz_podcast(url)
+
+    @mcp.tool(description="Scan directory for MP3 audio files")
+    async def get_mp3_files_tool(directory: str):
+        return await mcp_tools.get_mp3_files(directory)
+
+    @mcp.tool(description="Get basic file information")
+    async def get_file_info_tool(file_path: str):
+        return await mcp_tools.get_file_info(file_path)
+
+    @mcp.tool(description="Read text file content in segments")
+    async def read_text_file_segments_tool(
+        file_path: str,
+        chunk_size: int = 65536,
+        start_position: int = 0
+    ):
+        return await mcp_tools.read_text_file_segments(file_path, chunk_size, start_position)
+
+    # Create FastAPI wrapper
+    fastapi_wrapper = FastAPI(
+        title="Modal AudioTranscriber MCP",
+        description="Gradio UI + FastMCP Tool + Modal Integration AudioTranscriber MCP",
+        version="1.0.0",
+        lifespan=lambda app: mcp.session_manager.run()
+    )
+
+    # Get FastMCP's streamable HTTP app
+    mcp_app = mcp.streamable_http_app()
+
+    # Mount FastMCP application to /api path
+    fastapi_wrapper.mount("/api", mcp_app)
+
+    # Create Gradio interface
+    ui_app = create_gradio_interface()
+
+    # Use Gradio's standard mounting approach
+    final_app = mount_gradio_app(
+        app=fastapi_wrapper,
+        blocks=ui_app,
+        path="/",
+        app_kwargs={
+            "docs_url": "/docs",
+            "redoc_url": "/redoc",
+        }
+    )
+
+    print("✅ Server startup completed")
+    print("🎨 Gradio UI: /")
+    print("🔧 MCP Streamable HTTP: /api/mcp")
+    print(f"📝 Server name: {mcp.name}")
+
+    return final_app
 
 # ==================== Modal Deployment Configuration ====================
 
@@ -143,56 +128,26 @@ if _modal_available:
 
 # ==================== Main Entry Point ====================
 
-def main():
-    """Main entry point for all deployment modes"""
-
-    if is_modal_mode():
-        print("☁️ Modal mode: Use 'modal deploy src.app::gradio_mcp_app'")
-        return None
-    else:
-        print("🏠 Starting in local mode")
-        print("💡 GPU functions will be routed to Modal endpoints")
-
-    app = create_app()
-    return app
-
 def run_local():
-    """Run local server with uvicorn (for direct execution)"""
-    # Double-check: don't run uvicorn in HF Spaces
-    if os.environ.get("HF_SPACES_MODE"):
-        print("⚠️ Skipping uvicorn.run() in HF Spaces mode")
-        return
-
-    app = main()
-    if app:
-        # Use port 7860 for HF Spaces compatibility, 8000 for local
-        port = int(os.environ.get("PORT", 8000))  # Use 8000 for local dev
-        uvicorn.run(
-            app,
-            host="0.0.0.0",
-            port=port,
-            reload=False
-        )
-
-# ==================== Hugging Face Spaces Support ====================
-
-# For Hugging Face Spaces, directly create the app
-def get_app():
-    """Get app instance for HF Spaces"""
-    if "DEPLOYMENT_MODE" not in os.environ:
-        os.environ["DEPLOYMENT_MODE"] = "local"
-    return main()
-
-# HF Spaces compatibility: only create app when not in main execution
-if __name__ != "__main__":
-    # Check if we're in HF Spaces mode and app is already created
-    if not os.environ.get("HF_SPACES_MODE"):
-        # This will be called when imported by other environments (not HF Spaces)
-        app = get_app()
+    """Run local server with uvicorn"""
+    print("🏠 Starting in local mode")
+
+    # Set default environment
+    os.environ.setdefault("DEPLOYMENT_MODE", "local")
+
+    app = create_app()
+
+    # Use port 7860 for HF Spaces compatibility
+    port = int(os.environ.get("PORT", 7860))
+
+    print(f"🌐 Starting server on port {port}")
+
+    uvicorn.run(
+        app,
+        host="0.0.0.0",
+        port=port,
+        reload=False
+    )
 
 if __name__ == "__main__":
-    # Check if we should run uvicorn (not in HF Spaces)
-    if not os.environ.get("HF_SPACES_MODE"):
-        run_local()
-    else:
-        print("⚠️ Skipping uvicorn in HF Spaces mode")
+    run_local()
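With this change, create_app() always builds the FastAPI + FastMCP + Gradio stack, and the startup log advertises the MCP endpoint at /api/mcp. The sketch below shows one way a client could list the registered tools over streamable HTTP; it is not part of this commit, and the host/port (localhost:7860, run_local's default) and the exact client imports are assumptions based on the MCP Python SDK and may differ across SDK versions.

    # Hedged client sketch: list the tools exposed by the server at /api/mcp.
    import asyncio

    from mcp import ClientSession
    from mcp.client.streamable_http import streamablehttp_client

    async def list_registered_tools() -> None:
        # Assumes the server started by run_local() is listening on localhost:7860.
        async with streamablehttp_client("http://localhost:7860/api/mcp") as (read, write, _):
            async with ClientSession(read, write) as session:
                await session.initialize()
                tools = await session.list_tools()
                print([tool.name for tool in tools.tools])  # e.g. transcribe_audio_file_tool, ...

    asyncio.run(list_registered_tools())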
 
 
 
 
src/services/__pycache__/podcast_download_service.cpython-310.pyc CHANGED
Binary files a/src/services/__pycache__/podcast_download_service.cpython-310.pyc and b/src/services/__pycache__/podcast_download_service.cpython-310.pyc differ
 
src/ui/__pycache__/gradio_ui.cpython-310.pyc CHANGED
Binary files a/src/ui/__pycache__/gradio_ui.cpython-310.pyc and b/src/ui/__pycache__/gradio_ui.cpython-310.pyc differ