Upload folder using huggingface_hub
src/__pycache__/app.cpython-310.pyc
CHANGED
Binary files a/src/__pycache__/app.cpython-310.pyc and b/src/__pycache__/app.cpython-310.pyc differ
src/app.py
CHANGED
@@ -28,95 +28,80 @@ def create_app():
 
     print("🚀 Starting Gradio + FastMCP server")
 
-    #
-
-
-
-
-
-
-
-
-
-
-
-
-
-    # For other environments: full FastAPI + MCP + Gradio setup
-    # Create FastMCP server with new tools
-    mcp = FastMCP("Podcast MCP")
-
-    # Register tools using the new service architecture
-    @mcp.tool(description="Transcribe audio files to text using Whisper model with speaker diarization support")
-    async def transcribe_audio_file_tool(
-        audio_file_path: str,
-        model_size: str = "turbo",
-        language: str = None,
-        output_format: str = "srt",
-        enable_speaker_diarization: bool = False
-    ):
-        return await mcp_tools.transcribe_audio_file(
-            audio_file_path, model_size, language, output_format, enable_speaker_diarization
-        )
-
-    @mcp.tool(description="Download Apple Podcast audio files")
-    async def download_apple_podcast_tool(url: str):
-        return await mcp_tools.download_apple_podcast(url)
-
-    @mcp.tool(description="Download XiaoYuZhou podcast audio files")
-    async def download_xyz_podcast_tool(url: str):
-        return await mcp_tools.download_xyz_podcast(url)
-
-    @mcp.tool(description="Scan directory for MP3 audio files")
-    async def get_mp3_files_tool(directory: str):
-        return await mcp_tools.get_mp3_files(directory)
-
-    @mcp.tool(description="Get basic file information")
-    async def get_file_info_tool(file_path: str):
-        return await mcp_tools.get_file_info(file_path)
-
-    @mcp.tool(description="Read text file content in segments")
-    async def read_text_file_segments_tool(
-        file_path: str,
-        chunk_size: int = 65536,
-        start_position: int = 0
-    ):
-        return await mcp_tools.read_text_file_segments(file_path, chunk_size, start_position)
-
-    # Create FastAPI wrapper
-    fastapi_wrapper = FastAPI(
-        title="Modal AudioTranscriber MCP",
-        description="Gradio UI + FastMCP Tool + Modal Integration AudioTranscriber MCP",
-        version="1.0.0",
-        lifespan=lambda app: mcp.session_manager.run()
-    )
-
-    # Get FastMCP's streamable HTTP app
-    mcp_app = mcp.streamable_http_app()
-
-    # Mount FastMCP application to /api path
-    fastapi_wrapper.mount("/api", mcp_app)
-
-    # Create Gradio interface
-    ui_app = create_gradio_interface()
-
-    # Use Gradio's standard mounting approach
-    final_app = mount_gradio_app(
-        app=fastapi_wrapper,
-        blocks=ui_app,
-        path="/",
-        app_kwargs={
-            "docs_url": "/docs",
-            "redoc_url": "/redoc",
-        }
-    )
-
-
-
-
-
-
-
-
+    # Create FastMCP server with new tools
+    mcp = FastMCP("Podcast MCP")
+
+    # Register tools using the new service architecture
+    @mcp.tool(description="Transcribe audio files to text using Whisper model with speaker diarization support")
+    async def transcribe_audio_file_tool(
+        audio_file_path: str,
+        model_size: str = "turbo",
+        language: str = None,
+        output_format: str = "srt",
+        enable_speaker_diarization: bool = False
+    ):
+        return await mcp_tools.transcribe_audio_file(
+            audio_file_path, model_size, language, output_format, enable_speaker_diarization
+        )
+
+    @mcp.tool(description="Download Apple Podcast audio files")
+    async def download_apple_podcast_tool(url: str):
+        return await mcp_tools.download_apple_podcast(url)
+
+    @mcp.tool(description="Download XiaoYuZhou podcast audio files")
+    async def download_xyz_podcast_tool(url: str):
+        return await mcp_tools.download_xyz_podcast(url)
+
+    @mcp.tool(description="Scan directory for MP3 audio files")
+    async def get_mp3_files_tool(directory: str):
+        return await mcp_tools.get_mp3_files(directory)
+
+    @mcp.tool(description="Get basic file information")
+    async def get_file_info_tool(file_path: str):
+        return await mcp_tools.get_file_info(file_path)
+
+    @mcp.tool(description="Read text file content in segments")
+    async def read_text_file_segments_tool(
+        file_path: str,
+        chunk_size: int = 65536,
+        start_position: int = 0
+    ):
+        return await mcp_tools.read_text_file_segments(file_path, chunk_size, start_position)
+
+    # Create FastAPI wrapper
+    fastapi_wrapper = FastAPI(
+        title="Modal AudioTranscriber MCP",
+        description="Gradio UI + FastMCP Tool + Modal Integration AudioTranscriber MCP",
+        version="1.0.0",
+        lifespan=lambda app: mcp.session_manager.run()
+    )
+
+    # Get FastMCP's streamable HTTP app
+    mcp_app = mcp.streamable_http_app()
+
+    # Mount FastMCP application to /api path
+    fastapi_wrapper.mount("/api", mcp_app)
+
+    # Create Gradio interface
+    ui_app = create_gradio_interface()
+
+    # Use Gradio's standard mounting approach
+    final_app = mount_gradio_app(
+        app=fastapi_wrapper,
+        blocks=ui_app,
+        path="/",
+        app_kwargs={
+            "docs_url": "/docs",
+            "redoc_url": "/redoc",
+        }
+    )
+
+    print("✅ Server startup completed")
+    print("🎨 Gradio UI: /")
+    print("🔧 MCP Streamable HTTP: /api/mcp")
+    print(f"📡 Server name: {mcp.name}")
+
+    return final_app
 
 # ==================== Modal Deployment Configuration ====================
 
@@ -143,56 +128,26 @@ if _modal_available:
 
 # ==================== Main Entry Point ====================
 
-def main():
-    """Main entry point for all deployment modes"""
-
-    if is_modal_mode():
-        print("☁️ Modal mode: Use 'modal deploy src.app::gradio_mcp_app'")
-        return None
-    else:
-        print("🚀 Starting in local mode")
-        print("💡 GPU functions will be routed to Modal endpoints")
-
-        app = create_app()
-        return app
-
 def run_local():
-    """Run local server with uvicorn
-
-
-
-
-
-    app =
-
-
-
-
-
-
-
-
-
-
-
-# For Hugging Face Spaces, directly create the app
-def get_app():
-    """Get app instance for HF Spaces"""
-    if "DEPLOYMENT_MODE" not in os.environ:
-        os.environ["DEPLOYMENT_MODE"] = "local"
-    return main()
-
-# HF Spaces compatibility: only create app when not in main execution
-if __name__ != "__main__":
-    # Check if we're in HF Spaces mode and app is already created
-    if not os.environ.get("HF_SPACES_MODE"):
-        # This will be called when imported by other environments (not HF Spaces)
-        app = get_app()
+    """Run local server with uvicorn"""
+    print("🚀 Starting in local mode")
+
+    # Set default environment
+    os.environ.setdefault("DEPLOYMENT_MODE", "local")
+
+    app = create_app()
+
+    # Use port 7860 for HF Spaces compatibility
+    port = int(os.environ.get("PORT", 7860))
+
+    print(f"🚀 Starting server on port {port}")
+
+    uvicorn.run(
+        app,
+        host="0.0.0.0",
+        port=port,
+        reload=False
+    )
 
 if __name__ == "__main__":
-
-    if not os.environ.get("HF_SPACES_MODE"):
-        run_local()
-    else:
-        print("⚠️ Skipping uvicorn in HF Spaces mode")
+    run_local()
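The sketch below is a minimal smoke test of the new create_app() wiring and is not part of the commit. It assumes the module is importable as src.app, that fastapi and httpx are installed, and that the mount points match the diff above (Gradio at "/", OpenAPI docs at "/docs", FastMCP under "/api"); the test name and assertions are illustrative only.

# Hypothetical smoke test for the create_app() composition shown above (assumed layout: src/app.py).
from fastapi.testclient import TestClient

from src.app import create_app

def test_app_wiring():
    app = create_app()
    # Entering the client runs the FastAPI lifespan, which starts FastMCP's
    # session manager (lifespan=lambda app: mcp.session_manager.run()).
    with TestClient(app) as client:
        assert client.get("/docs").status_code == 200   # FastAPI docs
        assert client.get("/").status_code == 200        # mounted Gradio UI
        # The MCP streamable HTTP endpoint speaks JSON-RPC over POST, so only
        # check that the /api mount resolves rather than returning 404.
        assert client.get("/api/mcp").status_code != 404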
src/services/__pycache__/podcast_download_service.cpython-310.pyc
CHANGED
Binary files a/src/services/__pycache__/podcast_download_service.cpython-310.pyc and b/src/services/__pycache__/podcast_download_service.cpython-310.pyc differ
src/ui/__pycache__/gradio_ui.cpython-310.pyc
CHANGED
Binary files a/src/ui/__pycache__/gradio_ui.cpython-310.pyc and b/src/ui/__pycache__/gradio_ui.cpython-310.pyc differ
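With the server running (for example via run_local() on port 7860), the tools registered in this commit are reachable over MCP streamable HTTP at /api/mcp. The sketch below assumes the official mcp Python SDK client; the import path, the three-value unpacking, and the localhost URL are assumptions to adapt to the actual SDK version and deployment.

# Rough sketch: list the tools exposed at /api/mcp using the MCP Python SDK
# (assumed client API; swap the URL for a deployed Space instead of localhost).
import asyncio

from mcp import ClientSession
from mcp.client.streamable_http import streamablehttp_client

async def list_podcast_tools():
    async with streamablehttp_client("http://localhost:7860/api/mcp") as (read, write, _):
        async with ClientSession(read, write) as session:
            await session.initialize()
            result = await session.list_tools()
            for tool in result.tools:
                print(tool.name, "-", tool.description)

asyncio.run(list_podcast_tools())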