Commit fa8a19e · refactor
Fedir Zadniprovskyi committed
Parent: 9bac415
faster_whisper_server/config.py CHANGED
@@ -195,6 +195,9 @@ class Config(BaseSettings):
     model_config = SettingsConfigDict(env_nested_delimiter="__")
 
     log_level: str = "info"
+    host: str = Field(alias="UVICORN_HOST", default="0.0.0.0")
+    port: int = Field(alias="UVICORN_PORT", default=8000)
+
     default_language: Language | None = None
     default_response_format: ResponseFormat = ResponseFormat.JSON
     whisper: WhisperConfig = WhisperConfig()
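The two new fields are ordinary pydantic-settings fields, so the server address can now be overridden through the `UVICORN_HOST` / `UVICORN_PORT` environment variables instead of being read with `os.getenv` in the Gradio code. Below is a minimal sketch of that behaviour, assuming `Config` is a pydantic-settings `BaseSettings` subclass as the hunk suggests; `DemoConfig` and the values used are illustrative, not part of the commit:

```python
# Sketch only: DemoConfig stands in for faster_whisper_server.config.Config.
# Field(alias=...) makes pydantic-settings read the value from the aliased
# environment variable (UVICORN_HOST / UVICORN_PORT) rather than HOST / PORT.
import os

from pydantic import Field
from pydantic_settings import BaseSettings, SettingsConfigDict


class DemoConfig(BaseSettings):
    model_config = SettingsConfigDict(env_nested_delimiter="__")

    host: str = Field(alias="UVICORN_HOST", default="0.0.0.0")
    port: int = Field(alias="UVICORN_PORT", default=8000)


# Illustrative override; without these env vars the defaults above apply.
os.environ["UVICORN_HOST"] = "127.0.0.1"
os.environ["UVICORN_PORT"] = "9000"

config = DemoConfig()
print(config.host, config.port)  # -> 127.0.0.1 9000 (port coerced to int)
```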
faster_whisper_server/gradio_app.py CHANGED
@@ -1,5 +1,4 @@
 from collections.abc import Generator
-import os
 
 import gradio as gr
 import httpx
@@ -11,30 +10,29 @@ from faster_whisper_server.config import Config, Task
 TRANSCRIPTION_ENDPOINT = "/v1/audio/transcriptions"
 TRANSLATION_ENDPOINT = "/v1/audio/translations"
 TIMEOUT_SECONDS = 180
+TIMEOUT = httpx.Timeout(timeout=TIMEOUT_SECONDS)
 
 
 def create_gradio_demo(config: Config) -> gr.Blocks:
-
-
-
-    http_client = httpx.Client(base_url=f"http://{host}:{port}", timeout=httpx.Timeout(timeout=TIMEOUT_SECONDS))
-    openai_client = OpenAI(base_url=f"http://{host}:{port}/v1", api_key="cant-be-empty")
+    base_url = f"http://{config.host}:{config.port}"
+    http_client = httpx.Client(base_url=base_url, timeout=TIMEOUT)
+    openai_client = OpenAI(base_url=f"{base_url}/v1", api_key="cant-be-empty")
 
     def handler(file_path: str, model: str, task: Task, temperature: float, stream: bool) -> Generator[str, None, None]:
+        if task == Task.TRANSCRIBE:
+            endpoint = TRANSCRIPTION_ENDPOINT
+        elif task == Task.TRANSLATE:
+            endpoint = TRANSLATION_ENDPOINT
+
         if stream:
            previous_transcription = ""
-            for transcription in
+            for transcription in streaming_audio_task(file_path, endpoint, temperature, model):
                previous_transcription += transcription
                yield previous_transcription
        else:
-            yield
-
-    def transcribe_audio(file_path: str, task: Task, temperature: float, model: str) -> str:
-        if task == Task.TRANSCRIBE:
-            endpoint = TRANSCRIPTION_ENDPOINT
-        elif task == Task.TRANSLATE:
-            endpoint = TRANSLATION_ENDPOINT
+            yield audio_task(file_path, endpoint, temperature, model)
 
+    def audio_task(file_path: str, endpoint: str, temperature: float, model: str) -> str:
         with open(file_path, "rb") as file:
             response = http_client.post(
                 endpoint,
@@ -49,8 +47,8 @@ def create_gradio_demo(config: Config) -> gr.Blocks:
         response.raise_for_status()
         return response.text
 
-    def
-        file_path: str,
+    def streaming_audio_task(
+        file_path: str, endpoint: str, temperature: float, model: str
     ) -> Generator[str, None, None]:
         with open(file_path, "rb") as file:
             kwargs = {
@@ -62,7 +60,6 @@ def create_gradio_demo(config: Config) -> gr.Blocks:
                     "stream": True,
                 },
             }
-            endpoint = TRANSCRIPTION_ENDPOINT if task == Task.TRANSCRIBE else TRANSLATION_ENDPOINT
             with connect_sse(http_client, "POST", endpoint, **kwargs) as event_source:
                 for event in event_source.iter_sse():
                     yield event.data
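For context on the refactor above: `handler` now resolves the endpoint once from the selected `Task` and hands it to `audio_task` (plain POST) or `streaming_audio_task` (server-sent events via httpx-sse), so the task branching lives in one place instead of being repeated in each helper. The sketch below shows the streaming path as the diff implies it, outside of Gradio; it is a hedged, self-contained approximation, and the base URL, file path, and model name are placeholders rather than values taken from the commit:

```python
# Hedged sketch of consuming the streaming transcription endpoint the way
# streaming_audio_task does in the diff above. Base URL, audio file, and model
# name are illustrative placeholders.
from collections.abc import Generator

import httpx
from httpx_sse import connect_sse

TRANSCRIPTION_ENDPOINT = "/v1/audio/transcriptions"
TIMEOUT = httpx.Timeout(timeout=180)

# Points at a locally running server; adjust host/port as needed.
http_client = httpx.Client(base_url="http://0.0.0.0:8000", timeout=TIMEOUT)


def stream_transcription(file_path: str, model: str, temperature: float = 0.0) -> Generator[str, None, None]:
    """Yield transcription chunks as they arrive over SSE."""
    with open(file_path, "rb") as file:
        kwargs = {
            "files": {"file": file},
            "data": {"model": model, "temperature": temperature, "stream": True},
        }
        # connect_sse POSTs the request and exposes the SSE events; each
        # event's data field carries an incremental piece of the transcript.
        with connect_sse(http_client, "POST", TRANSCRIPTION_ENDPOINT, **kwargs) as event_source:
            for event in event_source.iter_sse():
                yield event.data


if __name__ == "__main__":
    for chunk in stream_transcription("audio.wav", model="Systran/faster-whisper-tiny.en"):
        print(chunk, end="", flush=True)
```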