sanchit-gandhi committed on
Commit
a00a8e9
·
1 Parent(s): ce7b54d

update desc/article

Browse files
Files changed (1) hide show
  1. app.py +5 -4
app.py CHANGED
@@ -17,14 +17,15 @@ title = "Whisper JAX: The Fastest Whisper API ⚡️"
17
 
18
  description = """Whisper JAX is an optimised implementation of the [Whisper model](https://huggingface.co/openai/whisper-large-v2) by OpenAI. It runs on JAX with a TPU v4-8 in the backend. Compared to PyTorch on an A100 GPU, it is over [**70x faster**](https://github.com/sanchit-gandhi/whisper-jax#benchmarks), making it the fastest Whisper API available.
19
 
20
- Note that using microphone or audio file requires the audio input to be transferred from the Gradio demo to the TPU, which for large audio files can be slow. We recommend using YouTube where possible, since this directly downloads the audio file to the TPU, skipping the file transfer step.
21
- """
22
 
23
- API_URL = os.getenv("API_URL")
24
- API_URL_FROM_FEATURES = os.getenv("API_URL_FROM_FEATURES")
25
 
26
  article = "Whisper large-v2 model by OpenAI. Backend running JAX on a TPU v4-8 through the generous support of the [TRC](https://sites.research.google/trc/about/) programme. Whisper JAX [code](https://github.com/sanchit-gandhi/whisper-jax) and Gradio demo by 🤗 Hugging Face."
27
 
 
 
28
  language_names = sorted(TO_LANGUAGE_CODE.keys())
29
  CHUNK_LENGTH_S = 30
30
  BATCH_SIZE = 16
 
17
 
18
  description = """Whisper JAX is an optimised implementation of the [Whisper model](https://huggingface.co/openai/whisper-large-v2) by OpenAI. It runs on JAX with a TPU v4-8 in the backend. Compared to PyTorch on an A100 GPU, it is over [**70x faster**](https://github.com/sanchit-gandhi/whisper-jax#benchmarks), making it the fastest Whisper API available.
19
 
20
+ Note that at peak times, you may find yourself in the queue for this demo. When you submit a request, your queue position will be shown in the top right-hand side of the demo pane. Once you reach the front of the queue, your audio file will be transcribed, with the progress displayed through a progress bar.
 
21
 
22
+ To skip the queue, you may wish to create your own inference endpoint, details for which can be found in the [Whisper JAX repository](https://github.com/sanchit-gandhi/whisper-jax#creating-an-endpoint).
23
+ """
24
 
25
  article = "Whisper large-v2 model by OpenAI. Backend running JAX on a TPU v4-8 through the generous support of the [TRC](https://sites.research.google/trc/about/) programme. Whisper JAX [code](https://github.com/sanchit-gandhi/whisper-jax) and Gradio demo by 🤗 Hugging Face."
26
 
27
+ API_URL = os.getenv("API_URL")
28
+ API_URL_FROM_FEATURES = os.getenv("API_URL_FROM_FEATURES")
29
  language_names = sorted(TO_LANGUAGE_CODE.keys())
30
  CHUNK_LENGTH_S = 30
31
  BATCH_SIZE = 16