sanchit-gandhi committed
Commit 87505e7 · 1 Parent(s): af74e64
Update app.py
app.py CHANGED
@@ -3,7 +3,7 @@ import requests
 from transformers.models.whisper.tokenization_whisper import TO_LANGUAGE_CODE


-title = "Whisper JAX: The Fastest Whisper API"
+title = "Whisper JAX: The Fastest Whisper API ⚡️"

 description = "Whisper JAX is an optimised implementation of the [Whisper model](https://huggingface.co/openai/whisper-large-v2) by OpenAI. It runs on JAX with a TPU v4-8 in the backend. Compared to PyTorch on an A100 GPU, it is over **12x** faster, making it the fastest Whisper API available."
 #description += "\nYou can submit requests to Whisper JAX through this Gradio Demo, or directly through API calls (see below). This notebook demonstrates how you can run the Whisper JAX model yourself on a TPU v2-8 in a Google Colab: TODO."
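For context, the description string above refers to the standalone whisper-jax package behind this demo. Below is a minimal sketch of how that pipeline can be invoked locally, following the usage documented in the sanchit-gandhi/whisper-jax repository; the `FlaxWhisperPipline` name (spelled as in that repo) and the `dtype` argument are taken from its README, and the audio path is a placeholder:

```python
import jax.numpy as jnp
from whisper_jax import FlaxWhisperPipline  # spelling as used in the whisper-jax repo

# Instantiate the pipeline with the large-v2 checkpoint in half precision.
pipeline = FlaxWhisperPipline("openai/whisper-large-v2", dtype=jnp.bfloat16)

# The first call JIT-compiles the forward pass (slow, one-off); subsequent
# calls reuse the compiled function and run at full speed.
text = pipeline("audio.mp3")  # placeholder path to a local audio file
```

On multi-device hardware such as the TPU v4-8 mentioned in the description, JAX replicates the compiled forward pass across accelerator cores, which is where the quoted speedup over single-GPU PyTorch comes from.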