Spaces:
Sleeping
Sleeping
File size: 1,415 Bytes
cce0bf1 894be5c cce0bf1 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 |
import time

import torch
from fastapi import FastAPI, UploadFile
from fastapi import HTTPException
from fastapi.middleware.cors import CORSMiddleware
from transformers import pipeline
# Application setup: FastAPI app, permissive CORS, and the Whisper ASR pipeline.
app = FastAPI(docs_url="/api/docs")

# NOTE(review): wildcard origins combined with allow_credentials=True makes the
# middleware echo back any Origin header — fine for a demo, but confirm this is
# intended before exposing the service publicly.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
    allow_credentials=True,
)

# Query CUDA availability once so device and dtype can never disagree
# (the original called torch.cuda.is_available() twice).
_cuda_available = torch.cuda.is_available()
device = "cuda:0" if _cuda_available else "cpu"
# fp16 halves memory on GPU; CPU inference needs full fp32 precision.
torch_dtype = torch.float16 if _cuda_available else torch.float32

BATCH_SIZE = 8

# Loads openai/whisper-large-v3 (downloads weights on first run).
pipe = pipeline("automatic-speech-recognition",
                "openai/whisper-large-v3",
                torch_dtype=torch_dtype,
                device=device)
@app.get("/device")
def getDevice():
    """Report the device string the ASR pipeline runs on ("cuda:0" or "cpu").

    Returns:
        The module-level ``device`` string chosen at startup.
    """
    # The original timed this handler, but start_time was captured and printed
    # on consecutive statements, so it always reported ~0 sec — removed.
    return device
@app.post("/transcribe")
def transcribe(soundFile: UploadFile, task: str = "transcribe"):
    """Transcribe (or translate) an uploaded audio file with Whisper.

    Parameters:
        soundFile: the uploaded audio file; the raw bytes are handed to the
            ASR pipeline (any format the pipeline's decoder accepts).
        task: Whisper task, "transcribe" (default) or "translate".

    Returns:
        The recognized text as a plain string.

    Raises:
        HTTPException: 400 when no audio file was submitted.
    """
    start_time = time.time()
    if soundFile is None:
        # Bug fix: the original did `raise "<message>"`, which is itself a
        # TypeError in Python 3 (exceptions must derive from BaseException).
        # Raise a proper FastAPI HTTPException with the same message instead.
        raise HTTPException(
            status_code=400,
            detail="No audio file submitted! Please upload or record an audio file before submitting your request.",
        )
    inputFile = soundFile.file.read()
    text = pipe(inputFile,
                batch_size=BATCH_SIZE,
                generate_kwargs={"task": task},
                return_timestamps=True)["text"]
    print("Time took to process the request and return response is {} sec".format(
        time.time() - start_time))
    return text
|