mutisya committed
Commit 4b945aa · verified · 1 Parent(s): e85afdf

Update app.py

Files changed (1): app.py +30 -1
app.py CHANGED
@@ -2,7 +2,7 @@ from fastapi import FastAPI, File, UploadFile, Form
 from fastapi import HTTPException
 import uvicorn
 from pydantic import BaseModel
-from transformers import pipeline
+from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
 import torch
 import ffmpeg
 import io
@@ -19,6 +19,14 @@ model_name = "openai/whisper-base"
 device = 0 if torch.cuda.is_available() else -1  # Use GPU if available
 whisper_pipeline = pipeline("automatic-speech-recognition", model=model_name, device=device)
 
+# set up translation pipeline
+def get_translation_pipeline(translation_model_path):
+    model = AutoModelForSeq2SeqLM.from_pretrained(translation_model_path)
+    tokenizer = AutoTokenizer.from_pretrained(translation_model_path)
+    translation_pipeline = pipeline('translation', model=model, tokenizer=tokenizer, device=device)
+    return translation_pipeline
+
+translator = get_translation_pipeline("mutisya/nllb_600m-en-kik-kam-luo-mer-som-swh-drL-24_5-filtered-v24_28_4")
 
 class RecognitionResponse(BaseModel):
     text: str
@@ -76,6 +84,27 @@ async def recognize_audio(audio: UploadFile = File(...), language: str = Form("e
         logger.error("Unexpected error during transcription", exc_info=True)
         raise HTTPException(status_code=500, detail="Internal Server Error")
 
+class TranslationResponse(BaseModel):
+    translated_text: str
+
+@app.post("/translate", response_model=TranslationResponse)
+async def translate_text(request: TranslationRequest):
+    source_language = request.source_language
+    target_language = request.target_language
+    text_to_translate = request.text
+
+    try:
+        src_lang = flores_codes[source_language]
+        tgt_lang = flores_codes[target_language]
+
+        translated_text = translator(text_to_translate, src_lang=src_lang, tgt_lang=tgt_lang)[0]['translation_text']
+
+        return TranslationResponse(translated_text=translated_text)
+
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=str(e))
+
+
 # Run the FastAPI application
 if __name__ == "__main__":
     uvicorn.run(app, host="0.0.0.0", port=7860)
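
Note: the added /translate endpoint refers to a TranslationRequest model and a flores_codes mapping that are not part of this diff, so they are presumably defined elsewhere in app.py. A minimal sketch of what they might look like, assuming FLORES-200 style language codes for the NLLB checkpoint and purely illustrative key names:

# Hypothetical definitions assumed by the /translate endpoint above; the
# field names match the request attributes used in the handler, but the
# language keys and the code mapping are illustrative only.
from pydantic import BaseModel

class TranslationRequest(BaseModel):
    source_language: str  # key into flores_codes, e.g. "eng"
    target_language: str  # key into flores_codes, e.g. "swh"
    text: str             # text to translate

# Map request language keys to the FLORES-200 codes the NLLB model expects.
flores_codes = {
    "eng": "eng_Latn",  # English
    "swh": "swh_Latn",  # Swahili
    # ... remaining languages supported by the fine-tuned model
}

With definitions along these lines, a client could POST a JSON body such as {"source_language": "eng", "target_language": "swh", "text": "Good morning"} to /translate and would receive a response of the form {"translated_text": "..."}.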