zouhairk commited on
Commit
bfcb0a5
·
1 Parent(s): 03541a0
Files changed (2) hide show
  1. Dockerfile +1 -1
  2. app.py +38 -35
Dockerfile CHANGED
@@ -13,4 +13,4 @@ COPY --chown=user ./requirements.txt requirements.txt
13
  RUN pip install --no-cache-dir --upgrade -r requirements.txt
14
 
15
  COPY --chown=user . /app
16
- CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
 
13
  RUN pip install --no-cache-dir --upgrade -r requirements.txt
14
 
15
  COPY --chown=user . /app
16
+ CMD ["python", "app.py"]
app.py CHANGED
@@ -1,52 +1,55 @@
1
- from fastapi import FastAPI, File, UploadFile, HTTPException
2
  from transformers import AutoProcessor, SeamlessM4Tv2Model
3
  import numpy as np
4
  import wave
5
  import os
6
- from starlette.responses import JSONResponse
 
7
 
8
- app = FastAPI()
9
 
10
- processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
11
- model = SeamlessM4Tv2Model.from_pretrained("facebook/wav2vec2-base-960h")
12
 
13
  UPLOAD_FOLDER = "audio_files"
14
  os.makedirs(UPLOAD_FOLDER, exist_ok=True)
15
 
16
- @app.get("/")
17
  def return_text():
18
- return {"text": "Hello, world!"}
19
 
20
- @app.post("/record")
21
- async def record_audio(audio: UploadFile = File(...)):
22
- filename = os.path.join(UPLOAD_FOLDER, audio.filename)
23
- with open(filename, "wb") as buffer:
24
- buffer.write(await audio.read())
25
 
26
- try:
27
- # Charger et traiter l'audio
28
- audio_data, orig_freq = torchaudio.load(filename)
29
- audio_inputs = processor(audios=audio_data, return_tensors="pt")
30
- output_tokens = model.generate(**audio_inputs, tgt_lang="fra", generate_speech=False)
31
- translated_text = processor.decode(output_tokens[0].tolist()[0], skip_special_tokens=True)
32
- except Exception as e:
33
- raise HTTPException(status_code=500, detail=f"Erreur de transcription: {str(e)}")
34
 
35
- return JSONResponse(content={"translated_text": translated_text})
36
 
37
- @app.post("/text_to_speech")
38
- async def text_to_speech(text: str, src_lang: str, tgt_lang: str):
39
- try:
40
- text_inputs = processor(text=text, src_lang=src_lang, return_tensors="pt")
41
- audio_array = model.generate(**text_inputs, tgt_lang=tgt_lang)[0].cpu().numpy().squeeze()
 
42
 
43
- output_filename = os.path.join(UPLOAD_FOLDER, "output.wav")
44
- with wave.open(output_filename, "wb") as wf:
45
- wf.setnchannels(1)
46
- wf.setsampwidth(2)
47
- wf.setframerate(16000)
48
- wf.writeframes((audio_array * 32767).astype(np.int16).tobytes())
49
- except Exception as e:
50
- raise HTTPException(status_code=500, detail=f"Erreur de synthèse vocale: {str(e)}")
51
 
52
- return JSONResponse(content={"audio_url": output_filename})
 
 
 
 
 
 
 
 
 
 
 
1
from flask import Flask, request, jsonify
from transformers import AutoProcessor, SeamlessM4Tv2Model
import numpy as np
import wave
import os
from huggingface_hub import InferenceClient, login

# Flask application serving the speech-translation endpoints.
app = Flask(__name__)

# SeamlessM4T v2 handles both speech-to-text and text-to-speech translation.
# NOTE(review): loaded eagerly at import time — requests are fast, but process
# startup is slow and memory-heavy.
_MODEL_ID = "facebook/seamless-m4t-v2-large"
processor = AutoProcessor.from_pretrained(_MODEL_ID)
model = SeamlessM4Tv2Model.from_pretrained(_MODEL_ID)

# Directory where uploaded and generated audio files are stored.
UPLOAD_FOLDER = "audio_files"
os.makedirs(UPLOAD_FOLDER, exist_ok=True)
16
 
17
@app.route("/", methods=["GET"])
def return_text():
    """Health-check endpoint returning a static greeting."""
    payload = {"text": "Hello, world!"}
    return jsonify(payload)
20
 
21
@app.route("/record", methods=["POST"])
def record_audio():
    """Transcribe/translate an uploaded WAV file to French text.

    Expects a multipart form upload under the field name ``audio``.
    Returns JSON ``{"translated_text": ...}`` on success, or a JSON error
    payload with HTTP 400/500 on failure.
    """
    # Guard against a missing form field instead of letting the KeyError
    # surface as an opaque 400 with no body.
    if "audio" not in request.files:
        return jsonify({"error": "missing 'audio' file field"}), 400
    file = request.files["audio"]

    # basename() strips any client-supplied directory components so the
    # upload cannot escape UPLOAD_FOLDER (path-traversal hardening).
    filename = os.path.join(UPLOAD_FOLDER, os.path.basename(file.filename))
    file.save(filename)

    try:
        # Decode PCM samples with the stdlib wave module. The previous code
        # called torchaudio.load(), but torchaudio is never imported, so
        # this endpoint raised NameError on every request.
        with wave.open(filename, "rb") as wf:
            frames = wf.readframes(wf.getnframes())
        # 16-bit PCM -> float32 in [-1, 1). NOTE(review): assumes a mono,
        # 16-bit, 16 kHz upload (what the model expects) — confirm with the
        # clients, or resample/downmix here if other formats are allowed.
        audio_data = np.frombuffer(frames, dtype=np.int16).astype(np.float32) / 32768.0
        audio_inputs = processor(audios=audio_data, return_tensors="pt")
        output_tokens = model.generate(**audio_inputs, tgt_lang="fra", generate_speech=False)
        translated_text = processor.decode(output_tokens[0].tolist()[0], skip_special_tokens=True)
    except Exception as e:
        # Restore the error handling the FastAPI version had: report the
        # failure as a 500 rather than crashing the request.
        return jsonify({"error": f"Erreur de transcription: {str(e)}"}), 500

    return jsonify({"translated_text": translated_text})
34
 
35
@app.route("/text_to_speech", methods=["POST"])
def text_to_speech():
    """Synthesize speech for ``text`` translated into ``tgt_lang``.

    Expects a JSON body with ``text``, ``src_lang`` and ``tgt_lang``.
    Writes a 16 kHz mono 16-bit WAV into UPLOAD_FOLDER and returns its
    path as JSON; returns 400 on bad input and 500 on synthesis failure.
    """
    # silent=True returns None on a missing/invalid JSON body instead of
    # raising, so we can answer with a clean 400.
    data = request.get_json(silent=True)
    if not data:
        return jsonify({"error": "expected a JSON body"}), 400
    text = data.get("text")
    src_lang = data.get("src_lang")
    tgt_lang = data.get("tgt_lang")
    # Fail fast on missing fields instead of passing None into the model.
    if not all((text, src_lang, tgt_lang)):
        return jsonify({"error": "'text', 'src_lang' and 'tgt_lang' are required"}), 400

    try:
        text_inputs = processor(text=text, src_lang=src_lang, return_tensors="pt")
        audio_array = model.generate(**text_inputs, tgt_lang=tgt_lang)[0].cpu().numpy().squeeze()

        # Serialize as 16 kHz mono 16-bit PCM; the model emits float samples
        # in [-1, 1], so scale to the int16 range before writing.
        output_filename = os.path.join(UPLOAD_FOLDER, "output.wav")
        with wave.open(output_filename, "wb") as wf:
            wf.setnchannels(1)
            wf.setsampwidth(2)
            wf.setframerate(16000)
            wf.writeframes((audio_array * 32767).astype(np.int16).tobytes())
    except Exception as e:
        # Restore the error handling the FastAPI version had: report the
        # failure as a 500 rather than crashing the request.
        return jsonify({"error": f"Erreur de synthèse vocale: {str(e)}"}), 500

    return jsonify({"audio_url": output_filename})
53
+
54
if __name__ == "__main__":
    # The Dockerfile CMD is `python app.py`, so the server must bind to all
    # interfaces on the port the container exposes (7860, matching the old
    # uvicorn command). Flask's default of 127.0.0.1:5000 would make the app
    # unreachable from outside the container. debug=True is dropped: the
    # Werkzeug interactive debugger allows arbitrary remote code execution
    # and must never run in a deployed container.
    app.run(host="0.0.0.0", port=int(os.environ.get("PORT", "7860")))