zouhairk committed on
Commit e70527d · 1 Parent(s): 56f88f2
Files changed (3)
  1. Dockerfile +16 -0
  2. app.py +34 -38
  3. requirements.txt +2 -1
Dockerfile ADDED
@@ -0,0 +1,16 @@
+ # Read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
+ # you will also find guides on how best to write your Dockerfile
+
+ FROM python:3.9
+
+ RUN useradd -m -u 1000 user
+ USER user
+ ENV PATH="/home/user/.local/bin:$PATH"
+
+ WORKDIR /app
+
+ COPY --chown=user ./requirements.txt requirements.txt
+ RUN pip install --no-cache-dir --upgrade -r requirements.txt
+
+ COPY --chown=user . /app
+ CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
app.py CHANGED
@@ -1,56 +1,52 @@
- from flask import Flask, request, jsonify
  from transformers import AutoProcessor, SeamlessM4Tv2Model
  import numpy as np
  import wave
  import os
- from huggingface_hub import InferenceClient, login
- from dotenv import load_dotenv
- app = Flask(__name__)
 
-
 
- processor = AutoProcessor.from_pretrained("facebook/seamless-m4t-v2-large" )
  model = SeamlessM4Tv2Model.from_pretrained("facebook/seamless-m4t-v2-large")
 
  UPLOAD_FOLDER = "audio_files"
  os.makedirs(UPLOAD_FOLDER, exist_ok=True)
 
- @app.route("/", methods=["GET"])
  def return_text():
-     return jsonify({"text": "Hello, world!"})
 
- @app.route("/record", methods=["POST"])
- def record_audio():
-     file = request.files['audio']
-     filename = os.path.join(UPLOAD_FOLDER, file.filename)
-     file.save(filename)
 
-     # Load and process the audio
-     audio_data, orig_freq = torchaudio.load(filename)
-     audio_inputs = processor(audios=audio_data, return_tensors="pt")
-     output_tokens = model.generate(**audio_inputs, tgt_lang="fra", generate_speech=False)
-     translated_text = processor.decode(output_tokens[0].tolist()[0], skip_special_tokens=True)
 
-     return jsonify({"translated_text": translated_text})
 
- @app.route("/text_to_speech", methods=["POST"])
- def text_to_speech():
-     data = request.get_json()
-     text = data.get("text")
-     src_lang = data.get("src_lang")
-     tgt_lang = data.get("tgt_lang")
 
-     text_inputs = processor(text=text, src_lang=src_lang, return_tensors="pt")
-     audio_array = model.generate(**text_inputs, tgt_lang=tgt_lang)[0].cpu().numpy().squeeze()
 
-     output_filename = os.path.join(UPLOAD_FOLDER, "output.wav")
-     with wave.open(output_filename, "wb") as wf:
-         wf.setnchannels(1)
-         wf.setsampwidth(2)
-         wf.setframerate(16000)
-         wf.writeframes((audio_array * 32767).astype(np.int16).tobytes())
-
-     return jsonify({"audio_url": output_filename})
-
- if __name__ == "__main__":
-     app.run(debug=True)
 
+ from fastapi import FastAPI, File, UploadFile, HTTPException
  from transformers import AutoProcessor, SeamlessM4Tv2Model
  import numpy as np
  import wave
  import os
+ import torchaudio
+ from starlette.responses import JSONResponse
 
+ app = FastAPI()
 
+ processor = AutoProcessor.from_pretrained("facebook/seamless-m4t-v2-large")
  model = SeamlessM4Tv2Model.from_pretrained("facebook/seamless-m4t-v2-large")
 
  UPLOAD_FOLDER = "audio_files"
  os.makedirs(UPLOAD_FOLDER, exist_ok=True)
 
+ @app.get("/")
  def return_text():
+     return {"text": "Hello, world!"}
 
+ @app.post("/record")
+ async def record_audio(audio: UploadFile = File(...)):
+     filename = os.path.join(UPLOAD_FOLDER, audio.filename)
+     with open(filename, "wb") as buffer:
+         buffer.write(await audio.read())
 
+     try:
+         # Load and process the audio
+         audio_data, orig_freq = torchaudio.load(filename)
+         audio_inputs = processor(audios=audio_data, return_tensors="pt")
+         output_tokens = model.generate(**audio_inputs, tgt_lang="fra", generate_speech=False)
+         translated_text = processor.decode(output_tokens[0].tolist()[0], skip_special_tokens=True)
+     except Exception as e:
+         raise HTTPException(status_code=500, detail=f"Transcription error: {str(e)}")
 
+     return JSONResponse(content={"translated_text": translated_text})
 
+ @app.post("/text_to_speech")
+ async def text_to_speech(text: str, src_lang: str, tgt_lang: str):
+     try:
+         text_inputs = processor(text=text, src_lang=src_lang, return_tensors="pt")
+         audio_array = model.generate(**text_inputs, tgt_lang=tgt_lang)[0].cpu().numpy().squeeze()
 
+         output_filename = os.path.join(UPLOAD_FOLDER, "output.wav")
+         with wave.open(output_filename, "wb") as wf:
+             wf.setnchannels(1)
+             wf.setsampwidth(2)
+             wf.setframerate(16000)
+             wf.writeframes((audio_array * 32767).astype(np.int16).tobytes())
+     except Exception as e:
+         raise HTTPException(status_code=500, detail=f"Speech synthesis error: {str(e)}")
 
+     return JSONResponse(content={"audio_url": output_filename})
requirements.txt CHANGED
@@ -1,4 +1,5 @@
- flask
  torch
  numpy
  transformers
+ fastapi
+ uvicorn[standard]
  torch
  numpy
  transformers
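
Note that the /record handler relies on torchaudio.load, yet torchaudio does not appear in this list and is not pulled in by torch alone, so the Space would presumably fail with an import or name error when that code runs. A possible fix (an unverified sketch, not part of this commit) is one extra line:

fastapi
uvicorn[standard]
torch
torchaudio  # assumed addition: needed by torchaudio.load in app.py
numpy
transformers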