File size: 2,767 Bytes
60713ab
9de27c4
 
73a9cc3
 
bfcb0a5
a03524e
630ae5a
a03524e
630ae5a
440c34d
9de27c4
630ae5a
 
 
a03524e
73a9cc3
 
60713ab
 
73a9cc3
60713ab
 
 
 
 
 
 
bdaabef
630ae5a
 
 
 
 
 
3d2c274
 
630ae5a
 
 
 
bfcb0a5
 
630ae5a
 
 
bfcb0a5
 
 
73a9cc3
bfcb0a5
 
 
 
 
73a9cc3
bfcb0a5
73a9cc3
bfcb0a5
 
 
 
 
 
73a9cc3
bfcb0a5
 
73a9cc3
bfcb0a5
 
 
 
 
 
 
 
 
2b7f7fa
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
from flask import Flask, request, jsonify, send_from_directory
from transformers import AutoProcessor, SeamlessM4Tv2Model
import numpy as np
import wave
import os
from huggingface_hub import InferenceClient, login
import logging
import torchaudio

# Configure debug-level application logging.
logging.basicConfig(level=logging.INFO)

# Model and processor are loaded lazily via the /load_model endpoint;
# both stay None until a client explicitly requests loading.
model = None
processor = None

# Directory where uploaded recordings and generated speech are stored.
UPLOAD_FOLDER = "audio_files"
os.makedirs(UPLOAD_FOLDER, exist_ok=True)
 
# Serve the built React front-end from front/dist at the site root.
app = Flask(__name__, static_folder="front/dist", static_url_path="/")

@app.route("/")
def serve_react_app():
    """Serve the React single-page app's entry point at the site root."""
    return send_from_directory(directory="front/dist", path="index.html")

@app.route("/<path:path>")
def serve_static_files(path):
    """Serve a static asset from the built front-end bundle.

    Fix: the original served from "dist", but the root route and the app's
    static_folder both use "front/dist" — assets under "dist" do not exist,
    so every static file request would 404.
    """
    return send_from_directory("front/dist", path)

@app.route("/load_model", methods=["POST"])
def load_model():
    """Load (or skip loading) the SeamlessM4T v2 model and processor.

    Expects a JSON body like {"load": true}. Returns 200 either way, with a
    message indicating whether the model was loaded.

    Fix: `request.json` raises BadRequest (HTTP 400) when the body is missing
    or not valid JSON; `get_json(silent=True)` degrades gracefully instead.
    """
    global model, processor
    data = request.get_json(silent=True) or {}
    load = data.get("load", False)

    if load:
        # Heavyweight download/initialization — only done on explicit request.
        processor = AutoProcessor.from_pretrained("facebook/seamless-m4t-v2-large")
        model = SeamlessM4Tv2Model.from_pretrained("facebook/seamless-m4t-v2-large")
        return jsonify({"message": "Modèle chargé avec succès."}), 200
    else:
        return jsonify({"message": "Le modèle n'est pas chargé."}), 200

@app.route("/record", methods=["POST"])
def record_audio():
    """Accept an uploaded audio file and return its French transcription/translation.

    Expects a multipart form upload under the key "audio". Returns 400 when
    the model is not loaded or the file part is missing.

    Fix: SeamlessM4T expects 16 kHz input; the original fed the processor audio
    at whatever rate the upload used, degrading or breaking recognition. The
    waveform is now resampled to 16 kHz and the rate passed to the processor.
    """
    if model is None or processor is None:
        return jsonify({"error": "Le modèle n'est pas chargé."}), 400

    # Guard against a missing file part instead of raising KeyError (HTTP 500).
    file = request.files.get('audio')
    if file is None:
        return jsonify({"error": "Fichier audio manquant."}), 400

    filename = os.path.join(UPLOAD_FOLDER, file.filename)
    file.save(filename)

    # Load, then resample to the 16 kHz rate the model was trained on.
    audio_data, orig_freq = torchaudio.load(filename)
    audio_data = torchaudio.functional.resample(
        audio_data, orig_freq=orig_freq, new_freq=16000
    )
    audio_inputs = processor(audios=audio_data, sampling_rate=16000, return_tensors="pt")
    output_tokens = model.generate(**audio_inputs, tgt_lang="fra", generate_speech=False)
    translated_text = processor.decode(output_tokens[0].tolist()[0], skip_special_tokens=True)

    return jsonify({"translated_text": translated_text})

@app.route("/text_to_speech", methods=["POST"])
def text_to_speech():
    """Translate text and synthesize it as a 16 kHz mono WAV file.

    Expects a JSON body with "text", "src_lang" and "tgt_lang". Returns the
    path of the generated WAV under "audio_url".

    Fixes: (1) missing model-loaded guard — consistent with /record, the
    original raised AttributeError (HTTP 500) if called before /load_model;
    (2) samples are clipped to [-1, 1] before int16 conversion, since values
    outside that range would overflow-wrap and produce loud artifacts.
    """
    if model is None or processor is None:
        return jsonify({"error": "Le modèle n'est pas chargé."}), 400

    data = request.get_json(silent=True) or {}
    text = data.get("text")
    src_lang = data.get("src_lang")
    tgt_lang = data.get("tgt_lang")

    text_inputs = processor(text=text, src_lang=src_lang, return_tensors="pt")
    audio_array = model.generate(**text_inputs, tgt_lang=tgt_lang)[0].cpu().numpy().squeeze()

    output_filename = os.path.join(UPLOAD_FOLDER, "output.wav")
    with wave.open(output_filename, "wb") as wf:
        wf.setnchannels(1)      # mono
        wf.setsampwidth(2)      # 16-bit PCM
        wf.setframerate(16000)  # SeamlessM4T v2 output rate
        # Clip then scale float [-1, 1] samples to int16 to avoid wraparound.
        samples = np.clip(audio_array, -1.0, 1.0)
        wf.writeframes((samples * 32767).astype(np.int16).tobytes())

    return jsonify({"audio_url": output_filename})