from flask import Flask, request, jsonify
from transformers import pipeline, AutoTokenizer, AutoModelForTokenClassification
import whisper
import os
import tempfile
import ffmpeg

app = Flask(__name__)

# Initialize the Whisper speech-to-text model
whisper_model = whisper.load_model("small")

# Initialize the emotion classifier
classifier = pipeline(
    "text-classification",
    model="j-hartmann/emotion-english-distilroberta-base",
    return_all_scores=True,
)

# Initialize the NER pipeline
ner_tokenizer = AutoTokenizer.from_pretrained("dslim/bert-base-NER")
ner_model = AutoModelForTokenClassification.from_pretrained("dslim/bert-base-NER")
ner_pipeline = pipeline("ner", model=ner_model, tokenizer=ner_tokenizer)
@app.route('/transcribe', methods=['POST'])
def transcribe_audio():
    # Check if a file was uploaded
    if 'file' not in request.files:
        return jsonify({'error': 'No file uploaded'}), 400

    file = request.files['file']
    # Check if the file is empty
    if file.filename == '':
        return jsonify({'error': 'No selected file'}), 400

    try:
        # Save the uploaded file to a temporary file
        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as temp_audio:
            file.save(temp_audio)
            temp_path = temp_audio.name

        # Transcribe the audio using Whisper
        result = whisper_model.transcribe(temp_path)
        transcription = result["text"]

        # Clean up the temporary file
        os.remove(temp_path)

        return jsonify({'transcription': transcription})
    except Exception as e:
        return jsonify({'error': str(e)}), 500
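# Example client call for /transcribe (a sketch; assumes the server is reachable at
# http://localhost:5000 and that a local "sample.mp3" exists):
#
#   import requests
#   with open("sample.mp3", "rb") as f:
#       resp = requests.post("http://localhost:5000/transcribe", files={"file": f})
#   print(resp.json())  # -> {"transcription": "..."}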
@app.route('/classify', methods=['POST'])
def classify():
    try:
        data = request.get_json()
        if 'text' not in data:
            return jsonify({"error": "Missing 'text' field"}), 400

        text = data['text']
        result = classifier(text)
        return jsonify(result)
    except Exception as e:
        return jsonify({"error": str(e)}), 500
@app.route('/ner', methods=['POST'])
def ner_endpoint():
    try:
        data = request.get_json()
        text = data.get("text", "")

        # Run the NER pipeline and keep only the word and entity label for each result
        ner_results = ner_pipeline(text)
        words_and_entities = [
            {"word": result['word'], "entity": result['entity']}
            for result in ner_results
        ]
        return jsonify({"entities": words_and_entities})
    except Exception as e:
        return jsonify({"error": str(e)}), 500