Upload 3 files
- dockerfile +5 -0
- main.py +94 -0
- requirements.txt +17 -0
dockerfile
ADDED
@@ -0,0 +1,5 @@
+FROM python:3.9
+WORKDIR /code
+COPY . .
+RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
+CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
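The CMD line serves the FastAPI app defined in main.py on port 7860, the default port expected by Hugging Face Spaces. For local testing outside Docker, the same server can be started with a small helper; a minimal sketch, assuming main.py and the two .joblib files sit in the working directory (run_local.py is a hypothetical file name, not part of the upload):

```python
# run_local.py - hypothetical helper, equivalent to the Dockerfile CMD
import uvicorn

if __name__ == "__main__":
    # serve the app object defined in main.py on the same host/port as the container
    uvicorn.run("main:app", host="0.0.0.0", port=7860)
```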
main.py
ADDED
@@ -0,0 +1,94 @@
+import tensorflow as tf
+import gradio as gr
+from huggingface_hub import from_pretrained_keras
+import pandas as pd
+import numpy as np
+import joblib
+import os
+import sys
+
+# librosa is a Python library for analyzing audio and music; it is used below to extract features from the audio files.
+import librosa
+import librosa.display
+import seaborn as sns
+import matplotlib.pyplot as plt
+
+from sklearn.preprocessing import StandardScaler, OneHotEncoder
+from sklearn.metrics import confusion_matrix, classification_report
+from sklearn.model_selection import train_test_split
+
+# to play the audio files
+
+
+import keras
+from keras.preprocessing import sequence
+from keras.models import Sequential, model_from_json
+from keras.layers import Dense, Embedding
+from keras.layers import LSTM, BatchNormalization, GRU
+from keras.preprocessing.text import Tokenizer
+
+from tensorflow.keras.utils import to_categorical
+from keras.layers import Input, Flatten, Dropout, Activation
+from keras.layers import Conv1D, MaxPooling1D, AveragePooling1D
+from keras.models import Model
+from keras.callbacks import ModelCheckpoint
+from tensorflow.keras.optimizers import SGD
+from fastapi import FastAPI, Request, UploadFile, File
+
+
+import warnings
+if not sys.warnoptions:
+    warnings.simplefilter("ignore")
+warnings.filterwarnings("ignore", category=DeprecationWarning)
+
+model = from_pretrained_keras('Mohamed41/MODEL_EMOTION_AR_TEXT_72P')
+
+
+def feat_ext(data):
+    # Time-domain features
+    # ZCR: prosody / low-level acoustic feature
+    result = np.array([])
+    zcr = np.mean(librosa.feature.zero_crossing_rate(y=data).T, axis=0)
+    result = np.hstack((result, zcr))  # stacking horizontally
+    # Frequency-domain features
+    # Spectral and wavelet features
+    # MFCC
+    mfcc = np.mean(librosa.feature.mfcc(y=data, sr=22050, n_mfcc=40).T, axis=0)
+    result = np.hstack((result, mfcc))  # stacking horizontally
+    return result
+
+
+scaler = joblib.load('scaler.joblib')
+encoder = joblib.load('encoder.joblib')
+
+
+def get_predict_feat(path):
+    d, s_rate = librosa.load(path, duration=2.5, offset=0.6)
+    res = feat_ext(d)
+    result = np.array(res)
+    result = np.reshape(result, newshape=(1, 41))
+    i_result = scaler.transform(result)
+    final_result = np.expand_dims(i_result, axis=2)
+
+    return final_result
+
+
+emotions1 = {1: 'Neutral', 2: 'Calm', 3: 'Happy', 4: 'Sad',
+             5: 'Angry', 6: 'Fear', 7: 'Disgust', 8: 'Surprise'}
+
+
+def prediction(path1):
+    res = get_predict_feat(path1)
+    predictions = model.predict(res)
+    y_pred = encoder.inverse_transform(predictions)
+    return y_pred[0][0]
+
+
+app = FastAPI()
+
+
+@app.post("/")
+async def read_root(request: Request, file: UploadFile = File(...)):
+    # the request body is multipart form data, so it cannot be parsed with request.json() here
+
+    return {"filename": file.filename, "filepath": f"/app/{file.filename}"}
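As uploaded, the endpoint only echoes the file name and never calls prediction(). A possible way to wire the two together is sketched below; this is not part of the uploaded code, it assumes the route is added inside main.py (where app and prediction() already exist), and the /predict route name, the predict_emotion function name, and the temporary-file handling are illustrative choices:

```python
# hypothetical addition to main.py: run the uploaded audio through prediction()
import shutil
import tempfile


@app.post("/predict")  # illustrative route name, not in the uploaded code
async def predict_emotion(file: UploadFile = File(...)):
    # librosa.load() needs a file path, so persist the upload to a temporary .wav file
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp:
        shutil.copyfileobj(file.file, tmp)
        tmp_path = tmp.name
    label = prediction(tmp_path)  # prediction() is defined earlier in main.py
    return {"filename": file.filename, "emotion": label}
```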
requirements.txt
ADDED
@@ -0,0 +1,17 @@
+keras
+pandas
+numpy
+librosa
+seaborn
+matplotlib
+tensorflow
+fastapi==0.74.*
+requests==2.27.*
+sentencepiece==0.1.*
+uvicorn[standard]==0.17.*
+# imported by main.py; python-multipart is required by FastAPI for UploadFile/form parsing
+gradio
+huggingface_hub
+joblib
+scikit-learn
+python-multipart
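With the image built and the container's port 7860 published, the endpoint in main.py accepts a multipart file upload. A minimal client sketch using the requests package listed above; test.wav and localhost:7860 are placeholder values, not part of the upload:

```python
# client.py - hypothetical example of posting an audio file to the running service
import requests

with open("test.wav", "rb") as f:  # placeholder audio file
    response = requests.post(
        "http://localhost:7860/",  # port published by the Dockerfile CMD
        files={"file": ("test.wav", f, "audio/wav")},
    )
print(response.json())  # e.g. {"filename": "test.wav", "filepath": "/app/test.wav"}
```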