gputrain committed
Commit 21cc674 · 1 Parent(s): e722eb3

app update
Files changed (1): app.py +130 -0
app.py ADDED
@@ -0,0 +1,130 @@
+ #!/usr/bin/env python
+ # coding: utf-8
+
+ # In[1]:
+
+
+ import gradio
+
+ from fastai.vision.all import *
+ from fastai.data.all import *
+ from pathlib import Path
+ import pandas as pd
+ import numpy as np
+ import matplotlib.pyplot as plt
+ import librosa
+ import librosa.display
+ from huggingface_hub import hf_hub_download
+ from fastai.learner import load_learner
+
+
+ # In[9]:
+
+
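+ # Pull the model artifacts from the Hugging Face Hub: the UrbanSound8K
+ # metadata CSV and the exported fastai learner (cached after first download).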
+ ref_file = hf_hub_download("gputrain/UrbanSound8K-model", "UrbanSound8K.csv")
+
+ model_file = hf_hub_download("gputrain/UrbanSound8K-model", "model.pkl")
+
+
+ # In[10]:
+
+
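+ # Build a filename -> class lookup from the dataset metadata. label_func has
+ # to be defined before load_learner, presumably because the exported learner
+ # pickles a reference to it.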
+ df = pd.read_csv(ref_file)
+ df['fname'] = df['slice_file_name'].apply(lambda name: str(name)[:-4] + '.png')
+ my_dict = dict(zip(df.fname, df['class']))
+
+ def label_func(f_name):
+     f_name = str(f_name).split('/')[-1]
+     return my_dict[f_name]
+
+ model = load_learner(model_file)
+ labels = model.dls.vocab
+
+
+ # In[11]:
+
+
+ with open("article.md") as f:
+     article = f.read()
+
+
+ # In[12]:
+
+
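+ # Shared keyword arguments for the gradio Interface. The bare filenames in
+ # "examples" suggest the example .wav files sit alongside app.py in the repo.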
+ interface_options = {
+     "title": "Urban Sound 8K Classification",
+     "description": "A fastai example: ResNet34 image classification of a sound WAV file transformed to a Mel spectrogram",
+     # "article": article,
+     "interpretation": "default",
+     "layout": "horizontal",
+     # Audio examples from the validation fold
+     "examples": ["dog_bark.wav", "children_playing.wav", "air_conditioner.wav",
+                  "street_music.wav", "engine_idling.wav", "jackhammer.wav",
+                  "drilling.wav", "siren.wav", "car_horn.wav", "gun_shot.wav"],
+     "allow_flagging": "never",
+ }
+
+
+ # In[13]:
+
+
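+ # Render a WAV file as a Mel spectrogram image saved to temp.png. The small
+ # 0.72-inch figure at 400 dpi presumably matches the image size used at
+ # training time; axes and frame are hidden so only spectrogram pixels remain.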
+ def convert_sounds_melspectrogram(audio_file):
+     samples, sample_rate = librosa.load(audio_file)  # load the audio with librosa
+
+     fig = plt.figure(figsize=[0.72, 0.72])
+     ax = fig.add_subplot(111)
+     ax.axes.get_xaxis().set_visible(False)
+     ax.axes.get_yaxis().set_visible(False)
+     ax.set_frame_on(False)
+     melS = librosa.feature.melspectrogram(y=samples, sr=sample_rate)
+     librosa.display.specshow(librosa.power_to_db(melS, ref=np.max))
+     filename = 'temp.png'
+     plt.savefig(filename, dpi=400, bbox_inches='tight', pad_inches=0)
+     plt.close('all')
+
+     return None
+
+
+ # In[14]:
+
+
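+ # Classify the saved spectrogram image and return a label -> probability
+ # mapping, the format gradio's Label output expects.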
+ def predict(img_file):
+     img = PILImage.create(img_file)
+     pred, pred_idx, probs = model.predict(img)
+     return {labels[i]: float(probs[i]) for i in range(len(labels))}
+
+
+ # In[15]:
+
+
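+ # End-to-end inference: render the uploaded audio to a spectrogram image,
+ # then classify that image.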
+ def end2endpipeline(filename):
+     convert_sounds_melspectrogram(filename)
+     return predict("temp.png")
+
+
+ # In[16]:
+
+
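+ # Wire the pipeline into a gradio Interface using the older namespaced
+ # input/output classes (gradio 2.x/early-3.x API): an uploaded audio file in,
+ # a 10-class probability label out.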
+ demo = gradio.Interface(
+     fn=end2endpipeline,
+     inputs=gradio.inputs.Audio(source="upload", type="filepath"),
+     outputs=gradio.outputs.Label(num_top_classes=10),
+     **interface_options,
+ )
+
+
+ # In[19]:
+
+
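+ # Queue incoming requests, skip creating a public share link, and pre-compute
+ # the example predictions so the demo loads quickly.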
+ launch_options = {
+     "enable_queue": True,
+     "share": False,
+     "cache_examples": True,
+ }
+
+ demo.launch(**launch_options)
+
+
+ # In[ ]:
+
+