remotewith committed on
Commit 91a603c · 1 Parent(s): ff1e37d

Update app.py

Files changed (1)
  1. app.py +93 -0
app.py CHANGED
@@ -18,6 +18,99 @@ from sklearn.cluster import AgglomerativeClustering
 import numpy as np
 
 
+def audio_to_text(audio, num_speakers):
+    path, error = convert_to_wav(audio)
+    if error is not None:
+        return error
+
+    duration = get_duration(path)
+    if duration > 4 * 60 * 60:
+        return "Audio duration too long"
+
+    result = model.transcribe(path)
+    segments = result["segments"]
+
+    num_speakers = min(max(round(num_speakers), 1), len(segments))
+    if len(segments) == 1:
+        segments[0]['speaker'] = 'SPEAKER 1'
+    else:
+        embeddings = make_embeddings(path, segments, duration)
+        add_speaker_labels(segments, embeddings, num_speakers)
+    output = get_output(segments)
+    return output
+
+def convert_to_wav(path):
+    if path[-3:] != 'wav':
+        new_path = '.'.join(path.split('.')[:-1]) + '.wav'
+        try:
+            subprocess.call(['ffmpeg', '-i', path, new_path, '-y'])
+        except Exception:
+            return path, 'Error: Could not convert file to .wav'
+        path = new_path
+    return path, None
+
+def get_duration(path):
+    with contextlib.closing(wave.open(path, 'r')) as f:
+        frames = f.getnframes()
+        rate = f.getframerate()
+        return frames / float(rate)
+
+def make_embeddings(path, segments, duration):
+    embeddings = np.zeros(shape=(len(segments), 192))
+    for i, segment in enumerate(segments):
+        embeddings[i] = segment_embedding(path, segment, duration)
+    return np.nan_to_num(embeddings)
+
+audio = Audio()
+
+def segment_embedding(path, segment, duration):
+    start = segment["start"]
+    # Whisper overshoots the end timestamp in the last segment
+    end = min(duration, segment["end"])
+    clip = Segment(start, end)
+    waveform, sample_rate = audio.crop(path, clip)
+    return embedding_model(waveform[None])
+
+def add_speaker_labels(segments, embeddings, num_speakers):
+    clustering = AgglomerativeClustering(num_speakers).fit(embeddings)
+    labels = clustering.labels_
+    for i in range(len(segments)):
+        segments[i]["speaker"] = 'SPEAKER ' + str(labels[i] + 1)
+
+def time(secs):
+    return datetime.timedelta(seconds=round(secs))
+
+def get_output(segments):
+    output = ''
+    for (i, segment) in enumerate(segments):
+        if i == 0 or segments[i - 1]["speaker"] != segment["speaker"]:
+            if i != 0:
+                output += '\n\n'
+            output += segment["speaker"] + ' ' + str(time(segment["start"])) + '\n\n'
+        output += segment["text"][1:] + ' '
+    return output
+
+app1 = gr.Interface(
+    title='AI Voice to Text',
+    fn=audio_to_text,  # entry point defined above
+    inputs=[
+        gr.inputs.Audio(source="upload", type="filepath"),
+        gr.inputs.Number(default=2, label="Number of Speakers")
+    ],
+    outputs=[
+        gr.outputs.Textbox(label='Transcript')
+    ]
+)
+
+app1.launch()
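The hunk above depends on several names that are defined earlier in app.py (lines 1-17 are outside this diff): model (a Whisper model), embedding_model (a speaker-embedding model producing 192-dimensional vectors, matching np.zeros(shape=(len(segments), 192))), Audio and Segment from pyannote, and the subprocess, wave, contextlib, datetime, and gradio imports. A minimal sketch of what that preamble could look like follows; the specific checkpoints (Whisper "base", speechbrain/spkrec-ecapa-voxceleb) are assumptions for illustration, not taken from this commit.

# Hypothetical preamble for app.py; checkpoints and device choice are assumptions.
import contextlib
import datetime
import subprocess
import wave

import gradio as gr
import numpy as np
import torch
import whisper
from pyannote.audio import Audio
from pyannote.audio.pipelines.speaker_verification import PretrainedSpeakerEmbedding
from pyannote.core import Segment
from sklearn.cluster import AgglomerativeClustering  # already visible in the hunk context

# Whisper model used by model.transcribe(path) in audio_to_text()
model = whisper.load_model("base")

# Speaker-embedding model used by segment_embedding(); this checkpoint returns
# 192-dimensional embeddings, consistent with make_embeddings() above.
embedding_model = PretrainedSpeakerEmbedding(
    "speechbrain/spkrec-ecapa-voxceleb",
    device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
)

Note also that gr.inputs.* and gr.outputs.* are the legacy Gradio namespaces; on Gradio 3.x and later the equivalent components are gr.Audio(type="filepath") and gr.Textbox(label='Transcript').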