add summary, topic and sentiment features
- app.py  +25 −15
- sentiment_analysis.py  +5 −0
- summary.py  +5 −0
- topic.py  +5 −0
app.py
CHANGED
@@ -1,16 +1,28 @@
 import gradio as gr
-
+import requests
+# from transcribe import transcribe
+from sentiment_analysis import sentiment_analyser
+from summary import summarizer
+from topic import topic_gen
+
+def transcribe2():
+    response = requests.post("https://dwarkesh-whisper-speaker-recognition.hf.space/run/predict", json={
+        "data": [
+            {"name": "audio.wav", "data": "data:audio/wav;base64,UklGRiQAAABXQVZFZm10IBAAAAABAAEARKwAAIhYAQACABAAZGF0YQAAAAA="},
+            2,
+        ]}).json()
+
+    data = response["data"]
 
 def main(audio_file, number_of_speakers):
     # Audio to Text Converter
-    text_data = transcribe(audio_file, number_of_speakers)
-    print(text_data)
-
-
-
-
-
-    return title, short_summary, sentiment_analysis, quality, detailed_summary
+    # text_data = transcribe(audio_file, number_of_speakers)
+    # print(text_data)
+    text_data = ""
+    topic = topic_gen(text_data)
+    summary = summarizer(text_data)
+    sentiment_analysis = sentiment_analyser(text_data)
+    return topic, summary, sentiment_analysis
 
 # UI Interface on the Hugging Face Page
 with gr.Blocks() as demo:
@@ -23,19 +35,17 @@ with gr.Blocks() as demo:
             btn_clear = gr.ClearButton(value="Clear", components=[audio_file, number_of_speakers])
             btn_submit = gr.Button(value="Submit")
         with gr.Column():
-
-
+            topic = gr.Textbox(label="Title", placeholder="Title for Conversation")
+            summary = gr.Textbox(label="Short Summary", placeholder="Short Summary for Conversation")
             sentiment_analysis = gr.Textbox(label="Sentiment Analysis", placeholder="Sentiment Analysis for Conversation")
-
-            detailed_summary = gr.Textbox(label="Detailed Summary", placeholder="Detailed Summary for Conversation")
-    btn_submit.click(fn=main, inputs=[audio_file, number_of_speakers], outputs=[title, short_summary, sentiment_analysis, quality, detailed_summary])
+    btn_submit.click(fn=main, inputs=[audio_file, number_of_speakers], outputs=[topic, summary, sentiment_analysis])
     gr.Markdown("## Examples")
     gr.Examples(
         examples=[
             ["./examples/sample4.wav", 2],
         ],
         inputs=[audio_file, number_of_speakers],
-        outputs=[
+        outputs=[topic, summary, sentiment_analysis],
        fn=main,
    )
    gr.Markdown(
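Note on the new transcribe2() helper: it calls the dwarkesh-whisper-speaker-recognition Space through Gradio's legacy /run/predict REST endpoint, but as committed it posts a hard-coded, nearly empty base64 WAV and never returns the result. A minimal sketch of a parameterised version, assuming the same endpoint and payload shape as in the diff above (the file reading, base64 encoding, and return handling are additions for illustration, not part of this commit):

import base64
import requests

def transcribe_remote(audio_path, number_of_speakers):
    # Base64-encode the uploaded file so it fits the
    # {"name": ..., "data": "data:audio/wav;base64,..."} payload shape used above.
    with open(audio_path, "rb") as f:
        audio_b64 = base64.b64encode(f.read()).decode()
    response = requests.post(
        "https://dwarkesh-whisper-speaker-recognition.hf.space/run/predict",
        json={
            "data": [
                {"name": "audio.wav", "data": f"data:audio/wav;base64,{audio_b64}"},
                number_of_speakers,
            ]
        },
    ).json()
    # The Space returns its outputs in the "data" list.
    return response["data"]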
sentiment_analysis.py
ADDED
@@ -0,0 +1,5 @@
+from transformers import pipeline
+
+def sentiment_analyser(text):
+    sent = pipeline("sentiment-analysis", model="siebert/sentiment-roberta-large-english")
+    return sent(text)
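As written, sentiment_analyser rebuilds the Hugging Face pipeline, and so reloads the roberta-large checkpoint, on every call; the same pattern appears in summary.py and topic.py below. A common variant is to build the pipeline once at module import time and reuse it (a sketch, not part of the commit):

from transformers import pipeline

# Load the model once at import; repeated calls reuse the same pipeline object.
_sentiment = pipeline("sentiment-analysis", model="siebert/sentiment-roberta-large-english")

def sentiment_analyser(text):
    return _sentiment(text)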
summary.py
ADDED
@@ -0,0 +1,5 @@
+from transformers import pipeline
+
+def summarizer(text):
+    summ = pipeline("summarization", model="knkarthick/MEETING-SUMMARY-BART-LARGE-XSUM-SAMSUM-DIALOGSUM")
+    return summ(text)
topic.py
ADDED
@@ -0,0 +1,5 @@
+from transformers import pipeline
+
+def topic_gen(text):
+    topic = pipeline("text2text-generation", model="knkarthick/TOPIC-DIALOGSUM")
+    return topic(text)
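Taken together, main() now feeds the (currently empty) transcript through the three new helpers and returns topic, summary and sentiment to the Gradio Textboxes. A quick local smoke test of the helpers, assuming transformers plus a backend such as torch are installed; the sample dialogue below is made up for illustration:

from sentiment_analysis import sentiment_analyser
from summary import summarizer
from topic import topic_gen

text_data = (
    "Alice: The deployment failed again last night. "
    "Bob: Let's roll back the config change and rerun the tests today."
)

print(topic_gen(text_data))           # text2text-generation output, e.g. [{'generated_text': '...'}]
print(summarizer(text_data))          # e.g. [{'summary_text': '...'}]
print(sentiment_analyser(text_data))  # e.g. [{'label': 'NEGATIVE', 'score': ...}]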