rajistics committed
Commit cbe4331 · 1 Parent(s): bba38af

Update app.py

Files changed (1)
  1. app.py +14 -18
app.py CHANGED
@@ -5,7 +5,7 @@ from functools import partial
 
 import requests
 import pandas as pd
-import plotly.express as px
+#import plotly.express as px
 
 import torch
 import gradio as gr
@@ -21,41 +21,35 @@ device = 0 if torch.cuda.is_available() else -1
 
 # display if the sentiment value is above these thresholds
 thresholds = {"joy": 0.99,"anger": 0.95,"surprise": 0.95,"sadness": 0.98,"fear": 0.95,"love": 0.99,}
-
 color_map = {"joy": "green","anger": "red","surprise": "yellow","sadness": "blue","fear": "orange","love": "purple",}
 
 # Audio components
 whisper_device = "cuda" if torch.cuda.is_available() else "cpu"
 whisper = whisperx.load_model("tiny.en", whisper_device)
 alignment_model, metadata = whisperx.load_align_model(language_code="en", device=whisper_device)
-speaker_segmentation = Pipeline.from_pretrained("pyannote/[email protected]",
-                                                use_auth_token=os.environ['ENO_TOKEN'])
+speaker_segmentation = Pipeline.from_pretrained("pyannote/[email protected]", use_auth_token=os.environ['ENO_TOKEN'])
 
+speech_to_text = partial(
+    stt,
+    speaker_segmentation=speaker_segmentation,
+    whisper=whisper,
+    alignment_model=alignment_model,
+    metadata=metadata,
+    whisper_device=whisper_device
+)
 
 # Text components
 emotion_pipeline = pipeline(
     "text-classification",
     model="bhadresh-savani/distilbert-base-uncased-emotion",
-    device=device,
+    #device=device,
 )
 summarization_pipeline = pipeline(
     "summarization",
     model="knkarthick/MEETING_SUMMARY",
-    device=device
+    #device=device
 )
 
-EXAMPLES = [["Customer_Support_Call.wav"]]
-
-
-speech_to_text = partial(
-    stt,
-    speaker_segmentation=speaker_segmentation,
-    whisper=whisper,
-    alignment_model=alignment_model,
-    metadata=metadata,
-    whisper_device=whisper_device
-)
-
 def summarize(diarized, summarization_pipeline):
     text = ""
     for d in diarized:
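For context, the `stt` helper being curried here is defined elsewhere in this repo; the hunk only shows the `functools.partial` call moving up next to the models it binds. Below is a minimal sketch of the shape such a helper usually takes given these keyword arguments; the body is an assumption about the typical whisperx-plus-pyannote flow, not this app's actual implementation:

```python
import whisperx

def stt(audio_file, speaker_segmentation, whisper, alignment_model,
        metadata, whisper_device):
    # Transcribe with the preloaded whisperx model.
    result = whisper.transcribe(audio_file)
    # Tighten segment timestamps with the alignment model.
    aligned = whisperx.align(result["segments"], alignment_model, metadata,
                             audio_file, whisper_device)
    # Run the pyannote pipeline to get who-spoke-when segments.
    segments = speaker_segmentation(audio_file)
    return aligned, segments
```

`partial` then freezes every argument except the audio file, so the Gradio callback only has to pass the uploaded recording, e.g. `speech_to_text("Customer_Support_Call.wav")`.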
@@ -78,6 +72,8 @@ def sentiment(diarized, emotion_pipeline):
 
     return customer_sentiments
 
+EXAMPLES = [["Customer_Support_Call.wav"]]
+
 with gr.Blocks() as demo:
 
     with gr.Row():
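The diff cuts off inside the `gr.Blocks()` context, so the UI wiring is not shown. Here is a hedged sketch of how `EXAMPLES` and the curried `speech_to_text` typically plug into such a Blocks app; the component names and layout are assumptions, not the app's actual interface:

```python
import gradio as gr

EXAMPLES = [["Customer_Support_Call.wav"]]  # one inner list per example row

with gr.Blocks() as demo:
    with gr.Row():
        audio = gr.Audio(type="filepath", label="Call recording")
        transcript = gr.Textbox(label="Diarized transcript")
    # gr.Examples expects one list per row, one entry per input component,
    # which matches the nested-list shape of EXAMPLES above.
    gr.Examples(examples=EXAMPLES, inputs=[audio])
    gr.Button("Transcribe").click(fn=speech_to_text, inputs=audio,
                                  outputs=transcript)

demo.launch()
```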
 
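For reference, the `thresholds` dict and `emotion_pipeline` defined above feed the `sentiment` function in the part of app.py this diff does not touch ("display if the sentiment value is above these thresholds"). A self-contained illustration of that confidence-thresholding pattern, with a made-up sample utterance:

```python
from transformers import pipeline

emotion_pipeline = pipeline(
    "text-classification",
    model="bhadresh-savani/distilbert-base-uncased-emotion",
)

# Show an emotion only when the model is very confident in it.
thresholds = {"joy": 0.99, "anger": 0.95, "surprise": 0.95,
              "sadness": 0.98, "fear": 0.95, "love": 0.99}

prediction = emotion_pipeline("Thanks so much, that fixed my problem!")[0]
if prediction["score"] > thresholds[prediction["label"]]:
    print(prediction["label"], round(prediction["score"], 3))
```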