DrishtiSharma committed (verified)
Commit ca1c1f2 · 1 Parent(s): a7aceea

Update app.py

Files changed (1)
  1. app.py +102 -20
app.py CHANGED
@@ -1,12 +1,16 @@
 import streamlit as st
 from openai import OpenAI
 import os
+from wordcloud import WordCloud
+import matplotlib.pyplot as plt
+from fpdf import FPDF
+import tempfile
 
 # Load environment variables
 MODEL = 'gpt-4'
 client = OpenAI(api_key=os.getenv('OPENAI_API_KEY'))
 
-st.title('AUDIO ANALYZER')
+st.title('Advanced Audio Analyzer')
 
 # Upload audio file
 audio_file = st.file_uploader("Upload an audio file", type=["mp3", "wav", "m4a"])
@@ -15,22 +19,100 @@ if audio_file:
     # Display audio player
     st.audio(audio_file)
 
-    # Transcribe audio
-    transcription = client.audio.transcriptions.create(
-        model="whisper-1",
-        file=audio_file,
-    )
-
-    # Analyze transcription
-    response = client.chat.completions.create(
-        model=MODEL,
-        messages=[
-            {"role": "system", "content": """You are an AI audio analyzer.
-            You are tasked to analyze the audio and create a summary of the provided transcription. Please respond in Markdown."""},
-            {"role": "user", "content": f"The audio transcription is: {transcription.text}"}
-        ],
-        temperature=0,
-    )
-
-    # Display the response
-    st.markdown(response.choices[0].message.content)
+    # Transcribe audio using Whisper API
+    with st.spinner("Transcribing audio..."):
+        transcription = client.audio.transcriptions.create(
+            model="whisper-1",
+            file=audio_file,
+        )
+    st.subheader("Transcription")
+    st.markdown(transcription.text)
+
+    # Summarize in multiple formats
+    with st.spinner("Generating summary..."):
+        summary_response = client.chat.completions.create(
+            model=MODEL,
+            messages=[
+                {"role": "system", "content": "Summarize the transcription in two formats:\n1. A concise paragraph\n2. Key points in bullet form."},
+                {"role": "user", "content": f"Here is the audio transcription: {transcription.text}"}
+            ],
+            temperature=0,
+        )
+    st.subheader("Summary in Multiple Formats")
+    summary_text = summary_response.choices[0].message.content
+    st.markdown(summary_text)
+
+    # Sentiment analysis
+    with st.spinner("Analyzing sentiment..."):
+        sentiment_response = client.chat.completions.create(
+            model=MODEL,
+            messages=[
+                {"role": "system", "content": "You are an AI sentiment analyzer. Analyze the sentiment of the transcription as positive, negative, or neutral and explain your reasoning."},
+                {"role": "user", "content": f"Here is the audio transcription: {transcription.text}"}
+            ],
+            temperature=0,
+        )
+    st.subheader("Sentiment Analysis")
+    sentiment_text = sentiment_response.choices[0].message.content
+    st.markdown(sentiment_text)
+
+    # Generate a word cloud from the transcription
+    with st.spinner("Generating word cloud..."):
+        wordcloud = WordCloud(width=800, height=400, background_color='white').generate(transcription.text)
+    st.subheader("Word Cloud of Transcription")
+    fig, ax = plt.subplots()
+    ax.imshow(wordcloud, interpolation='bilinear')
+    ax.axis('off')
+    st.pyplot(fig)
+
+    # Extract keywords and entities
+    with st.spinner("Extracting keywords and entities..."):
+        keywords_response = client.chat.completions.create(
+            model=MODEL,
+            messages=[
+                {"role": "system", "content": "Extract the key topics, keywords, and named entities (like people, places, or organizations) from the transcription."},
+                {"role": "user", "content": f"Here is the audio transcription: {transcription.text}"}
+            ],
+            temperature=0,
+        )
+    st.subheader("Key Topics and Keywords")
+    keywords_text = keywords_response.choices[0].message.content
+    st.markdown(keywords_text)
+
+    # Prepare analysis results
+    analysis_results = f"""
+    ### Transcription:
+    {transcription.text}
+
+    ### Summary:
+    {summary_text}
+
+    ### Sentiment Analysis:
+    {sentiment_text}
+
+    ### Key Topics and Keywords:
+    {keywords_text}
+    """
+
+    # Export as TXT
+    st.subheader("Export Analysis Results")
+    st.download_button("Download as TXT", analysis_results, file_name="audio_analysis.txt")
+
+    # Export as PDF
+    def create_pdf(content):
+        pdf = FPDF()
+        pdf.add_page()
+        pdf.set_font("Arial", size=12)
+
+        # Add each line of content to the PDF
+        for line in content.split("\n"):
+            pdf.multi_cell(0, 10, line)
+
+        # Save to a temporary file
+        with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as tmpfile:
+            pdf.output(tmpfile.name)
+            return tmpfile.name
+
+    pdf_file = create_pdf(analysis_results)
+    with open(pdf_file, "rb") as file:
+        st.download_button("Download as PDF", file, file_name="audio_analysis.pdf", mime="application/pdf")