Update app.py
app.py CHANGED
@@ -1,5 +1,4 @@
 import streamlit as st
-import speech_recognition as sr
 from transformers import pipeline
 from gtts import gTTS
 import requests
@@ -8,6 +7,9 @@ import os
 # Initialize the text generation pipeline with GPT-2
 generator = pipeline('text-generation', model='gpt2')
 
+# Initialize Whisper model for transcription
+transcriber = pipeline("automatic-speech-recognition", model="openai/whisper-base")
+
 # Function to generate a Trump-like response
 def generate_response(prompt):
     # This is your original `generate_response` function
@@ -25,7 +27,6 @@ UBERDUCK_API_SECRET = 'pk_8e4b3ad9-d26a-49ca-ac83-4ddfe4dace85' # Replace with
 
 # Function to generate audio using Uberduck API
 def generate_audio_uberduck(text):
-    # This is your original Uberduck API function
     url = "https://api.uberduck.ai/speak"
     headers = {
         "Accept": "application/json",
@@ -34,7 +35,7 @@ def generate_audio_uberduck(text):
     }
     json_data = {
         "speech": text,
-        "voice": "donald-trump"
+        "voice": "donald-trump"
     }
 
     # Send a request to Uberduck's API to generate audio
@@ -57,17 +58,11 @@ def generate_audio_gtts(text):
     tts.save(audio_path)
     return audio_path
 
-# Function to transcribe audio to text
+# Function to transcribe audio to text using Whisper
 def transcribe_audio(file_path):
-    recognizer = sr.Recognizer()
-    try:
-        with sr.AudioFile(file_path) as source:
-            audio = recognizer.record(source)
-            text = recognizer.recognize_google(audio)
-            return text
-    except Exception as e:
-        st.write("Error transcribing audio:", str(e))
-        return None
+    # Transcribe audio using Whisper model
+    transcription = transcriber(file_path)
+    return transcription['text']
 
 # Streamlit app UI
 st.title("Trump-like Voice Assistant")
@@ -93,7 +88,6 @@ if uploaded_audio is not None:
     st.write("Trump-like Assistant:", trump_response)
 
     # Generate audio response (choose either Uberduck or gTTS)
-    # Uncomment one of the following two lines depending on your preferred TTS provider
     audio_output_path = generate_audio_uberduck(trump_response)  # Uberduck
     # audio_output_path = generate_audio_gtts(trump_response)  # gTTS as fallback
 
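For reference, a minimal end-to-end sketch of the new transcription path this commit introduces (Whisper via the transformers ASR pipeline instead of speech_recognition). This is not the Space's exact code: the uploader label, accepted file types, temporary-file handling, and ".wav" suffix are illustrative assumptions, needed because st.file_uploader returns an in-memory buffer while the diff's transcribe_audio expects a file path; transcriber and transcribe_audio themselves match the diff.

import tempfile

import streamlit as st
from transformers import pipeline

# Same Whisper pipeline as in the diff (decoding the audio file requires ffmpeg)
transcriber = pipeline("automatic-speech-recognition", model="openai/whisper-base")

def transcribe_audio(file_path):
    # The pipeline accepts a path to an audio file and returns a dict with a 'text' key
    transcription = transcriber(file_path)
    return transcription['text']

uploaded_audio = st.file_uploader("Upload an audio clip", type=["wav", "mp3"])  # label/types assumed
if uploaded_audio is not None:
    # Assumption: write the uploaded bytes to a temporary file first,
    # because transcribe_audio() takes a path rather than a buffer.
    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp:
        tmp.write(uploaded_audio.read())
        tmp_path = tmp.name
    st.write("You said:", transcribe_audio(tmp_path))

Since the model runs locally, the first call downloads openai/whisper-base from the Hugging Face Hub; subsequent runs reuse the local cache.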
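Likewise, a hedged sketch of the gTTS fallback referenced in the commented-out line at the bottom of the diff, wired up to Streamlit playback. The output filename, the placeholder response text, and the st.audio call are assumptions; the gTTS(...).save(...) shape mirrors the diff's generate_audio_gtts.

import streamlit as st
from gtts import gTTS

def generate_audio_gtts(text):
    # Mirrors the diff's fallback: synthesize with gTTS, save an MP3, return its path
    tts = gTTS(text)
    audio_path = "response.mp3"  # illustrative filename
    tts.save(audio_path)
    return audio_path

trump_response = "This is a generated response."  # placeholder text
audio_output_path = generate_audio_gtts(trump_response)
st.audio(audio_output_path, format="audio/mp3")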