tommy24 committed
Commit f92ea75 · 1 Parent(s): 9ad0a66

Update app.py

Files changed (1)
  1. app.py +71 -44
app.py CHANGED
@@ -76,54 +76,81 @@
 
 import gradio as gr
 import requests
-import urllib.request
-from pydub import AudioSegment
-import numpy as np
+import time
 import os
-import sys
-import wave
-import io
-import base64
-import azure.cognitiveservices.speech as speechsdk
-
-speech_key = os.environ.get("test3")
-service_region = os.environ.get("test4")
-
-speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region)
-# Note: the voice setting will not overwrite the voice element in input SSML.
-speech_config.speech_synthesis_voice_name = os.environ.get("test5")
-
-def function1(prompt):
-    response = requests.post("https://tommy24-testing3.hf.space/run/predict", json={
-        "data": [
-            prompt,
-        ]}).json()
-    message = response["data"][0]
-    speech_synthesizer = speechsdk.SpeechSynthesizer(speech_config=speech_config)
-    result = speech_synthesizer.speak_text_async(message).get()
-    if result.reason == speechsdk.ResultReason.SynthesizingAudioCompleted:
-        audio_stream = io.BytesIO(result.audio_data)
+
+def function(prompt):
+    url = os.environ.get("test7")
+    headers = {os.environ.get("test8")}
+    data = {
+        "input": prompt
+    }
+    response = requests.post(url, headers=headers, json=data)
+    response = response.json()
+    output = response["output"]
+    trigger1 = os.environ.get("test9")
+    trigger2 = os.environ.get("test10")
+    set1 = os.environ.get("test11")
+    set2 = os.environ.get("test12")
+    if trigger1 in output and trigger2 in output:
+        output = re.sub(rf"{trigger2}", ste1, output)
+        output = re.sub(rf"{trigger1}", set2, output)
+    print(output)
+
+iface = gr.Interface(fn=function, inputs="text", outputs="text")
+iface.launch()
+
+# ********************************************************************************************
+# import gradio as gr
+# import requests
+# import urllib.request
+# from pydub import AudioSegment
+# import numpy as np
+# import os
+# import sys
+# import wave
+# import io
+# import base64
+# import azure.cognitiveservices.speech as speechsdk
+
+# speech_key = os.environ.get("test3")
+# service_region = os.environ.get("test4")
+
+# speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region)
+# # Note: the voice setting will not overwrite the voice element in input SSML.
+# speech_config.speech_synthesis_voice_name = os.environ.get("test5")
+
+# def function1(prompt):
+#     response = requests.post("https://tommy24-testing3.hf.space/run/predict", json={
+#         "data": [
+#             prompt,
+#         ]}).json()
+#     message = response["data"][0]
+#     speech_synthesizer = speechsdk.SpeechSynthesizer(speech_config=speech_config)
+#     result = speech_synthesizer.speak_text_async(message).get()
+#     if result.reason == speechsdk.ResultReason.SynthesizingAudioCompleted:
+#         audio_stream = io.BytesIO(result.audio_data)
 
-        # Create a wave file object and write the audio data to it
-        with wave.open("audio.wav", 'wb') as wave_file:
-            wave_file.setnchannels(1)
-            wave_file.setsampwidth(2)
-            wave_file.setframerate(16000)
-            wave_file.writeframesraw(audio_stream.getvalue())
+#         # Create a wave file object and write the audio data to it
+#         with wave.open("audio.wav", 'wb') as wave_file:
+#             wave_file.setnchannels(1)
+#             wave_file.setsampwidth(2)
+#             wave_file.setframerate(16000)
+#             wave_file.writeframesraw(audio_stream.getvalue())
 
-        # Use ffmpeg to convert the wave file to an mp3 file
-        filename = "output.mp3"
+#         # Use ffmpeg to convert the wave file to an mp3 file
+#         filename = "output.mp3"
 
-        if os.path.exists(filename):
-            os.remove(filename)
-        else:
-            pass
-        command = f"ffmpeg -i audio.wav -y -codec:a libmp3lame -qscale:a 2 {filename}"
-        os.system(command)
-        return "output.mp3"
-
-iface = gr.Interface(fn=function1, inputs="text", outputs=[gr.Audio(label="Audio",type="numpy")])
-iface.launch()
+#         if os.path.exists(filename):
+#             os.remove(filename)
+#         else:
+#             pass
+#         command = f"ffmpeg -i audio.wav -y -codec:a libmp3lame -qscale:a 2 {filename}"
+#         os.system(command)
+#         return "output.mp3"
+
+# iface = gr.Interface(fn=function1, inputs="text", outputs=[gr.Audio(label="Audio",type="numpy")])
+# iface.launch()
 
 # import gradio as gr
 # import requests
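
As committed, the new function would hit several runtime problems: it calls re.sub without importing re, references the undefined name ste1 (apparently a typo for set1), builds headers as a set literal where requests expects a dict, and prints its result instead of returning it, so the Gradio output textbox would stay empty. Below is a minimal corrected sketch, not the committed code: the Authorization header name, the request timeout, the empty-string fallbacks, and the trigger guard are assumptions; test7–test12 remain the Space's own secret environment variables.

import os
import re

import gradio as gr
import requests


def function(prompt):
    # Endpoint and credential come from the Space's secrets, as in the commit.
    url = os.environ.get("test7")
    # Assumption: test8 holds an authorization value; the commit builds a set literal here.
    headers = {"Authorization": os.environ.get("test8", "")}
    data = {"input": prompt}

    response = requests.post(url, headers=headers, json=data, timeout=60)
    response.raise_for_status()
    output = response.json()["output"]

    trigger1 = os.environ.get("test9", "")
    trigger2 = os.environ.get("test10", "")
    set1 = os.environ.get("test11", "")
    set2 = os.environ.get("test12", "")

    if trigger1 and trigger2 and trigger1 in output and trigger2 in output:
        # re.escape guards against regex metacharacters in the trigger strings;
        # ste1 from the commit is read as set1 here.
        output = re.sub(re.escape(trigger2), set1, output)
        output = re.sub(re.escape(trigger1), set2, output)

    # Return (rather than print) so the Gradio textbox can display the result.
    return output


iface = gr.Interface(fn=function, inputs="text", outputs="text")

if __name__ == "__main__":
    iface.launch()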