tommy24 commited on
Commit
9ad0a66
·
1 Parent(s): e05fd5d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +95 -95
app.py CHANGED
@@ -74,111 +74,111 @@
74
  # iface = gr.Interface(fn=function1, inputs="text", outputs=[gr.Audio(label="Audio",type="numpy")])
75
  # iface.launch()
76
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
77
  # import gradio as gr
78
  # import requests
79
- # import urllib.request
80
- # from pydub import AudioSegment
81
- # import numpy as np
82
  # import os
83
- # import sys
84
- # import wave
85
- # import io
86
- # import base64
87
- # import azure.cognitiveservices.speech as speechsdk
88
-
89
- # speech_key = os.environ.get("test3")
90
- # service_region = os.environ.get("test4")
91
 
92
- # speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region)
93
- # # Note: the voice setting will not overwrite the voice element in input SSML.
94
- # speech_config.speech_synthesis_voice_name = os.environ.get("test5")
95
-
96
- # def function1(prompt):
97
  # response = requests.post("https://tommy24-testing3.hf.space/run/predict", json={
98
  # "data": [
99
  # prompt,
100
  # ]}).json()
101
  # message = response["data"][0]
102
- # speech_synthesizer = speechsdk.SpeechSynthesizer(speech_config=speech_config)
103
- # result = speech_synthesizer.speak_text_async(message).get()
104
- # if result.reason == speechsdk.ResultReason.SynthesizingAudioCompleted:
105
- # audio_stream = io.BytesIO(result.audio_data)
106
-
107
- # # Create a wave file object and write the audio data to it
108
- # with wave.open("audio.wav", 'wb') as wave_file:
109
- # wave_file.setnchannels(1)
110
- # wave_file.setsampwidth(2)
111
- # wave_file.setframerate(16000)
112
- # wave_file.writeframesraw(audio_stream.getvalue())
113
-
114
- # # Use ffmpeg to convert the wave file to an mp3 file
115
- # filename = "output.mp3"
116
-
117
- # if os.path.exists(filename):
118
- # os.remove(filename)
119
- # else:
120
- # pass
121
- # command = f"ffmpeg -i audio.wav -y -codec:a libmp3lame -qscale:a 2 {filename}"
122
- # os.system(command)
123
- # return "output.mp3"
124
 
125
- # iface = gr.Interface(fn=function1, inputs="text", outputs=[gr.Audio(label="Audio",type="numpy")])
126
- # iface.launch()
 
 
 
 
127
 
128
- import gradio as gr
129
- import requests
130
- import json
131
- import os
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
132
 
133
- def function2(prompt):
134
- response = requests.post("https://tommy24-testing3.hf.space/run/predict", json={
135
- "data": [
136
- prompt,
137
- ]}).json()
138
- message = response["data"][0]
139
 
140
- url = "https://api.dynapictures.com/designs/7c4aba1d73"
141
- test6 = os.environ.get("test6")
142
- headers = {
143
- "Authorization": f"Bearer {test6}",
144
- "Content-Type": "application/json"
145
- }
146
-
147
- payload = {
148
- "format": "jpeg",
149
- "metadata": "some text",
150
- "params": [
151
- {
152
- "name": "bubble",
153
- "imageUrl": "https://dynapictures.com/b/rest/public/media/0ceb636a01/images/568b337221.png"
154
- },
155
- {
156
- "name": "quotes",
157
- "imageUrl": "https://dynapictures.com/b/rest/public/media/0ceb636a01/images/779f8b9041.png"
158
- },
159
- {
160
- "name": "text",
161
- "text": message
162
- },
163
- {
164
- "name": "avatar",
165
- "imageUrl": "https://dynapictures.com/b/rest/public/media/0ceb636a01/images/2f7ddd7b55.jpg"
166
- },
167
- {
168
- "name": "name",
169
- "text": "JohnAI"
170
- },
171
- {
172
- "name": "title",
173
- "text": "Automated"
174
- }
175
- ]
176
- }
177
-
178
- response = requests.post(url, headers=headers, data=json.dumps(payload))
179
- response = response.json()
180
- response = response["imageUrl"]
181
- return response
182
-
183
- iface = gr.Interface(fn=function2, inputs="text", outputs="text")
184
- iface.launch()
 
74
  # iface = gr.Interface(fn=function1, inputs="text", outputs=[gr.Audio(label="Audio",type="numpy")])
75
  # iface.launch()
76
 
77
# --- Dependencies -----------------------------------------------------------
# Standard library
import base64
import io
import os
import sys
import urllib.request
import wave

# Third-party
import azure.cognitiveservices.speech as speechsdk
import gradio as gr
import numpy as np
import requests
from pydub import AudioSegment

# --- Azure Speech configuration ---------------------------------------------
# Credentials and region are read from environment variables so no secrets
# live in the source tree.
speech_key = os.environ.get("test3")
service_region = os.environ.get("test4")

speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region)
# Note: the voice setting will not overwrite the voice element in input SSML.
speech_config.speech_synthesis_voice_name = os.environ.get("test5")
96
def function1(prompt):
    """Synthesize the chatbot's reply to *prompt* as an MP3 file.

    Pipeline:
      1. POST *prompt* to the remote HF Space and take the text reply.
      2. Synthesize the reply with Azure TTS (module-level ``speech_config``).
      3. Write the raw audio to ``audio.wav`` and transcode it with ffmpeg.

    Returns:
        The output filename ``"output.mp3"`` on success; implicitly ``None``
        when synthesis does not complete (original behavior preserved).
    """
    response = requests.post(
        "https://tommy24-testing3.hf.space/run/predict",
        json={"data": [prompt]},
    ).json()
    message = response["data"][0]

    speech_synthesizer = speechsdk.SpeechSynthesizer(speech_config=speech_config)
    result = speech_synthesizer.speak_text_async(message).get()
    if result.reason == speechsdk.ResultReason.SynthesizingAudioCompleted:
        audio_stream = io.BytesIO(result.audio_data)

        # Persist the synthesized audio as mono / 16-bit / 16 kHz WAV.
        with wave.open("audio.wav", 'wb') as wave_file:
            wave_file.setnchannels(1)
            wave_file.setsampwidth(2)
            wave_file.setframerate(16000)
            wave_file.writeframesraw(audio_stream.getvalue())

        filename = "output.mp3"
        # Remove any stale output so ffmpeg always writes a fresh file
        # (the redundant `else: pass` branch was dropped).
        if os.path.exists(filename):
            os.remove(filename)

        # BUG FIX: the output path in the ffmpeg command was garbled to the
        # literal text "(unknown)"; interpolate the real target filename.
        command = f"ffmpeg -i audio.wav -y -codec:a libmp3lame -qscale:a 2 {filename}"
        os.system(command)
        return "output.mp3"
124
+
125
# Expose the synthesis pipeline as a simple text-in / audio-out web UI.
iface = gr.Interface(
    fn=function1,
    inputs="text",
    outputs=[gr.Audio(label="Audio", type="numpy")],
)
iface.launch()
127
+
128
  # import gradio as gr
129
  # import requests
130
+ # import json
 
 
131
  # import os
 
 
 
 
 
 
 
 
132
 
133
+ # def function2(prompt):
 
 
 
 
134
  # response = requests.post("https://tommy24-testing3.hf.space/run/predict", json={
135
  # "data": [
136
  # prompt,
137
  # ]}).json()
138
  # message = response["data"][0]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
139
 
140
+ # url = "https://api.dynapictures.com/designs/7c4aba1d73"
141
+ # test6 = os.environ.get("test6")
142
+ # headers = {
143
+ # "Authorization": f"Bearer {test6}",
144
+ # "Content-Type": "application/json"
145
+ # }
146
 
147
+ # payload = {
148
+ # "format": "jpeg",
149
+ # "metadata": "some text",
150
+ # "params": [
151
+ # {
152
+ # "name": "bubble",
153
+ # "imageUrl": "https://dynapictures.com/b/rest/public/media/0ceb636a01/images/568b337221.png"
154
+ # },
155
+ # {
156
+ # "name": "quotes",
157
+ # "imageUrl": "https://dynapictures.com/b/rest/public/media/0ceb636a01/images/779f8b9041.png"
158
+ # },
159
+ # {
160
+ # "name": "text",
161
+ # "text": message
162
+ # },
163
+ # {
164
+ # "name": "avatar",
165
+ # "imageUrl": "https://dynapictures.com/b/rest/public/media/0ceb636a01/images/2f7ddd7b55.jpg"
166
+ # },
167
+ # {
168
+ # "name": "name",
169
+ # "text": "JohnAI"
170
+ # },
171
+ # {
172
+ # "name": "title",
173
+ # "text": "Automated"
174
+ # }
175
+ # ]
176
+ # }
177
 
178
+ # response = requests.post(url, headers=headers, data=json.dumps(payload))
179
+ # response = response.json()
180
+ # response = response["imageUrl"]
181
+ # return response
 
 
182
 
183
+ # iface = gr.Interface(fn=function2, inputs="text", outputs="text")
184
+ # iface.launch()