appl044 JavaFXpert committed
Commit 8592444
0 Parent(s):

Duplicate from JavaFXpert/Chat-GPT-LangChain

Co-authored-by: James Weaver <[email protected]>

.gitattributes ADDED
@@ -0,0 +1,36 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.mp4 filter=lfs diff=lfs merge=lfs -text
+ *.png filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
+ ---
+ title: GPT+WolframAlpha+Whisper
+ emoji: 👀
+ colorFrom: red
+ colorTo: gray
+ sdk: gradio
+ sdk_version: 3.16.1
+ app_file: app.py
+ pinned: false
+ license: apache-2.0
+ duplicated_from: JavaFXpert/Chat-GPT-LangChain
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
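The YAML block above is the standard Hugging Face Spaces configuration; the Space runs app.py (added below) with the Gradio SDK pinned to 3.16.1. Roughly, running it outside the Space would mean installing the dependencies from the repo's requirements.txt (not shown in this commit view; the package names below are an assumption) and exporting the environment variables that app.py reads, then launching the configured app file:

    pip install gradio==3.16.1 langchain openai openai-whisper boto3 faiss-cpu   # assumed package set, not the repo's pinned list
    export NEWS_API_KEY=... TMDB_BEARER_TOKEN=...
    export AWS_ACCESS_KEY_ID=... AWS_SECRET_ACCESS_KEY=... AWS_DEFAULT_REGION=...
    export EXHUMAN_API_KEY=...        # plus any keys the selected LangChain tools expect (SerpAPI, WolframAlpha)
    python app.py                     # the OpenAI key is pasted into the UI at runtime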
app.py ADDED
@@ -0,0 +1,878 @@
1
+ import io
2
+ import os
3
+ import ssl
4
+ from contextlib import closing
5
+ from typing import Optional, Tuple
6
+ import datetime
7
+
8
+ import boto3
9
+ import gradio as gr
10
+ import requests
11
+
12
+ # UNCOMMENT TO USE WHISPER
13
+ import warnings
14
+ import whisper
15
+
16
+ from langchain import ConversationChain, LLMChain
17
+
18
+ from langchain.agents import load_tools, initialize_agent
19
+ from langchain.chains.conversation.memory import ConversationBufferMemory
20
+ from langchain.llms import OpenAI
21
+ from threading import Lock
22
+
23
+ # Console to variable
24
+ from io import StringIO
25
+ import sys
26
+ import re
27
+
28
+ from openai.error import AuthenticationError, InvalidRequestError, RateLimitError
29
+
30
+ # Pertains to Express-inator functionality
31
+ from langchain.prompts import PromptTemplate
32
+
33
+ from polly_utils import PollyVoiceData, NEURAL_ENGINE
34
+ from azure_utils import AzureVoiceData
35
+
36
+ # Pertains to question answering functionality
37
+ from langchain.embeddings.openai import OpenAIEmbeddings
38
+ from langchain.text_splitter import CharacterTextSplitter
39
+ from langchain.vectorstores.faiss import FAISS
40
+ from langchain.docstore.document import Document
41
+ from langchain.chains.question_answering import load_qa_chain
42
+
43
+ news_api_key = os.environ["NEWS_API_KEY"]
44
+ tmdb_bearer_token = os.environ["TMDB_BEARER_TOKEN"]
45
+
46
+ TOOLS_LIST = ['serpapi', 'wolfram-alpha', 'pal-math', 'pal-colored-objects', 'news-api', 'tmdb-api',
47
+ 'open-meteo-api'] # 'google-search'
48
+ TOOLS_DEFAULT_LIST = ['serpapi', 'pal-math']
49
+ BUG_FOUND_MSG = "Congratulations, you've found a bug in this application!"
50
+ # AUTH_ERR_MSG = "Please paste your OpenAI key from openai.com to use this application. It is not necessary to hit a button or key after pasting it."
51
+ AUTH_ERR_MSG = "Please paste your OpenAI key from openai.com to use this application. "
52
+ MAX_TOKENS = 512
53
+
54
+ LOOPING_TALKING_HEAD = "videos/Masahiro.mp4"
55
+ TALKING_HEAD_WIDTH = "192"
56
+ MAX_TALKING_HEAD_TEXT_LENGTH = 155
57
+
58
+ # Pertains to Express-inator functionality
59
+ NUM_WORDS_DEFAULT = 0
60
+ MAX_WORDS = 400
61
+ FORMALITY_DEFAULT = "N/A"
62
+ TEMPERATURE_DEFAULT = 0.5
63
+ EMOTION_DEFAULT = "N/A"
64
+ LANG_LEVEL_DEFAULT = "N/A"
65
+ TRANSLATE_TO_DEFAULT = "N/A"
66
+ LITERARY_STYLE_DEFAULT = "N/A"
67
+ PROMPT_TEMPLATE = PromptTemplate(
68
+ input_variables=["original_words", "num_words", "formality", "emotions", "lang_level", "translate_to",
69
+ "literary_style"],
70
+ template="Restate {num_words}{formality}{emotions}{lang_level}{translate_to}{literary_style}the following: \n{original_words}\n",
71
+ )
72
+
73
+ POLLY_VOICE_DATA = PollyVoiceData()
74
+ AZURE_VOICE_DATA = AzureVoiceData()
75
+
76
+ # Pertains to WHISPER functionality
77
+ WHISPER_DETECT_LANG = "Detect language"
78
+
79
+
80
+ # UNCOMMENT TO USE WHISPER
81
+ warnings.filterwarnings("ignore")
82
+ WHISPER_MODEL = whisper.load_model("tiny")
83
+ print("WHISPER_MODEL", WHISPER_MODEL)
84
+
85
+
86
+ # UNCOMMENT TO USE WHISPER
87
+ def transcribe(aud_inp, whisper_lang):
88
+ if aud_inp is None:
89
+ return ""
90
+ aud = whisper.load_audio(aud_inp)
91
+ aud = whisper.pad_or_trim(aud)
92
+ mel = whisper.log_mel_spectrogram(aud).to(WHISPER_MODEL.device)
93
+ _, probs = WHISPER_MODEL.detect_language(mel)
94
+ options = whisper.DecodingOptions()
95
+ if whisper_lang != WHISPER_DETECT_LANG:
96
+ whisper_lang_code = POLLY_VOICE_DATA.get_whisper_lang_code(whisper_lang)
97
+ options = whisper.DecodingOptions(language=whisper_lang_code)
98
+ result = whisper.decode(WHISPER_MODEL, mel, options)
99
+ print("result.text", result.text)
100
+ result_text = ""
101
+ if result and result.text:
102
+ result_text = result.text
103
+ return result_text
104
+
105
+
106
+ # Temporarily address Wolfram Alpha SSL certificate issue
107
+ ssl._create_default_https_context = ssl._create_unverified_context
108
+
109
+
110
+ # TEMPORARY FOR TESTING
111
+ def transcribe_dummy(aud_inp_tb, whisper_lang):
112
+ if aud_inp_tb is None:
113
+ return ""
114
+ # aud = whisper.load_audio(aud_inp)
115
+ # aud = whisper.pad_or_trim(aud)
116
+ # mel = whisper.log_mel_spectrogram(aud).to(WHISPER_MODEL.device)
117
+ # _, probs = WHISPER_MODEL.detect_language(mel)
118
+ # options = whisper.DecodingOptions()
119
+ # options = whisper.DecodingOptions(language="ja")
120
+ # result = whisper.decode(WHISPER_MODEL, mel, options)
121
+ result_text = "Whisper will detect language"
122
+ if whisper_lang != WHISPER_DETECT_LANG:
123
+ whisper_lang_code = POLLY_VOICE_DATA.get_whisper_lang_code(whisper_lang)
124
+ result_text = f"Whisper will use lang code: {whisper_lang_code}"
125
+ print("result_text", result_text)
126
+ return aud_inp_tb
127
+
128
+
129
+ # Pertains to Express-inator functionality
130
+ def transform_text(desc, express_chain, num_words, formality,
131
+ anticipation_level, joy_level, trust_level,
132
+ fear_level, surprise_level, sadness_level, disgust_level, anger_level,
133
+ lang_level, translate_to, literary_style):
134
+ num_words_prompt = ""
135
+ if num_words and int(num_words) != 0:
136
+ num_words_prompt = "using up to " + str(num_words) + " words, "
137
+
138
+ # Change some arguments to lower case
139
+ formality = formality.lower()
140
+ anticipation_level = anticipation_level.lower()
141
+ joy_level = joy_level.lower()
142
+ trust_level = trust_level.lower()
143
+ fear_level = fear_level.lower()
144
+ surprise_level = surprise_level.lower()
145
+ sadness_level = sadness_level.lower()
146
+ disgust_level = disgust_level.lower()
147
+ anger_level = anger_level.lower()
148
+
149
+ formality_str = ""
150
+ if formality != "n/a":
151
+ formality_str = "in a " + formality + " manner, "
152
+
153
+ # put all emotions into a list
154
+ emotions = []
155
+ if anticipation_level != "n/a":
156
+ emotions.append(anticipation_level)
157
+ if joy_level != "n/a":
158
+ emotions.append(joy_level)
159
+ if trust_level != "n/a":
160
+ emotions.append(trust_level)
161
+ if fear_level != "n/a":
162
+ emotions.append(fear_level)
163
+ if surprise_level != "n/a":
164
+ emotions.append(surprise_level)
165
+ if sadness_level != "n/a":
166
+ emotions.append(sadness_level)
167
+ if disgust_level != "n/a":
168
+ emotions.append(disgust_level)
169
+ if anger_level != "n/a":
170
+ emotions.append(anger_level)
171
+
172
+ emotions_str = ""
173
+ if len(emotions) > 0:
174
+ if len(emotions) == 1:
175
+ emotions_str = "with emotion of " + emotions[0] + ", "
176
+ else:
177
+ emotions_str = "with emotions of " + ", ".join(emotions[:-1]) + " and " + emotions[-1] + ", "
178
+
179
+ lang_level_str = ""
180
+ if lang_level != LANG_LEVEL_DEFAULT:
181
+ lang_level_str = "at a " + lang_level + " level, " if translate_to == TRANSLATE_TO_DEFAULT else ""
182
+
183
+ translate_to_str = ""
184
+ if translate_to != TRANSLATE_TO_DEFAULT:
185
+ translate_to_str = "translated to " + (
186
+ "" if lang_level == TRANSLATE_TO_DEFAULT else lang_level + " level ") + translate_to + ", "
187
+
188
+ literary_style_str = ""
189
+ if literary_style != LITERARY_STYLE_DEFAULT:
190
+ if literary_style == "Prose":
191
+ literary_style_str = "as prose, "
192
+ if literary_style == "Story":
193
+ literary_style_str = "as a story, "
194
+ elif literary_style == "Summary":
195
+ literary_style_str = "as a summary, "
196
+ elif literary_style == "Outline":
197
+ literary_style_str = "as an outline numbers and lower case letters, "
198
+ elif literary_style == "Bullets":
199
+ literary_style_str = "as bullet points using bullets, "
200
+ elif literary_style == "Poetry":
201
+ literary_style_str = "as a poem, "
202
+ elif literary_style == "Haiku":
203
+ literary_style_str = "as a haiku, "
204
+ elif literary_style == "Limerick":
205
+ literary_style_str = "as a limerick, "
206
+ elif literary_style == "Rap":
207
+ literary_style_str = "as a rap, "
208
+ elif literary_style == "Joke":
209
+ literary_style_str = "as a very funny joke with a setup and punchline, "
210
+ elif literary_style == "Knock-knock":
211
+ literary_style_str = "as a very funny knock-knock joke, "
212
+ elif literary_style == "FAQ":
213
+ literary_style_str = "as a FAQ with several questions and answers, "
214
+
215
+ formatted_prompt = PROMPT_TEMPLATE.format(
216
+ original_words=desc,
217
+ num_words=num_words_prompt,
218
+ formality=formality_str,
219
+ emotions=emotions_str,
220
+ lang_level=lang_level_str,
221
+ translate_to=translate_to_str,
222
+ literary_style=literary_style_str
223
+ )
224
+
225
+ trans_instr = num_words_prompt + formality_str + emotions_str + lang_level_str + translate_to_str + literary_style_str
226
+ if express_chain and len(trans_instr.strip()) > 0:
227
+ generated_text = express_chain.run(
228
+ {'original_words': desc, 'num_words': num_words_prompt, 'formality': formality_str,
229
+ 'emotions': emotions_str, 'lang_level': lang_level_str, 'translate_to': translate_to_str,
230
+ 'literary_style': literary_style_str}).strip()
231
+ else:
232
+ print("Not transforming text")
233
+ generated_text = desc
234
+
235
+ # double each newline in generated_text so paragraphs render in the chat display
236
+ generated_text = generated_text.replace("\n", "\n\n")
237
+
238
+ prompt_plus_generated = "GPT prompt: " + formatted_prompt + "\n\n" + generated_text
239
+
240
+ print("\n==== date/time: " + str(datetime.datetime.now() - datetime.timedelta(hours=5)) + " ====")
241
+ print("prompt_plus_generated: " + prompt_plus_generated)
242
+
243
+ return generated_text
244
+
245
+
246
+ def load_chain(tools_list, llm):
247
+ chain = None
248
+ express_chain = None
249
+ memory = None
250
+ if llm:
251
+ print("\ntools_list", tools_list)
252
+ tool_names = tools_list
253
+ tools = load_tools(tool_names, llm=llm, news_api_key=news_api_key, tmdb_bearer_token=tmdb_bearer_token)
254
+
255
+ memory = ConversationBufferMemory(memory_key="chat_history")
256
+
257
+ chain = initialize_agent(tools, llm, agent="conversational-react-description", verbose=True, memory=memory)
258
+ express_chain = LLMChain(llm=llm, prompt=PROMPT_TEMPLATE, verbose=True)
259
+ return chain, express_chain, memory
260
+
261
+
262
+ def set_openai_api_key(api_key):
263
+ """Set the api key and return chain.
264
+ If no api_key, then None is returned.
265
+ """
266
+ if api_key and api_key.startswith("sk-") and len(api_key) > 50:
267
+ os.environ["OPENAI_API_KEY"] = api_key
268
+ print("\n\n ++++++++++++++ Setting OpenAI API key ++++++++++++++ \n\n")
269
+ print(str(datetime.datetime.now()) + ": Before OpenAI, OPENAI_API_KEY length: " + str(
270
+ len(os.environ["OPENAI_API_KEY"])))
271
+ llm = OpenAI(temperature=0, max_tokens=MAX_TOKENS)
272
+ print(str(datetime.datetime.now()) + ": After OpenAI, OPENAI_API_KEY length: " + str(
273
+ len(os.environ["OPENAI_API_KEY"])))
274
+ chain, express_chain, memory = load_chain(TOOLS_DEFAULT_LIST, llm)
275
+
276
+ # Pertains to question answering functionality
277
+ embeddings = OpenAIEmbeddings()
278
+ qa_chain = load_qa_chain(OpenAI(temperature=0), chain_type="stuff")
279
+
280
+ print(str(datetime.datetime.now()) + ": After load_chain, OPENAI_API_KEY length: " + str(
281
+ len(os.environ["OPENAI_API_KEY"])))
282
+ os.environ["OPENAI_API_KEY"] = ""
283
+ return chain, express_chain, llm, embeddings, qa_chain, memory
284
+ return None, None, None, None, None, None
285
+
286
+
287
+ def run_chain(chain, inp, capture_hidden_text):
288
+ output = ""
289
+ hidden_text = None
290
+ if capture_hidden_text:
291
+ error_msg = None
292
+ tmp = sys.stdout
293
+ hidden_text_io = StringIO()
294
+ sys.stdout = hidden_text_io
295
+
296
+ try:
297
+ output = chain.run(input=inp)
298
+ except AuthenticationError as ae:
299
+ error_msg = AUTH_ERR_MSG + str(datetime.datetime.now()) + ". " + str(ae)
300
+ print("error_msg", error_msg)
301
+ except RateLimitError as rle:
302
+ error_msg = "\n\nRateLimitError: " + str(rle)
303
+ except ValueError as ve:
304
+ error_msg = "\n\nValueError: " + str(ve)
305
+ except InvalidRequestError as ire:
306
+ error_msg = "\n\nInvalidRequestError: " + str(ire)
307
+ except Exception as e:
308
+ error_msg = "\n\n" + BUG_FOUND_MSG + ":\n\n" + str(e)
309
+
310
+ sys.stdout = tmp
311
+ hidden_text = hidden_text_io.getvalue()
312
+
313
+ # remove escape characters from hidden_text
314
+ hidden_text = re.sub(r'\x1b[^m]*m', '', hidden_text)
315
+
316
+ # remove "Entering new AgentExecutor chain..." from hidden_text
317
+ hidden_text = re.sub(r"Entering new AgentExecutor chain...\n", "", hidden_text)
318
+
319
+ # remove "Finished chain." from hidden_text
320
+ hidden_text = re.sub(r"Finished chain.", "", hidden_text)
321
+
322
+ # Add newline after "Thought:" "Action:" "Observation:" "Input:" and "AI:"
323
+ hidden_text = re.sub(r"Thought:", "\n\nThought:", hidden_text)
324
+ hidden_text = re.sub(r"Action:", "\n\nAction:", hidden_text)
325
+ hidden_text = re.sub(r"Observation:", "\n\nObservation:", hidden_text)
326
+ hidden_text = re.sub(r"Input:", "\n\nInput:", hidden_text)
327
+ hidden_text = re.sub(r"AI:", "\n\nAI:", hidden_text)
328
+
329
+ if error_msg:
330
+ hidden_text += error_msg
331
+
332
+ print("hidden_text: ", hidden_text)
333
+ else:
334
+ try:
335
+ output = chain.run(input=inp)
336
+ except AuthenticationError as ae:
337
+ output = AUTH_ERR_MSG + str(datetime.datetime.now()) + ". " + str(ae)
338
+ print("output", output)
339
+ except RateLimitError as rle:
340
+ output = "\n\nRateLimitError: " + str(rle)
341
+ except ValueError as ve:
342
+ output = "\n\nValueError: " + str(ve)
343
+ except InvalidRequestError as ire:
344
+ output = "\n\nInvalidRequestError: " + str(ire)
345
+ except Exception as e:
346
+ output = "\n\n" + BUG_FOUND_MSG + ":\n\n" + str(e)
347
+
348
+ return output, hidden_text
349
+
350
+
351
+ def reset_memory(history, memory):
352
+ memory.clear()
353
+ history = []
354
+ return history, history, memory
355
+
356
+
357
+ class ChatWrapper:
358
+
359
+ def __init__(self):
360
+ self.lock = Lock()
361
+
362
+ def __call__(
363
+ self, api_key: str, inp: str, history: Optional[Tuple[str, str]], chain: Optional[ConversationChain],
364
+ trace_chain: bool, speak_text: bool, talking_head: bool, monologue: bool, express_chain: Optional[LLMChain],
365
+ num_words, formality, anticipation_level, joy_level, trust_level,
366
+ fear_level, surprise_level, sadness_level, disgust_level, anger_level,
367
+ lang_level, translate_to, literary_style, qa_chain, docsearch, use_embeddings
368
+ ):
369
+ """Execute the chat functionality."""
370
+ self.lock.acquire()
371
+ try:
372
+ print("\n==== date/time: " + str(datetime.datetime.now()) + " ====")
373
+ print("inp: " + inp)
374
+ print("trace_chain: ", trace_chain)
375
+ print("speak_text: ", speak_text)
376
+ print("talking_head: ", talking_head)
377
+ print("monologue: ", monologue)
378
+ history = history or []
379
+ # If chain is None, that is because no API key was provided.
380
+ output = "Please paste your OpenAI key from openai.com to use this app. " + str(datetime.datetime.now())
381
+ hidden_text = output
382
+
383
+ if chain:
384
+ # Set OpenAI key
385
+ import openai
386
+ openai.api_key = api_key
387
+ if not monologue:
388
+ if use_embeddings:
389
+ if inp and inp.strip() != "":
390
+ if docsearch:
391
+ docs = docsearch.similarity_search(inp)
392
+ output = str(qa_chain.run(input_documents=docs, question=inp))
393
+ else:
394
+ output, hidden_text = "Please supply some text in the the Embeddings tab.", None
395
+ else:
396
+ output, hidden_text = "What's on your mind?", None
397
+ else:
398
+ output, hidden_text = run_chain(chain, inp, capture_hidden_text=trace_chain)
399
+ else:
400
+ output, hidden_text = inp, None
401
+
402
+ output = transform_text(output, express_chain, num_words, formality, anticipation_level, joy_level,
403
+ trust_level,
404
+ fear_level, surprise_level, sadness_level, disgust_level, anger_level,
405
+ lang_level, translate_to, literary_style)
406
+
407
+ text_to_display = output
408
+ if trace_chain:
409
+ text_to_display = hidden_text + "\n\n" + output
410
+ history.append((inp, text_to_display))
411
+
412
+ html_video, temp_file, html_audio, temp_aud_file = None, None, None, None
413
+ if speak_text:
414
+ if talking_head:
415
+ if len(output) <= MAX_TALKING_HEAD_TEXT_LENGTH:
416
+ html_video, temp_file = do_html_video_speak(output, translate_to)
417
+ else:
418
+ temp_file = LOOPING_TALKING_HEAD
419
+ html_video = create_html_video(temp_file, TALKING_HEAD_WIDTH)
420
+ html_audio, temp_aud_file = do_html_audio_speak(output, translate_to)
421
+ else:
422
+ html_audio, temp_aud_file = do_html_audio_speak(output, translate_to)
423
+ else:
424
+ if talking_head:
425
+ temp_file = LOOPING_TALKING_HEAD
426
+ html_video = create_html_video(temp_file, TALKING_HEAD_WIDTH)
427
+ else:
428
+ # html_audio, temp_aud_file = do_html_audio_speak(output, translate_to)
429
+ # html_video = create_html_video(temp_file, "128")
430
+ pass
431
+
432
+ except Exception as e:
433
+ raise e
434
+ finally:
435
+ self.lock.release()
436
+ return history, history, html_video, temp_file, html_audio, temp_aud_file, ""
437
+ # return history, history, html_audio, temp_aud_file, ""
438
+
439
+
440
+ chat = ChatWrapper()
441
+
442
+
443
+ def do_html_audio_speak(words_to_speak, polly_language):
444
+ polly_client = boto3.Session(
445
+ aws_access_key_id=os.environ["AWS_ACCESS_KEY_ID"],
446
+ aws_secret_access_key=os.environ["AWS_SECRET_ACCESS_KEY"],
447
+ region_name=os.environ["AWS_DEFAULT_REGION"]
448
+ ).client('polly')
449
+
450
+ # voice_id, language_code, engine = POLLY_VOICE_DATA.get_voice(polly_language, "Female")
451
+ voice_id, language_code, engine = POLLY_VOICE_DATA.get_voice(polly_language, "Male")
452
+ if not voice_id:
453
+ # voice_id = "Joanna"
454
+ voice_id = "Matthew"
455
+ language_code = "en-US"
456
+ engine = NEURAL_ENGINE
457
+ response = polly_client.synthesize_speech(
458
+ Text=words_to_speak,
459
+ OutputFormat='mp3',
460
+ VoiceId=voice_id,
461
+ LanguageCode=language_code,
462
+ Engine=engine
463
+ )
464
+
465
+ html_audio = '<pre>no audio</pre>'
466
+
467
+ # Save the audio stream returned by Amazon Polly on Lambda's temp directory
468
+ if "AudioStream" in response:
469
+ with closing(response["AudioStream"]) as stream:
470
+ # output = os.path.join("/tmp/", "speech.mp3")
471
+
472
+ try:
473
+ with open('audios/tempfile.mp3', 'wb') as f:
474
+ f.write(stream.read())
475
+ temp_aud_file = gr.File("audios/tempfile.mp3")
476
+ temp_aud_file_url = "/file=" + temp_aud_file.value['name']
477
+ html_audio = f'<audio autoplay><source src={temp_aud_file_url} type="audio/mp3"></audio>'
478
+ except IOError as error:
479
+ # Could not write to file, exit gracefully
480
+ print(error)
481
+ return None, None
482
+ else:
483
+ # The response didn't contain audio data, exit gracefully
484
+ print("Could not stream audio")
485
+ return None, None
486
+
487
+ return html_audio, "audios/tempfile.mp3"
488
+
489
+
490
+ def create_html_video(file_name, width):
491
+ temp_file_url = "/file=" + tmp_file.value['name']
492
+ html_video = f'<video width={width} height={width} autoplay muted loop><source src={temp_file_url} type="video/mp4" poster="Masahiro.png"></video>'
493
+ return html_video
494
+
495
+
496
+ def do_html_video_speak(words_to_speak, azure_language):
497
+ azure_voice = AZURE_VOICE_DATA.get_voice(azure_language, "Male")
498
+ if not azure_voice:
499
+ azure_voice = "en-US-ChristopherNeural"
500
+
501
+ headers = {"Authorization": f"Bearer {os.environ['EXHUMAN_API_KEY']}"}
502
+ body = {
503
+ 'bot_name': 'Masahiro',
504
+ 'bot_response': words_to_speak,
505
+ 'azure_voice': azure_voice,
506
+ 'azure_style': 'friendly',
507
+ 'animation_pipeline': 'high_speed',
508
+ }
509
+ api_endpoint = "https://api.exh.ai/animations/v1/generate_lipsync"
510
+ res = requests.post(api_endpoint, json=body, headers=headers)
511
+ print("res.status_code: ", res.status_code)
512
+
513
+ html_video = '<pre>no video</pre>'
514
+ if isinstance(res.content, bytes):
515
+ response_stream = io.BytesIO(res.content)
516
+ print("len(res.content)): ", len(res.content))
517
+
518
+ with open('videos/tempfile.mp4', 'wb') as f:
519
+ f.write(response_stream.read())
520
+ temp_file = gr.File("videos/tempfile.mp4")
521
+ temp_file_url = "/file=" + temp_file.value['name']
522
+ html_video = f'<video width={TALKING_HEAD_WIDTH} height={TALKING_HEAD_WIDTH} autoplay><source src={temp_file_url} type="video/mp4" poster="Masahiro.png"></video>'
523
+ else:
524
+ print('video url unknown')
525
+ return html_video, "videos/tempfile.mp4"
526
+
527
+
528
+ def update_selected_tools(widget, state, llm):
529
+ if widget:
530
+ state = widget
531
+ chain, express_chain, memory = load_chain(state, llm)
532
+ return state, llm, chain, express_chain
533
+
534
+
535
+ def update_talking_head(widget, state):
536
+ if widget:
537
+ state = widget
538
+
539
+ video_html_talking_head = create_html_video(LOOPING_TALKING_HEAD, TALKING_HEAD_WIDTH)
540
+ return state, video_html_talking_head
541
+ else:
542
+ # return state, create_html_video(LOOPING_TALKING_HEAD, "32")
543
+ return None, "<pre></pre>"
544
+
545
+
546
+ def update_foo(widget, state):
547
+ if widget:
548
+ state = widget
549
+ return state
550
+
551
+
552
+ # Pertains to question answering functionality
553
+ def update_embeddings(embeddings_text, embeddings, qa_chain):
554
+ if embeddings_text:
555
+ text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
556
+ texts = text_splitter.split_text(embeddings_text)
557
+
558
+ docsearch = FAISS.from_texts(texts, embeddings)
559
+ print("Embeddings updated")
560
+ return docsearch
561
+
562
+
563
+ # Pertains to question answering functionality
564
+ def update_use_embeddings(widget, state):
565
+ if widget:
566
+ state = widget
567
+ return state
568
+
569
+
570
+ with gr.Blocks(css=".gradio-container {background-color: lightgray}") as block:
571
+ llm_state = gr.State()
572
+ history_state = gr.State()
573
+ chain_state = gr.State()
574
+ express_chain_state = gr.State()
575
+ tools_list_state = gr.State(TOOLS_DEFAULT_LIST)
576
+ trace_chain_state = gr.State(False)
577
+ speak_text_state = gr.State(False)
578
+ talking_head_state = gr.State(True)
579
+ monologue_state = gr.State(False) # Takes the input and repeats it back to the user, optionally transforming it.
580
+ memory_state = gr.State()
581
+
582
+ # Pertains to Express-inator functionality
583
+ num_words_state = gr.State(NUM_WORDS_DEFAULT)
584
+ formality_state = gr.State(FORMALITY_DEFAULT)
585
+ anticipation_level_state = gr.State(EMOTION_DEFAULT)
586
+ joy_level_state = gr.State(EMOTION_DEFAULT)
587
+ trust_level_state = gr.State(EMOTION_DEFAULT)
588
+ fear_level_state = gr.State(EMOTION_DEFAULT)
589
+ surprise_level_state = gr.State(EMOTION_DEFAULT)
590
+ sadness_level_state = gr.State(EMOTION_DEFAULT)
591
+ disgust_level_state = gr.State(EMOTION_DEFAULT)
592
+ anger_level_state = gr.State(EMOTION_DEFAULT)
593
+ lang_level_state = gr.State(LANG_LEVEL_DEFAULT)
594
+ translate_to_state = gr.State(TRANSLATE_TO_DEFAULT)
595
+ literary_style_state = gr.State(LITERARY_STYLE_DEFAULT)
596
+
597
+ # Pertains to WHISPER functionality
598
+ whisper_lang_state = gr.State(WHISPER_DETECT_LANG)
599
+
600
+ # Pertains to question answering functionality
601
+ embeddings_state = gr.State()
602
+ qa_chain_state = gr.State()
603
+ docsearch_state = gr.State()
604
+ use_embeddings_state = gr.State(False)
605
+
606
+ with gr.Tab("Chat"):
607
+ with gr.Row():
608
+ with gr.Column():
609
+ gr.HTML(
610
+ """<b><center>GPT + WolframAlpha + Whisper</center></b>
611
+ <p><center>New feature: <b>Embeddings</b></center></p>""")
612
+
613
+ openai_api_key_textbox = gr.Textbox(placeholder="Paste your OpenAI API key (sk-...)",
614
+ show_label=False, lines=1, type='password')
615
+
616
+ with gr.Row():
617
+ with gr.Column(scale=1, min_width=TALKING_HEAD_WIDTH, visible=True):
618
+ speak_text_cb = gr.Checkbox(label="Enable speech", value=False)
619
+ speak_text_cb.change(update_foo, inputs=[speak_text_cb, speak_text_state],
620
+ outputs=[speak_text_state])
621
+
622
+ my_file = gr.File(label="Upload a file", type="file", visible=False)
623
+ tmp_file = gr.File(LOOPING_TALKING_HEAD, visible=False)
624
+ # tmp_file_url = "/file=" + tmp_file.value['name']
625
+ htm_video = create_html_video(LOOPING_TALKING_HEAD, TALKING_HEAD_WIDTH)
626
+ video_html = gr.HTML(htm_video)
627
+
628
+ # my_aud_file = gr.File(label="Audio file", type="file", visible=True)
629
+ tmp_aud_file = gr.File("audios/tempfile.mp3", visible=False)
630
+ tmp_aud_file_url = "/file=" + tmp_aud_file.value['name']
631
+ htm_audio = f'<audio><source src={tmp_aud_file_url} type="audio/mp3"></audio>'
632
+ audio_html = gr.HTML(htm_audio)
633
+
634
+ with gr.Column(scale=7):
635
+ chatbot = gr.Chatbot()
636
+
637
+ with gr.Row():
638
+ message = gr.Textbox(label="What's on your mind??",
639
+ placeholder="What's the answer to life, the universe, and everything?",
640
+ lines=1)
641
+ submit = gr.Button(value="Send", variant="secondary").style(full_width=False)
642
+
643
+ # UNCOMMENT TO USE WHISPER
644
+ with gr.Row():
645
+ audio_comp = gr.Microphone(source="microphone", type="filepath", label="Just say it!",
646
+ interactive=True, streaming=False)
647
+ audio_comp.change(transcribe, inputs=[audio_comp, whisper_lang_state], outputs=[message])
648
+
649
+ # TEMPORARY FOR TESTING
650
+ # with gr.Row():
651
+ # audio_comp_tb = gr.Textbox(label="Just say it!", lines=1)
652
+ # audio_comp_tb.submit(transcribe_dummy, inputs=[audio_comp_tb, whisper_lang_state], outputs=[message])
653
+
654
+ gr.Examples(
655
+ examples=["How many people live in Canada?",
656
+ "What is 2 to the 30th power?",
657
+ "If x+y=10 and x-y=4, what are x and y?",
658
+ "How much did it rain in SF today?",
659
+ "Get me information about the movie 'Avatar'",
660
+ "What are the top tech headlines in the US?",
661
+ "On the desk, you see two blue booklets, two purple booklets, and two yellow pairs of sunglasses - "
662
+ "if I remove all the pairs of sunglasses from the desk, how many purple items remain on it?"],
663
+ inputs=message
664
+ )
665
+
666
+ with gr.Tab("Settings"):
667
+ tools_cb_group = gr.CheckboxGroup(label="Tools:", choices=TOOLS_LIST,
668
+ value=TOOLS_DEFAULT_LIST)
669
+ tools_cb_group.change(update_selected_tools,
670
+ inputs=[tools_cb_group, tools_list_state, llm_state],
671
+ outputs=[tools_list_state, llm_state, chain_state, express_chain_state])
672
+
673
+ trace_chain_cb = gr.Checkbox(label="Show reasoning chain in chat bubble", value=False)
674
+ trace_chain_cb.change(update_foo, inputs=[trace_chain_cb, trace_chain_state],
675
+ outputs=[trace_chain_state])
676
+
677
+ # speak_text_cb = gr.Checkbox(label="Speak text from agent", value=False)
678
+ # speak_text_cb.change(update_foo, inputs=[speak_text_cb, speak_text_state],
679
+ # outputs=[speak_text_state])
680
+
681
+ talking_head_cb = gr.Checkbox(label="Show talking head", value=True)
682
+ talking_head_cb.change(update_talking_head, inputs=[talking_head_cb, talking_head_state],
683
+ outputs=[talking_head_state, video_html])
684
+
685
+ monologue_cb = gr.Checkbox(label="Babel fish mode (translate/restate what you enter, no conversational agent)",
686
+ value=False)
687
+ monologue_cb.change(update_foo, inputs=[monologue_cb, monologue_state],
688
+ outputs=[monologue_state])
689
+
690
+ reset_btn = gr.Button(value="Reset chat", variant="secondary").style(full_width=False)
691
+ reset_btn.click(reset_memory, inputs=[history_state, memory_state], outputs=[chatbot, history_state, memory_state])
692
+
693
+ with gr.Tab("Whisper STT"):
694
+ whisper_lang_radio = gr.Radio(label="Whisper speech-to-text language:", choices=[
695
+ WHISPER_DETECT_LANG, "Arabic", "Arabic (Gulf)", "Catalan", "Chinese (Cantonese)", "Chinese (Mandarin)",
696
+ "Danish", "Dutch", "English (Australian)", "English (British)", "English (Indian)", "English (New Zealand)",
697
+ "English (South African)", "English (US)", "English (Welsh)", "Finnish", "French", "French (Canadian)",
698
+ "German", "German (Austrian)", "Georgian", "Hindi", "Icelandic", "Indonesian", "Italian", "Japanese",
699
+ "Korean", "Norwegian", "Polish",
700
+ "Portuguese (Brazilian)", "Portuguese (European)", "Romanian", "Russian", "Spanish (European)",
701
+ "Spanish (Mexican)", "Spanish (US)", "Swedish", "Turkish", "Ukrainian", "Welsh"],
702
+ value=WHISPER_DETECT_LANG)
703
+
704
+ whisper_lang_radio.change(update_foo,
705
+ inputs=[whisper_lang_radio, whisper_lang_state],
706
+ outputs=[whisper_lang_state])
707
+
708
+ with gr.Tab("Translate to"):
709
+ lang_level_radio = gr.Radio(label="Language level:", choices=[
710
+ LANG_LEVEL_DEFAULT, "1st grade", "2nd grade", "3rd grade", "4th grade", "5th grade", "6th grade",
711
+ "7th grade", "8th grade", "9th grade", "10th grade", "11th grade", "12th grade", "University"],
712
+ value=LANG_LEVEL_DEFAULT)
713
+ lang_level_radio.change(update_foo, inputs=[lang_level_radio, lang_level_state],
714
+ outputs=[lang_level_state])
715
+
716
+ translate_to_radio = gr.Radio(label="Language:", choices=[
717
+ TRANSLATE_TO_DEFAULT, "Arabic", "Arabic (Gulf)", "Catalan", "Chinese (Cantonese)", "Chinese (Mandarin)",
718
+ "Danish", "Dutch", "English (Australian)", "English (British)", "English (Indian)", "English (New Zealand)",
719
+ "English (South African)", "English (US)", "English (Welsh)", "Finnish", "French", "French (Canadian)",
720
+ "German", "German (Austrian)", "Georgian", "Hindi", "Icelandic", "Indonesian", "Italian", "Japanese",
721
+ "Korean", "Norwegian", "Polish",
722
+ "Portuguese (Brazilian)", "Portuguese (European)", "Romanian", "Russian", "Spanish (European)",
723
+ "Spanish (Mexican)", "Spanish (US)", "Swedish", "Turkish", "Ukrainian", "Welsh",
724
+ "emojis", "Gen Z slang", "how the stereotypical Karen would say it", "Klingon", "Neanderthal",
725
+ "Pirate", "Strange Planet expospeak technical talk", "Yoda"],
726
+ value=TRANSLATE_TO_DEFAULT)
727
+
728
+ translate_to_radio.change(update_foo,
729
+ inputs=[translate_to_radio, translate_to_state],
730
+ outputs=[translate_to_state])
731
+
732
+ with gr.Tab("Formality"):
733
+ formality_radio = gr.Radio(label="Formality:",
734
+ choices=[FORMALITY_DEFAULT, "Casual", "Polite", "Honorific"],
735
+ value=FORMALITY_DEFAULT)
736
+ formality_radio.change(update_foo,
737
+ inputs=[formality_radio, formality_state],
738
+ outputs=[formality_state])
739
+
740
+ with gr.Tab("Lit style"):
741
+ literary_style_radio = gr.Radio(label="Literary style:", choices=[
742
+ LITERARY_STYLE_DEFAULT, "Prose", "Story", "Summary", "Outline", "Bullets", "Poetry", "Haiku", "Limerick", "Rap",
743
+ "Joke", "Knock-knock", "FAQ"],
744
+ value=LITERARY_STYLE_DEFAULT)
745
+
746
+ literary_style_radio.change(update_foo,
747
+ inputs=[literary_style_radio, literary_style_state],
748
+ outputs=[literary_style_state])
749
+
750
+ with gr.Tab("Emotions"):
751
+ anticipation_level_radio = gr.Radio(label="Anticipation level:",
752
+ choices=[EMOTION_DEFAULT, "Interest", "Anticipation", "Vigilance"],
753
+ value=EMOTION_DEFAULT)
754
+ anticipation_level_radio.change(update_foo,
755
+ inputs=[anticipation_level_radio, anticipation_level_state],
756
+ outputs=[anticipation_level_state])
757
+
758
+ joy_level_radio = gr.Radio(label="Joy level:",
759
+ choices=[EMOTION_DEFAULT, "Serenity", "Joy", "Ecstasy"],
760
+ value=EMOTION_DEFAULT)
761
+ joy_level_radio.change(update_foo,
762
+ inputs=[joy_level_radio, joy_level_state],
763
+ outputs=[joy_level_state])
764
+
765
+ trust_level_radio = gr.Radio(label="Trust level:",
766
+ choices=[EMOTION_DEFAULT, "Acceptance", "Trust", "Admiration"],
767
+ value=EMOTION_DEFAULT)
768
+ trust_level_radio.change(update_foo,
769
+ inputs=[trust_level_radio, trust_level_state],
770
+ outputs=[trust_level_state])
771
+
772
+ fear_level_radio = gr.Radio(label="Fear level:",
773
+ choices=[EMOTION_DEFAULT, "Apprehension", "Fear", "Terror"],
774
+ value=EMOTION_DEFAULT)
775
+ fear_level_radio.change(update_foo,
776
+ inputs=[fear_level_radio, fear_level_state],
777
+ outputs=[fear_level_state])
778
+
779
+ surprise_level_radio = gr.Radio(label="Surprise level:",
780
+ choices=[EMOTION_DEFAULT, "Distraction", "Surprise", "Amazement"],
781
+ value=EMOTION_DEFAULT)
782
+ surprise_level_radio.change(update_foo,
783
+ inputs=[surprise_level_radio, surprise_level_state],
784
+ outputs=[surprise_level_state])
785
+
786
+ sadness_level_radio = gr.Radio(label="Sadness level:",
787
+ choices=[EMOTION_DEFAULT, "Pensiveness", "Sadness", "Grief"],
788
+ value=EMOTION_DEFAULT)
789
+ sadness_level_radio.change(update_foo,
790
+ inputs=[sadness_level_radio, sadness_level_state],
791
+ outputs=[sadness_level_state])
792
+
793
+ disgust_level_radio = gr.Radio(label="Disgust level:",
794
+ choices=[EMOTION_DEFAULT, "Boredom", "Disgust", "Loathing"],
795
+ value=EMOTION_DEFAULT)
796
+ disgust_level_radio.change(update_foo,
797
+ inputs=[disgust_level_radio, disgust_level_state],
798
+ outputs=[disgust_level_state])
799
+
800
+ anger_level_radio = gr.Radio(label="Anger level:",
801
+ choices=[EMOTION_DEFAULT, "Annoyance", "Anger", "Rage"],
802
+ value=EMOTION_DEFAULT)
803
+ anger_level_radio.change(update_foo,
804
+ inputs=[anger_level_radio, anger_level_state],
805
+ outputs=[anger_level_state])
806
+
807
+ with gr.Tab("Max words"):
808
+ num_words_slider = gr.Slider(label="Max number of words to generate (0 for don't care)",
809
+ value=NUM_WORDS_DEFAULT, minimum=0, maximum=MAX_WORDS, step=10)
810
+ num_words_slider.change(update_foo,
811
+ inputs=[num_words_slider, num_words_state],
812
+ outputs=[num_words_state])
813
+
814
+ with gr.Tab("Embeddings"):
815
+ embeddings_text_box = gr.Textbox(label="Enter text for embeddings and hit Create:",
816
+ lines=20)
817
+
818
+ with gr.Row():
819
+ use_embeddings_cb = gr.Checkbox(label="Use embeddings", value=False)
820
+ use_embeddings_cb.change(update_use_embeddings, inputs=[use_embeddings_cb, use_embeddings_state],
821
+ outputs=[use_embeddings_state])
822
+
823
+ embeddings_text_submit = gr.Button(value="Create", variant="secondary").style(full_width=False)
824
+ embeddings_text_submit.click(update_embeddings,
825
+ inputs=[embeddings_text_box, embeddings_state, qa_chain_state],
826
+ outputs=[docsearch_state])
827
+
828
+ gr.HTML("""
829
+ <p>This application, developed by <a href='https://www.linkedin.com/in/javafxpert/'>James L. Weaver</a>,
830
+ demonstrates a conversational agent implemented with OpenAI GPT-3.5 and LangChain.
831
+ When necessary, it leverages tools for complex math, searching the internet, and accessing news and weather.
832
+ Uses talking heads from <a href='https://exh.ai/'>Ex-Human</a>.
833
+ For faster inference without waiting in queue, you may duplicate the space.
834
+ </p>""")
835
+
836
+ gr.HTML("""
837
+ <form action="https://www.paypal.com/donate" method="post" target="_blank">
838
+ <input type="hidden" name="business" value="AK8BVNALBXSPQ" />
839
+ <input type="hidden" name="no_recurring" value="0" />
840
+ <input type="hidden" name="item_name" value="Please consider helping to defray the cost of APIs such as SerpAPI and WolframAlpha that this app uses." />
841
+ <input type="hidden" name="currency_code" value="USD" />
842
+ <input type="image" src="https://www.paypalobjects.com/en_US/i/btn/btn_donate_LG.gif" border="0" name="submit" title="PayPal - The safer, easier way to pay online!" alt="Donate with PayPal button" />
843
+ <img alt="" border="0" src="https://www.paypal.com/en_US/i/scr/pixel.gif" width="1" height="1" />
844
+ </form>
845
+ """)
846
+
847
+ gr.HTML("""<center>
848
+ <a href="https://huggingface.co/spaces/JavaFXpert/Chat-GPT-LangChain?duplicate=true">
849
+ <img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
850
+ Powered by <a href='https://github.com/hwchase17/langchain'>LangChain 🦜️🔗</a>
851
+ </center>""")
852
+
853
+ message.submit(chat, inputs=[openai_api_key_textbox, message, history_state, chain_state, trace_chain_state,
854
+ speak_text_state, talking_head_state, monologue_state,
855
+ express_chain_state, num_words_state, formality_state,
856
+ anticipation_level_state, joy_level_state, trust_level_state, fear_level_state,
857
+ surprise_level_state, sadness_level_state, disgust_level_state, anger_level_state,
858
+ lang_level_state, translate_to_state, literary_style_state,
859
+ qa_chain_state, docsearch_state, use_embeddings_state],
860
+ outputs=[chatbot, history_state, video_html, my_file, audio_html, tmp_aud_file, message])
861
+ # outputs=[chatbot, history_state, audio_html, tmp_aud_file, message])
862
+
863
+ submit.click(chat, inputs=[openai_api_key_textbox, message, history_state, chain_state, trace_chain_state,
864
+ speak_text_state, talking_head_state, monologue_state,
865
+ express_chain_state, num_words_state, formality_state,
866
+ anticipation_level_state, joy_level_state, trust_level_state, fear_level_state,
867
+ surprise_level_state, sadness_level_state, disgust_level_state, anger_level_state,
868
+ lang_level_state, translate_to_state, literary_style_state,
869
+ qa_chain_state, docsearch_state, use_embeddings_state],
870
+ outputs=[chatbot, history_state, video_html, my_file, audio_html, tmp_aud_file, message])
871
+ # outputs=[chatbot, history_state, audio_html, tmp_aud_file, message])
872
+
873
+ openai_api_key_textbox.change(set_openai_api_key,
874
+ inputs=[openai_api_key_textbox],
875
+ outputs=[chain_state, express_chain_state, llm_state, embeddings_state,
876
+ qa_chain_state, memory_state])
877
+
878
+ block.launch(debug=True)
audios/tempfile.mp3 ADDED
Binary file (785 kB).
 
azure_utils.py ADDED
@@ -0,0 +1,155 @@
1
+ # This class stores Azure voice data. Specifically, the class stores several records containing
2
+ # language, gender and azure_voice. The class also has a method to return the
3
+ # azure_voice given a language and gender.
4
+
5
+ NEURAL_ENGINE = "neural"
6
+ STANDARD_ENGINE = "standard"
7
+
8
+
9
+ class AzureVoiceData:
10
+ def get_voice(self, language, gender):
11
+ for voice in self.voice_data:
12
+ if voice['language'] == language and voice['gender'] == gender:
13
+ return voice['azure_voice']
14
+ return None
15
+
16
+ def __init__(self):
17
+ self.voice_data = [
18
+ {'language': 'Arabic',
19
+ 'azure_voice': 'ar-EG-ShakirNeural',
20
+ 'gender': 'Male'},
21
+ {'language': 'Arabic (Gulf)',
22
+ 'azure_voice': 'ar-KW-FahedNeural',
23
+ 'gender': 'Male'},
24
+ {'language': 'Catalan',
25
+ 'azure_voice': 'ca-ES-EnricNeural',
26
+ 'gender': 'Male'},
27
+ {'language': 'Chinese (Cantonese)',
28
+ 'azure_voice': 'yue-CN-YunSongNeural',
29
+ 'gender': 'Male'},
30
+ {'language': 'Chinese (Mandarin)',
31
+ 'azure_voice': 'zh-CN-YunxiNeural',
32
+ 'gender': 'Male'},
33
+ {'language': 'Danish',
34
+ 'azure_voice': 'da-DK-JeppeNeural',
35
+ 'gender': 'Male'},
36
+ {'language': 'Dutch',
37
+ 'azure_voice': 'nl-NL-MaartenNeural',
38
+ 'gender': 'Male'},
39
+ {'language': 'English (Australian)',
40
+ 'azure_voice': 'en-AU-KenNeural',
41
+ 'gender': 'Male'},
42
+ {'language': 'English (British)',
43
+ 'azure_voice': 'en-GB-RyanNeural',
44
+ 'gender': 'Male'},
45
+ {'language': 'English (Indian)',
46
+ 'azure_voice': 'en-IN-PrabhatNeural',
47
+ 'gender': 'Male'},
48
+ {'language': 'English (New Zealand)',
49
+ 'azure_voice': 'en-NZ-MitchellNeural',
50
+ 'gender': 'Male'},
51
+ {'language': 'English (South African)',
52
+ 'azure_voice': 'en-ZA-LukeNeural',
53
+ 'gender': 'Male'},
54
+ {'language': 'English (US)',
55
+ 'azure_voice': 'en-US-ChristopherNeural',
56
+ 'gender': 'Male'},
57
+ {'language': 'English (Welsh)',
58
+ 'azure_voice': 'cy-GB-AledNeural',
59
+ 'gender': 'Male'},
60
+ {'language': 'Finnish',
61
+ 'azure_voice': 'fi-FI-HarriNeural',
62
+ 'gender': 'Male'},
63
+ {'language': 'French',
64
+ 'azure_voice': 'fr-FR-HenriNeural',
65
+ 'gender': 'Male'},
66
+ {'language': 'French (Canadian)',
67
+ 'azure_voice': 'fr-CA-AntoineNeural',
68
+ 'gender': 'Male'},
69
+ {'language': 'German',
70
+ 'azure_voice': 'de-DE-KlausNeural',
71
+ 'gender': 'Male'},
72
+ {'language': 'German (Austrian)',
73
+ 'azure_voice': 'de-AT-JonasNeural',
74
+ 'gender': 'Male'},
75
+ {'language': 'Hindi',
76
+ 'azure_voice': 'hi-IN-MadhurNeural',
77
+ 'gender': 'Male'},
78
+ {'language': 'Icelandic',
79
+ 'azure_voice': 'is-IS-GunnarNeural',
80
+ 'gender': 'Male'},
81
+ {'language': 'Italian',
82
+ 'azure_voice': 'it-IT-GianniNeural',
83
+ 'gender': 'Male'},
84
+ {'language': 'Japanese',
85
+ 'azure_voice': 'ja-JP-KeitaNeural',
86
+ 'gender': 'Male'},
87
+ {'language': 'Korean',
88
+ 'azure_voice': 'ko-KR-GookMinNeural',
89
+ 'gender': 'Male'},
90
+ {'language': 'Norwegian',
91
+ 'azure_voice': 'nb-NO-FinnNeural',
92
+ 'gender': 'Male'},
93
+ {'language': 'Polish',
94
+ 'azure_voice': 'pl-PL-MarekNeural',
95
+ 'gender': 'Male'},
96
+ {'language': 'Portuguese (Brazilian)',
97
+ 'azure_voice': 'pt-BR-NicolauNeural',
98
+ 'gender': 'Male'},
99
+ {'language': 'Portuguese (European)',
100
+ 'azure_voice': 'pt-PT-DuarteNeural',
101
+ 'gender': 'Male'},
102
+ {'language': 'Romanian',
103
+ 'azure_voice': 'ro-RO-EmilNeural',
104
+ 'gender': 'Male'},
105
+ {'language': 'Russian',
106
+ 'azure_voice': 'ru-RU-DmitryNeural',
107
+ 'gender': 'Male'},
108
+ {'language': 'Spanish (European)',
109
+ 'azure_voice': 'es-ES-TeoNeural',
110
+ 'gender': 'Male'},
111
+ {'language': 'Spanish (Mexican)',
112
+ 'azure_voice': 'es-MX-LibertoNeural',
113
+ 'gender': 'Male'},
114
+ {'language': 'Spanish (US)',
115
+ 'azure_voice': 'es-US-AlonsoNeural',
116
+ 'gender': 'Male'},
117
+ {'language': 'Swedish',
118
+ 'azure_voice': 'sv-SE-MattiasNeural',
119
+ 'gender': 'Male'},
120
+ {'language': 'Turkish',
121
+ 'azure_voice': 'tr-TR-AhmetNeural',
122
+ 'gender': 'Male'},
123
+ {'language': 'Welsh',
124
+ 'azure_voice': 'cy-GB-AledNeural',
125
+ 'gender': 'Male'},
126
+ ]
127
+
128
+
129
+ # Run from the command-line
130
+ if __name__ == '__main__':
131
+ azure_voice_data = AzureVoiceData()
132
+
133
+ azure_voice = azure_voice_data.get_voice('English (US)', 'Male')
134
+ print('English (US)', 'Male', azure_voice)
135
+
136
+ azure_voice = azure_voice_data.get_voice('English (US)', 'Female')
137
+ print('English (US)', 'Female', azure_voice)
138
+
139
+ azure_voice = azure_voice_data.get_voice('French', 'Female')
140
+ print('French', 'Female', azure_voice)
141
+
142
+ azure_voice = azure_voice_data.get_voice('French', 'Male')
143
+ print('French', 'Male', azure_voice)
144
+
145
+ azure_voice = azure_voice_data.get_voice('Japanese', 'Female')
146
+ print('Japanese', 'Female', azure_voice)
147
+
148
+ azure_voice = azure_voice_data.get_voice('Japanese', 'Male')
149
+ print('Japanese', 'Male', azure_voice)
150
+
151
+ azure_voice = azure_voice_data.get_voice('Hindi', 'Female')
152
+ print('Hindi', 'Female', azure_voice)
153
+
154
+ azure_voice = azure_voice_data.get_voice('Hindi', 'Male')
155
+ print('Hindi', 'Male', azure_voice)
images/Masahiro.png ADDED

Git LFS Details

  • SHA256: 215bfaa1bdb0ee4852988b29d480e2d1c2d9669eaa907ba25cc2d3dfa6ebfa4e
  • Pointer size: 132 Bytes
  • Size of remote file: 4.39 MB
polly_utils.py ADDED
@@ -0,0 +1,635 @@
1
+ # This class stores Polly voice data. Specifically, the class stores several records containing
2
+ # language, lang_code, gender, voice_id and engine. The class also has a method to return the
3
+ # voice_id, lang_code and engine given a language and gender.
4
+
5
+ NEURAL_ENGINE = "neural"
6
+ STANDARD_ENGINE = "standard"
7
+
8
+
9
+ class PollyVoiceData:
10
+ def get_voice(self, language, gender):
11
+ for voice in self.voice_data:
12
+ if voice['language'] == language and voice['gender'] == gender:
13
+ if voice['neural'] == 'Yes':
14
+ return voice['voice_id'], voice['lang_code'], NEURAL_ENGINE
15
+ for voice in self.voice_data:
16
+ if voice['language'] == language and voice['gender'] == gender:
17
+ if voice['standard'] == 'Yes':
18
+ return voice['voice_id'], voice['lang_code'], STANDARD_ENGINE
19
+ return None, None, None
20
+
21
+ def get_whisper_lang_code(self, language):
22
+ for voice in self.voice_data:
23
+ if voice['language'] == language:
24
+ return voice['whisper_lang_code']
25
+ return "en"
26
+
27
+ def __init__(self):
28
+ self.voice_data = [
29
+ {'language': 'Arabic',
30
+ 'lang_code': 'arb',
31
+ 'whisper_lang_code': 'ar',
32
+ 'voice_id': 'Zeina',
33
+ 'gender': 'Female',
34
+ 'neural': 'No',
35
+ 'standard': 'Yes'},
36
+ {'language': 'Arabic (Gulf)',
37
+ 'lang_code': 'ar-AE',
38
+ 'whisper_lang_code': 'ar',
39
+ 'voice_id': 'Hala',
40
+ 'gender': 'Female',
41
+ 'neural': 'Yes',
42
+ 'standard': 'No'},
43
+ {'language': 'Catalan',
44
+ 'lang_code': 'ca-ES',
45
+ 'whisper_lang_code': 'ca',
46
+ 'voice_id': 'Arlet',
47
+ 'gender': 'Female',
48
+ 'neural': 'Yes',
49
+ 'standard': 'No'},
50
+ {'language': 'Chinese (Cantonese)',
51
+ 'lang_code': 'yue-CN',
52
+ 'whisper_lang_code': 'zh',
53
+ 'voice_id': 'Hiujin',
54
+ 'gender': 'Female',
55
+ 'neural': 'Yes',
56
+ 'standard': 'No'},
57
+ {'language': 'Chinese (Mandarin)',
58
+ 'lang_code': 'cmn-CN',
59
+ 'whisper_lang_code': 'zh',
60
+ 'voice_id': 'Zhiyu',
61
+ 'gender': 'Female',
62
+ 'neural': 'Yes',
63
+ 'standard': 'No'},
64
+ {'language': 'Danish',
65
+ 'lang_code': 'da-DK',
66
+ 'whisper_lang_code': 'da',
67
+ 'voice_id': 'Naja',
68
+ 'gender': 'Female',
69
+ 'neural': 'No',
70
+ 'standard': 'Yes'},
71
+ {'language': 'Danish',
72
+ 'lang_code': 'da-DK',
73
+ 'whisper_lang_code': 'da',
74
+ 'voice_id': 'Mads',
75
+ 'gender': 'Male',
76
+ 'neural': 'No',
77
+ 'standard': 'Yes'},
78
+ {'language': 'Dutch',
79
+ 'lang_code': 'nl-NL',
80
+ 'whisper_lang_code': 'nl',
81
+ 'voice_id': 'Laura',
82
+ 'gender': 'Female',
83
+ 'neural': 'Yes',
84
+ 'standard': 'No'},
85
+ {'language': 'Dutch',
86
+ 'lang_code': 'nl-NL',
87
+ 'whisper_lang_code': 'nl',
88
+ 'voice_id': 'Lotte',
89
+ 'gender': 'Female',
90
+ 'neural': 'No',
91
+ 'standard': 'Yes'},
92
+ {'language': 'Dutch',
93
+ 'lang_code': 'nl-NL',
94
+ 'whisper_lang_code': 'nl',
95
+ 'voice_id': 'Ruben',
96
+ 'gender': 'Male',
97
+ 'neural': 'No',
98
+ 'standard': 'Yes'},
99
+ {'language': 'English (Australian)',
100
+ 'lang_code': 'en-AU',
101
+ 'whisper_lang_code': 'en',
102
+ 'voice_id': 'Nicole',
103
+ 'gender': 'Female',
104
+ 'neural': 'No',
105
+ 'standard': 'Yes'},
106
+ {'language': 'English (Australian)',
107
+ 'lang_code': 'en-AU',
108
+ 'whisper_lang_code': 'en',
109
+ 'voice_id': 'Olivia',
110
+ 'gender': 'Female',
111
+ 'neural': 'Yes',
112
+ 'standard': 'No'},
113
+ {'language': 'English (Australian)',
114
+ 'lang_code': 'en-AU',
115
+ 'whisper_lang_code': 'en',
116
+ 'voice_id': 'Russell',
117
+ 'gender': 'Male',
118
+ 'neural': 'No',
119
+ 'standard': 'Yes'},
120
+ {'language': 'English (British)',
121
+ 'lang_code': 'en-GB',
122
+ 'whisper_lang_code': 'en',
123
+ 'voice_id': 'Amy',
124
+ 'gender': 'Female',
125
+ 'neural': 'Yes',
126
+ 'standard': 'Yes'},
127
+ {'language': 'English (British)',
128
+ 'lang_code': 'en-GB',
129
+ 'whisper_lang_code': 'en',
130
+ 'voice_id': 'Emma',
131
+ 'gender': 'Female',
132
+ 'neural': 'Yes',
133
+ 'standard': 'Yes'},
134
+ {'language': 'English (British)',
135
+ 'lang_code': 'en-GB',
136
+ 'whisper_lang_code': 'en',
137
+ 'voice_id': 'Brian',
138
+ 'gender': 'Male',
139
+ 'neural': 'Yes',
140
+ 'standard': 'Yes'},
141
+ {'language': 'English (British)',
142
+ 'lang_code': 'en-GB',
143
+ 'whisper_lang_code': 'en',
144
+ 'voice_id': 'Arthur',
145
+ 'gender': 'Male',
146
+ 'neural': 'Yes',
147
+ 'standard': 'No'},
148
+ {'language': 'English (Indian)',
149
+ 'lang_code': 'en-IN',
150
+ 'whisper_lang_code': 'en',
151
+ 'voice_id': 'Aditi',
152
+ 'gender': 'Female',
153
+ 'neural': 'No',
154
+ 'standard': 'Yes'},
155
+ {'language': 'English (Indian)',
156
+ 'lang_code': 'en-IN',
157
+ 'whisper_lang_code': 'en',
158
+ 'voice_id': 'Raveena',
159
+ 'gender': 'Female',
160
+ 'neural': 'No',
161
+ 'standard': 'Yes'},
162
+ {'language': 'English (Indian)',
163
+ 'lang_code': 'en-IN',
164
+ 'whisper_lang_code': 'en',
165
+ 'voice_id': 'Kajal',
166
+ 'gender': 'Female',
167
+ 'neural': 'Yes',
168
+ 'standard': 'No'},
169
+ {'language': 'English (New Zealand)',
170
+ 'lang_code': 'en-NZ',
171
+ 'whisper_lang_code': 'en',
172
+ 'voice_id': 'Aria',
173
+ 'gender': 'Female',
174
+ 'neural': 'Yes',
175
+ 'standard': 'No'},
176
+ {'language': 'English (South African)',
177
+ 'lang_code': 'en-ZA',
178
+ 'whisper_lang_code': 'en',
179
+ 'voice_id': 'Ayanda',
180
+ 'gender': 'Female',
181
+ 'neural': 'Yes',
182
+ 'standard': 'No'},
183
+ {'language': 'English (US)',
184
+ 'lang_code': 'en-US',
185
+ 'whisper_lang_code': 'en',
186
+ 'voice_id': 'Ivy',
187
+ 'gender': 'Female (child)',
188
+ 'neural': 'Yes',
189
+ 'standard': 'Yes'},
190
+ {'language': 'English (US)',
191
+ 'lang_code': 'en-US',
192
+ 'whisper_lang_code': 'en',
193
+ 'voice_id': 'Joanna',
194
+ 'gender': 'Female',
195
+ 'neural': 'Yes',
196
+ 'standard': 'Yes'},
197
+ {'language': 'English (US)',
198
+ 'lang_code': 'en-US',
199
+ 'whisper_lang_code': 'en',
200
+ 'voice_id': 'Kendra',
201
+ 'gender': 'Female',
202
+ 'neural': 'Yes',
203
+ 'standard': 'Yes'},
204
+ {'language': 'English (US)',
205
+ 'lang_code': 'en-US',
206
+ 'whisper_lang_code': 'en',
207
+ 'voice_id': 'Kimberly',
208
+ 'gender': 'Female',
209
+ 'neural': 'Yes',
210
+ 'standard': 'Yes'},
211
+ {'language': 'English (US)',
212
+ 'lang_code': 'en-US',
213
+ 'whisper_lang_code': 'en',
214
+ 'voice_id': 'Salli',
215
+ 'gender': 'Female',
216
+ 'neural': 'Yes',
217
+ 'standard': 'Yes'},
218
+ {'language': 'English (US)',
219
+ 'lang_code': 'en-US',
220
+ 'whisper_lang_code': 'en',
221
+ 'voice_id': 'Joey',
222
+ 'gender': 'Male',
223
+ 'neural': 'Yes',
224
+ 'standard': 'Yes'},
225
+ {'language': 'English (US)',
226
+ 'lang_code': 'en-US',
227
+ 'whisper_lang_code': 'en',
228
+ 'voice_id': 'Justin',
229
+ 'gender': 'Male (child)',
230
+ 'neural': 'Yes',
231
+ 'standard': 'Yes'},
232
+ {'language': 'English (US)',
233
+ 'lang_code': 'en-US',
234
+ 'whisper_lang_code': 'en',
235
+ 'voice_id': 'Kevin',
236
+ 'gender': 'Male (child)',
237
+ 'neural': 'Yes',
238
+ 'standard': 'No'},
239
+ {'language': 'English (US)',
240
+ 'lang_code': 'en-US',
241
+ 'whisper_lang_code': 'en',
242
+ 'voice_id': 'Matthew',
243
+ 'gender': 'Male',
244
+ 'neural': 'Yes',
245
+ 'standard': 'Yes'},
246
+ {'language': 'English (Welsh)',
247
+ 'lang_code': 'en-GB-WLS',
248
+ 'whisper_lang_code': 'en',
249
+ 'voice_id': 'Geraint',
250
+ 'gender': 'Male',
251
+ 'neural': 'No',
252
+ 'standard': 'Yes'},
253
+ {'language': 'Finnish',
254
+ 'lang_code': 'fi-FI',
255
+ 'whisper_lang_code': 'fi',
256
+ 'voice_id': 'Suvi',
257
+ 'gender': 'Female',
258
+ 'neural': 'Yes',
259
+ 'standard': 'No'},
260
+ {'language': 'French',
261
+ 'lang_code': 'fr-FR',
262
+ 'whisper_lang_code': 'fr',
263
+ 'voice_id': 'Celine',
264
+ 'gender': 'Female',
265
+ 'neural': 'No',
266
+ 'standard': 'Yes'},
267
+ {'language': 'French',
268
+ 'lang_code': 'fr-FR',
269
+ 'whisper_lang_code': 'fr',
270
+ 'voice_id': 'Lea',
271
+ 'gender': 'Female',
272
+ 'neural': 'Yes',
273
+ 'standard': 'Yes'},
274
+ {'language': 'French',
275
+ 'lang_code': 'fr-FR',
276
+ 'whisper_lang_code': 'fr',
277
+ 'voice_id': 'Mathieu',
278
+ 'gender': 'Male',
279
+ 'neural': 'No',
280
+ 'standard': 'Yes'},
281
+ {'language': 'French (Canadian)',
282
+ 'lang_code': 'fr-CA',
283
+ 'whisper_lang_code': 'fr',
284
+ 'voice_id': 'Chantal',
285
+ 'gender': 'Female',
286
+ 'neural': 'No',
287
+ 'standard': 'Yes'},
288
+ {'language': 'French (Canadian)',
289
+ 'lang_code': 'fr-CA',
290
+ 'whisper_lang_code': 'fr',
291
+ 'voice_id': 'Gabrielle',
292
+ 'gender': 'Female',
293
+ 'neural': 'Yes',
294
+ 'standard': 'No'},
295
+ {'language': 'French (Canadian)',
296
+ 'lang_code': 'fr-CA',
297
+ 'whisper_lang_code': 'fr',
298
+ 'voice_id': 'Liam',
299
+ 'gender': 'Male',
300
+ 'neural': 'Yes',
301
+ 'standard': 'No'},
302
+ {'language': 'German',
303
+ 'lang_code': 'de-DE',
304
+ 'whisper_lang_code': 'de',
305
+ 'voice_id': 'Marlene',
306
+ 'gender': 'Female',
307
+ 'neural': 'No',
308
+ 'standard': 'Yes'},
309
+ {'language': 'German',
310
+ 'lang_code': 'de-DE',
311
+ 'whisper_lang_code': 'de',
312
+ 'voice_id': 'Vicki',
313
+ 'gender': 'Female',
314
+ 'neural': 'Yes',
315
+ 'standard': 'Yes'},
316
+ {'language': 'German',
317
+ 'lang_code': 'de-DE',
318
+ 'whisper_lang_code': 'de',
319
+ 'voice_id': 'Hans',
320
+ 'gender': 'Male',
321
+ 'neural': 'No',
322
+ 'standard': 'Yes'},
323
+ {'language': 'German',
324
+ 'lang_code': 'de-DE',
325
+ 'whisper_lang_code': 'de',
326
+ 'voice_id': 'Daniel',
327
+ 'gender': 'Male',
328
+ 'neural': 'Yes',
329
+ 'standard': 'No'},
330
+ {'language': 'German (Austrian)',
331
+ 'lang_code': 'de-AT',
332
+ 'whisper_lang_code': 'de',
333
+ 'voice_id': 'Hannah',
334
+ 'gender': 'Female',
335
+ 'neural': 'Yes',
336
+ 'standard': 'No'},
337
+ {'language': 'Hindi',
338
+ 'lang_code': 'hi-IN',
339
+ 'whisper_lang_code': 'hi',
340
+ 'voice_id': 'Aditi',
341
+ 'gender': 'Female',
342
+ 'neural': 'No',
343
+ 'standard': 'Yes'},
344
+ {'language': 'Hindi',
345
+ 'lang_code': 'hi-IN',
346
+ 'whisper_lang_code': 'hi',
347
+ 'voice_id': 'Kajal',
348
+ 'gender': 'Female',
349
+ 'neural': 'Yes',
350
+ 'standard': 'No'},
351
+ {'language': 'Icelandic',
352
+ 'lang_code': 'is-IS',
353
+ 'whisper_lang_code': 'is',
354
+ 'voice_id': 'Dora',
355
+ 'gender': 'Female',
356
+ 'neural': 'No',
357
+ 'standard': 'Yes'},
358
+ {'language': 'Icelandic',
359
+ 'lang_code': 'is-IS',
360
+ 'whisper_lang_code': 'is',
361
+ 'voice_id': 'Karl',
362
+ 'gender': 'Male',
363
+ 'neural': 'No',
364
+ 'standard': 'Yes'},
365
+ {'language': 'Italian',
366
+ 'lang_code': 'it-IT',
367
+ 'whisper_lang_code': 'it',
368
+ 'voice_id': 'Carla',
369
+ 'gender': 'Female',
370
+ 'neural': 'No',
371
+ 'standard': 'Yes'},
372
+ {'language': 'Italian',
373
+ 'lang_code': 'it-IT',
374
+ 'whisper_lang_code': 'it',
375
+ 'voice_id': 'Bianca',
376
+ 'gender': 'Female',
377
+ 'neural': 'Yes',
378
+ 'standard': 'Yes'},
379
+ {'language': 'Japanese',
380
+ 'lang_code': 'ja-JP',
381
+ 'whisper_lang_code': 'ja',
382
+ 'voice_id': 'Mizuki',
383
+ 'gender': 'Female',
384
+ 'neural': 'No',
385
+ 'standard': 'Yes'},
386
+ {'language': 'Japanese',
387
+ 'lang_code': 'ja-JP',
388
+ 'whisper_lang_code': 'ja',
389
+ 'voice_id': 'Takumi',
390
+ 'gender': 'Male',
391
+ 'neural': 'Yes',
392
+ 'standard': 'Yes'},
393
+ {'language': 'Korean',
394
+ 'lang_code': 'ko-KR',
395
+ 'whisper_lang_code': 'ko',
396
+ 'voice_id': 'Seoyeon',
397
+ 'gender': 'Female',
398
+ 'neural': 'Yes',
399
+ 'standard': 'Yes'},
400
+ {'language': 'Norwegian',
401
+ 'lang_code': 'nb-NO',
402
+ 'whisper_lang_code': 'no',
403
+ 'voice_id': 'Liv',
404
+ 'gender': 'Female',
405
+ 'neural': 'No',
406
+ 'standard': 'Yes'},
407
+ {'language': 'Norwegian',
408
+ 'lang_code': 'nb-NO',
409
+ 'whisper_lang_code': 'no',
410
+ 'voice_id': 'Ida',
411
+ 'gender': 'Female',
412
+ 'neural': 'Yes',
413
+ 'standard': 'No'},
414
+ {'language': 'Polish',
415
+ 'lang_code': 'pl-PL',
416
+ 'whisper_lang_code': 'pl',
417
+ 'voice_id': 'Ewa',
418
+ 'gender': 'Female',
419
+ 'neural': 'No',
420
+ 'standard': 'Yes'},
421
+ {'language': 'Polish',
422
+ 'lang_code': 'pl-PL',
423
+ 'whisper_lang_code': 'pl',
424
+ 'voice_id': 'Maja',
425
+ 'gender': 'Female',
426
+ 'neural': 'No',
427
+ 'standard': 'Yes'},
428
+ {'language': 'Polish',
429
+ 'lang_code': 'pl-PL',
430
+ 'whisper_lang_code': 'pl',
431
+ 'voice_id': 'Jacek',
432
+ 'gender': 'Male',
433
+ 'neural': 'No',
434
+ 'standard': 'Yes'},
435
+ {'language': 'Polish',
436
+ 'lang_code': 'pl-PL',
437
+ 'whisper_lang_code': 'pl',
438
+ 'voice_id': 'Jan',
439
+ 'gender': 'Male',
440
+ 'neural': 'No',
441
+ 'standard': 'Yes'},
442
+ {'language': 'Polish',
443
+ 'lang_code': 'pl-PL',
444
+ 'whisper_lang_code': 'pl',
445
+ 'voice_id': 'Ola',
446
+ 'gender': 'Female',
447
+ 'neural': 'Yes',
448
+ 'standard': 'No'},
449
+ {'language': 'Portuguese (Brazilian)',
450
+ 'lang_code': 'pt-BR',
451
+ 'whisper_lang_code': 'pt',
452
+ 'voice_id': 'Camila',
453
+ 'gender': 'Female',
454
+ 'neural': 'Yes',
455
+ 'standard': 'Yes'},
456
+ {'language': 'Portuguese (Brazilian)',
457
+ 'lang_code': 'pt-BR',
458
+ 'whisper_lang_code': 'pt',
459
+ 'voice_id': 'Vitoria',
460
+ 'gender': 'Female',
461
+ 'neural': 'Yes',
462
+ 'standard': 'Yes'},
463
+ {'language': 'Portuguese (Brazilian)',
464
+ 'lang_code': 'pt-BR',
465
+ 'whisper_lang_code': 'pt',
466
+ 'voice_id': 'Ricardo',
467
+ 'gender': 'Male',
468
+ 'neural': 'No',
469
+ 'standard': 'Yes'},
470
+ {'language': 'Portuguese (European)',
471
+ 'lang_code': 'pt-PT',
472
+ 'whisper_lang_code': 'pt',
473
+ 'voice_id': 'Ines',
474
+ 'gender': 'Female',
475
+ 'neural': 'Yes',
476
+ 'standard': 'Yes'},
477
+ {'language': 'Portuguese (European)',
478
+ 'lang_code': 'pt-PT',
479
+ 'whisper_lang_code': 'pt',
480
+ 'voice_id': 'Cristiano',
481
+ 'gender': 'Male',
482
+ 'neural': 'No',
483
+ 'standard': 'Yes'},
484
+ {'language': 'Romanian',
485
+ 'lang_code': 'ro-RO',
486
+ 'whisper_lang_code': 'ro',
487
+ 'voice_id': 'Carmen',
488
+ 'gender': 'Female',
489
+ 'neural': 'No',
490
+ 'standard': 'Yes'},
491
+ {'language': 'Russian',
492
+ 'lang_code': 'ru-RU',
493
+ 'whisper_lang_code': 'ru',
494
+ 'voice_id': 'Tatyana',
495
+ 'gender': 'Female',
496
+ 'neural': 'No',
497
+ 'standard': 'Yes'},
498
+ {'language': 'Russian',
499
+ 'lang_code': 'ru-RU',
500
+ 'whisper_lang_code': 'ru',
501
+ 'voice_id': 'Maxim',
502
+ 'gender': 'Male',
503
+ 'neural': 'No',
504
+ 'standard': 'Yes'},
505
+ {'language': 'Spanish (European)',
506
+ 'lang_code': 'es-ES',
507
+ 'whisper_lang_code': 'es',
508
+ 'voice_id': 'Conchita',
509
+ 'gender': 'Female',
510
+ 'neural': 'No',
511
+ 'standard': 'Yes'},
512
+ {'language': 'Spanish (European)',
513
+ 'lang_code': 'es-ES',
514
+ 'whisper_lang_code': 'es',
515
+ 'voice_id': 'Lucia',
516
+ 'gender': 'Female',
517
+ 'neural': 'Yes',
518
+ 'standard': 'Yes'},
519
+ {'language': 'Spanish (European)',
520
+ 'lang_code': 'es-ES',
521
+ 'whisper_lang_code': 'es',
522
+ 'voice_id': 'Enrique',
523
+ 'gender': 'Male',
524
+ 'neural': 'No',
525
+ 'standard': 'Yes'},
526
+ {'language': 'Spanish (Mexican)',
527
+ 'lang_code': 'es-MX',
528
+ 'whisper_lang_code': 'es',
529
+ 'voice_id': 'Mia',
530
+ 'gender': 'Female',
531
+ 'neural': 'Yes',
532
+ 'standard': 'Yes'},
533
+ {'language': 'Spanish (US)',
534
+ 'lang_code': 'es-US',
535
+ 'whisper_lang_code': 'es',
536
+ 'voice_id': 'Lupe',
537
+ 'gender': 'Female',
538
+ 'neural': 'Yes',
539
+ 'standard': 'Yes'},
540
+ {'language': 'Spanish (US)',
541
+ 'lang_code': 'es-US',
542
+ 'whisper_lang_code': 'es',
543
+ 'voice_id': 'Penelope',
544
+ 'gender': 'Female',
545
+ 'neural': 'No',
546
+ 'standard': 'Yes'},
547
+ {'language': 'Spanish (US)',
548
+ 'lang_code': 'es-US',
549
+ 'whisper_lang_code': 'es',
550
+ 'voice_id': 'Miguel',
551
+ 'gender': 'Male',
552
+ 'neural': 'No',
553
+ 'standard': 'Yes'},
554
+ {'language': 'Spanish (US)',
555
+ 'lang_code': 'es-US',
556
+ 'whisper_lang_code': 'es',
557
+ 'voice_id': 'Pedro',
558
+ 'gender': 'Male',
559
+ 'neural': 'Yes',
560
+ 'standard': 'No'},
561
+ {'language': 'Swedish',
562
+ 'lang_code': 'sv-SE',
563
+ 'whisper_lang_code': 'sv',
564
+ 'voice_id': 'Astrid',
565
+ 'gender': 'Female',
566
+ 'neural': 'No',
567
+ 'standard': 'Yes'},
568
+ {'language': 'Swedish',
569
+ 'lang_code': 'sv-SE',
570
+ 'whisper_lang_code': 'sv',
571
+ 'voice_id': 'Elin',
572
+ 'gender': 'Female',
573
+ 'neural': 'Yes',
574
+ 'standard': 'No'},
575
+ {'language': 'Turkish',
576
+ 'lang_code': 'tr-TR',
577
+ 'whisper_lang_code': 'tr',
578
+ 'voice_id': 'Filiz',
579
+ 'gender': 'Female',
580
+ 'neural': 'No',
581
+ 'standard': 'Yes'},
582
+ {'language': 'Welsh',
583
+ 'lang_code': 'cy-GB',
584
+ 'whisper_lang_code': 'cy',
585
+ 'voice_id': 'Gwyneth',
586
+ 'gender': 'Female',
587
+ 'neural': 'No',
588
+ 'standard': 'Yes'}
589
+ ]
590
+
591
+
592
+ # Run from the command-line
593
+ if __name__ == '__main__':
594
+ polly_voice_data = PollyVoiceData()
595
+
596
+ voice_id, language_code, engine = polly_voice_data.get_voice('English (US)', 'Male')
597
+ print('English (US)', 'Male', voice_id, language_code, engine)
598
+
599
+ voice_id, language_code, engine = polly_voice_data.get_voice('English (US)', 'Female')
600
+ print('English (US)', 'Female', voice_id, language_code, engine)
601
+
602
+ voice_id, language_code, engine = polly_voice_data.get_voice('French', 'Female')
603
+ print('French', 'Female', voice_id, language_code, engine)
604
+
605
+ voice_id, language_code, engine = polly_voice_data.get_voice('French', 'Male')
606
+ print('French', 'Male', voice_id, language_code, engine)
607
+
608
+ voice_id, language_code, engine = polly_voice_data.get_voice('Japanese', 'Female')
609
+ print('Japanese', 'Female', voice_id, language_code, engine)
610
+
611
+ voice_id, language_code, engine = polly_voice_data.get_voice('Japanese', 'Male')
612
+ print('Japanese', 'Male', voice_id, language_code, engine)
613
+
614
+ voice_id, language_code, engine = polly_voice_data.get_voice('Hindi', 'Female')
615
+ print('Hindi', 'Female', voice_id, language_code, engine)
616
+
617
+ voice_id, language_code, engine = polly_voice_data.get_voice('Hindi', 'Male')
618
+ print('Hindi', 'Male', voice_id, language_code, engine)
619
+
620
+ whisper_lang_code = polly_voice_data.get_whisper_lang_code('English (US)')
621
+ print('English (US) whisper_lang_code:', whisper_lang_code)
622
+
623
+ whisper_lang_code = polly_voice_data.get_whisper_lang_code('Chinese (Mandarin)')
624
+ print('Chinese (Mandarin) whisper_lang_code:', whisper_lang_code)
625
+
626
+ whisper_lang_code = polly_voice_data.get_whisper_lang_code('Norwegian')
627
+ print('Norwegian whisper_lang_code:', whisper_lang_code)
628
+
629
+ whisper_lang_code = polly_voice_data.get_whisper_lang_code('Dutch')
630
+ print('Dutch whisper_lang_code:', whisper_lang_code)
631
+
632
+ whisper_lang_code = polly_voice_data.get_whisper_lang_code('Foo')
633
+ print('Foo whisper_lang_code:', whisper_lang_code)
634
+
635
+
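For context only (not part of this commit): a minimal, hypothetical sketch of how a (voice_id, language_code, engine) tuple of the kind printed by the command-line block above could be handed to Amazon Polly through boto3, which is pinned in requirements.txt below. The literal values, audio text, and output file name are illustrative, and the sketch assumes engine is the string 'neural' or 'standard' and that AWS credentials are available in the environment.

# Hypothetical usage sketch; values mirror one row of the voice table above.
import boto3

# e.g. what get_voice('English (US)', 'Female') might return
voice_id, language_code, engine = 'Joanna', 'en-US', 'neural'

polly = boto3.client('polly')  # assumes AWS credentials in the environment
response = polly.synthesize_speech(
    Text='Hello from Polly',
    OutputFormat='mp3',
    VoiceId=voice_id,
    LanguageCode=language_code,
    Engine=engine,  # 'neural' or 'standard', matching the neural/standard columns above
)

# AudioStream is a streaming body; write it out as an MP3 file
with open('polly_output.mp3', 'wb') as f:
    f.write(response['AudioStream'].read())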
requirements.txt ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ openai==0.26.4
2
+ gradio==3.17.1
3
+ google-search-results
4
+ google-api-python-client==2.76.0
5
+ wolframalpha
6
+ langchain==0.0.78
7
+ requests==2.28.2
8
+ git+https://github.com/openai/whisper.git
9
+ boto3==1.26.65
10
+ faiss-cpu
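For context only (not part of this commit): a minimal, hypothetical sketch of how a whisper_lang_code value from the voice table above might be used with the openai-whisper package installed from the git URL pinned in this requirements file. The model size and audio path are illustrative.

# Hypothetical usage sketch for speech-to-text with openai-whisper.
import whisper

lang_code = 'en'  # e.g. PollyVoiceData().get_whisper_lang_code('English (US)')
model = whisper.load_model('base')           # downloads the model on first use
result = model.transcribe('recording.mp3', language=lang_code)
print(result['text'])                        # transcribed text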
videos/Masahiro.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ca886517414fae8bcea5a5130ae1d01ef3ead7aed437203abebf032217fd0be6
3
+ size 2425212
videos/tempfile.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4579f43ce3f39906372a8f52b0858510321807df2ed98d4a539a991986ab0cc7
3
+ size 103305