Commit 9e5d9ca · Parent(s): 0c3d039 · Upload 12 files
app.py CHANGED
@@ -5,8 +5,8 @@ import ssl
 from contextlib import closing
 from typing import Optional, Tuple
 import datetime
-import promptlayer
-promptlayer.api_key = os.environ.get("PROMPTLAYER_KEY")
+# import promptlayer
+# promptlayer.api_key = os.environ.get("PROMPTLAYER_KEY")
 
 import boto3
 import gradio as gr
@@ -20,8 +20,8 @@ from langchain import ConversationChain, LLMChain
 
 from langchain.agents import load_tools, initialize_agent
 from langchain.chains.conversation.memory import ConversationBufferMemory
-
-from promptlayer.langchain.llms import OpenAI
+from langchain.llms import OpenAI
+# from promptlayer.langchain.llms import OpenAI
 from threading import Lock
 
 # Console to variable
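Taken together, the first two hunks drop the PromptLayer integration: the promptlayer import and its API-key setup are commented out, and the OpenAI LLM wrapper is now imported from langchain.llms instead of promptlayer.langchain.llms. A minimal before/after sketch of that swap in isolation (constructor arguments and the sample prompt are illustrative, not taken from this commit):

    # Before (removed): route LLM calls through PromptLayer's LangChain wrapper
    # import promptlayer
    # promptlayer.api_key = os.environ.get("PROMPTLAYER_KEY")
    # from promptlayer.langchain.llms import OpenAI

    # After: use LangChain's own OpenAI wrapper directly
    # (reads OPENAI_API_KEY from the environment; temperature is illustrative)
    from langchain.llms import OpenAI

    llm = OpenAI(temperature=0)
    print(llm("Say hello in one word."))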
@@ -311,7 +311,7 @@ def set_openai_api_key(api_key):
         return chain, express_chain, llm, embeddings, qa_chain, memory
     return None, None, None, None, None, None
 
-PROMPTLAYER_API_BASE = "https://api.promptlayer.com"
+# PROMPTLAYER_API_BASE = "https://api.promptlayer.com"
 
 def run_chain(chain, inp, capture_hidden_text):
     output = ""
@@ -445,14 +445,14 @@ class ChatWrapper:
                         html_video, temp_file = do_html_video_speak(output, translate_to)
                     else:
                         temp_file = LOOPING_TALKING_HEAD
-                        html_video = create_html_video(temp_file, TALKING_HEAD_WIDTH)
+                        # html_video = create_html_video(temp_file, TALKING_HEAD_WIDTH)
                         html_audio, temp_aud_file = do_html_audio_speak(output, translate_to)
                 else:
                     html_audio, temp_aud_file = do_html_audio_speak(output, translate_to)
             else:
                 if talking_head:
                     temp_file = LOOPING_TALKING_HEAD
-                    html_video = create_html_video(temp_file, TALKING_HEAD_WIDTH)
+                    # html_video = create_html_video(temp_file, TALKING_HEAD_WIDTH)
                 else:
                     # html_audio, temp_aud_file = do_html_audio_speak(output, translate_to)
                     # html_video = create_html_video(temp_file, "128")
@@ -935,23 +935,13 @@ with gr.Blocks(css=".gradio-container {background-color: lightgray}") as block:
 
     openai_api_key_textbox.change(None,
                                   inputs=[openai_api_key_textbox],
-                                  # outputs=None, _js="() => localStorage.setItem('open_api_key', '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')")
                                   outputs=None, _js="(api_key) => localStorage.setItem('open_api_key', api_key)")
-
-    # openai_api_key_textbox.change(set_openai_api_key,
-    #                               inputs=[openai_api_key_textbox],
-    #                               outputs=[chain_state, express_chain_state, llm_state, embeddings_state,
-    #                                        qa_chain_state, memory_state])
-
 
     openai_api_key_textbox.change(set_openai_api_key,
                                   inputs=[openai_api_key_textbox],
                                   outputs=[chain_state, express_chain_state, llm_state, embeddings_state,
                                            qa_chain_state, memory_state])
 
-
-
-    # block.load(None, inputs=None, outputs=openai_api_key_textbox, _js="()=> localStorage.getItem('open_api_key')")
-
+    block.load(None, inputs=None, outputs=openai_api_key_textbox, _js="()=> localStorage.getItem('open_api_key')")
 
 block.launch(debug=True)
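The last hunk is what lets the API key survive a page reload: one change handler writes the key to the browser's localStorage, a block.load hook reads it back into the textbox at startup, and the existing change handler that rebuilds the chains is left untouched. A stripped-down sketch of that pattern (assuming Gradio 3.x, where event listeners accept a _js argument; widget names here are illustrative):

    import gradio as gr

    with gr.Blocks() as demo:
        api_key_box = gr.Textbox(label="OpenAI API key", type="password")

        # Persist the key client-side on every edit; no Python callback is
        # needed, so the function argument is None and the work happens in JS.
        api_key_box.change(None, inputs=[api_key_box], outputs=None,
                           _js="(key) => localStorage.setItem('open_api_key', key)")

        # On page load, pull the stored key back into the textbox.
        demo.load(None, inputs=None, outputs=api_key_box,
                  _js="() => localStorage.getItem('open_api_key')")

    demo.launch()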