Update app.py
app.py CHANGED
@@ -1,13 +1,13 @@
 import os
 import time
-import openai
+from openai import OpenAI
 import numpy as np
 import streamlit as st
 import tensorflow as tf
 import tensorflow_text
 # import plotly.graph_objects as go
 # from dotenv import load_dotenv
-from langchain.llms import OpenAI
+from langchain_openai import OpenAI as OpenAiLC
 from langchain.memory import ConversationSummaryMemory, ChatMessageHistory
 from llm import sys_instruction

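The two changed imports track the openai v1 migration: module-level configuration (the `openai.api_key = st.session_state.key` visible in the next hunk's context line) is replaced by an explicit client object, and LangChain's OpenAI wrapper moved into the separate langchain_openai package; aliasing it as OpenAiLC keeps it from clashing with the client class. A minimal sketch of the new wiring, assuming the key sits in an environment variable:

    import os
    from openai import OpenAI
    from langchain_openai import OpenAI as OpenAiLC

    # v1 style: configuration lives on a client instance, not the module
    client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])

    # the aliased LangChain wrapper is configured the same way
    llm = OpenAiLC(temperature=0.2, openai_api_key=os.environ["OPENAI_API_KEY"])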
@@ -72,13 +72,20 @@ openai.api_key = st.session_state.key

 # gpt llm
 if 'llm' not in st.session_state:
-    st.session_state.llm = OpenAI(
+    st.session_state.llm = OpenAiLC(
         temperature=0.2, openai_api_key=st.session_state.key)

 # model name
 if "openai_model" not in st.session_state:
     st.session_state["openai_model"] = "gpt-3.5-turbo-instruct"

+# openai client
+# model name
+if "client" not in st.session_state:
+    st.session_state["client"] = OpenAI(
+        api_key=st.session_state.key
+    )
+
 # st chat history
 if "message_history" not in st.session_state:
     st.session_state.message_history = []
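Streamlit reruns the whole script on every widget interaction, so the commit memoizes the new client in `st.session_state` next to the LangChain LLM instead of rebuilding it each turn. A stripped-down sketch of that pattern, with the key-collection step stubbed out:

    import streamlit as st
    from openai import OpenAI

    # stand-in for however the app collected the key earlier in the script
    st.session_state.setdefault("key", "sk-...")

    # construct the client once; later reruns reuse the cached instance
    if "client" not in st.session_state:
        st.session_state["client"] = OpenAI(api_key=st.session_state.key)

    client = st.session_state["client"]  # reused everywhere below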
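The `ConversationSummaryMemory` and `ChatMessageHistory` imports at the top suggest the cached `llm` exists to drive LangChain's rolling conversation summary; the diff does not show that call site, so the wiring below is a guess at the pairing (every name beyond the imports is an assumption):

    from langchain.memory import ConversationSummaryMemory, ChatMessageHistory
    from langchain_openai import OpenAI as OpenAiLC

    llm = OpenAiLC(temperature=0.2, openai_api_key="sk-...")  # placeholder key

    # hypothetical wiring: the LLM condenses the running chat into a summary
    memory = ConversationSummaryMemory(llm=llm, chat_memory=ChatMessageHistory())
    memory.save_context({"input": "hi"}, {"output": "hello there"})
    print(memory.load_memory_variables({}))  # {'history': '...summary...'}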
@@ -190,7 +197,7 @@ if prompt := st.chat_input("What is up?"):
     full_response = ""

     # get response
-    for chunk in openai.ChatCompletion.create(
+    for chunk in st.session_state.client.chat.completions.create(
         model=st.session_state["openai_model"],
         messages=[
             {"role": "system", "content": st.session_state.sys_inst.format(
@@ -200,8 +207,10 @@ if prompt := st.chat_input("What is up?"):
         stream=True):

         # render gpt response in realtime
-        full_response += chunk.choices[0].delta.get("content", "")
-        message_placeholder.markdown(full_response + "▌")
+        if chunk.choices[0].delta.content:
+            # print(chunk.choices[0].delta.content)
+            full_response += chunk.choices[0].delta.content
+            message_placeholder.markdown(full_response + "▌")
         message_placeholder.markdown(full_response)

         # add to st history
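The loop body changes because v1 stream chunks are typed objects rather than dicts: `chunk.choices[0].delta.content` is an attribute that comes back None on chunks carrying no text (notably the final chunk holding the finish reason), hence the new `if` guard where the old dict API allowed `.get("content", "")`. One caveat worth noting: gpt-3.5-turbo-instruct is a legacy completions model, so the chat endpoint is usually paired with a chat model instead. A self-contained sketch of the same streaming render, with the model name and prompt as stand-ins:

    import streamlit as st
    from openai import OpenAI

    client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

    placeholder = st.empty()
    full_response = ""

    for chunk in client.chat.completions.create(
            model="gpt-3.5-turbo",  # stand-in chat model
            messages=[{"role": "user", "content": "Say hello"}],
            stream=True):
        # skip housekeeping chunks whose delta carries no text
        if chunk.choices[0].delta.content:
            full_response += chunk.choices[0].delta.content
            placeholder.markdown(full_response + "▌")  # typing-cursor effect

    placeholder.markdown(full_response)  # final render drops the cursor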