ChatOpenAI -> ChatGoogleGenerativeAI
- requirements.txt +1 -0
- streamlit-gpt4o/app.py +11 -8
requirements.txt
CHANGED
@@ -7,3 +7,4 @@ openai
 pillow>=10.0.1 # not directly required, pinned by Snyk to avoid a vulnerability
 streamlit
 tiktoken
+python-dotenv
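python-dotenv is added so the app can read its API key from a local .env file instead of a Streamlit text input. A minimal sketch of that flow, assuming the key is stored as GOOGLE_API_KEY (the environment variable ChatGoogleGenerativeAI falls back to when no key is passed explicitly); the .env contents and the error message are illustrative, not part of the commit:

# .env (kept out of version control):
# GOOGLE_API_KEY=your-key-here

import os
from dotenv import load_dotenv

load_dotenv()  # loads variables from .env in the working directory into os.environ

# Fail fast if the key is missing rather than erroring inside the chain later.
if not os.getenv("GOOGLE_API_KEY"):
    raise RuntimeError("GOOGLE_API_KEY not set; add it to .env or the Space secrets")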
streamlit-gpt4o/app.py
CHANGED
@@ -13,6 +13,8 @@ from langchain_core.runnables.history import RunnableWithMessageHistory
 from langchain_openai import ChatOpenAI
 from langchain_google_genai import ChatGoogleGenerativeAI
 from st_multimodal_chatinput import multimodal_chatinput
+from dotenv import load_dotenv
+load_dotenv()
 
 __version__ = "0.0.4"
 
@@ -89,17 +91,18 @@ top = st.container()
 bottom = st.container()
 
 with st.sidebar:
-    openai_api_key = st.text_input("OpenAI API Key", type="password")
+    # openai_api_key = st.text_input("OpenAI API Key", type="password")
+    openai_api_key = True
     use_gpt4o = st.toggle(label="`gpt-4-turbo` ⇄ `gpt-4o`", value=True)
     model_option = "gpt-4o" if use_gpt4o else "gpt-4-turbo"
     if openai_api_key:
-        llm = ChatOpenAI(
-            model=model_option,
-            streaming=True,
-            verbose=True,
-            openai_api_key=openai_api_key,
-        )
-
+        # llm = ChatOpenAI(
+        #     model=model_option,
+        #     streaming=True,
+        #     verbose=True,
+        #     openai_api_key=openai_api_key,
+        # )
+        llm = ChatGoogleGenerativeAI(model="gemini-1.5-pro-preview-0514")
     runnable = prompt | llm
     with_message_history = RunnableWithMessageHistory(
         runnable,
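Because the rest of the chain only touches the llm variable, the provider swap is confined to that one constructor; prompt | llm and RunnableWithMessageHistory are untouched. A standalone smoke-test sketch of the new model, assuming GOOGLE_API_KEY is already available via load_dotenv(); the prompt here is a stand-in for the app's own prompt, and the widely available gemini-1.5-pro name is used instead of the preview tag in the diff:

from dotenv import load_dotenv
from langchain_core.prompts import ChatPromptTemplate
from langchain_google_genai import ChatGoogleGenerativeAI

load_dotenv()  # expects GOOGLE_API_KEY in .env or the environment

# Same shape as the app's chain: a chat prompt piped into the chat model.
prompt = ChatPromptTemplate.from_messages(
    [("system", "You are a helpful assistant."), ("human", "{input}")]
)
llm = ChatGoogleGenerativeAI(model="gemini-1.5-pro")  # model name is illustrative
chain = prompt | llm

print(chain.invoke({"input": "Say hello in one short sentence."}).content)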