updated app to use gemini instead of GPT-4o
streamlit-gpt4o/app.py  CHANGED  (+42 -44)
@@ -10,7 +10,7 @@ from langchain_community.chat_message_histories import (
 from langchain_core.messages import HumanMessage
 from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
 from langchain_core.runnables.history import RunnableWithMessageHistory
-from langchain_openai import ChatOpenAI
+# from langchain_openai import ChatOpenAI
 from langchain_google_genai import ChatGoogleGenerativeAI
 from st_multimodal_chatinput import multimodal_chatinput
 from dotenv import load_dotenv
@@ -91,18 +91,17 @@ top = st.container()
 bottom = st.container()
 
 with st.sidebar:
-
-
-
-    model_option = "
-    if
-
-
-
-
-
-
-    llm = ChatGoogleGenerativeAI(model="gemini-1.5-pro-preview-0514")
+    google_api_key = st.text_input("Google Generative AI API Key", type="password")
+    st.write('Gemini 1.5')
+    use_flash = st.toggle(label="`Pro` ⇄ `Flash`", value=True)
+    model_option = "models/gemini-1.5-flash-latest" if use_flash else "models/gemini-1.5-pro-latest"
+    if google_api_key:
+        llm = ChatGoogleGenerativeAI(
+            model=model_option,
+            streaming=True,
+            verbose=True,
+            google_api_key=google_api_key,
+        )
     runnable = prompt | llm
     with_message_history = RunnableWithMessageHistory(
         runnable,
@@ -114,7 +113,7 @@ with st.sidebar:
     langsmith_api_key = st.text_input("LangSmith API Key", type="password")
     langsmith_project_name = st.text_input(
         "LangSmith Project Name",
-        value="streamlit-
+        value="streamlit-gemini",
     )
     langsmith_endpoint = st.text_input(
         "LangSmith Endpoint",
@@ -144,38 +143,37 @@ with st.sidebar:
         st.rerun()
 
 
+if not with_message_history:
+    st.error("Please enter a Google Generative AI API key in the sidebar.")
 
-
-
-
-
-
-chat_input_dict = multimodal_chatinput(text_color="black")
-if chat_input_dict:
-    chat_input_human_message = chat_input_to_human_message(chat_input_dict)
+else:
+    with bottom:
+        chat_input_dict = multimodal_chatinput(text_color="black")
+    if chat_input_dict:
+        chat_input_human_message = chat_input_to_human_message(chat_input_dict)
 
-with top:
-
-
-
-
-
-
-
+    with top:
+        for msg in history.messages:
+            if msg.type.lower() in ("user", "human"):
+                with st.chat_message("human"):
+                    render_human_contents(msg)
+            elif msg.type.lower() in ("ai", "assistant", "aimessagechunk"):
+                with st.chat_message("ai"):
+                    st.write(msg.content)
 
-
+        if chat_input_human_message:
 
-
-
-
-    with st.chat_message("ai"):
-        st.write_stream(
-            with_message_history.stream(
-                {"input": [chat_input_human_message]},
-                {
-                    "configurable": {"session_id": st.session_state.session_id},
-                },
-            ),
-        )
+            with st.chat_message("human"):
+                render_human_contents(chat_input_human_message)
 
-
+            with st.chat_message("ai"):
+                st.write_stream(
+                    with_message_history.stream(
+                        {"input": [chat_input_human_message]},
+                        {
+                            "configurable": {"session_id": st.session_state.session_id},
+                        },
+                    ),
+                )
+
+        chat_input_human_message = None
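
For readers following the change: the streaming call at the bottom of the diff only works because RunnableWithMessageHistory resolves the session_id passed under "configurable" into a chat history object. The sketch below shows one way the surrounding wiring typically looks. It is an illustration, not the app's actual code: the prompt text, history store, and session lookup are assumptions, since the real versions live in the unchanged parts of app.py (the truncated import block at the top of the file suggests a langchain_community chat message history such as StreamlitChatMessageHistory).

# Minimal, self-contained sketch of the wiring this diff relies on. The prompt,
# history store, and session lookup are illustrative assumptions; the app's
# real versions live in the unchanged parts of app.py.
import streamlit as st
from langchain_community.chat_message_histories import StreamlitChatMessageHistory
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_google_genai import ChatGoogleGenerativeAI

# History is kept in st.session_state under the given key, so it survives reruns.
history = StreamlitChatMessageHistory(key="chat_history")

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant."),
        MessagesPlaceholder(variable_name="history"),
        MessagesPlaceholder(variable_name="input"),  # matches {"input": [...]} in the diff
    ]
)

llm = ChatGoogleGenerativeAI(model="models/gemini-1.5-flash-latest")
runnable = prompt | llm

with_message_history = RunnableWithMessageHistory(
    runnable,
    lambda session_id: history,  # assumption: every session maps to the Streamlit-backed history
    input_messages_key="input",
    history_messages_key="history",
)

Note also that the new `if not with_message_history:` guard implies the variable is initialized to a falsy placeholder, such as None, before the sidebar block runs; otherwise the name would be unbound whenever no API key has been entered.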
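
The diff also calls chat_input_to_human_message to turn the multimodal chat widget's output into a LangChain message, but the helper itself sits outside the changed hunks. Below is a hypothetical sketch of such a converter; the "text"/"images" shape of the widget's return value is an assumption about st_multimodal_chatinput, not something this diff confirms.

# Hypothetical sketch of the chat_input_to_human_message helper the diff calls;
# the real implementation is in an unchanged part of app.py. Assumes the
# st_multimodal_chatinput component returns a dict with "text" (str) and
# "images" (list of base64 data URLs), which is an unconfirmed assumption.
from langchain_core.messages import HumanMessage


def chat_input_to_human_message(chat_input_dict: dict) -> HumanMessage | None:
    content = []
    if chat_input_dict.get("text"):
        content.append({"type": "text", "text": chat_input_dict["text"]})
    for data_url in chat_input_dict.get("images", []):
        # ChatGoogleGenerativeAI accepts OpenAI-style image_url content blocks.
        content.append({"type": "image_url", "image_url": {"url": data_url}})
    return HumanMessage(content=content) if content else None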