LeoWalker committed
Commit 3362c76 · 1 Parent(s): fcfd432

staged small changes to add ChatGoogleGenerativeAI instead of toggling between it and GPT-4o

Files changed (2)
  1. requirements.txt +1 -0
  2. streamlit-gpt4o/app.py +2 -0
requirements.txt CHANGED
@@ -1,6 +1,7 @@
 https://github.com/joshuasundance-swca/st-multimodal-chatinput/releases/download/v0.1.7-experimental-unapproved-text-color/st_multimodal_chatinput-0.1.7-py3-none-any.whl
 langchain
 langchain-openai
+langchain-google-genai
 langsmith
 openai
 pillow>=10.0.1 # not directly required, pinned by Snyk to avoid a vulnerability
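
For completeness, a quick check that the newly added dependency resolves; this snippet is illustrative only and not part of the commit. It assumes requirements.txt has already been installed in the active environment (e.g. with pip install -r requirements.txt).

# Illustrative check only (not part of the commit): confirms the new
# langchain-google-genai dependency imports and exposes the class app.py stages.
from langchain_google_genai import ChatGoogleGenerativeAI

print(ChatGoogleGenerativeAI.__name__)  # prints "ChatGoogleGenerativeAI" if the install worked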
streamlit-gpt4o/app.py CHANGED
@@ -11,6 +11,7 @@ from langchain_core.messages import HumanMessage
 from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
 from langchain_core.runnables.history import RunnableWithMessageHistory
 from langchain_openai import ChatOpenAI
+from langchain_google_genai import ChatGoogleGenerativeAI
 from st_multimodal_chatinput import multimodal_chatinput
 
 __version__ = "0.0.4"
@@ -98,6 +99,7 @@ with st.sidebar:
         verbose=True,
         openai_api_key=openai_api_key,
     )
+    # llm = ChatGoogleGenerativeAI(model="gemini-1.5-pro-preview-0514")
     runnable = prompt | llm
     with_message_history = RunnableWithMessageHistory(
         runnable,
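
For reference, a minimal sketch of how the staged ChatGoogleGenerativeAI import could slot into the existing prompt | llm chain once the commented-out line is enabled. The model id, environment variable, and prompt text below are illustrative assumptions, not part of this commit.

import os

from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_google_genai import ChatGoogleGenerativeAI

# Assumed model id and API-key env var; the staged comment in app.py names a
# preview build ("gemini-1.5-pro-preview-0514") instead.
llm = ChatGoogleGenerativeAI(
    model="gemini-1.5-pro",
    google_api_key=os.environ["GOOGLE_API_KEY"],
    temperature=0,
)

# Same chain shape app.py builds around ChatOpenAI: a prompt with chat history
# piped into the model, ready to wrap in RunnableWithMessageHistory.
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant."),
        MessagesPlaceholder(variable_name="history"),
        ("human", "{input}"),
    ]
)

runnable = prompt | llm
# runnable.invoke({"history": [], "input": "Hello"}) returns an AIMessage.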