Commit f4bec4e
Parent(s): f289dc1
Upload 3 files
appp.py
ADDED
@@ -0,0 +1,26 @@
import streamlit as st
from streamlit_option_menu import option_menu
from page1 import text
from page2 import image


def main():

    st.title("Chat With Gemini")

    # Sidebar navigation between the two model pages.
    with st.sidebar:
        selection = option_menu(
            menu_title="Main Menu",
            options=["Text Model", "Image Model"],
            icons=["pencil", "image"],
            menu_icon="cast",
            default_index=0
        )

    if selection == "Text Model":
        text()

    elif selection == "Image Model":
        image()


if __name__ == '__main__':
    main()
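(Note: Hugging Face Streamlit Spaces launch app.py by default, so with the entry point named appp.py the Space presumably declares app_file: appp.py in its README front matter, which is not part of this commit. Locally the app starts with streamlit run appp.py.)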
page1.py
ADDED
@@ -0,0 +1,56 @@
import streamlit as st
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories import StreamlitChatMessageHistory


def text():

    # NOTE: hardcoded API key committed to the repo; see the secrets note below.
    apiKey = "AIzaSyAXkkcrrUBjPEgj93tZ9azy7zcS1wI1jUA"
    msgs = StreamlitChatMessageHistory(key="special_app_key")

    memory = ConversationBufferMemory(memory_key="history", chat_memory=msgs)
    if len(msgs.messages) == 0:
        msgs.add_ai_message("How can I help you?")

    template = """You are an AI chatbot having a conversation with a human.

{history}
Human: {human_input}
AI: """
    prompt_template = PromptTemplate(input_variables=["history", "human_input"], template=template)
    llm_chain = LLMChain(
        llm=ChatGoogleGenerativeAI(model="gemini-pro", google_api_key=apiKey),
        prompt=prompt_template,
        memory=memory,
    )

    if 'messages' not in st.session_state:
        st.session_state['messages'] = []

    # Replay the conversation so far.
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    user_input = st.chat_input("Say something")

    if user_input:
        with st.chat_message("user"):
            st.markdown(user_input)
        st.session_state.messages.append({"role": "user", "content": user_input})

        # LLMChain.stream yields dicts; accumulate the "text" field so that
        # no chunk is silently dropped.
        text_output = ""
        for chunk in llm_chain.stream(user_input):
            text_output += chunk.get("text", "")

        with st.chat_message("assistant"):
            st.markdown(text_output)
        st.session_state.messages.append({"role": "assistant", "content": text_output})
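The API key above is committed in plain text, where anyone browsing the Space's files can read it. A minimal sketch of loading it from Streamlit secrets instead, assuming a GOOGLE_API_KEY entry is configured in the Space settings (the secret name is illustrative, not part of this commit):

import streamlit as st

# GOOGLE_API_KEY is an assumed secret name, set in the Space's settings or in
# .streamlit/secrets.toml; it does not appear in the committed files.
apiKey = st.secrets["GOOGLE_API_KEY"]

The committed key should also be revoked and reissued, since it already sits in the public Git history.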
page2.py
ADDED
@@ -0,0 +1,82 @@
import streamlit as st
from langchain_core.messages import HumanMessage
from langchain_google_genai import ChatGoogleGenerativeAI
from PIL import Image
import base64
import io


# Streamlit app
def image():

    def process_image(uploaded_file):
        # Display the uploaded image
        img = Image.open(uploaded_file)
        st.image(img, caption='Uploaded Image', use_column_width=True)

        # Convert the image to base64 and return a data URL.
        # PNG uploads may carry an alpha channel, which JPEG cannot store,
        # so normalise to RGB before saving.
        buffered = io.BytesIO()
        img.convert("RGB").save(buffered, format="JPEG")
        image_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
        return f"data:image/jpeg;base64,{image_base64}"

    # NOTE: hardcoded API key committed to the repo; see the secrets note
    # under page1.py.
    apiKey = "AIzaSyAXkkcrrUBjPEgj93tZ9azy7zcS1wI1jUA"

    llm = ChatGoogleGenerativeAI(model="gemini-pro-vision", google_api_key=apiKey)

    image_url = None  # Initialize image_url outside the if statement

    uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
    if uploaded_file is not None:
        image_url = process_image(uploaded_file)

    if 'messages' not in st.session_state:
        st.session_state['messages'] = []

    # Replay the conversation so far.
    for past in st.session_state.messages:
        with st.chat_message(past["role"]):
            st.markdown(past["content"])

    prompt = st.chat_input("Say something")

    if prompt:
        with st.chat_message("user"):
            st.markdown(prompt)
        st.session_state.messages.append({"role": "user", "content": prompt})

        # gemini-pro-vision needs an image part, so bail out if nothing has
        # been uploaded yet.
        if image_url is None:
            st.warning("Please upload an image first.")
            return

        # Multimodal message: one text part plus the image as a data URL.
        msg = HumanMessage(
            content=[
                {"type": "text", "text": prompt},  # You can optionally provide text parts
                {"type": "image_url", "image_url": image_url},
            ]
        )
        response = llm.invoke([msg])
        text_output = response.content

        with st.chat_message("assistant"):
            st.markdown(text_output)
        st.session_state.messages.append({"role": "assistant", "content": text_output})
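The data-URL helper inside process_image can be exercised outside Streamlit. A quick standalone check, assuming the same Pillow-based conversion (test code sketched here, not part of the commit):

import base64
import io
from PIL import Image

# A tiny in-memory image stands in for an uploaded file.
img = Image.new("RGB", (4, 4), "red")
buffered = io.BytesIO()
img.convert("RGB").save(buffered, format="JPEG")
image_url = "data:image/jpeg;base64," + base64.b64encode(buffered.getvalue()).decode("utf-8")
assert image_url.startswith("data:image/jpeg;base64,")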