File size: 2,722 Bytes
f4bec4e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b827b4c
 
 
 
f4bec4e
b827b4c
 
f4bec4e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b827b4c
 
 
f4bec4e
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
import streamlit as st
from langchain_core.messages import HumanMessage
from langchain_google_genai import ChatGoogleGenerativeAI
from streamlit_chat import message
from PIL import Image
import base64
import io
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories import StreamlitChatMessageHistory

# Streamlit app
def image():
    """Streamlit page: upload an image in the sidebar and chat about it.

    Sends the user's prompt plus the uploaded image (as a base64 data URL)
    to Gemini's vision model via langchain's ChatGoogleGenerativeAI, and
    keeps the conversation in ``st.session_state.messages``.
    """
    import os  # local import: only needed here for the API-key lookup

    def process_image(uploaded_file):
        """Display the uploaded image and return it as a JPEG data URL."""
        img = Image.open(uploaded_file)
        st.image(img, caption='Uploaded Image', use_column_width=True)

        # JPEG cannot store an alpha channel; PNG uploads are commonly
        # RGBA or palette mode, which makes save(format="JPEG") raise.
        if img.mode != "RGB":
            img = img.convert("RGB")

        buffered = io.BytesIO()
        img.save(buffered, format="JPEG")
        image_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
        return f"data:image/jpeg;base64,{image_base64}"

    # SECURITY: never hard-code API keys in source (the original committed a
    # live key). Read it from the environment (or st.secrets in deployment).
    apiKey = os.environ.get("GOOGLE_API_KEY", "")

    llm = ChatGoogleGenerativeAI(model="gemini-pro-vision", google_api_key=apiKey)

    image_url = None  # stays None until a file is uploaded
    with st.sidebar:
        uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
        if uploaded_file is not None:
            image_url = process_image(uploaded_file)

    with st.chat_message("assistant"):
        st.write("Hello 👋, upload an image and ask questions related to it!")

    if 'messages' not in st.session_state:
        st.session_state['messages'] = []

    # Replay stored history. Loop variable is `msg` so it does not shadow
    # the `message` name imported from streamlit_chat at module level.
    for msg in st.session_state.messages:
        with st.chat_message(msg["role"]):
            st.markdown(msg["content"])

    prompt = st.chat_input("Say something")

    if prompt:
        # The vision model needs an image; the original built a HumanMessage
        # with image_url=None when nothing was uploaded, which the API rejects.
        if image_url is None:
            st.warning("Please upload an image first.")
            return

        # Build the multimodal request only once we have both parts.
        request = HumanMessage(
            content=[
                {"type": "text", "text": prompt},
                {"type": "image_url", "image_url": image_url},
            ]
        )

        with st.chat_message("user"):
            st.markdown(prompt)
        st.session_state.messages.append({"role": "user", "content": prompt})

        with st.spinner('Generating...'):
            response = llm.invoke([request])
            text_output = response.content

        with st.chat_message("assistant"):
            st.markdown(text_output)
        st.session_state.messages.append({"role": "assistant", "content": text_output})