Update app.py
app.py
CHANGED
@@ -1,29 +1,26 @@
 import os
 
-import openai
 import streamlit as st
 from dotenv import load_dotenv
 from PIL import Image
 
 from blip_model import extract_image_details
-from
+from qwen_model import generate_response
 
-# Load environment variables from .env
+# Load environment variables from .env (if you have any relevant ones)
 load_dotenv()
 
-# Set OpenAI API key
-openai.api_key = os.getenv('api_key')
-
 # Initialize session state for image details
 if "image_details" not in st.session_state:
     st.session_state.image_details = ""
 
-st.title("Image to Text Response with RAG")
+st.title("Image to Text Response with RAG (Qwen)")
 
 # File uploader for image upload
 uploaded_file = st.file_uploader("Upload an Image", type=["jpg", "jpeg", "png"])
 
 if uploaded_file is not None:
+    # Check if a new file is uploaded
     if "uploaded_file" not in st.session_state or uploaded_file != st.session_state.uploaded_file:
         st.session_state.uploaded_file = uploaded_file
 
@@ -33,10 +30,12 @@ if uploaded_file is not None:
         # Show loading status while processing the image
         with st.status("Processing image...", state="running"):
             image = Image.open(uploaded_file)
+            # Extract text details from the image (using your BLIP model)
             st.session_state.image_details = extract_image_details(image)
 
         st.success("Image processed successfully.")
 else:
+    # If no file is uploaded, stop the execution
     st.stop()
 
 # Chat interface
@@ -57,7 +56,7 @@ if st.session_state.image_details:
         # Add user message to session state
         st.session_state.messages.append({"role": "user", "content": prompt})
 
-        # Generate response
+        # Generate response using Qwen
        response = generate_response([st.session_state.image_details], prompt)
         st.session_state.messages.append({"role": "assistant", "content": response})
 
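The new import assumes a `qwen_model` module exposing `generate_response(context_chunks, prompt)`; that module is not part of this commit. A minimal sketch of the interface the call site expects might look like the following — only the module name and the call shape come from the diff, while the checkpoint, chat template, and generation settings are assumptions:

```python
# qwen_model.py -- hypothetical sketch of the interface app.py now expects.
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_NAME = "Qwen/Qwen2-1.5B-Instruct"  # assumed checkpoint, not from the diff

_tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
_model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, device_map="auto")


def generate_response(context_chunks: list[str], prompt: str) -> str:
    """Answer `prompt` grounded in the retrieved `context_chunks`
    (here: the BLIP image description), RAG-style."""
    messages = [
        {
            "role": "system",
            "content": "Answer using only the provided image details:\n"
            + "\n".join(context_chunks),
        },
        {"role": "user", "content": prompt},
    ]
    # Qwen instruct checkpoints ship a chat template usable via apply_chat_template.
    inputs = _tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(_model.device)
    output_ids = _model.generate(inputs, max_new_tokens=256)
    # Strip the prompt tokens, keeping only the newly generated answer.
    return _tokenizer.decode(output_ids[0][inputs.shape[-1]:], skip_special_tokens=True)
```

Passing the image details as a list (`[st.session_state.image_details]`) keeps the signature compatible with feeding in multiple retrieved chunks later, which fits the RAG framing in the app title.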