Commit 4edecd5 · Parent: d8bcdf5
Update page2.py
page2.py
CHANGED
@@ -5,6 +5,11 @@ from streamlit_chat import message
 from PIL import Image
 import base64
 import io
+from IPython.display import display
+from IPython.display import Markdown
+
+import pathlib
+import textwrap
 from langchain.chains import LLMChain
 from langchain.prompts import PromptTemplate
 from langchain.memory import ConversationBufferMemory
@@ -12,7 +17,39 @@ from langchain.memory.chat_message_histories import StreamlitChatMessageHistory
 
 # Streamlit app
 def image():
-
+    st.markdown("""
+        <style>
+        .anim-typewriter {
+            animation: typewriter 3s steps(40) 1s 1 normal both, blinkTextCursor 800ms steps(40) infinite normal;
+            overflow: hidden;
+            white-space: nowrap;
+            border-right: 4px solid;
+            font-family: serif;
+            font-size: 1.1em;
+        }
+        @keyframes typewriter {
+            from {
+                width: 0;
+            }
+            to {
+                width: 100%;
+                height: 100%
+            }
+        }
+        @keyframes blinkTextCursor {
+            from {
+                border-right-color: rgba(255, 255, 255, 0.75);
+            }
+            to {
+                border-right-color: transparent;
+            }
+        }
+        </style>
+        """, unsafe_allow_html=True)
+    text1 = "Hello π, upload an image and ask questions related to it!"
+    animated = f'<div class="line-1 anim-typewriter">{text1}</div>'
+    with st.chat_message("assistant").markdown(animated, unsafe_allow_html=True):
+        st.markdown(animated, unsafe_allow_html=True)
     def process_image(uploaded_file):
         # Display the uploaded image
         image = Image.open(uploaded_file)
@@ -36,8 +73,7 @@ def image():
     if uploaded_file is not None:
         image_url = process_image(uploaded_file)
 
-
-    st.write("Hello π, upload an image and ask questions related to it!")
+
     if 'messages' not in st.session_state:
         st.session_state['messages'] = []
 
@@ -63,9 +99,92 @@ def image():
                 "content": prompt
            }
        )
-
-
-
+        spinner_html = """
+        <div class="col-3">
+            <div class="snippet" data-title="dot-pulse">
+                <div class="stage">
+                    <div class="dot-pulse"></div>
+                </div>
+            </div>
+        </div>
+        """
+
+        spinner_css = """
+        .dot-pulse {
+            position: relative;
+            left: -9999px;
+
+            width: 10px;
+            height: 10px;
+            border-radius: 5px;
+            background-color: #9880ff;
+            color: #9880ff;
+            box-shadow: 9999px 0 0 -5px;
+            animation: dot-pulse 1.5s infinite linear;
+            animation-delay: 0.25s;
+        }
+        .dot-pulse::before, .dot-pulse::after {
+            content: "";
+            display: inline-block;
+            position: absolute;
+            top: 0;
+            width: 10px;
+            height: 10px;
+            border-radius: 5px;
+            background-color: #9880ff;
+            color: #9880ff;
+        }
+        .dot-pulse::before {
+            box-shadow: 9984px 0 0 -5px;
+            animation: dot-pulse-before 1.5s infinite linear;
+            animation-delay: 0s;
+        }
+        .dot-pulse::after {
+            box-shadow: 10014px 0 0 -5px;
+            animation: dot-pulse-after 1.5s infinite linear;
+            animation-delay: 0.5s;
+        }
+
+        @keyframes dot-pulse-before {
+            0% {
+                box-shadow: 9984px 0 0 -5px;
+            }
+            30% {
+                box-shadow: 9984px 0 0 2px;
+            }
+            60%, 100% {
+                box-shadow: 9984px 0 0 -5px;
+            }
+        }
+        @keyframes dot-pulse {
+            0% {
+                box-shadow: 9999px 0 0 -5px;
+            }
+            30% {
+                box-shadow: 9999px 0 0 2px;
+            }
+            60%, 100% {
+                box-shadow: 9999px 0 0 -5px;
+            }
+        }
+        @keyframes dot-pulse-after {
+            0% {
+                box-shadow: 10014px 0 0 -5px;
+            }
+            30% {
+                box-shadow: 10014px 0 0 2px;
+            }
+            60%, 100% {
+                box-shadow: 10014px 0 0 -5px;
+            }
+        }
+        """
+
+        st.markdown(f'<style>{spinner_css}</style>', unsafe_allow_html=True)
+        st.markdown(spinner_html, unsafe_allow_html=True)
+        response = llm.invoke([message])
+        text_output = response.content
+        st.markdown('<style>.dot-pulse { visibility: hidden; }</style>', unsafe_allow_html=True)
 
         with st.chat_message("assistant").markdown(text_output):
             st.session_state.messages.append(