Docfile committed
Commit a1445fb · verified · 1 parent: f710c8e

Update app.py

Files changed (1)
  1. app.py +55 -184
app.py CHANGED
@@ -1,195 +1,66 @@
- import streamlit as st
- import mediapipe as mp
- import numpy as np
- import base64
- import io
- import PIL.Image
- import asyncio
  import os
- import sounddevice as sd
- from google import genai
- from streamlit_webrtc import webrtc_streamer
- import av
- from mediapipe.tasks import python
- from mediapipe.tasks.python import vision

- # Configuration
- CHANNELS = 1
- SAMPLE_RATE = 16000
- CHUNK_SIZE = 1024

- # Initialize Genai client
- genai.configure(api_key="AIzaSyC_zxN9IHjEAxIoshWPzMfgb9qwMsu5t5Y")
- client = genai.Client(http_options={"api_version": "v1alpha"})
- MODEL = "models/gemini-2.0-flash-exp"
- CONFIG = {"generation_config": {"response_modalities": ["AUDIO"]}}

- class AudioProcessor:
-     def __init__(self):
-         self.stream = None
-         self.audio_queue = asyncio.Queue()
-
-     def audio_callback(self, indata, frames, time, status):
-         """This is called (from a separate thread) for each audio block."""
-         if status:
-             print(status)
-         self.audio_queue.put_nowait(indata.copy())
-
-     def start_stream(self):
-         try:
-             self.stream = sd.InputStream(
-                 channels=CHANNELS,
-                 samplerate=SAMPLE_RATE,
-                 callback=self.audio_callback,
-                 blocksize=CHUNK_SIZE
-             )
-             self.stream.start()
-         except Exception as e:
-             st.error(f"Error starting audio stream: {str(e)}")

-     def stop_stream(self):
-         if self.stream is not None:
-             self.stream.stop()
-             self.stream.close()
-             self.stream = None

- class VideoProcessor:
-     def __init__(self):
-         self.frame_queue = asyncio.Queue(maxsize=5)
-         self.mp_draw = mp.solutions.drawing_utils
-         self.mp_face_detection = mp.solutions.face_detection
-         self.face_detection = self.mp_face_detection.FaceDetection(
-             min_detection_confidence=0.5)
-
-     def video_frame_callback(self, frame):
-         img = frame.to_ndarray(format="rgb24")

-         results = self.face_detection.process(img)
-
-         if results.detections:
-             for detection in results.detections:
-                 self.mp_draw.draw_detection(img, detection)
-
-         pil_img = PIL.Image.fromarray(img)
-         pil_img.thumbnail([1024, 1024])
-
-         image_io = io.BytesIO()
-         pil_img.save(image_io, format="jpeg")
-         image_io.seek(0)
-
-         frame_data = {
-             "mime_type": "image/jpeg",
-             "data": base64.b64encode(image_io.read()).decode()
-         }
-
-         try:
-             self.frame_queue.put_nowait(frame_data)
-         except asyncio.QueueFull:
-             pass

-         return av.VideoFrame.from_ndarray(img, format="rgb24")
-
-     def __del__(self):
-         if hasattr(self, 'face_detection'):
-             self.face_detection.close()

- def initialize_session_state():
-     if 'audio_processor' not in st.session_state:
-         st.session_state.audio_processor = AudioProcessor()
-     if 'video_processor' not in st.session_state:
-         st.session_state.video_processor = VideoProcessor()
-     if 'session' not in st.session_state:
-         st.session_state.session = None
-     if 'messages' not in st.session_state:
-         st.session_state.messages = []
-
- def display_chat_messages():
-     for message in st.session_state.messages:
-         with st.chat_message(message["role"]):
-             st.markdown(message["content"])
-
- def main():
-     st.title("Gemini Interactive Assistant")
-
-     initialize_session_state()

-     st.sidebar.title("Settings")
-     input_mode = st.sidebar.radio(
-         "Input Mode",
-         ["Text Only", "Audio + Video", "Audio Only"]
-     )
-
-     enable_face_detection = st.sidebar.checkbox("Enable Face Detection", value=True)
-
-     if enable_face_detection:
-         detection_confidence = st.sidebar.slider(
-             "Face Detection Confidence",
-             min_value=0.0,
-             max_value=1.0,
-             value=0.5,
-             step=0.1
-         )
-         st.session_state.video_processor.face_detection = (
-             st.session_state.video_processor.mp_face_detection.FaceDetection(
-                 min_detection_confidence=detection_confidence
-             )
-         )
-
-     display_chat_messages()
-
-     if input_mode == "Text Only":
-         user_input = st.chat_input("Your message")
-         if user_input:
-             st.session_state.messages.append({"role": "user", "content": user_input})
-             with st.chat_message("user"):
-                 st.markdown(user_input)
-
-             async def send_message():
-                 async with client.aio.live.connect(model=MODEL, config=CONFIG) as session:
-                     await session.send(user_input, end_of_turn=True)
-                     turn = session.receive()
-                     async for response in turn:
-                         if text := response.text:
-                             st.session_state.messages.append(
-                                 {"role": "assistant", "content": text}
-                             )
-                             with st.chat_message("assistant"):
-                                 st.markdown(text)
-
-             asyncio.run(send_message())
-
-     else:
-         if input_mode == "Audio + Video":
-             ctx = webrtc_streamer(
-                 key="gemini-stream",
-                 video_frame_callback=st.session_state.video_processor.video_frame_callback,
-                 rtc_configuration={"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]},
-                 media_stream_constraints={"video": True, "audio": True},
-             )
-
-         col1, col2 = st.columns(2)
-         with col1:
-             if st.button("Start Recording", type="primary"):
-                 st.session_state.audio_processor.start_stream()
-                 st.session_state['recording'] = True
-
-         with col2:
-             if st.button("Stop Recording", type="secondary"):
-                 st.session_state.audio_processor.stop_stream()
-                 st.session_state['recording'] = False
-
-         async def process_audio_stream():
-             while st.session_state.get('recording', False):
-                 try:
-                     audio_data = await st.session_state.audio_processor.audio_queue.get()
-                     await st.session_state.audio_processor.audio_queue.put({
-                         "data": audio_data.tobytes(),
-                         "mime_type": "audio/pcm",
-                         "sample_rate": SAMPLE_RATE
-                     })
-                 except asyncio.QueueEmpty:
-                     pass
-                 await asyncio.sleep(0.1)

- if __name__ == "__main__":
-     main()
  import os
+ import streamlit as st
+ import google.generativeai as genai
+ from PIL import Image
+
+ # Set up the Streamlit App
+ st.set_page_config(page_title="Multimodal Chatbot with Gemini Flash", layout="wide")
+ st.title("Multimodal Chatbot with Gemini Flash ⚡️")
+ st.caption("Chat with Google's Gemini Flash model using image and text input to get lightning fast results. 🌟")
+
+ # Google API key (hardcoded here rather than collected from the user)
+ api_key = "AIzaSyC_zxN9IHjEAxIoshWPzMfgb9qwMsu5t5Y"
+ # Set up the Gemini model
+ genai.configure(api_key=api_key)
+ model = genai.GenerativeModel(model_name="gemini-1.5-flash-latest")
+
+ if api_key:
+     # Initialize the chat history
+     if "messages" not in st.session_state:
+         st.session_state.messages = []

+     # Sidebar for image upload
+     with st.sidebar:
+         st.title("Chat with Images")
+         uploaded_file = st.file_uploader("Upload an image...", type=["jpg", "jpeg", "png"])
+
+         if uploaded_file:
+             image = Image.open(uploaded_file)
+             st.image(image, caption='Uploaded Image', use_column_width=True)

+     # Main layout
+     chat_placeholder = st.container()

+     with chat_placeholder:
+         # Display the chat history
+         for message in st.session_state.messages:
+             with st.chat_message(message["role"]):
+                 st.markdown(message["content"])

+     # User input area at the bottom
+     prompt = st.chat_input("What do you want to know?")

+     if prompt:
+         inputs = [prompt]

+         # Add user message to chat history
+         st.session_state.messages.append({"role": "user", "content": prompt})
+         # Display user message in chat message container
+         with chat_placeholder:
+             with st.chat_message("user"):
+                 st.markdown(prompt)

+         if uploaded_file:
+             inputs.append(image)

+         with st.spinner('Generating response...'):
+             # Generate response
+             response = model.generate_content(inputs)

+         # Display assistant response in chat message container
+         with chat_placeholder:
+             with st.chat_message("assistant"):
+                 st.markdown(response.text)

+     if uploaded_file and not prompt:
+         st.warning("Please enter a text query to accompany the image.")
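
For context on the new version: the rewritten app.py funnels everything through a single GenerativeModel.generate_content call, which accepts a mixed list of text and PIL images. The sketch below shows that same call outside Streamlit; it assumes a GOOGLE_API_KEY environment variable and a hypothetical local photo.jpg, neither of which is part of the commit (the committed file hardcodes its key and takes the image from st.file_uploader).

# Minimal standalone sketch of the call at the heart of the new app.py.
# Assumptions: the google-generativeai and Pillow packages are installed,
# GOOGLE_API_KEY is set in the environment, and photo.jpg is a hypothetical
# local image file.
import os

import google.generativeai as genai
from PIL import Image

genai.configure(api_key=os.environ["GOOGLE_API_KEY"])
model = genai.GenerativeModel(model_name="gemini-1.5-flash-latest")

# generate_content takes a list mixing text and PIL images, which is how
# app.py assembles its inputs list from the chat prompt and uploaded file.
response = model.generate_content(["Describe this image.", Image.open("photo.jpg")])
print(response.text)

To try the committed app itself, pip install streamlit google-generativeai pillow followed by streamlit run app.py should be enough.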