Update app.py
Browse files
app.py
CHANGED
@@ -46,6 +46,7 @@ def bot(
|
|
46 |
google_key: str,
|
47 |
model_name: str,
|
48 |
image_prompt: Optional[Image.Image],
|
|
|
49 |
temperature: float,
|
50 |
max_output_tokens: int,
|
51 |
stop_sequences: str,
|
@@ -84,6 +85,23 @@ def bot(
|
|
84 |
generation_config=generation_config)
|
85 |
response.resolve()
|
86 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
87 |
# streaming effect
|
88 |
chatbot[-1][1] = ""
|
89 |
for chunk in response:
|
@@ -103,6 +121,7 @@ google_key_component = gr.Textbox(
|
|
103 |
)
|
104 |
|
105 |
image_prompt_component = gr.Image(type="pil", label="Image", scale=1)
|
|
|
106 |
|
107 |
model_selection = gr.Dropdown(["gemini-1.0-pro", "gemini-pro-vision","gemini-1.5-flash-latest", "gemini-1.5-pro-latest","gemini-1.0-pro-001"],label="Select Gemini Model",value="gemini-1.0-pro")
|
108 |
|
@@ -182,6 +201,7 @@ bot_inputs = [
|
|
182 |
google_key_component,
|
183 |
model_selection,
|
184 |
image_prompt_component,
|
|
|
185 |
temperature_component,
|
186 |
max_output_tokens_component,
|
187 |
stop_sequences_component,
|
@@ -198,6 +218,7 @@ with gr.Blocks() as demo:
|
|
198 |
google_key_component.render()
|
199 |
with gr.Row():
|
200 |
image_prompt_component.render()
|
|
|
201 |
model_selection.render()
|
202 |
chatbot_component.render()
|
203 |
text_prompt_component.render()
|
|
|
46 |
google_key: str,
|
47 |
model_name: str,
|
48 |
image_prompt: Optional[Image.Image],
|
49 |
+
video_prompt,
|
50 |
temperature: float,
|
51 |
max_output_tokens: int,
|
52 |
stop_sequences: str,
|
|
|
85 |
generation_config=generation_config)
|
86 |
response.resolve()
|
87 |
|
88 |
+
if video_prompt is None:
|
89 |
+
model = genai.GenerativeModel(model_name)
|
90 |
+
response = model.generate_content(
|
91 |
+
text_prompt,
|
92 |
+
stream=True,
|
93 |
+
generation_config=generation_config)
|
94 |
+
response.resolve()
|
95 |
+
else:
|
96 |
+
video_prompt = preprocess_image(video_prompt)
|
97 |
+
model = genai.GenerativeModel('gemini-pro-vision')
|
98 |
+
response = model.generate_content(
|
99 |
+
contents=[text_prompt, video_prompt],
|
100 |
+
stream=True,
|
101 |
+
generation_config=generation_config)
|
102 |
+
response.resolve()
|
103 |
+
|
104 |
+
|
105 |
# streaming effect
|
106 |
chatbot[-1][1] = ""
|
107 |
for chunk in response:
|
|
|
121 |
)
|
122 |
|
123 |
image_prompt_component = gr.Image(type="pil", label="Image", scale=1)
|
124 |
+
video_prompt_component = gr.Video()
|
125 |
|
126 |
model_selection = gr.Dropdown(["gemini-1.0-pro", "gemini-pro-vision","gemini-1.5-flash-latest", "gemini-1.5-pro-latest","gemini-1.0-pro-001"],label="Select Gemini Model",value="gemini-1.0-pro")
|
127 |
|
|
|
201 |
google_key_component,
|
202 |
model_selection,
|
203 |
image_prompt_component,
|
204 |
+
video_prompt_component,
|
205 |
temperature_component,
|
206 |
max_output_tokens_component,
|
207 |
stop_sequences_component,
|
|
|
218 |
google_key_component.render()
|
219 |
with gr.Row():
|
220 |
image_prompt_component.render()
|
221 |
+
video_prompt_component.render()
|
222 |
model_selection.render()
|
223 |
chatbot_component.render()
|
224 |
text_prompt_component.render()
|