Niki Zhang committed on
Update app.py
app.py CHANGED
@@ -735,8 +735,8 @@ def upload_callback(image_input, state, visual_chatgpt=None, openai_api_key=None
         )
     ]
 
-    return state, state, image_input, click_state, image_input, image_input, image_input, image_embedding, \
-        original_size, input_size
+    return [state, state, image_input, click_state, image_input, image_input, image_input, image_input, image_embedding, \
+        original_size, input_size] + [f"Name: {name}", f"Artist: {artist}", f"Year: {year}", f"Material: {material}"]*4 + [paragraph,artist]
 
 
 
@@ -1154,6 +1154,8 @@ def cap_everything_withoutsound(image_input, visual_chatgpt, text_refiner,paragr
     # visual_chatgpt.agent.memory.buffer = visual_chatgpt.agent.memory.buffer + visual_chatgpt.global_prompt
     return paragraph
 
+
+
 # def handle_liked(state,like_res):
 #     if state:
 #         like_res.append(state[-1][1])
@@ -1378,6 +1380,14 @@ def create_ui():
                     artist_label_base = gr.Button(value="Artist: ")
                     year_label_base = gr.Button(value="Year: ")
                     material_label_base = gr.Button(value="Material: ")
+
+            with gr.Tab("Base2") as base_tab2:
+                image_input_base_2 = gr.Image(type="pil", interactive=True, elem_id="image_upload")
+                with gr.Row():
+                    name_label_base2 = gr.Button(value="Name: ")
+                    artist_label_base2 = gr.Button(value="Artist: ")
+                    year_label_base2 = gr.Button(value="Year: ")
+                    material_label_base2 = gr.Button(value="Material: ")
 
             with gr.Tab("Click") as click_tab:
                 image_input = gr.Image(type="pil", interactive=True, elem_id="image_upload")
@@ -1423,6 +1433,11 @@ def create_ui():
                 # elem_id="image_sketcher")
                 sketcher_input = gr.ImageEditor(type="pil", interactive=True,
                                                 elem_id="image_sketcher")
+                with gr.Row():
+                    name_label_traj = gr.Button(value="Name: ")
+                    artist_label_traj = gr.Button(value="Artist: ")
+                    year_label_traj = gr.Button(value="Year: ")
+                    material_label_traj = gr.Button(value="Material: ")
                 # example_image_traj = gr.Image(type="pil", interactive=False, visible=False)
                 with gr.Row():
                     clear_button_sketcher = gr.Button(value="Clear Sketch", interactive=True)
@@ -1895,8 +1910,10 @@ def create_ui():
 
 
         image_input_base.upload(upload_callback, [image_input_base, state, visual_chatgpt,openai_api_key],
-                                [chatbot, state, origin_image, click_state, image_input, image_input_base, sketcher_input,
-                                 image_embedding, original_size, input_size,name_label,artist_label,year_label,material_label,name_label_base, artist_label_base, year_label_base, material_label_base,
+                                [chatbot, state, origin_image, click_state, image_input, image_input_base, sketcher_input,image_input_base_2,
+                                 image_embedding, original_size, input_size,name_label,artist_label,year_label,material_label,name_label_base, artist_label_base, year_label_base, material_label_base, \
+                                 name_label_base2, artist_label_base2, year_label_base2, material_label_base2,name_label_traj, artist_label_traj, year_label_traj, material_label_traj, \
+                                 paragraph,artist])
 
         # image_input.upload(upload_callback, [image_input, state, visual_chatgpt, openai_api_key],
         #                     [chatbot, state, origin_image, click_state, image_input, image_input_base, sketcher_input,
@@ -1911,8 +1928,10 @@ def create_ui():
                                  [chatbot, state, aux_state,output_audio])
         submit_button_text.click(lambda: "", None, chat_input)
         example_image.change(upload_callback, [example_image, state, visual_chatgpt, openai_api_key],
-                             [chatbot, state, origin_image, click_state, image_input, image_input_base, sketcher_input,
-                              image_embedding, original_size, input_size,name_label,artist_label,year_label,material_label,name_label_base, artist_label_base, year_label_base, material_label_base,
+                             [chatbot, state, origin_image, click_state, image_input, image_input_base, sketcher_input,image_input_base_2,
+                              image_embedding, original_size, input_size,name_label,artist_label,year_label,material_label,name_label_base, artist_label_base, year_label_base, material_label_base, \
+                              name_label_base2, artist_label_base2, year_label_base2, material_label_base2,name_label_traj, artist_label_traj, year_label_traj, material_label_traj, \
+                              paragraph,artist])
 
         example_image.change(clear_chat_memory, inputs=[visual_chatgpt])
 
@@ -1939,6 +1958,7 @@ def create_ui():
         traj_tab.select(on_click_tab_selected, outputs=[modules_need_gpt1,modules_not_need_gpt2,modules_need_gpt0,modules_need_gpt2])
        click_tab.select(on_click_tab_selected, outputs=[modules_need_gpt1,modules_not_need_gpt2,modules_need_gpt0,modules_need_gpt2])
        base_tab.select(on_base_selected, outputs=[modules_need_gpt0,modules_need_gpt2,modules_not_need_gpt2,modules_need_gpt1])
+       base_tab2.select(on_base_selected, outputs=[modules_not_need_gpt2,modules_not_need_gpt2,modules_need_gpt0,modules_need_gpt1])
 
 
 
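For context on the pattern these hunks rely on: in Gradio, the values an event callback returns are assigned positionally to the components named in its outputs list, which is why upload_callback's return list and the outputs lists of .upload()/.change() grow in lockstep here (the extra image_input in the return matches the new image_input_base_2 output, and the repeated label strings feed the *_base, *_base2, and *_traj buttons). The sketch below is a minimal, self-contained illustration of that mapping, not the app's actual code; the fill_labels callback and its placeholder metadata are made up for the demo.

import gradio as gr

def fill_labels(image):
    # Gradio maps these return values, in order, onto the components listed in
    # `outputs=[...]` below -- one value per component. The metadata here is a
    # hard-coded placeholder standing in for whatever upload_callback computes.
    name, artist, year, material = "Unknown", "Unknown", "n.d.", "Oil on canvas"
    return f"Name: {name}", f"Artist: {artist}", f"Year: {year}", f"Material: {material}"

with gr.Blocks() as demo:
    with gr.Tab("Base2") as base_tab2:
        image_input_base_2 = gr.Image(type="pil", interactive=True)
        with gr.Row():
            name_label_base2 = gr.Button(value="Name: ")
            artist_label_base2 = gr.Button(value="Artist: ")
            year_label_base2 = gr.Button(value="Year: ")
            material_label_base2 = gr.Button(value="Material: ")

    # Uploading an image fills the four label buttons; the tuple returned by
    # fill_labels lines up one-to-one with this outputs list.
    image_input_base_2.upload(fill_labels,
                              inputs=[image_input_base_2],
                              outputs=[name_label_base2, artist_label_base2,
                                       year_label_base2, material_label_base2])

if __name__ == "__main__":
    demo.launch()

The added base_tab2.select(on_base_selected, ...) line follows the same rule: whatever on_base_selected returns is applied, in order, to the components in its outputs list.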