Niki Zhang committed
Commit a572f3e · verified · 1 parent: 84533cd

Update app.py

Files changed (1): app.py (+28 -7)
app.py CHANGED
@@ -397,7 +397,7 @@ def infer(crop_image_path,full_image_path,state,language,task_type=None):

     elif task_type=="task 3":
         item_gallery_output.append("recomendation_pic/3.8.png")
-        item_gallery_output.append("recomendation_pic/basket-2.jpg")
+        item_gallery_output.append("recomendation_pic/basket-2.png")
     input_image = Image.open(full_image_path).convert("RGB")
     input_features = extract_features_siglip(input_image.convert("RGB"))
     input_features = input_features.detach().cpu().numpy()
@@ -999,6 +999,27 @@ async def upload_callback(image_input,state, log_state, task_type, visual_chatgp
         year="12th century (Song Dynasty)"
         material="Chinese painting"
         gender="male"
+
+    elif task_type=="task 1":
+        name ="The Ambassadors"
+        artist ="Hans Holbein the Younger"
+        year = "1533 (Northern Renaissance)"
+        material= "Realism"
+        gender = "male"
+
+    elif task_type=="task 2":
+        name ="The Ambassadors"
+        artist ="Hans Holbein the Younger"
+        year = "1533 (Northern Renaissance)"
+        material= "Realism"
+        gender = "male"
+
+    elif task_type=="task 2":
+        name = "The Football Players"
+        artist= "Albert Gleizes"
+        year= "1912 (Cubism)"
+        material= "Cubism",
+        gender= "male"
     else:
         parsed_data = get_gpt_response(openai_api_key, new_image_path,"Please provide the name, artist, year of creation (including the art historical period), and painting style used for this painting. Return the information in dictionary format without any newline characters. Format as follows: { \"name\": \"Name of the painting\", \"artist\": \"Name of the artist\", \"year\": \"Year of creation (Art historical period)\", \"style\": \"Painting style used in the painting\",\"gender\": \"The gender of the author\"}")
         print(parsed_data)
@@ -1675,7 +1696,7 @@ async def texttospeech(text, language,gender='female'):
     return None

 # give the reason of recommendation
-async def item_associate(new_crop,openai_api_key,language,autoplay,length,log_state,sort_score,narritive,state,history_log, evt: gr.SelectData):
+async def item_associate(new_crop,openai_api_key,language,autoplay,length,log_state,sort_score,narritive,state, evt: gr.SelectData):
     persona=naritive_mapping[narritive]
     rec_path=evt._data['value']['image']['path']
     index="Item Recommendation Picture "+ str(evt.index)
@@ -1684,7 +1705,7 @@ async def item_associate(new_crop,openai_api_key,language,autoplay,length,log_st

     image_paths=[new_crop,rec_path]

-    result=get_gpt_response(openai_api_key, image_paths, prompt,history_log)
+    result=get_gpt_response(openai_api_key, image_paths, prompt)
     print("recommend result",result)
     state += [(None, f"{result}")]
     log_state += [("User wants to know object recomendation reason", None)]
@@ -1700,14 +1721,14 @@ async def item_associate(new_crop,openai_api_key,language,autoplay,length,log_st
     return state,state,audio_output,log_state,index,gr.update(value=[])


-async def style_associate(image_path,openai_api_key,language,autoplay,length,log_state,sort_score,narritive,state,history_log,evt: gr.SelectData):
+async def style_associate(image_path,openai_api_key,language,autoplay,length,log_state,sort_score,narritive,state,evt: gr.SelectData):
     persona=naritive_mapping[narritive]
     rec_path=evt._data['value']['image']['path']
     index="Style Recommendation Picture "+str(evt.index)
     print("rec_path",rec_path)
     prompt=recommendation_prompt[1][persona].format(language=language,length=length)
     image_paths=[image_path,rec_path]
-    result=get_gpt_response(openai_api_key, image_paths, prompt,history_log)
+    result=get_gpt_response(openai_api_key, image_paths, prompt )
     print("recommend result",result)
     state += [(None, f"{result}")]
     log_state += [("User wants to know style recomendation reason", None)]
@@ -2284,7 +2305,7 @@ def create_ui():

     gallery_result.select(
         item_associate,
-        inputs=[new_crop_save_path,openai_api_key,language,auto_play,length,log_state,sort_rec,naritive,recomended_state,history_log],
+        inputs=[new_crop_save_path,openai_api_key,language,auto_play,length,log_state,sort_rec,naritive,recomended_state],
         outputs=[recommend_bot,recomended_state,output_audio,log_state,pic_index,recommend_score],


@@ -2292,7 +2313,7 @@ def create_ui():

     style_gallery_result.select(
         style_associate,
-        inputs=[image_path,openai_api_key,language,auto_play,length,log_state,sort_rec,naritive,recomended_state,history_log],
+        inputs=[image_path,openai_api_key,language,auto_play,length,log_state,sort_rec,naritive,recomended_state],
         outputs=[recommend_bot,recomended_state,output_audio,log_state,pic_index,recommend_score],


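
Note on the metadata block added in the `@@ -999,6 +999,27 @@` hunk: because an elif chain is evaluated top to bottom, the second `elif task_type=="task 2":` branch (the Football Players entry) can never execute, and the trailing comma in `material= "Cubism",` would make that value a one-element tuple. Below is a minimal sketch of the same hard-coded values expressed as a lookup table, purely for illustration; the names TASK_METADATA and resolve_task_metadata are assumptions and do not appear in app.py.

# Illustrative sketch only, not part of this commit.
# Maps the hard-coded task identifiers from the new elif chain to painting metadata.
TASK_METADATA = {
    "task 1": {
        "name": "The Ambassadors",
        "artist": "Hans Holbein the Younger",
        "year": "1533 (Northern Renaissance)",
        "material": "Realism",
        "gender": "male",
    },
    # As committed, "task 2" also resolves to The Ambassadors; the later
    # "Football Players" branch repeats the same condition and is unreachable.
    "task 2": {
        "name": "The Ambassadors",
        "artist": "Hans Holbein the Younger",
        "year": "1533 (Northern Renaissance)",
        "material": "Realism",
        "gender": "male",
    },
}

def resolve_task_metadata(task_type):
    # Return the hard-coded metadata for a known task, or None so the caller
    # can fall back to the get_gpt_response path in the else branch.
    return TASK_METADATA.get(task_type)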