johann22 committed
Commit eac6dfb · Parent: f7e6ab1

Update app_dialogue.py

Files changed (1):
  1. app_dialogue.py: +17 -22
app_dialogue.py CHANGED
@@ -17,8 +17,9 @@ from transformers import AutoProcessor
 
 
 MODELS = [
-    "HuggingFaceM4/idefics-9b-instruct",
-    #"HuggingFaceM4/idefics-80b-instruct",
+    # "HuggingFaceM4/idefics-9b-instruct",
+    #"tiiuae/falcon-180B",
+    "HuggingFaceM4/idefics-80b-instruct",
 ]
 
 API_PATHS = {
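For context, MODELS and API_PATHS are typically consumed by a model-selector dropdown whose selected value is used to look up the matching inference endpoint. A minimal sketch of that pattern, assuming Gradio 3.x; the dropdown variable name and the placeholder endpoint URLs are illustrative, not taken from this diff:

import gradio as gr

MODELS = ["HuggingFaceM4/idefics-80b-instruct"]  # the checkpoint enabled by this commit
# Placeholder endpoint mapping; the real API_PATHS values are defined elsewhere in app_dialogue.py.
API_PATHS = {m: f"https://example.invalid/endpoints/{m}" for m in MODELS}

with gr.Blocks() as sketch:
    # The selected model name doubles as the key into API_PATHS when a request is sent.
    model_selector = gr.Dropdown(choices=MODELS, value=MODELS[0], label="Model")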
@@ -32,7 +33,6 @@ API_PATHS = {
 
 SYSTEM_PROMPT = [
     """The following is a conversation between a highly knowledgeable and intelligent visual AI assistant, called Assistant, and a human user, called User. In the following interactions, User and Assistant will converse in natural language, and Assistant will do its best to answer User’s questions. Assistant has the ability to perceive images and reason about the content of visual inputs. Assistant was built to be respectful, polite and inclusive. It knows a lot, and always tells the truth. When prompted with an image, it does not make up facts.
-
 The conversation begins:""",
     """\nUser:""",
     "https://i1.pickpik.com/photos/515/906/355/kittens-cats-pet-cute-preview.jpg",
@@ -62,10 +62,12 @@ BAN_TOKENS = ( # For documentation puporse. We are not using this list, it is h
 EOS_STRINGS = ["<end_of_utterance>", "\nUser:"]
 STOP_SUSPECT_LIST = []
 
+#GRADIO_LINK = "https://huggingfacem4-idefics-playground.hf.space"
 GRADIO_LINK = "https://johann22-idefics-playground.hf.space"
-API_TOKEN = os.getenv("HF_AUTH_TOK")
-IDEFICS_LOGO = "https://huggingface.co/spaces/johann22/idefics_playground/resolve/main/IDEFICS_logo.png"
-#IDEFICS_LOGO = "IDEFICS_logo.png"
+API_TOKEN = os.getenv("HF_AUTH_TOKEN")
+IDEFICS_LOGO = "https://huggingface.co/spaces/HuggingFaceM4/idefics_playground/resolve/main/IDEFICS_logo.png"
+
+
 
 PROCESSOR = AutoProcessor.from_pretrained(
     "HuggingFaceM4/idefics-9b-instruct",
@@ -112,7 +114,7 @@ DEFAULT_IMAGES_TMP_PATH_TO_URL = {}
 for im_path in all_images:
     H = gr.Image(im_path, visible=False, type="filepath")
     tmp_filename = H.preprocess(H.value)
-    DEFAULT_IMAGES_TMP_PATH_TO_URL[tmp_filename] = f"https://huggingface.co/spaces/johann22/idefics_playground/resolve/main/example_images/{os.path.basename(im_path)}"
+    DEFAULT_IMAGES_TMP_PATH_TO_URL[tmp_filename] = f"https://huggingface.co/spaces/HuggingFaceM4/idefics_playground/resolve/main/example_images/{os.path.basename(im_path)}"
 
 
 # Utils to handle the image markdown display logic
@@ -424,7 +426,7 @@ with gr.Blocks(title="IDEFICS Playground", theme=gr.themes.Base()) as demo:
                 label="Maximum number of new tokens to generate",
             )
             repetition_penalty = gr.Slider(
-                minimum=0.01,
+                minimum=0.0,
                 maximum=5.0,
                 value=1.0,
                 step=0.01,
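The slider value is ultimately forwarded as a generation parameter. A small sketch of that hand-off, assuming a plain keyword-argument dict on the backend side; the helper name is illustrative, not from this file:

def build_generation_kwargs(repetition_penalty: float, max_new_tokens: int = 512) -> dict:
    # repetition_penalty == 1.0 disables the penalty; values below 1.0 make repetition more likely.
    return {"repetition_penalty": repetition_penalty, "max_new_tokens": max_new_tokens}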
@@ -578,7 +580,7 @@ with gr.Blocks(title="IDEFICS Playground", theme=gr.themes.Base()) as demo:
         Same as `model_inference` but in greedy mode and with the 80b-instruct.
         Specifically for pre-computing the default examples.
         """
-        model_selector="HuggingFaceM4/idefics-9b-instruct"
+        model_selector="HuggingFaceM4/idefics-80b-instruct"
         user_prompt_str=message
         chat_history=[]
         max_new_tokens=512
@@ -672,22 +674,14 @@ with gr.Blocks(title="IDEFICS Playground", theme=gr.themes.Base()) as demo:
         ],
     )
 
-    def remove_last_turn_OLD(chat_history):
-        if len(chat_history) == 0:
-            return gr.Update(), gr.update()
-        last_interaction = chat_history[-1]
-        chat_history = chat_history[:-1]
-        chat_update = gr.update(value=chat_history)
-        text_update = gr.update(value=last_interaction[0])
-        return chat_update, text_update
     def remove_last_turn(chat_history):
         if len(chat_history) == 0:
-            return gr.update(), gr.update()
+            return gr.Update(), gr.Update()
         last_interaction = chat_history[-1]
         chat_history = chat_history[:-1]
         chat_update = gr.update(value=chat_history)
         text_update = gr.update(value=last_interaction[0])
-        return chat_update, text_update
+        return chat_update, text_update
 
     regenerate_btn.click(fn=remove_last_turn, inputs=chatbot, outputs=[chatbot, textbox]).then(
         fn=model_inference,
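For reference, a self-contained sketch of the remove-last-turn pattern kept by this hunk, written against the lowercase gr.update() helper documented for Gradio 3.x; the component layout is illustrative, not the app's full UI:

import gradio as gr

def remove_last_turn(chat_history):
    # Nothing to pop: return no-op updates for both outputs.
    if len(chat_history) == 0:
        return gr.update(), gr.update()
    last_interaction = chat_history[-1]
    chat_history = chat_history[:-1]
    # Push the truncated history back to the Chatbot and restore the last user message to the Textbox.
    return gr.update(value=chat_history), gr.update(value=last_interaction[0])

with gr.Blocks() as sketch:
    chatbot = gr.Chatbot()
    textbox = gr.Textbox()
    regenerate_btn = gr.Button("Regenerate")
    regenerate_btn.click(fn=remove_last_turn, inputs=chatbot, outputs=[chatbot, textbox])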
@@ -785,7 +779,7 @@ with gr.Blocks(title="IDEFICS Playground", theme=gr.themes.Base()) as demo:
     # - Using the inline syntax: `text<fake_token_around_image><image:URL_IMAGE><fake_token_around_image>text`
 
     # The second syntax allows inputting an arbitrary number of images.""")
-
+    '''
     examples_path = os.path.dirname(__file__)
     gr.Examples(
         examples=[
@@ -876,13 +870,14 @@ with gr.Blocks(title="IDEFICS Playground", theme=gr.themes.Base()) as demo:
         inputs=[textbox, imagebox],
         outputs=[textbox, imagebox, chatbot],
         fn=process_example,
-        cache_examples=False,
+        cache_examples=True,
         examples_per_page=6,
         label=(
             "Click on any example below to get started.\nFor convenience, the model generations have been"
             " pre-computed with `idefics-80b-instruct`."
         ),
     )
+    '''
 
 demo.queue(concurrency_count=40, max_size=40)
-demo.launch()
+demo.launch()
 
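For reference, the queue-then-launch pattern that closes the file, shown standalone; the concurrency numbers mirror this diff, while the stub UI is only a placeholder:

import gradio as gr

with gr.Blocks(title="IDEFICS Playground") as demo:
    gr.Markdown("Stub UI standing in for the playground interface.")

# Enable request queuing before launching; 40/40 are the values used in this commit.
demo.queue(concurrency_count=40, max_size=40)
demo.launch()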