expandme committed
Commit 995895d · 1 Parent(s): 461c47d

Adding models URL

Files changed (1):
  1. app.py (+8 −6)
app.py CHANGED
@@ -18,19 +18,20 @@ model_info ={
     "Llama-3.2 [3B]":
         {'description':"""The Llama-3.2 3B Instruct model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
         \nA SLM (Large Language Model) is best for applications requiring fast response times, low resource consumption, and specific, narrow tasks. \n""",
-        'logo':'./Meta.png'},
+        'logo':'./Meta.png',
+        'url':'https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct'},
 
     "Qwen2.5 [3B]":
         {'description':"""The Qwen2.5 3B Instruct model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
         \nA SLM (Large Language Model) is best for applications requiring fast response times, low resource consumption, and specific, narrow tasks. \\n""",
-        'logo':'./Qwen.png'},
-
+        'logo':'./Qwen.png',
+        'url':'https://huggingface.co/Qwen/Qwen2.5-3B-Instruct'},
 
     "Phi-3.5 [3.82B]":
         {'description':"""The Phi-3.5 mini instruct model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
         \nA SLM (Large Language Model) is best for applications requiring fast response times, low resource consumption, and specific, narrow tasks. \ \n""",
-        'logo':'./ms.png'},
-
+        'logo':'./ms.png',
+        'url':'https://huggingface.co/microsoft/Phi-3.5-mini-instruct'},
 }
 
 def format_promt(message, custom_instructions=None):
@@ -87,7 +88,8 @@ st.sidebar.button('Reset Chat', on_click=reset_conversation)
 
 st.sidebar.write(f"You're now chatting with **{selected_model}**")
 st.sidebar.markdown(model_info[selected_model]['description'])
-st.sidebar.image(model_info[selected_model]['logo'])
+st.sidebar.markdown(f"[![Model Logo]({model_info[selected_model]['logo']})]({model_info[selected_model]['url']})")
+st.sidebar.markdown(f"[View model on 🤗 Hugging Face]({model_info[selected_model]['url']})")
 st.sidebar.markdown("*Generated content can be inaccurate, offensive or non-factual!!!*")
 
 if "prev_option" not in st.session_state: