abdullahalzubaer committed
Commit 3844605
1 Parent(s): 4ffb70d

Update app.py

Files changed (1): app.py +3 -5
app.py CHANGED
@@ -25,12 +25,10 @@ model_links ={
 #Pull info about the model to display
 model_info ={
     "Mistral-7B":
-        {'description':"""The Mistral model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
-        \nIt was created by the [**Mistral AI**](https://mistral.ai/news/announcing-mistral-7b/) team as has over **7 billion parameters.** \n""",
-        'logo':'https://mistral.ai/images/logo_hubc88c4ece131b91c7cb753f40e9e1cc5_2589_256x0_resize_q97_h2_lanczos_3.webp'},
+        {'description':"""The Mistral model is able to have question and answer interactions.\n \
+        \nIt was created by the [**Mistral AI**](https://mistral.ai/news/announcing-mistral-7b/) team as has over **7 billion parameters.** \n""",},
     "Phi-3.5":
-        {'description':"""Phi-3.5-mini is a lightweight, state-of-the-art open model built upon datasets used for Phi-3 - synthetic data and filtered publicly available websites - with a focus on very high-quality, reasoning dense data. The model belongs to the Phi-3 model family and supports 128K token context length. The model underwent a rigorous enhancement process, incorporating both supervised fine-tuning, proximal policy optimization, and direct preference optimization to ensure precise instruction adherence and robust safety measures.""",
-        'logo':'https://mistral.ai/images/logo_hubc88c4ece131b91c7cb753f40e9e1cc5_2589_256x0_resize_q97_h2_lanczos_3.webp'}
+        {'description':"""Phi-3.5-mini is a lightweight, state-of-the-art open model built upon datasets used for Phi-3 - synthetic data and filtered publicly available websites - with a focus on very high-quality, reasoning dense data. The model belongs to the Phi-3 model family and supports 128K token context length. The model underwent a rigorous enhancement process, incorporating both supervised fine-tuning, proximal policy optimization, and direct preference optimization to ensure precise instruction adherence and robust safety measures.""",}
 
     # "Gemma-7B":
     # {'description':"""The Gemma model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
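The commit drops the 'logo' entries and keeps only 'description' for each model. As a minimal sketch of the consequence, assuming `model_info` is rendered in a Streamlit sidebar (hypothetical names and layout, not taken from the actual app.py in this diff), any lookup of the removed 'logo' key now needs to be guarded or deleted:

```python
import streamlit as st

# Minimal, hypothetical sketch -- not part of this commit's app.py.
# model_info mirrors the structure after this change: 'description' only.
model_info = {
    "Mistral-7B": {
        'description': "The Mistral model is able to have question and answer interactions.",
    },
    "Phi-3.5": {
        'description': "Phi-3.5-mini is a lightweight, state-of-the-art open model.",
    },
}

# Assumed sidebar model picker (illustrative; the real app may differ).
selected_model = st.sidebar.selectbox("Select Model", list(model_info.keys()))
st.sidebar.markdown(model_info[selected_model]['description'])

# Before this commit each entry also carried a 'logo' URL. After the change,
# any image call that read that key must be guarded or removed:
if 'logo' in model_info[selected_model]:
    st.sidebar.image(model_info[selected_model]['logo'])
```

A membership check (or `.get('logo')`) keeps the app from raising a KeyError for entries that, after this commit, only define 'description'.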