Update app.py
app.py CHANGED
@@ -39,21 +39,21 @@ model_links ={
 model_info ={
     "Mistral-7B":
         {'description':"""The Mistral model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
-        \nIt was created by the
+        \nIt was created by the Mistral AI team and has over **7 billion parameters.** \n""",
         'logo':'https://mistral.ai/images/logo_hubc88c4ece131b91c7cb753f40e9e1cc5_2589_256x0_resize_q97_h2_lanczos_3.webp'},
     "Gemma-7B":
         {'description':"""The Gemma model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
-        \nIt was created by the
+        \nIt was created by Google's AI team and has over **7 billion parameters.** \n""",
         'logo':'https://pbs.twimg.com/media/GG3sJg7X0AEaNIq.jpg'},
     "Gemma-2B":
         {'description':"""The Gemma model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
-        \nIt was created by the
+        \nIt was created by Google's AI team and has over **2 billion parameters.** \n""",
         'logo':'https://pbs.twimg.com/media/GG3sJg7X0AEaNIq.jpg'},
     "Zephyr-7B":
         {'description':"""The Zephyr model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
         \nFrom Huggingface: \n\
         Zephyr is a series of language models that are trained to act as helpful assistants. \
-
+        Zephyr 7B Gemma\
         is the third model in the series, and is a fine-tuned version of google/gemma-7b \
         that was trained on a mix of publicly available, synthetic datasets using Direct Preference Optimization (DPO)\n""",
         'logo':'https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1/resolve/main/thumbnail.png'},
@@ -61,17 +61,17 @@ model_info ={
         {'description':"""The Zephyr model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
         \nFrom Huggingface: \n\
         Zephyr is a series of language models that are trained to act as helpful assistants. \
-
+        Zephyr-7B-β\
         is the second model in the series, and is a fine-tuned version of mistralai/Mistral-7B-v0.1 \
         that was trained on a mix of publicly available, synthetic datasets using Direct Preference Optimization (DPO)\n""",
         'logo':'https://huggingface.co/HuggingFaceH4/zephyr-7b-alpha/resolve/main/thumbnail.png'},
     "Meta-Llama-3-8B":
         {'description':"""The Llama (3) model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
-        \nIt was created by the
+        \nIt was created by Meta's AI team and has over **8 billion parameters.** \n""",
         'logo':'Llama_logo.png'},
     "Meta-Llama-3.1-8B":
         {'description':"""The Llama (3.1) model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
-        \nIt was created by the
+        \nIt was created by Meta's AI team and has over **8 billion parameters.** \n""",
         'logo':'Llama3_1_logo.png'},
 }

@@ -123,7 +123,7 @@ st.sidebar.write(f"You're now chatting with **{selected_model}**")
 st.sidebar.markdown(model_info[selected_model]['description'])
 st.sidebar.image(model_info[selected_model]['logo'])
 st.sidebar.markdown("*Generated content may be inaccurate or false.*")
-st.sidebar.markdown("\
+st.sidebar.markdown("\nFor More Contact: scientipix@gmail.com")
 st.sidebar.markdown("\nRun into issues? \nTry coming back in a bit, GPU access might be limited or something is down.")

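Below is a minimal sketch, not the app's actual code, of how the sidebar block changed in the last hunk consumes model_info. Only lines 123-127 appear in the diff; the st.sidebar.selectbox call, the option list, and the sample entries here are assumptions made for illustration.

import streamlit as st

# Illustrative entries in the same shape as the model_info dict in the diff.
# (Assumed subset; the real dict in app.py holds more models.)
model_info = {
    "Mistral-7B": {
        'description': "The Mistral model is a **Large Language Model (LLM)** that's able to have question and answer interactions.",
        'logo': 'https://mistral.ai/images/logo_hubc88c4ece131b91c7cb753f40e9e1cc5_2589_256x0_resize_q97_h2_lanczos_3.webp',
    },
    "Gemma-7B": {
        'description': "The Gemma model is a **Large Language Model (LLM)** that's able to have question and answer interactions.",
        'logo': 'https://pbs.twimg.com/media/GG3sJg7X0AEaNIq.jpg',
    },
}

# Assumed model picker; the diff only shows that selected_model already exists.
selected_model = st.sidebar.selectbox("Select Model", list(model_info.keys()))

st.sidebar.write(f"You're now chatting with **{selected_model}**")
st.sidebar.markdown(model_info[selected_model]['description'])
st.sidebar.image(model_info[selected_model]['logo'])
st.sidebar.markdown("*Generated content may be inaccurate or false.*")
st.sidebar.markdown("\nFor More Contact: scientipix@gmail.com")
st.sidebar.markdown("\nRun into issues? \nTry coming back in a bit, GPU access might be limited or something is down.")

Running this with streamlit run previews the same sidebar layout that the hunk at lines 123-127 produces.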
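A note on the description strings, since the backslashes in the hunks above are easy to misread: inside a triple-quoted string, each \n escape becomes a real newline character, while a backslash at the very end of a source line suppresses the newline that the line break would otherwise insert. A small standalone illustration with hypothetical text (not taken from app.py):

# The trailing backslash joins the two source lines into one string;
# the \n escapes remain in the string as newline characters.
desc = """First sentence.\n \
\nSecond sentence. \n"""
print(repr(desc))  # 'First sentence.\n \nSecond sentence. \n'

This is why each description in model_info reads as a single string even though it spans several source lines, and why st.sidebar.markdown still renders the intended paragraph breaks.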