Wedyan2023 committed · verified
Commit c2231bb · 1 Parent(s): 31c06a9

Update app.py

Files changed (1): app.py +29 -143
app.py CHANGED
@@ -7,164 +7,60 @@ import numpy as np
 import streamlit as st
 from openai import OpenAI
 import os
-import sys
-from dotenv import load_dotenv, dotenv_values
-load_dotenv()
-
-
-
-# initialize the client
+from dotenv import load_dotenv
+
+load_dotenv()
+
+# Initialize the client
 client = OpenAI(
-    base_url="https://api-inference.huggingface.co/v1",
-    api_key=os.environ.get('HUGGINGFACEHUB_API_TOKEN')#"hf_xxx" # Replace with your token
-)
-
-
-
-
-#Create supported models
-model_links ={
-    "Meta-Llama-3-8B":"meta-llama/Meta-Llama-3-8B-Instruct",
-    "Mistral-7B":"mistralai/Mistral-7B-Instruct-v0.2",
-    "Gemma-7B":"google/gemma-1.1-7b-it",
-    "Gemma-2B":"google/gemma-1.1-2b-it",
-    "Zephyr-7B-β":"HuggingFaceH4/zephyr-7b-beta",
-    #"Meta-Llama-3.1-8B":"meta-llama/Meta-Llama-3.1-8B-Instruct", #TODO: Update when/if Serverless Inference available
-
+    base_url="https://api-inference.huggingface.co/v1",
+    api_key=os.environ.get('HUGGINGFACEHUB_API_TOKEN')  # Replace with your token
+)
+
+# Define Llama 3 model
+model_link = "meta-llama/Meta-Llama-3-8B-Instruct"
+model_info = {
+    'description': """The Llama (3) model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n
+    It was created by the [**Meta's AI**](https://llama.meta.com/) team and has over **8 billion parameters.** \n""",
+    'logo': 'Llama_logo.png'
 }
 
-#Pull info about the model to display
-model_info ={
-    "Mistral-7B":
-        {'description':"""The Mistral model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
-        \nIt was created by the [**Mistral AI**](https://mistral.ai/news/announcing-mistral-7b/) team as has over **7 billion parameters.** \n""",
-        'logo':'https://mistral.ai/images/logo_hubc88c4ece131b91c7cb753f40e9e1cc5_2589_256x0_resize_q97_h2_lanczos_3.webp'},
-    "Gemma-7B":
-        {'description':"""The Gemma model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
-        \nIt was created by the [**Google's AI Team**](https://blog.google/technology/developers/gemma-open-models/) team as has over **7 billion parameters.** \n""",
-        'logo':'https://pbs.twimg.com/media/GG3sJg7X0AEaNIq.jpg'},
-    "Gemma-2B":
-        {'description':"""The Gemma model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
-        \nIt was created by the [**Google's AI Team**](https://blog.google/technology/developers/gemma-open-models/) team as has over **2 billion parameters.** \n""",
-        'logo':'https://pbs.twimg.com/media/GG3sJg7X0AEaNIq.jpg'},
-    "Zephyr-7B":
-        {'description':"""The Zephyr model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
-        \nFrom Huggingface: \n\
-        Zephyr is a series of language models that are trained to act as helpful assistants. \
-        [Zephyr 7B Gemma](https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1)\
-        is the third model in the series, and is a fine-tuned version of google/gemma-7b \
-        that was trained on on a mix of publicly available, synthetic datasets using Direct Preference Optimization (DPO)\n""",
-        'logo':'https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1/resolve/main/thumbnail.png'},
-    "Zephyr-7B-β":
-        {'description':"""The Zephyr model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
-        \nFrom Huggingface: \n\
-        Zephyr is a series of language models that are trained to act as helpful assistants. \
-        [Zephyr-7B-β](https://huggingface.co/HuggingFaceH4/zephyr-7b-beta)\
-        is the second model in the series, and is a fine-tuned version of mistralai/Mistral-7B-v0.1 \
-        that was trained on on a mix of publicly available, synthetic datasets using Direct Preference Optimization (DPO)\n""",
-        'logo':'https://huggingface.co/HuggingFaceH4/zephyr-7b-alpha/resolve/main/thumbnail.png'},
-    "Meta-Llama-3-8B":
-        {'description':"""The Llama (3) model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
-        \nIt was created by the [**Meta's AI**](https://llama.meta.com/) team and has over **8 billion parameters.** \n""",
-        'logo':'Llama_logo.png'},
-    "Meta-Llama-3.1-8B":
-        {'description':"""The Llama (3.1) model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
-        \nIt was created by the [**Meta's AI**](https://llama.meta.com/) team and has over **8 billion parameters.** \n""",
-        'logo':'Llama3_1_logo.png'},
-}
-
-
-#Random dog images for error message
+# Random dog images for error message
 random_dog = ["0f476473-2d8b-415e-b944-483768418a95.jpg",
               "1bd75c81-f1d7-4e55-9310-a27595fa8762.jpg",
               "526590d2-8817-4ff0-8c62-fdcba5306d02.jpg",
-              "1326984c-39b0-492c-a773-f120d747a7e2.jpg",
-              "42a98d03-5ed7-4b3b-af89-7c4876cb14c3.jpg",
-              "8b3317ed-2083-42ac-a575-7ae45f9fdc0d.jpg",
-              "ee17f54a-83ac-44a3-8a35-e89ff7153fb4.jpg",
-              "027eef85-ccc1-4a66-8967-5d74f34c8bb4.jpg",
-              "08f5398d-7f89-47da-a5cd-1ed74967dc1f.jpg",
-              "0fd781ff-ec46-4bdc-a4e8-24f18bf07def.jpg",
-              "0fb4aeee-f949-4c7b-a6d8-05bf0736bdd1.jpg",
-              "6edac66e-c0de-4e69-a9d6-b2e6f6f9001b.jpg",
-              "bfb9e165-c643-4993-9b3a-7e73571672a6.jpg"]
-
-
+              "1326984c-39b0-492c-a773-f120d747a7e2.jpg"]
 
 def reset_conversation():
-    '''
-    Resets Conversation
-    '''
+    '''Resets Conversation'''
     st.session_state.conversation = []
     st.session_state.messages = []
     return None
-
-
-
-
-# Define the available models
-models =[key for key in model_links.keys()]
-
-# Create the sidebar with the dropdown for model selection
-selected_model = st.sidebar.selectbox("Select Model", models)
 
-#Create a temperature slider
+# Create a temperature slider
 temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, (0.5))
 
-
-#Add reset button to clear conversation
-st.sidebar.button('Reset Chat', on_click=reset_conversation) #Reset button
-
+# Add reset button to clear conversation
+st.sidebar.button('Reset Chat', on_click=reset_conversation)  # Reset button
 
 # Create model description
-st.sidebar.write(f"You're now chatting with **{selected_model}**")
-st.sidebar.markdown(model_info[selected_model]['description'])
-st.sidebar.image(model_info[selected_model]['logo'])
+st.sidebar.write(f"You're now chatting with **Llama 3**")
+st.sidebar.markdown(model_info['description'])
+st.sidebar.image(model_info['logo'])
 st.sidebar.markdown("*Generated content may be inaccurate or false.*")
-st.sidebar.markdown("\nLearn how to build this chatbot [here](https://ngebodh.github.io/projects/2024-03-05/).")
-st.sidebar.markdown("\nRun into issues? \nTry coming back in a bit, GPU access might be limited or something is down.")
-
-
-
-
-if "prev_option" not in st.session_state:
-    st.session_state.prev_option = selected_model
-
-if st.session_state.prev_option != selected_model:
-    st.session_state.messages = []
-    # st.write(f"Changed to {selected_model}")
-    st.session_state.prev_option = selected_model
-    reset_conversation()
-
-
-
-#Pull in the model we want to use
-repo_id = model_links[selected_model]
-
-
-st.subheader(f'AI - {selected_model}')
-# st.title(f'ChatBot Using {selected_model}')
-
-# Set a default model
-if selected_model not in st.session_state:
-    st.session_state[selected_model] = model_links[selected_model]
+st.sidebar.markdown("\nRun into issues? \nTry again later as GPU access might be limited.")
 
 # Initialize chat history
 if "messages" not in st.session_state:
     st.session_state.messages = []
 
-
 # Display chat messages from history on app rerun
 for message in st.session_state.messages:
     with st.chat_message(message["role"]):
         st.markdown(message["content"])
 
-
-
 # Accept user input
-if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):
+if prompt := st.chat_input(f"Hi, I'm Llama 3, ask me a question"):
 
     # Display user message in chat message container
     with st.chat_message("user"):
@@ -172,18 +68,17 @@ if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):
     # Add user message to chat history
     st.session_state.messages.append({"role": "user", "content": prompt})
 
-
     # Display assistant response in chat message container
    with st.chat_message("assistant"):
 
         try:
             stream = client.chat.completions.create(
-                model=model_links[selected_model],
+                model=model_link,
                 messages=[
                    {"role": m["role"], "content": m["content"]}
                    for m in st.session_state.messages
                ],
-                temperature=temp_values,#0.5,
+                temperature=temp_values,
                stream=True,
                max_tokens=3000,
            )
@@ -191,20 +86,11 @@ if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):
             response = st.write_stream(stream)
 
        except Exception as e:
-            # st.empty()
-            response = "😵‍💫 Looks like someone unplugged something!\
-                        \n Either the model space is being updated or something is down.\
-                        \n\
-                        \n Try again later. \
-                        \n\
-                        \n Here's a random pic of a 🐶:"
+            response = "😵‍💫 Looks like something went wrong! Try again later.\nHere's a random pic of a 🐶:"
             st.write(response)
-            random_dog_pick = 'https://random.dog/'+ random_dog[np.random.randint(len(random_dog))]
+            random_dog_pick = 'https://random.dog/' + random_dog[np.random.randint(len(random_dog))]
             st.image(random_dog_pick)
             st.write("This was the error message:")
            st.write(e)
 
-
-
-
-    st.session_state.messages.append({"role": "assistant", "content": response})
+    st.session_state.messages.append({"role": "assistant", "content": response})
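
For reference, the client pattern the updated app.py relies on can be exercised outside Streamlit. The snippet below is an illustrative sketch, not part of the commit: it assumes a valid HUGGINGFACEHUB_API_TOKEN in the environment and reuses the OpenAI-compatible Hugging Face endpoint and model ID that the diff hard-codes.

import os
from openai import OpenAI

# Same OpenAI-compatible Hugging Face endpoint the app uses (sketch, not from the commit)
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1",
    api_key=os.environ["HUGGINGFACEHUB_API_TOKEN"],
)

# Stream one completion from the model the app now hard-codes
stream = client.chat.completions.create(
    model="meta-llama/Meta-Llama-3-8B-Instruct",
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    temperature=0.5,
    max_tokens=64,
    stream=True,
)

# Print tokens as they arrive; delta.content can be None on some chunks
for chunk in stream:
    delta = chunk.choices[0].delta.content
    if delta:
        print(delta, end="", flush=True)
print()

In the app itself, st.write_stream(stream) consumes the same iterator and returns the concatenated text, which is why the assistant response can be appended to st.session_state.messages afterwards.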