mrbeliever committed
Commit c48736c · verified · 1 Parent(s): 78ccf9c

Update app.py

Files changed (1):
  app.py +57 -86
app.py CHANGED
@@ -7,86 +7,76 @@ import sys
 from dotenv import load_dotenv, dotenv_values
 load_dotenv()
 
-
-#Comment_test_11_09_2024
-
-
-# initialize the client
+# Comment_test_11_09_2024
+
+# Initialize the client
 client = OpenAI(
-    base_url="https://api-inference.huggingface.co/v1",
-    api_key=os.environ.get('HUGGINGFACEHUB_API_TOKEN')#"hf_xxx" # Replace with your token
-)
-
-
-
-#Create supported models
-model_links ={
-    "Meta-Llama-3-8B":"meta-llama/Meta-Llama-3-8B-Instruct",
-    "Mistral-7B":"mistralai/Mistral-7B-Instruct-v0.2",
-    "Gemma-7B":"google/gemma-1.1-7b-it",
-    "Gemma-2B":"google/gemma-1.1-2b-it",
-    "Zephyr-7B-β":"HuggingFaceH4/zephyr-7b-beta",
-    #"Meta-Llama-3.1-8B":"meta-llama/Meta-Llama-3.1-8B-Instruct", #TODO: Update when/if Serverless Inference available
-
+    base_url="https://api-inference.huggingface.co/v1",
+    api_key=os.environ.get('HUGGINGFACEHUB_API_TOKEN')  # Replace with your token
+)
+
+# Create supported models
+model_links = {
+    "Meta-Llama-3-8B": "meta-llama/Meta-Llama-3-8B-Instruct",
+    "Mistral-7B": "mistralai/Mistral-7B-Instruct-v0.2",
+    "Gemma-7B": "google/gemma-1.1-7b-it",
+    "Gemma-2B": "google/gemma-1.1-2b-it",
+    "Zephyr-7B": "HuggingFaceH4/zephyr-7b-beta",
+    # "Meta-Llama-3.1-8B": "meta-llama/Meta-Llama-3.1-8B-Instruct",  # TODO: Update when/if Serverless Inference available
 }
 
-#Pull info about the model to display
+# Pull info about the model to display
 model_info = {
     "Mistral-7B": {
         'description': """The Mistral model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
-        \nIt was created by the Mistral AI team as has over **7 billion parameters.** \n"""
+        \nIt was created by the Mistral AI team and has over **7 billion parameters.** \n"""
     },
     "Gemma-7B": {
         'description': """The Gemma model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
-        \nIt was created by the Google's AI Team team as has over **7 billion parameters.** \n"""
+        \nIt was created by Google's AI Team and has over **7 billion parameters.** \n"""
     },
     "Gemma-2B": {
         'description': """The Gemma model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
-        \nIt was created by the Google's AI Team team as has over **2 billion parameters.** \n"""
+        \nIt was created by Google's AI Team and has over **2 billion parameters.** \n"""
     },
     "Zephyr-7B": {
         'description': """The Zephyr model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
         \nFrom Huggingface: \n\
         Zephyr is a series of language models that are trained to act as helpful assistants. \
-        Zephyr 7B Gemma\
-        is the third model in the series, and is a fine-tuned version of google/gemma-7b \
-        that was trained on on a mix of publicly available, synthetic datasets using Direct Preference Optimization (DPO)\n"""
+        Zephyr 7B is the third model in the series, and is a fine-tuned version of google/gemma-7b that was trained on a mix of publicly available, synthetic datasets using Direct Preference Optimization (DPO)\n"""
     },
     "Zephyr-7B-β": {
         'description': """The Zephyr model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
         \nFrom Huggingface: \n\
         Zephyr is a series of language models that are trained to act as helpful assistants. \
-        Zephyr-7B-β\
-        is the second model in the series, and is a fine-tuned version of mistralai/Mistral-7B-v0.1 \
-        that was trained on on a mix of publicly available, synthetic datasets using Direct Preference Optimization (DPO)\n"""
+        Zephyr-7B-β is the second model in the series, and is a fine-tuned version of mistralai/Mistral-7B-v0.1 that was trained on a mix of publicly available, synthetic datasets using Direct Preference Optimization (DPO)\n"""
     },
     "Meta-Llama-3-8B": {
         'description': """The Llama (3) model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
-        \nIt was created by the Meta's AI team and has over **8 billion parameters.** \n"""
+        \nIt was created by Meta's AI team and has over **8 billion parameters.** \n"""
     },
     "Meta-Llama-3.1-8B": {
         'description': """The Llama (3.1) model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
-        \nIt was created by the Meta's AI team and has over **8 billion parameters.** \n"""
+        \nIt was created by Meta's AI team and has over **8 billion parameters.** \n"""
     },
 }
 
-
-#Random dog images for error message
-random_dog = ["0f476473-2d8b-415e-b944-483768418a95.jpg",
-              "1bd75c81-f1d7-4e55-9310-a27595fa8762.jpg",
-              "526590d2-8817-4ff0-8c62-fdcba5306d02.jpg",
-              "1326984c-39b0-492c-a773-f120d747a7e2.jpg",
-              "42a98d03-5ed7-4b3b-af89-7c4876cb14c3.jpg",
-              "8b3317ed-2083-42ac-a575-7ae45f9fdc0d.jpg",
-              "ee17f54a-83ac-44a3-8a35-e89ff7153fb4.jpg",
-              "027eef85-ccc1-4a66-8967-5d74f34c8bb4.jpg",
-              "08f5398d-7f89-47da-a5cd-1ed74967dc1f.jpg",
-              "0fd781ff-ec46-4bdc-a4e8-24f18bf07def.jpg",
-              "0fb4aeee-f949-4c7b-a6d8-05bf0736bdd1.jpg",
-              "6edac66e-c0de-4e69-a9d6-b2e6f6f9001b.jpg",
-              "bfb9e165-c643-4993-9b3a-7e73571672a6.jpg"]
-
-
+# Random dog images for error message
+random_dog = [
+    "0f476473-2d8b-415e-b944-483768418a95.jpg",
+    "1bd75c81-f1d7-4e55-9310-a27595fa8762.jpg",
+    "526590d2-8817-4ff0-8c62-fdcba5306d02.jpg",
+    "1326984c-39b0-492c-a773-f120d747a7e2.jpg",
+    "42a98d03-5ed7-4b3b-af89-7c4876cb14c3.jpg",
+    "8b3317ed-2083-42ac-a575-7ae45f9fdc0d.jpg",
+    "ee17f54a-83ac-44a3-8a35-e89ff7153fb4.jpg",
+    "027eef85-ccc1-4a66-8967-5d74f34c8bb4.jpg",
+    "08f5398d-7f89-47da-a5cd-1ed74967dc1f.jpg",
+    "0fd781ff-ec46-4bdc-a4e8-24f18bf07def.jpg",
+    "0fb4aeee-f949-4c7b-a6d8-05bf0736bdd1.jpg",
+    "6edac66e-c0de-4e69-a9d6-b2e6f6f9001b.jpg",
+    "bfb9e165-c643-4993-9b3a-7e73571672a6.jpg"
+]
 
 def reset_conversation():
     '''
@@ -95,69 +85,57 @@ def reset_conversation():
     st.session_state.conversation = []
     st.session_state.messages = []
     return None
-
-
-
 
 # Define the available models
-models =[key for key in model_links.keys()]
+models = [key for key in model_links.keys()]
 
 # Create the sidebar with the dropdown for model selection
 selected_model = st.sidebar.selectbox("Select Model", models)
 
-#Create a temperature slider
+# Create a temperature slider
 temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, (0.5))
 
-
-#Add reset button to clear conversation
-st.sidebar.button('Reset Chat', on_click=reset_conversation) #Reset button
-
+# Add reset button to clear conversation
+st.sidebar.button('Reset Chat', on_click=reset_conversation)  # Reset button
 
 # Create model description
 st.sidebar.write(f"You're now chatting with **{selected_model}**")
 st.sidebar.markdown(model_info[selected_model]['description'])
-st.sidebar.image(model_info[selected_model]['logo'])
+
+# Only display the logo if it exists
+if 'logo' in model_info[selected_model]:
+    st.sidebar.image(model_info[selected_model]['logo'])
+
 st.sidebar.markdown("*Generated content may be inaccurate or false.*")
 st.sidebar.markdown("\nFor More Visit **Womener AI**")
 st.sidebar.markdown("\nRun into issues? \nTry coming back in a bit, GPU access might be limited or something is down.")
 
-
-
-
 if "prev_option" not in st.session_state:
     st.session_state.prev_option = selected_model
 
 if st.session_state.prev_option != selected_model:
     st.session_state.messages = []
-    # st.write(f"Changed to {selected_model}")
     st.session_state.prev_option = selected_model
     reset_conversation()
 
-
-
-#Pull in the model we want to use
+# Pull in the model we want to use
 repo_id = model_links[selected_model]
 
-
 st.subheader(f'AI - {selected_model}')
-# st.title(f'ChatBot Using {selected_model}')
 
 # Set a default model
 if selected_model not in st.session_state:
-    st.session_state[selected_model] = model_links[selected_model]
+    st.session_state[selected_model] = model_links[selected_model]
 
 # Initialize chat history
 if "messages" not in st.session_state:
     st.session_state.messages = []
 
-
 # Display chat messages from history on app rerun
 for message in st.session_state.messages:
     with st.chat_message(message["role"]):
         st.markdown(message["content"])
 
-
-
 # Accept user input
 if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):
@@ -167,7 +145,6 @@ if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):
     # Add user message to chat history
     st.session_state.messages.append({"role": "user", "content": prompt})
 
-
    # Display assistant response in chat message container
     with st.chat_message("assistant"):
 
@@ -178,28 +155,22 @@ if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):
                     {"role": m["role"], "content": m["content"]}
                     for m in st.session_state.messages
                 ],
-                temperature=temp_values,#0.5,
+                temperature=temp_values,  # 0.5,
                 stream=True,
                 max_tokens=3000,
             )
-
+
            response = st.write_stream(stream)
 
         except Exception as e:
-            # st.empty()
-            response = "😵‍💫 Looks like someone unplugged something!\
-            \n Either the model space is being updated or something is down.\
-            \n\
-            \n Try again later. \
-            \n\
-            \n Here's a random pic of a 🐶:"
+            response = "😵‍💫 Looks like someone unplugged something! \
+            \n Either the model space is being updated or something is down. \
+            \n Try again later. \
+            \n Here's a random pic of a 🐶:"
             st.write(response)
-            random_dog_pick = 'https://random.dog/'+ random_dog[np.random.randint(len(random_dog))]
+            random_dog_pick = 'https://random.dog/' + random_dog[np.random.randint(len(random_dog))]
            st.image(random_dog_pick)
             st.write("This was the error message:")
             st.write(e)
-
 
-
-
-        st.session_state.messages.append({"role": "assistant", "content": response})
+    st.session_state.messages.append({"role": "assistant", "content": response})
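
The behavioral fix in this commit is the guarded logo lookup: the old code indexed model_info[selected_model]['logo'] unconditionally, which raises KeyError for any entry without a 'logo' key (none of the entries above define one). Below is a minimal sketch of the same pattern outside Streamlit; the two-entry dict and the "Demo-Model" name are hypothetical, for illustration only.

    # Hypothetical data that mirrors the shape of model_info above.
    model_info = {
        "Mistral-7B": {"description": "A 7B-parameter instruct model."},
        "Demo-Model": {"description": "An entry that has a logo.", "logo": "logo.png"},
    }

    for name, info in model_info.items():
        # The commit's guard: only touch 'logo' when the key exists.
        if 'logo' in info:
            print(f"{name}: render logo {info['logo']}")
        else:
            print(f"{name}: no logo to render")

        # Equivalent idiom: dict.get returns None instead of raising KeyError.
        logo = info.get('logo')
        if logo:
            print(f"{name}: render logo {logo}")

Either form avoids the unhandled KeyError that the old unconditional st.sidebar.image(model_info[selected_model]['logo']) call produced whenever the selected model's entry had no logo.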