Elieon committed
Commit 5dcab9b · verified · 1 Parent(s): 8f370b1

Update app.py

Files changed (1)
  1. app.py +16 -3
app.py CHANGED
@@ -1,23 +1,34 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
+from dotenv import load_dotenv
+import os
 
-client = InferenceClient(model="HuggingFaceH4/zephyr-7b-beta")
+# Load environment variables from .env file
+load_dotenv()
+
+# Get the system message from environment variables
+system_message = os.getenv("SYSTEM_MESSAGE")
 
-system_message = "You are MUSK-1, developed by a 14 year old AI engineer, Arjun Singh at Elieon."
+client = InferenceClient(model="HuggingFaceH4/zephyr-7b-beta")
 
 def respond(message, history, max_tokens, temperature, top_p):
+    # Prepare the initial message list with the system message
     messages = [{"role": "system", "content": system_message}]
 
+    # Add the conversation history to the messages list
     for user_msg, assistant_msg in history:
         if user_msg:
             messages.append({"role": "user", "content": user_msg})
         if assistant_msg:
             messages.append({"role": "assistant", "content": assistant_msg})
 
+    # Add the latest user message to the messages list
     messages.append({"role": "user", "content": message})
 
+    # Initialize an empty response string
     response = ""
 
+    # Generate the response using the Hugging Face InferenceClient
    for message in client.chat_completion(
         messages,
         max_tokens=max_tokens,
@@ -29,6 +40,7 @@ def respond(message, history, max_tokens, temperature, top_p):
         response += token
         yield response
 
+# Define the Gradio interface
 demo = gr.ChatInterface(
     respond,
     additional_inputs=[
@@ -38,5 +50,6 @@ demo = gr.ChatInterface(
     ]
 )
 
+# Launch the Gradio app
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
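
Note on the change: the commit moves the hard-coded MUSK-1 persona out of app.py and into an environment variable loaded with python-dotenv. For local runs this implies a .env file kept out of the repo, along the lines of the sketch below; the variable name SYSTEM_MESSAGE comes from the diff, while the value shown is only illustrative (here, the previously hard-coded prompt). On a hosted Hugging Face Space the same variable would more typically be set as a Space secret.

# .env (not committed) -- read by load_dotenv() at startup
SYSTEM_MESSAGE="You are MUSK-1, developed by a 14 year old AI engineer, Arjun Singh at Elieon."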
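
The hunks elide the rest of the chat_completion call and the contents of additional_inputs. As a rough guide to how the full updated app.py plausibly fits together, here is a self-contained sketch based on the standard InferenceClient/ChatInterface streaming pattern; the stream=True handling, the delta extraction, and the slider ranges and labels are assumptions, not the committed code.

# Hypothetical reconstruction for illustration only -- the elided hunks may differ.
import os

import gradio as gr
from dotenv import load_dotenv
from huggingface_hub import InferenceClient

load_dotenv()
system_message = os.getenv("SYSTEM_MESSAGE")
client = InferenceClient(model="HuggingFaceH4/zephyr-7b-beta")


def respond(message, history, max_tokens, temperature, top_p):
    # System prompt first, then the prior turns, then the new user message.
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    response = ""
    # Stream partial completions and yield the accumulated text so the UI updates live.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        stream=True,
    ):
        token = chunk.choices[0].delta.content or ""
        response += token
        yield response


# Slider values below are placeholders; the committed additional_inputs are not shown in the hunks.
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p"),
    ],
)

if __name__ == "__main__":
    demo.launch()

Without stream=True, chat_completion returns a single ChatCompletionOutput rather than an iterator of chunks, so the token-accumulating loop visible in the diff strongly suggests streaming is enabled in the elided lines.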