Poonawala committed on
Commit
a76c40f
·
verified ·
1 Parent(s): fddcd12

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -26
app.py CHANGED
@@ -8,6 +8,7 @@ client = InferenceClient("meta-llama/Llama-3.2-3B-Instruct")
8
 
9
 
10
  def respond(
 
11
  message,
12
  history: list[tuple[str, str]],
13
  system_message,
@@ -27,17 +28,25 @@ def respond(
27
 
28
  response = ""
29
 
30
- for message in client.chat_completion(
31
- messages,
32
- max_tokens=max_tokens,
33
- stream=True,
34
- temperature=temperature,
35
- top_p=top_p,
36
- ):
37
- token = message.choices[0].delta.content
 
 
 
 
 
 
 
 
 
38
 
39
- response += token
40
- yield response
41
 
42
 
43
  # CSS for styling the interface
@@ -61,27 +70,16 @@ body {
61
  }
62
  """
63
 
64
- # Function to trigger different AI models based on button click
65
- def trigger_model(model_name, message, history, system_message, max_tokens, temperature, top_p):
66
- if model_name == "Llama":
67
- # Here, you can choose the llama model to generate a response
68
- return respond(message, history, system_message, max_tokens, temperature, top_p)
69
- elif model_name == "Chatgpt":
70
- # Placeholder for ChatGPT function (if needed)
71
- return "ChatGPT response goes here."
72
- elif model_name == "Claude":
73
- # Placeholder for Claude function (if needed)
74
- return "Claude response goes here."
75
- else:
76
- return "Model not found."
77
 
78
 
79
- # Define the Gradio interface
80
  demo = gr.Interface(
81
- fn=trigger_model,
82
  inputs=[
83
  gr.Textbox(value="Hello!", label="User Message"),
84
- gr.Textbox(value="System message", label="System Message", visible=False),
85
  gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
86
  gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
87
  gr.Slider(
 
8
 
9
 
10
  def respond(
11
+ model_name,
12
  message,
13
  history: list[tuple[str, str]],
14
  system_message,
 
28
 
29
  response = ""
30
 
31
+ # Model selection based on the button click
32
+ if model_name == "Llama":
33
+ for message in client.chat_completion(
34
+ messages,
35
+ max_tokens=max_tokens,
36
+ stream=True,
37
+ temperature=temperature,
38
+ top_p=top_p,
39
+ ):
40
+ token = message.choices[0].delta.content
41
+ response += token
42
+ elif model_name == "Chatgpt":
43
+ response = "ChatGPT functionality is not yet implemented."
44
+ elif model_name == "Claude":
45
+ response = "Claude functionality is not yet implemented."
46
+ else:
47
+ response = "Model not recognized."
48
 
49
+ return response
 
50
 
51
 
52
  # CSS for styling the interface
 
70
  }
71
  """
72
 
73
+ # Define the Gradio interface with buttons and model selection
74
+ def gradio_interface(model_name, message, history, system_message, max_tokens, temperature, top_p):
75
+ return respond(model_name, message, history, system_message, max_tokens, temperature, top_p)
 
 
 
 
 
 
 
 
 
 
76
 
77
 
 
78
  demo = gr.Interface(
79
+ fn=gradio_interface,
80
  inputs=[
81
  gr.Textbox(value="Hello!", label="User Message"),
82
+ gr.Textbox(value="You are a virtual health assistant. Your primary goal is to assist with health-related queries.", label="System Message", visible=False),
83
  gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
84
  gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
85
  gr.Slider(