lamdao committed on
Commit
1bdcf54
·
verified ·
1 Parent(s): 4f8af54

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +57 -4
app.py CHANGED
@@ -1,9 +1,61 @@
1
  import gradio as gr
 
2
  from huggingface_hub import InferenceClient
3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
  """
5
  For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
- """
7
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
 
9
 
@@ -39,9 +91,9 @@ def respond(
39
  response += token
40
  yield response
41
 
42
- """
43
  For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
44
- """
45
  demo = gr.ChatInterface(
46
  respond,
47
  additional_inputs=[
@@ -60,4 +112,5 @@ demo = gr.ChatInterface(
60
 
61
 
62
  if __name__ == "__main__":
63
- demo.launch()
 
 
1
  import gradio as gr
2
+ import requests
3
  from huggingface_hub import InferenceClient
4
 
5
+ API_TOKEN = "your_huggingface_api_token" # Replace with your actual token
6
+ API_URL = "https://api-inference.huggingface.co/models/InterSync/Mistral-7B-Instruct-v0.2-Function-Calling"
7
+ headers = {"Authorization": f"Bearer {API_TOKEN}"}
8
+
9
+ def get_weather(location, unit="celsius"):
10
+ """Gets the current weather in a given location."""
11
+ # Your code to fetch weather data using an external API (e.g., OpenWeatherMap)
12
+ ...
13
+ return weather_data
14
+
15
+ def query_model(payload):
16
+ response = requests.post(API_URL, headers=headers, json=payload)
17
+ return response.json()
18
+
19
+ with gr.Blocks() as demo:
20
+ gr.Markdown("# Mistral-7B-Instruct Function Calling Demo")
21
+
22
+ with gr.Row():
23
+ with gr.Column(scale=4):
24
+ input_text = gr.Textbox(label="Enter your text", lines=5)
25
+ submit_btn = gr.Button("Submit")
26
+ with gr.Column(scale=6):
27
+ output_text = gr.Textbox(label="Model Output", lines=10)
28
+
29
+ def user(user_message, history):
30
+ return "", history + [[user_message, None]] # Add user message to chat history
31
+
32
+ def bot(history):
33
+ user_message = history[-1][0]
34
+ function_call = {
35
+ "name": "get_weather",
36
+ "arguments": {"location": user_message}
37
+ }
38
+ payload = {
39
+ "inputs": user_message,
40
+ "parameters": {
41
+ "function_call": function_call
42
+ }
43
+ }
44
+ output = query_model(payload)
45
+ bot_response = output[0]['generated_text'] # Assuming API returns text in this format
46
+ history[-1][1] = bot_response # Update the last message in history with bot response
47
+ return history
48
+
49
+ input_text.change(user, [input_text, output_text], [input_text, output_text], queue=False).then(
50
+ bot, [output_text], [output_text]
51
+ )
52
+ submit_btn.click(user, [input_text, output_text], [input_text, output_text], queue=False).then(
53
+ bot, [output_text], [output_text]
54
+ )
55
+
56
+ demo.queue().launch()
57
  """
58
  For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
 
59
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
60
 
61
 
 
91
  response += token
92
  yield response
93
 
94
+
95
  For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
96
+
97
  demo = gr.ChatInterface(
98
  respond,
99
  additional_inputs=[
 
112
 
113
 
114
  if __name__ == "__main__":
115
+ demo.launch()
116
+ """