Update README.md
README.md
Finetuned version of [Mistral-7B-Instruct-v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2).

- **Direct Function Calls**: Mistral 7B Instruct v0.2 now supports structured function calls, allowing for the integration of external APIs and databases directly into the conversational flow. This makes it possible to execute custom searches, retrieve data from the web or specific databases, and even summarize or explain content in depth.

## Usage

### Importing Libraries

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer
```

### Initializing Model and Tokenizer

```python
device = "cuda"

model = AutoModelForCausalLM.from_pretrained("InterSync/Mistral-7B-Instruct-v0.2-Function-Calling")
tokenizer = AutoTokenizer.from_pretrained("InterSync/Mistral-7B-Instruct-v0.2-Function-Calling")
```

### Creating the Text Streamer

```python
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
```

### Defining Tools

```python
tools = [
    {
        "type": "function",
        # ...
                    "format": {
                        "type": "string",
                        "enum": ["celsius", "fahrenheit"],
                        "description": "The temperature unit to use. Infer this from the user's location.",
                    },
                },
                "required": ["location", "format"],
        # ...
                    "format": {
                        "type": "string",
                        "enum": ["celsius", "fahrenheit"],
                        "description": "The temperature unit to use. Infer this from the user's location.",
                    },
                    "num_days": {
                        "type": "integer",
        # ...
            }
    },
]
```
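
The tool schemas above are shown only in part. Written out in the common JSON-schema style for function calling, the full list might look like the sketch below; the function names and arguments match the example output further down, while the exact nesting and description strings are assumptions rather than this repository's verbatim code.

```python
# Sketch of complete tool definitions; descriptions and exact structure are assumed, not verbatim from the repo.
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_current_weather",
            "description": "Get the current weather",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA",
                    },
                    "format": {
                        "type": "string",
                        "enum": ["celsius", "fahrenheit"],
                        "description": "The temperature unit to use. Infer this from the user's location.",
                    },
                },
                "required": ["location", "format"],
            },
        },
    },
    {
        "type": "function",
        "function": {
            "name": "get_n_day_weather_forecast",
            "description": "Get an N-day weather forecast",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA",
                    },
                    "format": {
                        "type": "string",
                        "enum": ["celsius", "fahrenheit"],
                        "description": "The temperature unit to use. Infer this from the user's location.",
                    },
                    "num_days": {
                        "type": "integer",
                        "description": "The number of days to forecast",
                    },
                },
                "required": ["location", "format", "num_days"],
            },
        },
    },
]
```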

### Setting up the Messages

```python
messages = [
    {
        "role": "user",
        "content": (
            "You are Mistral with function-calling supported. You are provided with function signatures within <tools></tools> XML tags. "
            "You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. "
            "Here are the available tools:\n"
            "<tools>\n"
            f"{tools}\n"
            "</tools>\n\n"
            "For each function call, return a JSON object with the function name and arguments within <tool_call></tool_call> XML tags as follows:\n"
            "<tool_call>\n"
            f"{{'arguments': <args-dict>, 'name': <function-name>}}\n"
            "</tool_call>"
        )
    },
    {
        "role": "assistant",
        "content": "How can I help you today?"
    },
    {
        "role": "user",
        "content": "What is the current weather in San Francisco? And can you forecast that in the next 10 days?"
    },
]
```

### Preparing Model Inputs

```python
inputs = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt")
model_inputs = inputs.to(device)
```

### Generating the Response

```python
model.to(device)

generate_ids = model.generate(model_inputs, streamer=streamer, do_sample=True, max_length=4096)
decoded = tokenizer.batch_decode(generate_ids)
```
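
Note that `generate_ids` contains the prompt tokens as well as the completion, so `decoded` includes the full chat template. If you only want the newly generated text, one optional way (not part of the original snippet) is to slice the prompt off before decoding:

```python
# Optional: decode only the tokens generated after the prompt.
new_tokens = generate_ids[:, model_inputs.shape[-1]:]
response = tokenizer.batch_decode(new_tokens, skip_special_tokens=True)[0]
print(response)
```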

### Output

```text
Sure, I can help with that. Here is the current weather in San Francisco: <tool_call>{'arguments': {'location': 'San Francisco, CA', 'format': 'celsius'}, 'name': 'get_current_weather'}</tool_call> And here is the weather forecast for the next 10 days in San Francisco: <tool_call>{'arguments': {'location': 'San Francisco, CA', 'format': 'celsius', 'num_days': 10}, 'name': 'get_n_day_weather_forecast'}</tool_call>
```
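
To act on a response like this, the `<tool_call>` payloads can be extracted from the decoded text and routed to real implementations. The following is a minimal sketch, not part of this repository: the regex extraction, the use of `ast.literal_eval` (the payload is a Python-style dict with single quotes, not strict JSON), and the two stub functions are illustrative assumptions.

```python
import ast
import re

def get_current_weather(location, format):
    # Hypothetical stub; replace with a real weather lookup.
    return {"location": location, "temperature": 18, "unit": format}

def get_n_day_weather_forecast(location, format, num_days):
    # Hypothetical stub; replace with a real forecast lookup.
    return [{"day": i + 1, "temperature": 18, "unit": format} for i in range(num_days)]

available_functions = {
    "get_current_weather": get_current_weather,
    "get_n_day_weather_forecast": get_n_day_weather_forecast,
}

# Pull every <tool_call>...</tool_call> payload out of the decoded generation and dispatch it.
for payload in re.findall(r"<tool_call>(.*?)</tool_call>", decoded[0], re.DOTALL):
    call = ast.literal_eval(payload.strip())  # parse the single-quoted dict as a Python literal
    result = available_functions[call["name"]](**call["arguments"])
    print(call["name"], "->", result)
```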

## Quantization Models
- Updating