Update README.md
README.md (changed)
@@ -70,8 +70,87 @@ print(result)

## Feature: Visual Instruction Following

+```python
+
+```
+
+
## Feature: Function Calling

+```python
+import json
+
+functions = [
+    {
+        "name": "get_current_weather",
+        "description": "Get the current weather in a given location",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "location": {
+                    "type": "string",
+                    "description": "The city and state, e.g. San Francisco, CA"
+                },
+                "unit": {
+                    "type": "string",
+                    "enum": ["celsius", "fahrenheit"]
+                }
+            },
+            "required": ["location"]
+        }
+    }
+]
+
+def fake_get_current_weather(location, unit=None):
+    return {'temperature': 30}
+
+mapping = {
+    'get_current_weather': fake_get_current_weather
+}
+
+# stage 1: query
+conversations = [
+    {"role": "user", "content": "請問台北目前溫度是攝氏幾度?"},
+]
+
+prompt = prompt_engine.get_prompt(conversations, functions=functions)
+
+output_str = _inference(prompt, tokenizer, model, generation_config)
+result = prompt_engine.parse_generated_str(output_str)
+
+print(result)
+# {'role': 'assistant', 'tool_calls': [{'id': 'call_0bcY2wePCVTg14Q6Xor93fHz', 'type': 'function', 'function': {'name': 'get_current_weather', 'arguments': '{"location": "台北", "unit": "celsius"}'}}]}
+```
+
+
+```python
+# stage 2: execute called functions
+conversations.append(result)
+
+tool_call = result['tool_calls'][0]
+func_name = tool_call['function']['name']
+func = mapping[func_name]
+arguments = json.loads(tool_call['function']['arguments'])
+called_result = func(**arguments)
+
+# stage 3: put executed results
+conversations.append(
+    {
+        'role': 'tool',
+        'tool_call_id': tool_call['id'],
+        'name': func_name,
+        'content': json.dumps(called_result)
+    }
+)
+
+prompt = prompt_engine.get_prompt(conversations, functions=functions)
+
+output_str2 = _inference(prompt, tokenizer, model, generation_config)
+result2 = prompt_engine.parse_generated_str(output_str2)
+print(result2)
+# {'role': 'assistant', 'content': '台北目前的溫度是攝氏30度。'}
+```
+
## Citation

```
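
Note: the added snippets reuse objects introduced earlier in the README, above this hunk: `prompt_engine`, `tokenizer`, `model`, `generation_config`, and the `_inference` helper. For readers landing on this diff without that context, the following is a minimal sketch of how such a helper is commonly wired up with Hugging Face `transformers`. It is an assumption, not this repository's actual code: the checkpoint name and generation settings are placeholders, and `prompt_engine` is taken as already constructed per the earlier README sections.

```python
# Hedged sketch (assumption): one common way to define the helpers that the
# README snippets above rely on, using Hugging Face transformers. The
# checkpoint name is a placeholder, not the repository's actual value.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

model_id = "<your-model-checkpoint>"  # placeholder
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, device_map="auto"
)
generation_config = GenerationConfig(max_new_tokens=512, do_sample=False)

def _inference(prompt, tokenizer, model, generation_config):
    # Tokenize the prompt, generate a continuation, and return only the
    # newly generated text (the prompt prefix is stripped before decoding).
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    output_ids = model.generate(**inputs, generation_config=generation_config)
    new_tokens = output_ids[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)
```

The two added blocks follow the usual tool-calling loop visible in the diff itself: the assistant's first reply carries `tool_calls`, the caller executes the named function, appends a `tool` message with the matching `tool_call_id` and a JSON-encoded result, and re-prompts to obtain the final natural-language answer.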