RickyIG commited on
Commit
7ba8c26
·
1 Parent(s): 7d674d6

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +402 -0
app.py ADDED
@@ -0,0 +1,402 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import openai
import sys
import re

import gradio as gr
from IPython import get_ipython
import json
import requests
from tenacity import retry, wait_random_exponential, stop_after_attempt
# from termcolor import colored # doesn't actually work in Colab
import ast

# Default model identifier (individual calls still receive the model explicitly).
GPT_MODEL = "gpt-3.5-turbo-1106"

# Resolve the OpenAI API key.  `google.colab.userdata` only exists inside a
# Colab runtime; fall back to the OPENAI_API_KEY environment variable so this
# file also runs as a plain app.py (e.g. on Hugging Face Spaces).
try:
    from google.colab import userdata
    openai.api_key = userdata.get('OPENAI_API_KEY')
except ImportError:
    openai.api_key = os.environ.get('OPENAI_API_KEY')
20
def exec_python(cell):
    """Execute a string of Python code and return the last value it binds.

    Parameters
    ----------
    cell : str
        Valid Python source to execute.

    Returns
    -------
    The value of the last local variable assigned by the executed code, or
    the sentinel string "tidakada" (Indonesian for "there is none") when
    execution raises or binds no locals.  Callers (second_prompt_build)
    rely on that exact sentinel value.

    SECURITY NOTE: exec() on model-generated code is inherently unsafe —
    run this only in a sandboxed environment.
    """
    print(cell)
    local_namespace = {}
    try:
        # Share module globals so generated code can use the file's imports.
        exec(cell, globals(), local_namespace)
    except Exception:
        # Any execution failure maps to the "no answer" sentinel.
        return "tidakada"
    print(local_namespace)
    if not local_namespace:
        return "tidakada"
    # dicts preserve insertion order, so this is the last variable assigned.
    return list(local_namespace.values())[-1]
45
+
46
# Now let's define the function specification:
# OpenAI function-calling schema advertising exec_python to the model.
# The model may reply with a function_call naming "exec_python" and a JSON
# arguments object containing a single "cell" string of Python code.
functions = [
    {
        "name": "exec_python",
        "description": "run python code and return the execution result.",
        "parameters": {
            "type": "object",
            "properties": {
                "cell": {
                    "type": "string",
                    "description": "Valid Python code to execute.",
                }
            },
            "required": ["cell"],
        },
    },
]
63
+
64
# In order to run these functions automatically, we should maintain a dictionary:
# Maps the function name the model returns in its function_call to the local
# Python callable that implements it (see function_call_process).
functions_dict = {
    "exec_python": exec_python,
}
68
+
69
def openai_api_calculate_cost(usage, model):
    """Compute the USD cost of one API call from its token usage.

    Parameters
    ----------
    usage : dict
        The 'usage' object from an OpenAI chat-completion response; must
        contain 'prompt_tokens', 'completion_tokens' and 'total_tokens'.
    model : str
        Model identifier used for the call.

    Returns
    -------
    float
        Total cost in USD (prompt cost + completion cost).

    Raises
    ------
    ValueError
        If *model* has no pricing entry.
    """
    # Price per 1K tokens in USD (rates as hard-coded by the original author).
    pricing = {
        'gpt-3.5-turbo-1106': {'prompt': 0.001, 'completion': 0.002},
        'gpt-4-1106-preview': {'prompt': 0.01, 'completion': 0.03},
        'gpt-4': {'prompt': 0.03, 'completion': 0.06},
    }

    try:
        model_pricing = pricing[model]
    except KeyError:
        # `from None` hides the raw KeyError chain from callers.
        raise ValueError("Invalid model specified") from None

    prompt_cost = usage['prompt_tokens'] * model_pricing['prompt'] / 1000
    completion_cost = usage['completion_tokens'] * model_pricing['completion'] / 1000

    total_cost = prompt_cost + completion_cost
    print(f"\nTokens used: {usage['prompt_tokens']:,} prompt + {usage['completion_tokens']:,} completion = {usage['total_tokens']:,} tokens")
    print(f"Total cost for {model}: ${total_cost:.4f}\n")

    return total_cost
114
+
115
+
116
@retry(wait=wait_random_exponential(min=1, max=40), stop=stop_after_attempt(3))
def chat_completion_request(messages, model, functions=None, function_call=None, temperature=0.2, top_p=0.1):
    """
    Send a POST request to the OpenAI API to generate a chat completion.

    Parameters:
    - messages (list): A list of message objects. Each object should have a 'role'
      (either 'system', 'user', or 'assistant') and 'content' (the content of the message).
    - model (str): The ID of the model to use.
    - functions (list, optional): Function objects describing the functions the model can call.
    - function_call (str or dict, optional): 'auto' / 'none', or a dict naming a specific function.
    - temperature, top_p (float): Sampling parameters forwarded verbatim.

    Returns:
    - response (requests.Response): The raw API response; its JSON contains the
      chat completion on success.

    Raises:
    - Any requests exception, re-raised so the @retry decorator can retry up
      to 3 times with exponential backoff.
    """
    # Set up the headers for the API request
    headers = {
        "Content-Type": "application/json",
        "Authorization": "Bearer " + openai.api_key,
    }

    # Set up the data for the API request
    json_data = {"model": model, "messages": messages, "temperature": temperature, "top_p": top_p}

    # If functions were provided, add them to the data
    if functions is not None:
        json_data.update({"functions": functions})

    # If a function call was specified, add it to the data
    if function_call is not None:
        json_data.update({"function_call": function_call})

    print(json_data)

    # Send the API request
    try:
        response = requests.post(
            "https://api.openai.com/v1/chat/completions",
            headers=headers,
            json=json_data,
        )
        return response
    except Exception as e:
        print("Unable to generate ChatCompletion response")
        print(f"Exception: {e}")
        # Re-raise instead of returning the exception object: the original
        # `return e` defeated @retry and made callers crash later on .json().
        raise
164
+
165
def first_call(init_prompt, user_input, input_temperature, input_top_p, model_dropdown_1):
    """Run the first chat-completion round of a fresh conversation.

    Builds a two-message conversation (system prompt + user input), calls
    the API with the exec_python function schema available and
    function_call='auto'.

    Returns:
        (assistant_message, cost1, messages, finish_reason) where
        `messages` already includes the assistant's reply.
    """
    # Set up a conversation: system instructions followed by the user task.
    messages = []
    messages.append({"role": "system", "content": init_prompt})
    messages.append({"role": "user", "content": user_input})

    # Generate a response
    chat_response = chat_completion_request(
        messages, model_dropdown_1, functions=functions, function_call='auto',
        temperature=float(input_temperature), top_p=float(input_top_p)
    )

    # Parse the response body once instead of re-parsing .json() per field.
    body = chat_response.json()
    assistant_message = body["choices"][0]["message"]

    # Append response to conversation
    messages.append(assistant_message)

    cost1 = openai_api_calculate_cost(body['usage'], model_dropdown_1)

    finish_response_status = body["choices"][0]["finish_reason"]
    return assistant_message, cost1, messages, finish_response_status
192
+
193
def is_valid_dict_string(s):
    """Return True when *s* parses as a Python literal, False otherwise.

    Note: despite the name, any valid literal (number, list, string, ...)
    passes — the check is not restricted to dicts.
    """
    try:
        ast.literal_eval(s)
    except (SyntaxError, ValueError):
        return False
    return True
199
+
200
def function_call_process(assistant_message):
    """Execute the function requested by an assistant message.

    Parameters
    ----------
    assistant_message : dict
        An assistant message that may carry a "function_call" with a
        function "name" and an "arguments" string.

    Returns
    -------
    The result of the dispatched function (see functions_dict), or None
    when the message contains no function_call.
    """
    if assistant_message.get("function_call") is None:
        return None

    # Retrieve the name of the relevant function
    function_name = assistant_message["function_call"]["name"]
    args_str = assistant_message["function_call"]["arguments"]

    # The model usually sends a JSON object like {"cell": "..."} but
    # sometimes emits raw code instead; detect which one we received.
    if is_valid_dict_string(args_str):
        try:
            arg_dict = json.loads(args_str, strict=False)
        except json.JSONDecodeError:
            # Parsable as a Python literal but not JSON (e.g. single-quoted
            # dict) — previously this path crashed json.loads.
            arg_dict = ast.literal_eval(args_str)
        arg_dict = arg_dict['cell']
        print("arg_dict : " + arg_dict)
    else:
        arg_dict = args_str
        print(arg_dict)

    # Look up the function and call it with the provided arguments
    return functions_dict[function_name](arg_dict)
228
+
229
def custom_format(match):
    """re.sub callback: keep {ans} as a live placeholder, escape the rest.

    "{ans}" is returned unchanged so str.format can fill it later; every
    other "{name}" is doubled to "{{name}}" so str.format leaves it alone.
    """
    inner = match.group(1)
    if inner == "ans":
        return "{" + inner + "}"
    return "{{" + inner + "}}"
232
+
233
def second_prompt_build(prompt, log, prompt_cadangan):
    """Build the prompt for the second model call.

    Escapes every "{...}" placeholder in *prompt* except "{ans}", then
    fills "{ans}" with the execution result *log*.  When execution failed
    (the "tidakada" sentinel from exec_python): fill "{ans}" with the
    fallback text *prompt_cadangan* if the placeholder exists, otherwise
    return *prompt_cadangan* verbatim.
    """
    # Inline escape rule: {ans} survives as a format placeholder, every
    # other brace group is doubled so .format() ignores it.
    escape_all_but_ans = lambda m: (
        "{" + m.group(1) + "}" if m.group(1) == "ans" else "{{" + m.group(1) + "}}"
    )
    escaped = re.compile(r'\{([^}]*)\}').sub(escape_all_but_ans, prompt)

    if log != "tidakada":
        return escaped.format(ans=log)
    if "{ans}" in prompt:
        return escaped.format(ans=prompt_cadangan)
    return prompt_cadangan
244
+
245
+ # def second_prompt_build(prompt, log, prompt_cadangan):
246
+ # if log == "tidakada":
247
+ # if "{ans}" in prompt:
248
+ # prompt_second = prompt.format(ans = prompt_cadangan)
249
+ # else:
250
+ # prompt_second = prompt_cadangan
251
+ # else:
252
+ # prompt_second = prompt.format(ans = log)
253
+ # # prompt_second = prompt % log
254
+ # return prompt_second
255
+
256
def second_call(prompt, prompt_second, messages, input_temperature_2, input_top_p_2, model_dropdown_2, function_name = "exec_python"):
    """Send the function result back to the model and fetch its reply.

    Appends a role="function" message containing *prompt_second* to
    *messages* (mutated in place), calls the API again, and returns
    (assistant_message, cost2, messages, finish_reason).

    Note: *prompt* is accepted for interface compatibility but is unused
    in the body.
    """
    # Add a new message to the conversation with the function result
    messages.append({
        "role": "function",
        "name": function_name,
        "content": str(prompt_second),  # API requires string content
    })

    # Call the model again to generate a user-facing message based on the function result
    chat_response = chat_completion_request(
        messages, model_dropdown_2, functions=functions,
        temperature=float(input_temperature_2), top_p=float(input_top_p_2)
    )

    # Parse the body once (previously .json() was re-parsed for every field).
    body = chat_response.json()
    print("second call : " + str(body))
    assistant_message = body["choices"][0]["message"]
    messages.append(assistant_message)

    cost2 = openai_api_calculate_cost(body['usage'], model_dropdown_2)

    finish_response_status_2 = body["choices"][0]["finish_reason"]
    print("finish_response_status_2 : " + finish_response_status_2)
    return assistant_message, cost2, messages, finish_response_status_2
280
+
281
def format_math_in_sentence(sentence):
    """Wrap LaTeX-style commands in *sentence* with $...$ for Markdown.

    Finds tokens like \\sqrt{2} or \\frac{a}{b} and surrounds each
    occurrence with dollar signs so Markdown renderers treat them as
    inline math.
    """
    # Commands of the form \word{...}, plus an explicit \frac{..}{..} branch.
    latex_token = re.compile(r'\\[a-zA-Z]+\{[^\}]+\}|\\frac\{[^\}]+\}\{[^\}]+\}')

    # Replace every found token in place with its $-wrapped form.
    for token in latex_token.findall(sentence):
        sentence = sentence.replace(token, f"${token}$")

    return sentence
294
+
295
def format_mathjax_equation(input_str):
    """Convert \\(..\\) / \\[..\\] math delimiters to $ / $$ for Markdown.

    Inline \\( x \\) becomes $x$ and display \\[ x \\] becomes a $$-fenced
    block.  The trailing frac/sqrt/sum/int substitutions rewrite each
    command to itself, so they leave the text unchanged (kept for exact
    behavioral parity with the original).
    """
    # \( ... \)  ->  $...$   (strip the two-character delimiters)
    result = re.sub(r'\\\(.*?\\\)', lambda m: "$" + m.group(0)[2:-2] + "$", input_str)

    # \[ ... \]  ->  $$\n...\n$$
    result = re.sub(r'\\\[.*?\\\]', lambda m: "$$\n" + m.group(0)[2:-2] + "\n$$", result)

    # Identity rewrites (pattern == replacement); currently no-ops.
    for pattern, repl in (
        (r'\\frac\{(.*?)\}\{(.*?)\}', r'\\frac{\1}{\2}'),
        (r'\\sqrt\{(.*?)\}', r'\\sqrt{\1}'),
        (r'\\sum\{(.*?)\}', r'\\sum{\1}'),
        (r'\\int\{(.*?)\}', r'\\int{\1}'),
    ):
        result = re.sub(pattern, repl, result)

    return result
312
+
313
def main_function(init_prompt, prompt, prompt_cadangan, user_input,input_temperature_1, input_top_p_1, input_temperature_2, input_top_p_2, model_dropdown_1, model_dropdown_2):
    """Orchestrate the full tutor pipeline behind the Gradio UI.

    Flow: first_call -> (optionally) execute the model's function_call via
    function_call_process -> build a second prompt from the execution
    result -> second_call, looping until the model finishes with 'stop'.
    Returns the 12-tuple consumed by the Gradio outputs list.
    """
    first_call_result, cost1, messages, finish_response_status = first_call(init_prompt, user_input, input_temperature_1, input_top_p_1, model_dropdown_1)
    print("finish_response_status "+finish_response_status)
    print(messages)
    cost_list=[]
    if finish_response_status == 'stop':
        # Model answered directly without requesting a function call.
        # "Tidak dipanggil" is Indonesian for "not called".
        function_call_process_result = "Tidak dipanggil"
        second_prompt_build_result = "Tidak dipanggil"
        second_call_result = {'status':'Tidak dipanggil'}
        cost2 = 0
        finalmessages = messages
        finalcostresult = cost1
        cost_list.append(cost1)
    else:
        # Execute the requested function, feed the result back, and keep
        # looping until the model stops requesting further calls.
        function_call_process_result = function_call_process(first_call_result)
        second_prompt_build_result = second_prompt_build(prompt, function_call_process_result, prompt_cadangan)
        second_call_result, cost2, finalmessages, finish_response_status_2 = second_call(function_call_process_result, second_prompt_build_result, messages, input_temperature_2, input_top_p_2, model_dropdown_2)
        cost_list.append(cost1)
        cost_list.append(cost2)
        finalcostresult = cost1 + cost2
        while finish_response_status_2 != 'stop':
            function_call_process_result = function_call_process(second_call_result)
            second_prompt_build_result = second_prompt_build(prompt, function_call_process_result, prompt_cadangan)
            second_call_result, cost2, finalmessages, finish_response_status_2 = second_call(function_call_process_result, second_prompt_build_result, messages, input_temperature_2, input_top_p_2, model_dropdown_2)
            finalcostresult += cost2
            cost_list.append(cost2)
    # The 15000 factor is presumably a USD -> IDR exchange rate — TODO confirm.
    finalcostrpresult = finalcostresult * 15000
    cost_dict = {f"Cost {i+1}": price for i, price in enumerate(cost_list)}
    cost_dict_rp = {f"Cost {i+1}": price*15000 for i, price in enumerate(cost_list)}
    # Prettify the last assistant message's math delimiters for Markdown display.
    veryfinaloutput = format_mathjax_equation(str(finalmessages[-1].get("content", "")))
    return first_call_result, function_call_process_result, second_prompt_build_result, second_call_result, cost1, cost2, finalmessages, finalcostresult, finalcostrpresult, cost_dict, cost_dict_rp, veryfinaloutput
344
+
345
def gradio_function():
    """Build and launch the Gradio interface wired to main_function.

    Declares every input/output widget (prompts, sampling parameters,
    model dropdowns, cost readouts) and launches a shared, debug-enabled
    Gradio app.
    """
    # --- Input widgets: prompts and user task (defaults are a worked example) ---
    init_prompt = gr.Textbox(label="init_prompt (for 1st call)",value="""You are a genius math tutor, Python code expert, and a helpful assistant.
If this is a math subject or topic, you must create a code how to solve it accurately! The code must be worked and written in Python string and please use the easiest and the most correct way step by step to solve it!
After that, show me the evaluated and verified answer correctly once! This is very important to your career, so please code it perfectly, answer correctly and the answer must be already verified!
This problem is:""")
    prompt = gr.Textbox(label="prompt (for 2nd call)",value="""Here's the answer: {ans}. You must solve the problem step by step correctly and accurately!
The problem:
Rara memiliki tali sepanjang 3/5 meter. Lalu Dina menambahkan tali Rara sepanjang 3/6 meter. Total panjang tali yang dimiliki Rara menjadi ... meter
A. 33/30
B. 34/30
C. 35/30
D. 36/30
The answer: {ans}
You must solve the problem by explain it step by step, then if it doesn't match with the answer, please solve it by yourself!""")
    # Fallback prompt used when code execution yields no answer.
    prompt_cadangan = gr.Textbox(label="Prompt Cadangan",value="""Please figure and solve it step by step with explanation by yourself. Remember, you must give the correct answer!""")
    user_input = gr.Textbox(label="User Input",value="""Rara memiliki tali sepanjang 3/5 meter. Lalu Dina menambahkan tali Rara sepanjang 3/6 meter. Total panjang tali yang dimiliki Rara menjadi ... meter
A. 33/30
B. 34/30
C. 35/30
D. 36/30""")
    # --- Sampling parameters for the first and second API calls ---
    input_temperature_1 = gr.Textbox(label="temperature_1", value=0.2)
    input_top_p_1 = gr.Textbox(label="top_p_1", value=0.1)
    input_temperature_2 = gr.Textbox(label="temperature_2", value=0.2)
    input_top_p_2 = gr.Textbox(label="top_p_2", value=0.1)
    # --- Output widgets (order must match main_function's return tuple) ---
    output_1st_call = gr.JSON(label="Assistant (output_1st_call)")
    output_fc_call = gr.Textbox(label="Function Call (exec_python) Result (output_fc_call)")
    output_fc_call_with_prompt = gr.Textbox(label="Building 2nd Prompt (output_fc_call_with_2nd_prompt)")
    output_2nd_call = gr.JSON(label="Assistant (output_2nd_call_buat_user)")
    cost = gr.Textbox(label="Cost 1")
    cost2 = gr.Textbox(label="Cost 2")
    finalcost = gr.Textbox(label="Final Cost ($)")
    finalcostrp = gr.Textbox(label="Final Cost (Rp)")
    finalmessages = gr.JSON(label="Final Messages")
    model_dropdown_1 = gr.Dropdown(["gpt-4", "gpt-4-1106-preview", "gpt-3.5-turbo-1106"], label="Model 1", info="Pilih model 1!", value="gpt-3.5-turbo-1106")
    model_dropdown_2 = gr.Dropdown(["gpt-4", "gpt-4-1106-preview", "gpt-3.5-turbo-1106"], label="Model 2", info="Pilih model 2!", value="gpt-3.5-turbo-1106")
    cost_list = gr.JSON(label="Cost List ($)")
    cost_list_rp = gr.JSON(label="Cost List (Rp)")
    # Markdown pane rendering the prettified final answer with LaTeX support.
    prettieroutput = gr.Markdown(label="Last Output", latex_delimiters=[
        {'left': "$$", 'right': "$$", 'display': True},
        {'left': "$", 'right': "$", 'display': False},
        {'left': "\\(", 'right': "\\)", 'display': False},
        {'left': "\\[", 'right': "\\]", 'display': True}
    ])

    iface = gr.Interface(
        fn=main_function,
        inputs=[init_prompt, prompt, prompt_cadangan, user_input,input_temperature_1, input_top_p_1, input_temperature_2, input_top_p_2, model_dropdown_1, model_dropdown_2],
        outputs=[output_1st_call, output_fc_call, output_fc_call_with_prompt, output_2nd_call, cost, cost2, finalmessages, finalcost, finalcostrp, cost_list, cost_list_rp, prettieroutput],
        title="Test",
        description="Accuracy",
    )

    # share=True exposes a public link; debug=True blocks and streams logs.
    iface.launch(share=True, debug=True)
400
+
401
+ if __name__ == "__main__":
402
+ gradio_function()