Dryanvfi committed on
Commit 25d153a · Parent: 87a5a35

Update app.py

Files changed (1):
  1. app.py (+49, -49)
app.py CHANGED
@@ -11,11 +11,55 @@ import plotly
 import gradio as gr
 
 # Initiate functions to be defined with API key input.
-def prompt_analyze_questions():
-    pass
-
-def prompt_analyze_reporting():
-    pass
+data_structure_overview = ''
+data_relationships_overview = ''
+fig = ''
+output_string = ''
+
+
+def prompt_analyze_reporting(prompt):
+    output = openai.ChatCompletion.create(model="gpt-3.5-turbo", temperature=0.0, messages=[
+        {"role": "user", "content": data_structure_overview},
+        {"role": "user", "content": data_relationships_overview},
+        {"role": "user", "content": f"""Do not attempt to use .csv files in your code."""},
+        {"role": "user", "content": f"""Only use plotly to output charts, graphs, or figures. Do not use matplotlib or other charting libraries. Name the chart object as 'fig'"""},
+        {"role": "user", "content": f"""Create a python script to: {prompt}"""}
+    ])
+    global parsed_response
+    parsed_response = output.choices[0].message.content.strip().split('```python')[len(output.choices[0].message.content.strip().split('```python')) - 1].split('```')[0]
+    parsed_response_global = f"""global fig
+global string
+{parsed_response}"""
+    exec(parsed_response_global)
+    return fig
+
+def prompt_analyze_questions(prompt):
+    output = openai.ChatCompletion.create(model="gpt-3.5-turbo", temperature=0.0, messages=[
+        {"role": "user", "content": data_structure_overview},
+        {"role": "user", "content": data_relationships_overview},
+        {"role": "user", "content": f"""Do not attempt to use .csv files in your code."""},
+        {"role": "user", "content": f"""Do not attempt to create charts or visualize the question with graphics. Only provide string responses."""},
+        {"role": "user", "content": f"""If you are asked to create visualizations or graphs, create a python script to store a string variable named output_string with the text 'Sorry, I cannot create reporting, select 'Add Reporting' to create reports."""},
+        {"role": "user", "content": f"""Create a python script to: {prompt}"""},
+        {"role": "user", "content": f"""Store the final response as a string variable named output_string"""}
+    ])
+
+    global parsed_response
+    parsed_response = output.choices[0].message.content.strip().split('```python')[len(output.choices[0].message.content.strip().split('```python')) - 1].split('```')[0]
+    parsed_response_global = f"""global fig
+global string
+{parsed_response}
+globals().update(locals())"""
+    exec(parsed_response_global)
+    return output_string
 
 # Data Retrieval
 
@@ -216,50 +260,6 @@ def getData(tlspc_api_key, openai_api_key):
 The column values in the 'certificateIssuingTemplateId' column in certs_request_df match the column values in the 'issuing_template_id' column in issuing_templates_df.
 """
 
-    def prompt_analyze_reporting(prompt):
-        output = openai.ChatCompletion.create(model="gpt-3.5-turbo", temperature=0.0, messages=[
-            {"role": "user", "content": data_structure_overview},
-            {"role": "user", "content": data_relationships_overview},
-            {"role": "user", "content": f"""Do not attempt to use .csv files in your code."""},
-            {"role": "user", "content": f"""Only use plotly to output charts, graphs, or figures. Do not use matplotlib or other charting libraries. Name the chart object as 'fig'"""},
-            {"role": "user", "content": f"""Create a python script to: {prompt}"""}
-        ])
-        global parsed_response
-        parsed_response = output.choices[0].message.content.strip().split('```python')[len(output.choices[0].message.content.strip().split('```python')) - 1].split('```')[0]
-        parsed_response_global = f"""global fig
-global string
-{parsed_response}"""
-        exec(parsed_response_global)
-        return fig
-
-    def prompt_analyze_questions(prompt):
-        output = openai.ChatCompletion.create(model="gpt-3.5-turbo", temperature=0.0, messages=[
-            {"role": "user", "content": data_structure_overview},
-            {"role": "user", "content": data_relationships_overview},
-            {"role": "user", "content": f"""Do not attempt to use .csv files in your code."""},
-            {"role": "user", "content": f"""Do not attempt to create charts or visualize the question with graphics. Only provide string responses."""},
-            {"role": "user", "content": f"""If you are asked to create visualizations or graphs, create a python script to store a string variable named output_string with the text 'Sorry, I cannot create reporting, select 'Add Reporting' to create reports."""},
-            {"role": "user", "content": f"""Create a python script to: {prompt}"""},
-            {"role": "user", "content": f"""Store the final response as a string variable named output_string"""}
-        ])
-
-        global parsed_response
-        parsed_response = output.choices[0].message.content.strip().split('```python')[len(output.choices[0].message.content.strip().split('```python')) - 1].split('```')[0]
-        parsed_response_global = f"""global fig
-global string
-{parsed_response}
-globals().update(locals())"""
-        exec(parsed_response_global)
-        return output_string
-
     # Store variables for use in other portions of the application
     globals().update(locals())
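
Both helpers recover the generated script from the chat reply by splitting on the ```python fence and keeping the last fenced block. A minimal sketch of that extraction step, pulled out into a standalone helper for illustration (the helper name and the sample text below are not part of app.py):

    def extract_python_block(reply: str) -> str:
        """Return the body of the last ```python fenced block in a chat reply."""
        parts = reply.strip().split('```python')
        # parts[-1] is everything after the final ```python fence;
        # the committed expression reaches the same element with len(parts) - 1.
        candidate = parts[-1]
        # Trim at the closing fence, if the model included one.
        return candidate.split('```')[0]

    sample = "Here is the script:\n```python\nprint('hello')\n```\nAnything else?"
    print(extract_python_block(sample))   # prints the extracted snippet: print('hello')

If the reply contains no fence at all, split() returns a single-element list and the helper falls back to the whole reply, which matches the behaviour of the committed expression.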
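The committed helpers hand results back by prepending global declarations to the generated script and calling exec(), so that fig and output_string land in module scope for the rest of the app to read. For comparison only (this is not what the commit does), the same hand-off can be sketched with an explicit namespace dict, which keeps the generated code from writing into module globals; run_generated_snippet and the sample snippet are illustrative:

    import plotly.graph_objects as go

    def run_generated_snippet(snippet: str, context: dict) -> dict:
        """Execute a generated snippet in its own namespace, seeded with the objects it may reference."""
        namespace = dict(context)
        exec(snippet, namespace)      # still a full-trust hand-off to whatever code the model produced
        return namespace

    # The reporting path expects the snippet to define 'fig'; the questions path expects 'output_string'.
    result = run_generated_snippet("fig = go.Figure(data=[go.Bar(x=['a', 'b'], y=[1, 2])])", {"go": go})
    fig = result.get("fig")
    output_string = result.get("output_string", "")

Either way the snippet runs with full interpreter privileges, so the explicit namespace only changes where the results are stored, not the trust placed in the generated code.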
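With prompt_analyze_reporting returning a plotly figure and prompt_analyze_questions returning a string, the module-level placeholders give the Gradio layer something to reference before an API key has been entered. The UI wiring itself is outside this diff; a hypothetical sketch of how such helpers are typically bound to Blocks components (component names and labels below are illustrative, not taken from app.py):

    import gradio as gr

    with gr.Blocks() as demo:
        question_box = gr.Textbox(label="Ask a question about the certificate data")
        answer_box = gr.Textbox(label="Answer")
        report_box = gr.Textbox(label="Describe the report to build")
        report_plot = gr.Plot(label="Report")

        # Each click passes the prompt text to the matching helper and renders what it returns.
        gr.Button("Ask").click(prompt_analyze_questions, inputs=question_box, outputs=answer_box)
        gr.Button("Add Reporting").click(prompt_analyze_reporting, inputs=report_box, outputs=report_plot)

    demo.launch()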
265