kenton-li committed on
Commit
948174f
·
1 Parent(s): 5e4a3af

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +40 -84
app.py CHANGED
@@ -10,93 +10,49 @@ tokenizer = None
10
  generator = None
11
  csv_name = "disease_database_mini.csv"
12
  df = pd.read_csv(csv_name)
13
- openai.api_key = "sk-***REDACTED***"  # leaked secret masked; API keys must be loaded from the environment (e.g. os.environ["OPENAI_API_KEY"]), never committed
14
 
15
- def csv_prompter(question,csv_name):
 
 
 
16
 
 
 
 
 
17
 
18
-
19
- fulltext = "A question is provided below. Given the question, extract " + \
20
- "keywords from the text. Focus on extracting the keywords that we can use " + \
21
- "to best lookup answers to the question. \n" + \
22
- "---------------------\n" + \
23
- "{}\n".format(question) + \
24
- "---------------------\n" + \
25
- "Provide keywords in the following comma-separated format.\nKeywords: "
26
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
  messages = [
28
- {"role": "system", "content": ""},
29
- ]
30
- messages.append(
31
- {"role": "user", "content": f"{fulltext}"}
32
- )
33
- rsp = openai.ChatCompletion.create(
34
- model="gpt-3.5-turbo",
35
- messages=messages
36
- )
37
- keyword_list = rsp.get("choices")[0]["message"]["content"]
38
- keyword_list = keyword_list.replace(",","").split(" ")
39
-
40
- print(keyword_list)
41
- divided_text = []
42
- csvdata = df.to_dict('records')
43
- step_length = 15
44
- for csv_item in range(0,len(csvdata),step_length):
45
- csv_text = str(csvdata[csv_item:csv_item+step_length]).replace("}, {", "\n\n").replace("\"", "")#.replace("[", "").replace("]", "")
46
- divided_text.append(csv_text)
47
-
48
- answer_llm = ""
49
-
50
- score_textlist = [0] * len(divided_text)
51
-
52
- for i, chunk in enumerate(divided_text):
53
- for t, keyw in enumerate(keyword_list):
54
- if keyw.lower() in chunk.lower():
55
- score_textlist[i] = score_textlist[i] + 1
56
-
57
- answer_list = []
58
- divided_text = [item for _, item in sorted(zip(score_textlist, divided_text), reverse=True)]
59
-
60
- for i, chunk in enumerate(divided_text):
61
-
62
- if i>4:
63
- continue
64
-
65
- fulltext = "{}".format(chunk) + \
66
- "\n---------------------\n" + \
67
- "Based on the Table above and not prior knowledge, " + \
68
- "Select the Table Entries that will help to answer the question: {}\n Output in the format of \" Disease: <>; Symptom: <>; Medical Test: <>; Medications: <>;\". If there is no useful form entries, output: 'No Entry'".format(question)
69
-
70
- print(fulltext)
71
- messages = [
72
  {"role": "system", "content": ""},
73
  ]
74
- messages.append(
75
- {"role": "user", "content": f"{fulltext}"}
76
- )
77
- rsp = openai.ChatCompletion.create(
78
- model="gpt-3.5-turbo",
79
- messages=messages
80
- )
81
- answer_llm = rsp.get("choices")[0]["message"]["content"]
82
-
83
- print("\nAnswer: " + answer_llm)
84
- print()
85
- if not "No Entry" in answer_llm:
86
- answer_list.append(answer_llm)
87
-
88
-
89
-
90
- fulltext = "The original question is as follows: {}\n".format(question) + \
91
- "Based on this Table:\n" + \
92
- "------------\n" + \
93
- "{}\n".format(str("\n\n".join(answer_list))) + \
94
- "------------\n" + \
95
- "Answer: "
96
- print(fulltext)
97
- messages = [
98
- {"role": "system", "content": ""},
99
- ]
100
  messages.append(
101
  {"role": "user", "content": f"{fulltext}"}
102
  )
@@ -104,10 +60,10 @@ def csv_prompter(question,csv_name):
104
  model="gpt-3.5-turbo",
105
  messages=messages
106
  )
107
- answer_llm = rsp.get("choices")[0]["message"]["content"]
108
-
109
- print("\nFinal Answer: " + answer_llm)
110
  print()
 
 
111
 
112
  return answer_llm
113
 
 
10
  generator = None
11
  csv_name = "disease_database_mini.csv"
12
  df = pd.read_csv(csv_name)
13
+ openai.api_key = "sk-***REDACTED***"  # leaked secret masked; the commit replaced one hard-coded key with another — both are compromised once published and should be revoked
14
 
15
+ def csv_prompter(question,csv_name):
16
+ json_file = open('order.json')
17
+ json_data = json.load(json_file)
18
+ json_data = json_data['records']
19
 
20
+ print(json_data)
21
+ fulltext = []
22
+ #print all nm in json file
23
+ for i in json_data:
24
 
25
+ # identify if opts in this item
26
+ opt_list = []
27
+ if 'opt' in i:
28
+ for opt1 in i['opt']:
29
+ for opt2 in opt1['opts']:
30
+ opt_list.append(opt2['nm'])
31
+ if len(opt_list) > 100:
32
+ print(str(i['pid'])+" "+i['nm']+" Options:"+str(opt_list))
33
+ each = i['nm']+" Options:"+str(opt_list)
34
+ fulltext.append(each)
35
+ else:
36
+ print(str(i['pid'])+" "+i['nm'])
37
+ each = i['nm']
38
+ fulltext.append(each)
39
+
40
+
41
+ fulltext = '\n'.join(fulltext)
42
+ #fulltext = fulltext + "\n 00:00 - Customer: Hey, dear, can I please take out? 00:03 - Waiter: I'm up here. Okay go ahead. 00:06 - Customer: Can I get two sesame chicken dinners? 00:10 - Customer: One with lo mein and one with fried rice? 00:13 - Customer: Can I get, I know spring rolls come with each of those, but can I get just as long 00:17 - Customer: as I've got a total of two spring rolls and two egg rolls? 00:21 - Waiter: Okay. Anything? 00:22 - Customer: And a small bowl of hot and sour soup. 00:25 - Customer: And that's it. 00:26 - Waiter: Alright. Two sesame chickens, small one with fried rice, small one with lo mein, and both of those with spring rolls, two egg rolls and a small hot and sour soup. 00:30 - Customer: It's 205-473-1750. 00:36 - Waiter: What's the fun number please? 00:43 - Waiter: Just got around 20 minutes. Thank you. 00:43 - Customer: Okay, dear, thank you so much."
43
+ fulltext = fulltext+'\n'
44
+ #read txt file in lines
45
+ f = open("talk.txt", "r")
46
+ for x in f:
47
+ fulltext = fulltext + x
48
+
49
+
50
+
51
+ fulltext = fulltext+'Based on the above dialogue and menu, output the dishes ordered by the customer. Note that some dishes have sub-options (e.g. some are set meals, some are a la carte)'
52
+ print(fulltext)
53
  messages = [
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
54
  {"role": "system", "content": ""},
55
  ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
56
  messages.append(
57
  {"role": "user", "content": f"{fulltext}"}
58
  )
 
60
  model="gpt-3.5-turbo",
61
  messages=messages
62
  )
63
+ response = rsp.get("choices")[0]["message"]["content"]
 
 
64
  print()
65
+
66
+ print(response)
67
 
68
  return answer_llm
69