weiwei1392 commited on
Commit
a5ab0b5
·
1 Parent(s): c05d486

‘增加模拟测评加载功能’

Browse files
Files changed (3) hide show
  1. app.py +74 -20
  2. main.py +0 -16
  3. mock_data.py +118 -0
app.py CHANGED
@@ -20,6 +20,15 @@ webui_title = "📚📚📚📚📚📚📚📚📚📚📚📚* * * * * * * * *
20
  llm_name_dict = {'chatgpt-3.5': 'OpenAI3', 'chatgpt-4': 'OpenAI4', 'chatglm—6b': 'ChatGLM'}
21
 
22
 
 
 
 
 
 
 
 
 
 
23
  def llm_change(name):
24
  llm = eval(eval('llm_name_dict[name]'))()
25
  return llm
@@ -27,8 +36,10 @@ def llm_change(name):
27
 
28
  def generate_comment_post_process(_summary):
29
  _summary = _summary.split('如下:')[-1]
 
30
  while _summary[0] == '\n':
31
  _summary = _summary[1:]
 
32
  return _summary
33
 
34
 
@@ -36,18 +47,25 @@ def show_config():
36
  return gr.update(visible=True)
37
 
38
 
 
 
 
 
 
 
 
39
  def show_stand_file(stand_file_path, stand_file):
40
  df = pd.read_excel(stand_file_path)
41
  return gr.update(visible=True, value=df)
42
 
43
 
44
- def hide_config():
45
- return gr.update(visible=False), gr.update(visible=False)
46
 
47
 
48
- def clear_screen(chatbot):
49
- chatbot = [[None, None]]
50
- return chatbot
51
 
52
 
53
  def generate_mock_data(_topic, _stand_file_path, _rewrite_prompt, _generate_num, _chatbot):
@@ -103,11 +121,14 @@ with gr.Blocks(css=block_css) as demo:
103
  gr.Markdown('\n\n\n\n')
104
  gr.Markdown(webui_title)
105
  gr.Markdown('\n\n\n\n')
106
- show_config_button = gr.Button('标准评价和prompt配置', visible=True)
107
- config = gr.Column(visible=False)
108
- show_config_button.click(show_config, outputs=[config])
109
-
110
- mock = gr.Column(visible=True)
 
 
 
111
 
112
  with config:
113
  # llm = gr.State('')
@@ -116,6 +137,7 @@ with gr.Blocks(css=block_css) as demo:
116
  # model_name.change(fn=llm_change, inputs=[model_name], outputs=[llm])
117
  stand_file_path = gr.Textbox('指标评价标准.xlsx', label='请输入评价标准文件路径')
118
  stand_comment = gr.DataFrame(visible=False)
 
119
  r_prompt = """The content within the angle brackets is an evaluation of the instructional design regarding the {topic} knowledge point. However, the evaluation does not incorporate the {topic} theme. Please modify the evaluation by incorporating the {topic} theme in each sentence while maintaining the same evaluative perspective and positive/negative nature. The modified evaluation should appear to be specifically targeted towards the {topic} theme.
120
  <{stand_comment}>
121
  """
@@ -125,18 +147,19 @@ with gr.Blocks(css=block_css) as demo:
125
  '<{comment}>'
126
  """
127
  s_prompt = gr.Text(value=s_prompt, label='请配置摘要prompt', lines=3)
128
- with gr.Row():
129
- with gr.Column(scale=2):
130
- set_config_button = gr.Button('确定', visible=True)
131
- with gr.Column(scale=2):
132
- show_stand_button = gr.Button('加载/更新标准评价', visible=True)
133
- set_config_button.click(hide_config, outputs=[stand_comment, config])
 
134
  show_stand_button.click(show_stand_file, inputs=[stand_file_path, stand_comment], outputs=stand_comment)
135
 
136
  with mock:
137
  with gr.Row():
138
  with gr.Column(scale=10):
139
- topic = gr.Textbox('升华和凝华', label='请输入讲课主题')
140
 
141
  # data_out_path = gr.Textbox('', label='请输入模拟数据导出路径')
142
  generate_num = gr.Slider(1, 20, value=3, step=1, label="评课人数量", interactive=True)
@@ -144,7 +167,7 @@ with gr.Blocks(css=block_css) as demo:
144
 
145
  with gr.Row():
146
  with gr.Column(scale=2):
147
- generate_button = gr.Button('生成模拟测评')
148
  with gr.Column(scale=2):
149
  clear_screen_button = gr.Button('清屏', visible=True)
150
  with gr.Column(scale=2):
@@ -157,14 +180,45 @@ with gr.Blocks(css=block_css) as demo:
157
  chatbot = gr.Chatbot([[None, None]],
158
  elem_id="chat-box",
159
  show_label=False).style(height=1000)
160
- generate_button.click(generate_mock_data, inputs=[topic, stand_file_path, r_prompt, generate_num,
161
  chatbot], outputs=[chatbot, mock_data, stand_comment, rewrite_button, summary_button])
162
- rewrite_button.click(rewrite, inputs=[stand_comment, r_prompt, topic, mock_data, chatbot],
163
  outputs=chatbot)
164
  summary_button.click(summary, inputs=[s_prompt, mock_data, chatbot],
165
  outputs=chatbot)
166
  clear_screen_button.click(clear_screen, inputs=[chatbot], outputs=chatbot)
167
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
168
 
169
  # demo.queue(concurrency_count=5).launch(server_name='0.0.0.0', server_port=7858, share=True)
170
 
 
20
  llm_name_dict = {'chatgpt-3.5': 'OpenAI3', 'chatgpt-4': 'OpenAI4', 'chatglm—6b': 'ChatGLM'}
21
 
22
 
23
def function_select(mode):
    """Map the top-level mode radio to panel visibility.

    Returns three gr.update objects in the order the radio's `outputs`
    are wired: (do_summary, mock, config).  Exactly one panel is shown;
    any unrecognized mode falls back to the config panel.
    """
    panel_states = {
        "测评智能摘要": (True, False, False),
        "生成模拟测评": (False, True, False),
    }
    summary_on, mock_on, config_on = panel_states.get(mode, (False, False, True))
    return (
        gr.update(visible=summary_on),
        gr.update(visible=mock_on),
        gr.update(visible=config_on),
    )
30
+
31
+
32
def llm_change(name):
    """Instantiate the LLM backend selected in the UI.

    `name` is a key of the module-level `llm_name_dict`, which maps the
    UI label to the backend class name (e.g. 'OpenAI3').  Returns a new
    instance of that class.

    The original used a redundant nested eval — eval(eval('llm_name_dict[name]'))
    — where the inner eval only re-produced the dict lookup; a single
    lookup followed by one eval of the (trusted, hard-coded) class name
    is equivalent.
    """
    llm = eval(llm_name_dict[name])()
    return llm
 
36
 
37
def generate_comment_post_process(_summary):
    """Extract the usable comment body from a raw LLM response.

    Drops any lead-in up to the Chinese '如下:' or English 'follows:'
    marker, strips leading newlines, and keeps only the first paragraph
    (text before the first blank line).

    Fix: the original `while _summary[0] == '\\n'` loop raised IndexError
    when the remaining text was empty or consisted only of newlines;
    `lstrip('\\n')` performs the same stripping safely.
    """
    _summary = _summary.split('如下:')[-1]
    _summary = _summary.split('follows:')[-1]
    # Safe on empty strings, unlike indexing _summary[0].
    _summary = _summary.lstrip('\n')
    # Keep only the first paragraph of the response.
    _summary = _summary.split('\n\n')[0]
    return _summary
44
 
45
 
 
47
  return gr.update(visible=True)
48
 
49
 
50
def load_comments(comments_file_path):
    """Handle an uploaded assessments workbook.

    `comments_file_path` is the gradio file wrapper whose `.name` holds
    the temp-file path on disk.  Returns the parsed assessments followed
    by four visibility updates that reveal the summary/rewrite controls.
    """
    parsed = load_scores_and_comments(comments_file_path.name)
    reveal = [gr.update(visible=True) for _ in range(4)]
    return (parsed, *reveal)
55
+
56
+
57
def show_stand_file(stand_file_path, stand_file):
    """Load the evaluation-standards workbook and reveal it in the UI.

    `stand_file` (the DataFrame component's current value) is received
    from gradio but not used — the table is always re-read from disk so
    the button also serves as a refresh.
    """
    standards = pd.read_excel(stand_file_path)
    return gr.update(value=standards, visible=True)
60
 
61
 
62
def hide_stand_file():
    """Return a gradio update hiding the standards table (wired to the 确定 button)."""
    return gr.update(visible=False)
64
 
65
 
66
def clear_screen(_chatbot):
    """Reset the chatbot history to a single empty exchange.

    The incoming `_chatbot` value is ignored; the component is simply
    replaced with one [None, None] turn, which gradio renders as empty.
    """
    return [[None, None]]
69
 
70
 
71
  def generate_mock_data(_topic, _stand_file_path, _rewrite_prompt, _generate_num, _chatbot):
 
121
  gr.Markdown('\n\n\n\n')
122
  gr.Markdown(webui_title)
123
  gr.Markdown('\n\n\n\n')
124
+ fun_mode = gr.Radio(["测评智能摘要", "生成模拟测评", "系统配置"], value="系统配置",
125
+ label="请先选择功能模式")
126
+ # show_config_button = gr.Button('标准评价和prompt配置', visible=True)
127
+ do_summary = gr.Row(visible=False)
128
+ config = gr.Column(visible=True)
129
+ mock = gr.Row(visible=False)
130
+ # show_config_button.click(show_config, outputs=[config])
131
+ fun_mode.change(fn=function_select, inputs=[fun_mode], outputs=[do_summary, mock, config])
132
 
133
  with config:
134
  # llm = gr.State('')
 
137
  # model_name.change(fn=llm_change, inputs=[model_name], outputs=[llm])
138
  stand_file_path = gr.Textbox('指标评价标准.xlsx', label='请输入评价标准文件路径')
139
  stand_comment = gr.DataFrame(visible=False)
140
+ show_stand_button = gr.Button('使用模拟测评/中位值改写功能,务必先加载标准评价', visible=True)
141
  r_prompt = """The content within the angle brackets is an evaluation of the instructional design regarding the {topic} knowledge point. However, the evaluation does not incorporate the {topic} theme. Please modify the evaluation by incorporating the {topic} theme in each sentence while maintaining the same evaluative perspective and positive/negative nature. The modified evaluation should appear to be specifically targeted towards the {topic} theme.
142
  <{stand_comment}>
143
  """
 
147
  '<{comment}>'
148
  """
149
  s_prompt = gr.Text(value=s_prompt, label='请配置摘要prompt', lines=3)
150
+ set_config_button = gr.Button('确定', visible=True)
151
+ # with gr.Row():
152
+ # with gr.Column(scale=2):
153
+ # set_config_button = gr.Button('确定', visible=True)
154
+ # with gr.Column(scale=2):
155
+ # show_stand_button = gr.Button('加载/查看标准评价', visible=True)
156
+ set_config_button.click(hide_stand_file, outputs=[stand_comment])
157
  show_stand_button.click(show_stand_file, inputs=[stand_file_path, stand_comment], outputs=stand_comment)
158
 
159
  with mock:
160
  with gr.Row():
161
  with gr.Column(scale=10):
162
+ mock_topic = gr.Textbox('升华和凝华', label='请输入讲课主题')
163
 
164
  # data_out_path = gr.Textbox('', label='请输入模拟数据导出路径')
165
  generate_num = gr.Slider(1, 20, value=3, step=1, label="评课人数量", interactive=True)
 
167
 
168
  with gr.Row():
169
  with gr.Column(scale=2):
170
+ generate_button = gr.Button('生成模拟测评(需先加载系统配置文件)')
171
  with gr.Column(scale=2):
172
  clear_screen_button = gr.Button('清屏', visible=True)
173
  with gr.Column(scale=2):
 
180
  chatbot = gr.Chatbot([[None, None]],
181
  elem_id="chat-box",
182
  show_label=False).style(height=1000)
183
+ generate_button.click(generate_mock_data, inputs=[mock_topic, stand_file_path, r_prompt, generate_num,
184
  chatbot], outputs=[chatbot, mock_data, stand_comment, rewrite_button, summary_button])
185
+ rewrite_button.click(rewrite, inputs=[stand_comment, r_prompt, mock_topic, mock_data, chatbot],
186
  outputs=chatbot)
187
  summary_button.click(summary, inputs=[s_prompt, mock_data, chatbot],
188
  outputs=chatbot)
189
  clear_screen_button.click(clear_screen, inputs=[chatbot], outputs=chatbot)
190
 
191
+ with do_summary:
192
+ with gr.Column(scale=10):
193
+ file = gr.File(label='请先上传待摘要文件')
194
+ rewrite_topic = gr.Textbox('升华和凝华', label='如采用中位值改写的方法,请输先入讲课主题', visible=False)
195
+
196
+ comments = gr.Json(visible=False)
197
+
198
+ with gr.Row():
199
+ with gr.Column(scale=2):
200
+ summary_button = gr.Button('四分位摘要', visible=False)
201
+ with gr.Column(scale=2):
202
+ rewrite_button = gr.Button('中位值改写(需先加载系统配置文件)', visible=False)
203
+
204
+ with gr.Column(scale=2):
205
+ clear_screen_button = gr.Button('清屏', visible=False)
206
+
207
+ file.upload(load_comments, inputs=[file],
208
+ outputs=[comments, summary_button, rewrite_button, clear_screen_button, rewrite_topic])
209
+
210
+ with gr.Column(scale=10):
211
+
212
+ chatbot = gr.Chatbot([[None, None]],
213
+ elem_id="chat-box",
214
+ show_label=False).style(height=1000)
215
+
216
+ rewrite_button.click(rewrite, inputs=[stand_comment, r_prompt, rewrite_topic, comments, chatbot],
217
+ outputs=chatbot)
218
+ summary_button.click(summary, inputs=[s_prompt, comments, chatbot],
219
+ outputs=chatbot)
220
+ clear_screen_button.click(clear_screen, inputs=[chatbot], outputs=chatbot)
221
+
222
 
223
  # demo.queue(concurrency_count=5).launch(server_name='0.0.0.0', server_port=7858, share=True)
224
 
main.py DELETED
@@ -1,16 +0,0 @@
1
- # This is a sample Python script.
2
-
3
- # Press Shift+F10 to execute it or replace it with your code.
4
- # Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
5
-
6
-
7
- def print_hi(name):
8
- # Use a breakpoint in the code line below to debug your script.
9
- print(f'Hi, {name}') # Press Ctrl+F8 to toggle the breakpoint.
10
-
11
-
12
- # Press the green button in the gutter to run the script.
13
- if __name__ == '__main__':
14
- print_hi('PyCharm')
15
-
16
- # See PyCharm help at https://www.jetbrains.com/help/pycharm/
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
mock_data.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import random
2
+ import math
3
+ import pandas as pd
4
+ import gradio as gr
5
+ import numpy as np
6
+ from typing import List
7
+ from llm import OpenAI3
8
+
9
+
10
+ llm = OpenAI3()
11
+
12
+
13
+ def _scores_recalculate(scores: List):
14
+ scores_array = np.array(scores)
15
+ scores_dis = abs(scores_array - scores_array.mean())
16
+ scores_count = np.ceil(1 / (scores_dis + scores_dis.mean()) * len(scores)).astype(int)
17
+ new_scores = []
18
+ for i in range(len(scores)):
19
+ new_scores.extend([scores[i]] * scores_count[i])
20
+ return new_scores
21
+
22
+
23
def generate_scores_and_comments(standard_file_path, rewrite_prompt, topic, num=10):
    """Generate `num` mock assessments from the standards workbook.

    The workbook at `standard_file_path` has one criterion per column
    plus a '得分' (score) column.  For each mock reviewer, a score is
    drawn per criterion (weighted toward the mean via
    _scores_recalculate), the matching standard comment is looked up,
    and the LLM rewrites the joined comments around `topic` using
    `rewrite_prompt` (a template with {topic} and {stand_comment}
    placeholders).

    Returns (assessments, df): the list of {'scores', 'comment'} dicts
    and the standards DataFrame.

    Fixes vs. original: drops the redundant list(tuple(...)) copy and
    the unused, misnamed `mean_score` array; interpolates the prompt
    with str.format instead of eval'ing an f-string built from
    UI-editable text, which could execute arbitrary code.
    """
    df = pd.read_excel(standard_file_path)
    pd.set_option('display.max_colwidth', None)
    standard_index = df.columns.to_list()
    standard_index.pop(standard_index.index('得分'))
    standard_scores = df['得分'].to_list()
    scores_choice = _scores_recalculate(standard_scores)
    assessments = []
    for _ in range(num):
        scores = {}
        stand_comment = []
        for index in standard_index:
            score = random.choice(scores_choice)
            scores[index] = score
            content = df[df['得分'] == score][index].to_string(index=False)
            stand_comment.append(content)
        stand_comment = ';'.join(stand_comment)
        # NOTE: requires the template to use only {topic}/{stand_comment};
        # the original eval'd f-string had the same requirement but could
        # also run embedded code.
        prompt = rewrite_prompt.format(topic=topic, stand_comment=stand_comment)
        r_comment = llm(prompt)
        # Keep only the final line of the LLM response.
        r_comment = r_comment.split('\n')[-1]
        assessments.append({'scores': scores, 'comment': r_comment})
    return assessments, df
51
+
52
+
53
def load_scores_and_comments(comments_file_path):
    """Read an assessments workbook back into the internal list-of-dicts form.

    The sheet must contain a 'comments' column; every other column is a
    per-criterion score.  Returns one {'scores': {...}, 'comment': str}
    dict per row.
    """
    frame = pd.read_excel(comments_file_path)
    pd.set_option('display.max_colwidth', None)
    comment_texts = frame.pop('comments').to_list()
    score_records = frame.to_dict(orient='records')
    return [
        {'scores': record, 'comment': text}
        for record, text in zip(score_records, comment_texts)
    ]
61
+
62
+
63
def medium_score_rewrite(standard_file, rewrite_prompt, topic, assessments):
    """Rewrite the median assessment for `topic` via the LLM.

    Takes the per-criterion median over all assessments, looks up the
    standard comment for each median score in `standard_file` (the
    standards DataFrame), and asks the LLM to rewrite the joined text
    around `topic` using `rewrite_prompt` ({topic}/{stand_comment}
    placeholders).

    Bug fix: the original joined the comments into a new `s_comment`
    variable while the eval'd template referenced `stand_comment`, so
    the raw Python list repr was interpolated into the prompt instead
    of the joined text.  Also replaces the eval of a UI-editable
    f-string (arbitrary code execution) with str.format.
    """
    scores = pd.DataFrame([a['scores'] for a in assessments])
    try:
        medium = scores.quantile(0.5)
    except Exception:  # in case the values in one column are all None
        scores = scores.fillna(0)
        medium = scores.quantile(0.5)
    medium = medium.astype(int)

    df = standard_file
    stand_comment = []
    for index in medium.index:
        score = math.ceil(medium[index])
        content = df[df['得分'] == score][index].to_string(index=False)
        stand_comment.append(content)

    stand_comment = ';'.join(stand_comment)
    prompt = rewrite_prompt.format(topic=topic, stand_comment=stand_comment)
    return llm(prompt)
84
+
85
+
86
def quantile_summary(summary_prompt, assessments):
    """Summarize assessments with the LLM, focusing on mid-range reviewers.

    `assessments` is either a plain list of comment strings, or a list
    of dicts with 'scores' (per-criterion numbers) and 'comment' keys.
    In the dict case only comments whose scores fall strictly between
    the 25th and 75th percentiles are summarized, dropping outlier
    reviewers.  `summary_prompt` is a template referencing {comment}.
    """
    if not isinstance(assessments[0], dict):
        # Bare comment strings: summarize them all.
        comment = '\n'.join(assessments)
    else:
        comments = [i['comment'] for i in assessments]
        comments = pd.Series(comments)
        scores = [i['scores'] for i in assessments]
        scores = pd.DataFrame(scores)

        try:
            quartiles = scores.quantile([0.25, 0.75])
        except Exception as e:  # in case the values in one column are all None
            scores = scores.fillna(0)
            quartiles = scores.quantile([0.25, 0.75])
        quartiles = quartiles.astype(int)
        # Boolean frames: strictly below Q3 ('up') / strictly above Q1 ('down').
        up = (scores - quartiles.loc[0.75]) < 0
        down = (scores - quartiles.loc[0.25]) > 0
        # Prefer reviewers inside the IQR on every criterion; relax to
        # "any criterion" — and finally to everyone — if the filter empties.
        select_index = (up & down).all(axis=1)
        if not select_index.any():
            select_index = (up & down).any(axis=1)

        if select_index.any():
            select_comments = comments[select_index].to_list()
        else:
            select_comments = comments.to_list()

        comment = '\n'.join(select_comments)

    # NOTE(review): the prompt is built by eval'ing an f-string assembled
    # from UI-editable text — this can execute arbitrary code embedded in
    # the prompt; consider str.format instead.
    prompt = 'f"""' + summary_prompt + '"""'
    s_comment = llm(eval(prompt))
    return s_comment
117
+
118
+