lugiiing committed on
Commit e29a384 · verified · 1 Parent(s): 7d3525c

Upload 6 files

Files changed (6)
  1. README.md +7 -7
  2. requirements.txt +12 -0
  3. src/obs_eval.py +81 -0
  4. src/obs_eval.sh +5 -0
  5. src/obs_eval_gradio.py +525 -0
  6. src/prompts.py +159 -0
README.md CHANGED
@@ -1,13 +1,13 @@
  ---
- title: Vds Eng
- emoji: 🏃
- colorFrom: gray
- colorTo: pink
+ title: GPT-4 Vision for Observational Evaluation in Education
+ emoji: 📚
+ colorFrom: green
+ colorTo: yellow
  sdk: gradio
- sdk_version: 4.24.0
- app_file: app.py
+ sdk_version: 4.5.0
+ app_file: src/obs_eval_gradio.py
  pinned: false
- license: apache-2.0
+ license: openrail
  ---
 
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
requirements.txt ADDED
@@ -0,0 +1,12 @@
+ langchain==0.0.331rc2
+ openai==1.5.0
+ openai-async==0.0.3
+ opencv-python==4.9.0.80
+ opencv-python-headless==4.9.0.80
+ openai-api-call==1.4.0
+ python-dotenv==1.0.1
+ gradio==4.19.1
+ matplotlib==3.8.3
+ asyncio==3.4.3
+ futures==3.0.5
+ pillow==10.2.0
src/obs_eval.py ADDED
@@ -0,0 +1,81 @@
+ import cv2
+ import base64
+ import os
+ import argparse
+ import openai
+ from dotenv import dotenv_values
+ 
+ config = dotenv_values("/workspace/Research/PangyoPangyo/src/.env")
+ 
+ openai.organization = config.get('OPENAI_ORGANIZATION')
+ openai.api_key = config.get('OPENAI_API_KEY')
+ 
+ 
+ ### Define the argument parser
+ 
+ def define_argparser():
+     p = argparse.ArgumentParser()
+ 
+     p.add_argument("--data_path", type=str, required=True)
+ 
+     config = p.parse_args()
+ 
+     return config
+ 
+ 
+ def main(config):
+     # Ensure the dataset directory exists and has the video file
+     if not os.path.exists(config.data_path):
+         print("Video file not found. Make sure data_path exists.")
+         return
+ 
+     video = cv2.VideoCapture(config.data_path)
+ 
+     base64Frames = []
+     while video.isOpened():
+         success, frame = video.read()
+         if not success:
+             break
+         _, buffer = cv2.imencode(".jpg", frame)
+         base64Frames.append(base64.b64encode(buffer).decode("utf-8"))
+ 
+     video.release()
+     print(len(base64Frames), "frames read.")
+ 
+     # Skipping the display part as it's not relevant in a .py script
+ 
+     INSTRUCTION = " ".join([
+         "These are frames of a video.",
+         "Create a short voiceover script in the style of a super excited Brazilian sports narrator who is narrating his favorite match.",
+         "He is a big fan of Messi, the player who scores in this clip.",
+         "Use caps and exclamation marks where needed to communicate excitement.",
+         "Only include the narration; your output must be in English.",
+         "When the ball goes into the net, you must scream GOL either once or multiple times.",
+     ])
+ 
+     PROMPT_MESSAGES = [
+         {
+             "role": "user",
+             "content": [
+                 INSTRUCTION,
+                 *map(lambda x: {"image": x, "resize": 768}, base64Frames[0::10]),
+             ],
+         },
+     ]
+     # Note: this uses the legacy (pre-1.0) openai SDK interface; see the note below.
+     params = {
+         "model": "gpt-4-vision-preview",
+         "messages": PROMPT_MESSAGES,
+         "api_key": openai.api_key,
+         "headers": {"Openai-Version": "2020-11-07"},
+         "max_tokens": 500,
+     }
+ 
+     result = openai.ChatCompletion.create(**params)
+     print(result.choices[0].message.content)
+ 
+ 
+ if __name__ == "__main__":
+     config = define_argparser()
+     main(config)
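Note that requirements.txt pins openai==1.5.0, while the call above uses the legacy pre-1.0 `openai.ChatCompletion` interface, which the 1.x SDK removed. A minimal sketch of the equivalent call against the 1.x client (assuming the same `PROMPT_MESSAGES` and API key from the script above) would be:

import openai
from openai import OpenAI

client = OpenAI(api_key=openai.api_key)  # 1.x-style client
result = client.chat.completions.create(
    model="gpt-4-vision-preview",
    messages=PROMPT_MESSAGES,
    max_tokens=500,
)
print(result.choices[0].message.content)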
src/obs_eval.sh ADDED
@@ -0,0 +1,5 @@
+ 
+ 
+ 
+ python obs_eval.py \
+     --data_path "/workspace/Research/gpt_4_vision_for_eval/datasets/messi2.mp4"
src/obs_eval_gradio.py ADDED
@@ -0,0 +1,525 @@
+ import io
+ import gradio as gr
+ import cv2
+ import base64
+ import openai
+ import asyncio
+ from openai import AsyncOpenAI
+ 
+ from langchain.prompts import PromptTemplate
+ from langchain.chat_models import ChatOpenAI
+ from langchain.schema import StrOutputParser
+ from PIL import Image
+ import ast
+ import matplotlib.pyplot as plt
+ 
+ from prompts import VISION_SYSTEM_PROMPT, USER_PROMPT_TEMPLATE, FINAL_EVALUATION_SYSTEM_PROMPT, FINAL_EVALUATION_USER_PROMPT, SUMMARY_AND_TABLE_PROMPT, AUDIO_SYSTEM_PROMPT
+ 
+ # Shared state across the Gradio callbacks
+ global_dict = {}
+ 
+ ######
+ # SETTINGS
+ VIDEO_FRAME_LIMIT = 2000
+ ######
+ 
+ def validate_api_key(api_key):
+     client = openai.OpenAI(api_key=api_key)
+ 
+     response = None
+     error = None
+     try:
+         # Make a minimal request to confirm the key works
+         response = client.chat.completions.create(
+             model="gpt-4",
+             messages=[
+                 {"role": "user", "content": "Hello world"},
+             ]
+         )
+         global_dict['api_key'] = api_key
+     except openai.RateLimitError as e:
+         # Handle rate limit error (we recommend using exponential backoff)
+         print(f"OpenAI API request exceeded rate limit: {e}")
+         error = e
+     except openai.APIConnectionError as e:
+         # Handle connection error here
+         print(f"Failed to connect to OpenAI API: {e}")
+         error = e
+     except openai.APIError as e:
+         # Handle API error here, e.g. retry or log
+         print(f"OpenAI API returned an API Error: {e}")
+         error = e
+ 
+     if response:
+         return True
+     else:
+         raise gr.Error(f"OpenAI returned an API Error: {error}")
+ 
+ 
+ def _process_video(video_file):
+     # Read and process the video file
+     video = cv2.VideoCapture(video_file.name)
+ 
+     global_dict['video_file'] = video_file.name
+ 
+     base64Frames = []
+     while video.isOpened():
+         success, frame = video.read()
+         if not success:
+             break
+         _, buffer = cv2.imencode(".jpg", frame)
+         base64Frames.append(base64.b64encode(buffer).decode("utf-8"))
+     video.release()
+     if len(base64Frames) > VIDEO_FRAME_LIMIT:
+         gr.Warning("Video's play time is too long. (>1m)")
+     print(len(base64Frames), "frames read.")
+ 
+     if not base64Frames:
+         raise gr.Error("Cannot open the video.")
+     return base64Frames
+ 
+ 
+ def _make_video_batch(video_file):
+     frames = _process_video(video_file)
+ 
+     TOTAL_FRAME_COUNT = len(frames)
+     BATCH_SIZE = 1
+     TOTAL_BATCH_SIZE = max(int(TOTAL_FRAME_COUNT * 1 / 300), 1)  # sample about 1 frame in 300; at least one batch
+     BATCH_STEP = int(TOTAL_FRAME_COUNT / TOTAL_BATCH_SIZE)
+ 
+     base64FramesBatch = []
+ 
+     for idx in range(0, TOTAL_FRAME_COUNT, BATCH_STEP * BATCH_SIZE):
+         temp = []
+         for i in range(BATCH_SIZE):
+             if (idx + BATCH_STEP * i) < TOTAL_FRAME_COUNT:
+                 temp.append(frames[idx + BATCH_STEP * i])
+         base64FramesBatch.append(temp)
+ 
+     for idx, batch in enumerate(base64FramesBatch):
+         print(f'##{idx} - batch_size: {len(batch)}')
+ 
+     global_dict['batched_frames'] = base64FramesBatch
+ 
+     return base64FramesBatch
+ 
+ 
+ def show_batches(video_file):
+     batched_frames = _make_video_batch(video_file)
+ 
+     images1 = []
+     for i, l in enumerate(batched_frames):
+         print(f"#### Batch_{i+1}")
+         for j, img in enumerate(l):
+             print(f'## Image_{j+1}')
+             image_bytes = base64.b64decode(img.encode("utf-8"))
+             # Convert the bytes to a stream (file-like object)
+             image_stream = io.BytesIO(image_bytes)
+             # Open the image as a PIL image
+             image = Image.open(image_stream)
+             images1.append((image, f"batch {i+1}"))
+         print("-" * 100)
+ 
+     return images1
+ 
+ 
+ def show_audio_transcript(video_file, api_key):
+     previous_video_file = global_dict.get('video_file')
+ 
+     if global_dict.get('transcript') and previous_video_file == video_file.name:
+         return global_dict['transcript']
+     else:
+         client = openai.OpenAI(api_key=api_key)
+         with open(video_file.name, "rb") as audio_file:
+             transcript = client.audio.transcriptions.create(
+                 model="whisper-1",
+                 file=audio_file,
+                 response_format="text"
+             )
+         global_dict['transcript'] = transcript
+ 
+         return transcript
+ 
+ 
+ # Rubrics fed into the evaluation prompts
+ 
+ audio_rubric_subsets = {
+     '1': '1. States their future dream using the expression want to be ~.',
+     '2': '2. Uses the expression (be) good at ~ to say what they are good at in relation to their future dream.',
+     '3': '3. Uses words for occupations accurately.',
+     '4': '4. Speaks fluently, without hesitating.'
+ }
+ rubric_subsets = {
+     '5': '5. Looks at the camera and speaks with a confident attitude.',
+     '6': '6. Uses appropriate hand gestures while speaking.'
+ }
+ rubrics_keyword = '"Use of the key expression (want to be)", "Use of the key expression (be good at)", "Use of occupation words", "Fluency", "Eye contact", "Hand gestures"'
+ global_dict['audio_rubric_subsets'] = audio_rubric_subsets
+ global_dict['rubric_subsets'] = rubric_subsets
+ global_dict['rubrics_keyword'] = rubrics_keyword
+ 
+ 
+ async def async_call_gpt_vision(client, batch, rubric_subset):
+     # Format the messages for the vision prompt, including the rubric subset and the images in the batch
+     vision_prompt_messages = [
+         {"role": "system", "content": VISION_SYSTEM_PROMPT},
+         {
+             "role": "user",
+             "content": [
+                 PromptTemplate.from_template(USER_PROMPT_TEMPLATE).format(rubrics=rubric_subset),
+                 *map(lambda x: {"image": x, "resize": 300}, batch),
+             ],
+         },
+     ]
+ 
+     # Parameters for the API call
+     params = {
+         "model": "gpt-4-vision-preview",
+         "messages": vision_prompt_messages,
+         "max_tokens": 1024,
+     }
+ 
+     # Asynchronous API call
+     try:
+         result_raw = await client.chat.completions.create(**params)
+         result = result_raw.choices[0].message.content
+         print(result)
+         return result
+     except Exception as e:
+         print(f"Error processing batch with rubric subset {rubric_subset}: {e}")
+         return None
+ 
+ 
+ async def process_rubrics_in_batches(client, frames, rubric_subsets):
+     results = {}
+     for key, rubric_subset in rubric_subsets.items():
+         # Process each image batch with the current rubric subset
+         tasks = [async_call_gpt_vision(client, batch, rubric_subset) for batch in frames]
+         subset_results = await asyncio.gather(*tasks)
+         # Filter out None results in case of errors
+         results[key] = [result for result in subset_results if result is not None]
+ 
+     return results
+ 
+ 
+ def wrapper_call_gpt_vision():
+     api_key = global_dict.get('api_key')
+     frames = global_dict.get('batched_frames')
+     rubric_subsets = global_dict.get('rubric_subsets')
+     client = AsyncOpenAI(api_key=api_key)
+ 
+     async def call_gpt_vision():
+         async_full_result_vision = await process_rubrics_in_batches(client, frames, rubric_subsets)
+         global_dict['full_result_vision'] = async_full_result_vision
+         return async_full_result_vision
+ 
+     # Create and set a new event loop
+     loop = asyncio.new_event_loop()
+     asyncio.set_event_loop(loop)
+     loop.run_until_complete(call_gpt_vision())
+ 
+ 
+ async def async_get_evaluation_text(client, result_subset):
+     result_subset_text = ' \n'.join(result_subset)
+     print(result_subset_text)
+     evaluation_text = PromptTemplate.from_template(FINAL_EVALUATION_USER_PROMPT).format(evals=result_subset_text)
+ 
+     evaluation_text_message = [
+         {"role": "system", "content": FINAL_EVALUATION_SYSTEM_PROMPT},
+         {
+             "role": "user",
+             "content": evaluation_text,
+         },
+     ]
+     params = {
+         "model": "gpt-4-vision-preview",
+         "messages": evaluation_text_message,
+         "max_tokens": 1024,
+     }
+ 
+     # Asynchronous API call
+     try:
+         result_raw_2 = await client.chat.completions.create(**params)
+         result_2 = result_raw_2.choices[0].message.content
+         return result_2
+     except Exception as e:
+         print(f"Error getting evaluation text {result_subset}: {e}")
+         return None
+ 
+ 
+ async def async_get_full_result(client, full_result_vision):
+     results_2 = {}
+     # Create a summarization task for each entry in full_result_vision
+     for key, result_subset in full_result_vision.items():
+         tasks_2 = [async_get_evaluation_text(client, result_subset)]
+         text_results = await asyncio.gather(*tasks_2)
+         results_2[key] = [result_2 for result_2 in text_results if result_2 is not None]
+ 
+     # Combine all results into a single string
+     results_2_val_list = list(results_2.values())
+     results_2_val = ""
+     for i in range(len(results_2_val_list)):
+         results_2_val += results_2_val_list[i][0]
+         results_2_val += "\n"
+ 
+     return results_2_val
+ 
+ 
+ def wrapper_get_full_result():
+     api_key = global_dict.get('api_key')
+     full_result_vision = global_dict.get('full_result_vision')
+     client = AsyncOpenAI(api_key=api_key)
+ 
+     async def get_full_result():
+         full_text = await async_get_full_result(client, full_result_vision)
+         # Store the result in global_dict, overwriting any previous value
+         global_dict['full_text'] = full_text
+         print("full_text: ")
+         print(full_text)
+ 
+     # Create and set a new event loop
+     loop = asyncio.new_event_loop()
+     asyncio.set_event_loop(loop)
+     loop.run_until_complete(get_full_result())
+ 
+ 
+ def call_gpt_audio(api_key) -> str:
+     audio_rubric_subsets = global_dict.get('audio_rubric_subsets')
+     transcript = global_dict.get('transcript')
+     openai.api_key = api_key
+ 
+     full_text_audio = ""
+ 
+     print(f"RUBRIC_AUDIO: {audio_rubric_subsets}")
+ 
+     PROMPT_MESSAGES = [
+         {
+             "role": "system",
+             "content": AUDIO_SYSTEM_PROMPT,
+         },
+         {
+             "role": "user",
+             "content": PromptTemplate.from_template(USER_PROMPT_TEMPLATE).format(rubrics=audio_rubric_subsets) + "\n\n<TEXT>\n" + transcript
+         },
+     ]
+     params = {
+         "model": "gpt-4",
+         "messages": PROMPT_MESSAGES,
+         "max_tokens": 1024,
+     }
+ 
+     try:
+         result = openai.chat.completions.create(**params)
+         full_text_audio = result.choices[0].message.content
+         print(full_text_audio)
+     except openai.OpenAIError as e:
+         print(f"Failed to connect to OpenAI: {e}")
+ 
+     global_dict['full_text_audio'] = full_text_audio
+ 
+     return full_text_audio
+ 
+ 
+ def get_final_answer(api_key):
+     rubrics_keyword = global_dict.get('rubrics_keyword')
+     full_text_audio = global_dict.get('full_text_audio')
+     full_text = global_dict.get('full_text')
+     full = full_text_audio + full_text
+     global_dict['full'] = full
+ 
+     chain = ChatOpenAI(
+         api_key=api_key,
+         model="gpt-4",
+         max_tokens=1024,
+         temperature=0,
+     )
+     prompt = PromptTemplate.from_template(SUMMARY_AND_TABLE_PROMPT)
+ 
+     runnable = prompt | chain | StrOutputParser()
+     final_eval = runnable.invoke({"full": full, "rubrics_keyword": rubrics_keyword})
+ 
+     print(final_eval)
+ 
+     global_dict['final_eval'] = final_eval
+ 
+     return final_eval
+ 
+ 
+ def tablize_final_answer():
+     # Slice out the [[labels], [scores]] list that the summary prompt asks the model to emit
+     final_eval = global_dict.get('final_eval')
+     pos3 = final_eval.find("[[")
+     pos4 = final_eval.find("]]")
+     tablize_final_eval = ast.literal_eval(final_eval[pos3:pos4 + 2])
+ 
+     cat_final_eval, val_final_eval = tablize_final_eval[0], tablize_final_eval[1]
+     val_final_eval = [int(score) for score in val_final_eval]
+ 
+     fig, ax = plt.subplots()
+     ax.bar(cat_final_eval, val_final_eval)
+     ax.set_ylabel('Scores')
+     ax.set_title('Scores by category')
+     # plt.xticks(rotation=30)
+     plt.rc('xtick', labelsize=3)
+     ax.set_xticks(range(len(cat_final_eval)))
+     ax.set_yticks([0, 2, 4, 6, 8, 10])
+     ax.set_xticklabels(cat_final_eval)
+ 
+     # Render the figure into an in-memory PNG buffer
+     buf = io.BytesIO()
+     plt.savefig(buf, format='png')
+     plt.close(fig)
+     buf.seek(0)
+ 
+     # Convert to a PIL.Image object
+     image = Image.open(buf)
+     return image
+ 
+ 
+ def brief_final_answer():
+     final_eval = global_dict.get('final_eval')
+     pos1 = final_eval.find("**Overall Score**")
+     pos2 = final_eval.find("----End of Summary----")
+     brief_final_eval = final_eval[pos1:pos2]
+     return brief_final_eval
+ 
+ 
+ def fin_final_answer():
+     fin_final_eval = global_dict.get('full')
+     return fin_final_eval
+ 
+ 
+ def mainpage():
+     with gr.Blocks() as start_page:
+         gr.Markdown("Title")
+         with gr.Row():
+             with gr.Column(scale=1):
+                 api_key_input = gr.Textbox(
+                     label="Enter your OpenAI API Key",
+                     info="Your API Key must be allowed to use GPT-4 Vision",
+                     placeholder="sk-*********...",
+                     lines=1
+                 )
+ 
+         gr.Markdown("Video upload page")
+         with gr.Row():
+             with gr.Column(scale=1):
+                 video_upload = gr.File(
+                     label="Upload your video (video under 1 minute is the best..!)",
+                     file_types=["video"],
+                 )
+ 
+         # TODO: replace this later with a pronunciation-sensitivity control!!!
+         """with gr.Column(scale=1):
+             weight_shift_button = gr.Button("Weight Shift")
+             balance_button = gr.Button("Balance")
+             form_button = gr.Button("Form")
+             overall_button = gr.Button("Overall")
+         """
+ 
+         with gr.Row():
+             with gr.Column(scale=1):
+                 process_button = gr.Button("Process")
+ 
+         gr.Markdown("Results page")
+         with gr.Row():
+             with gr.Column(scale=1):
+                 output_box_fin_table = gr.Image(type="pil", label="Score Chart")
+ 
+             with gr.Column(scale=1):
+                 output_box_fin_brief = gr.Textbox(
+                     label="Brief Evaluation",
+                     lines=10,
+                     interactive=True,
+                     show_copy_button=True,
+                 )
+ 
+         with gr.Row():
+             with gr.Column(scale=1):
+                 output_box_fin_fin = gr.Textbox(
+                     label="Detailed Evaluation",
+                     lines=10,
+                     interactive=True,
+                     show_copy_button=True,
+                 )
+             with gr.Column(scale=1):
+                 gallery = gr.Gallery(
+                     label="Batched Snapshots of Video",
+                     columns=[3],
+                     rows=[10],
+                     object_fit="contain",
+                     height="auto",
+                 )
+ 
+         # start_button.click(fn=video_rubric, inputs=[], outputs=[])
+         # weight_shift_button.click(fn=action_weight_shift, inputs=[], outputs=[])
+         # balance_button.click(fn=action_balance, inputs=[], outputs=[])
+         # form_button.click(fn=action_form, inputs=[], outputs=[])
+         # overall_button.click(fn=action_all, inputs=[], outputs=[])
+         process_button.click(fn=validate_api_key, inputs=api_key_input, outputs=None)\
+             .success(fn=show_batches, inputs=[video_upload], outputs=[gallery])\
+             .success(fn=show_audio_transcript, inputs=[video_upload, api_key_input], outputs=[])\
+             .success(fn=call_gpt_audio, inputs=[api_key_input], outputs=[])\
+             .success(fn=lambda: wrapper_call_gpt_vision(), inputs=[], outputs=[])\
+             .success(fn=lambda: wrapper_get_full_result(), inputs=[], outputs=[])\
+             .success(fn=get_final_answer, inputs=[api_key_input], outputs=[])\
+             .success(fn=tablize_final_answer, inputs=[], outputs=[output_box_fin_table])\
+             .success(fn=brief_final_answer, inputs=[], outputs=[output_box_fin_brief])\
+             .success(fn=fin_final_answer, inputs=[], outputs=[output_box_fin_fin])
+ 
+     start_page.launch()
+ 
+ 
+ if __name__ == "__main__":
+     mainpage()
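A side note on the two wrapper functions above: they spin up a fresh event loop by hand because Gradio invokes them from synchronous callbacks. Assuming no loop is already running in the callback thread, `asyncio.run` is the usual one-call alternative that creates, runs, and closes the loop; a minimal self-contained sketch:

import asyncio

async def demo():
    # stand-in for the awaited OpenAI calls in the wrappers above
    await asyncio.sleep(0)
    return "done"

print(asyncio.run(demo()))  # creates a loop, runs the coroutine, then closes the loop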
src/prompts.py ADDED
@@ -0,0 +1,159 @@
+ VISION_SYSTEM_PROMPT = """<Principles>
+ From now on, you will evaluate a video of a Korean elementary school student speaking English.
+ The speaking performance in the video is presented as a series of images.
+ The images show parts of one continuous speaking performance.
+ You must evaluate the student's English speaking performance using only the images provided.
+ When evaluating each image, you must remember that you are evaluating one continuous speaking performance, and that the segmented evaluations will all be joined together later.
+ Never add arbitrary judgments of your own; evaluate only what is presented in the images, based strictly on the evaluation criteria.
+ Since the subject of this evaluation is an elementary school student, describe both the positive aspects and the points that need improvement, and use appropriate praise so that the student's motivation to learn increases.
+ Never include quotation marks in the evaluation results.
+ 
+ 
+ <Evaluation Format>
+ Criterion_number: (number. the given evaluation criterion sentence)
+ Evaluation: Excellent / Average / Needs improvement / (Not evaluated) - rate on one of the three levels
+ Reason: (the reason criterion_1 was rated as above)
+ 
+ Criterion_number+1: (number+1. the given evaluation criterion sentence)
+ ...
+ 
+ <Example>
+ Criterion_3: 3. Looks at the camera and speaks with a confident attitude.
+ Evaluation: Excellent
+ Reason: It is very positive that the student looked at the listener and spoke with a confident attitude.
+ 
+ Criterion_4: 4. Uses appropriate hand gestures while speaking.
+ Evaluation: Average
+ Reason: Using the hands to add emphasis where it was needed was very good. However, in the parts where hand gestures were not needed, it would be better to reduce the movement of the upper body and hands.
+ 
+ ...
+ 
+ """
+ 
+ 
+ AUDIO_SYSTEM_PROMPT = """<Principles>
+ From now on, you will evaluate a video of a Korean elementary school student speaking English.
+ The speech in the video is presented as text produced by speech-to-text conversion.
+ The text is a transcription of what the student said, including hesitations.
+ You must evaluate the student's English speaking using only the text provided.
+ When evaluating the text, you must remember that you are evaluating one continuous speaking performance, and that the segmented evaluations will all be joined together later.
+ Never add arbitrary judgments of your own; evaluate only what is presented in the text and images, based strictly on the evaluation criteria.
+ Since the subject of this evaluation is an elementary school student, describe both the positive aspects and the points that need improvement, and use appropriate praise so that the student's motivation to learn increases.
+ Never include quotation marks in the evaluation results.
+ In the output, whenever the evaluation of a new rubric begins, you must always prepend **Criterion_(number): (the given evaluation rubric sentence)**.
+ 
+ 
+ <Evaluation Format>
+ 
+ **Criterion_1: (the given evaluation criterion sentence)**
+ Evaluation: Excellent / Average / Needs improvement / (Not evaluated) - rate on one of the three levels
+ Reason: (the reason criterion_1 was rated as above)
+ 
+ **Criterion_2: (the given evaluation criterion sentence)**
+ ...
+ 
+ 
+ <Example>
+ Criterion_1: 1. States their future dream using the expression want to be ~.
+ Evaluation: Average
+ Reason: Using the expression want to be was well done. However, when saying I want to be singer, the student did not put a before singer.
+ 
+ Criterion_2: 2. Uses the expression (be) good at ~ to say what they are good at in relation to their future dream.
+ Evaluation: Excellent
+ Reason: With the sentence I am good at singing, the student expressed well that they sing well. Using am, the form of the verb be that matches the subject I, was done well, and adding ing to sing to express the meaning of doing something was also done well.
+ 
+ ...
+ """
+ 
+ 
+ USER_PROMPT_TEMPLATE = """
+ 
+ Evaluate the English speaking based on the <Evaluation Criteria> given below.
+ Never include quotation marks in the evaluation results.
+ 
+ 
+ <Evaluation Criteria>
+ {rubrics}
+ 
+ """
+ 
+ 
+ FINAL_EVALUATION_SYSTEM_PROMPT = """
+ You will be given sentences that each evaluate a specific part of a student's English speaking performance.
+ You will merge the content evaluated in each sentence.
+ When merging, for each criterion you must pick the evaluation sentence with the highest rating among the several evaluation sentences for that criterion: merge with the logic of 'or'. The meaning of 'and' or of 'average' is not correct.
+ Merge all of the evaluation sentences.
+ Since the subject of this evaluation is an elementary school student, describe both the positive aspects and the points that need improvement, and use appropriate praise so that the student's motivation to learn increases.
+ Never include quotation marks in the output.
+ In the output, whenever the evaluation of a new rubric begins, you must always prepend **Criterion_(number): (the given evaluation criterion sentence)**.
+ 
+ 
+ <Output Format>
+ 
+ **Criterion_number: (number. the given evaluation criterion sentence)**
+ Evaluation: Excellent / Average / Needs improvement / (Not evaluated) - rate on one of the three levels
+ Reason: (the reason criterion_1 was rated as above)
+ 
+ **Criterion_number+1: (number+1. the given evaluation criterion sentence)**
+ ....
+ 
+ """
+ 
+ 
+ 
+ FINAL_EVALUATION_USER_PROMPT = """
+ Synthesize and summarize the sentences presented in <Full Evaluation Results>.
+ Never include quotation marks in the output.
+ 
+ <Full Evaluation Results>
+ {evals}
+ 
+ """
+ 
+ 
+ SUMMARY_AND_TABLE_PROMPT = """
+ 
+ You will be given sentences that evaluate an elementary school student's English speaking.
+ 
+ <Full Evaluation Results>
+ {full}
+ 
+ -----End of Evaluation Results-----
+ 
+ 
+ ### Task 1
+ Based on the contents of [Criterion Keywords], rate the <Full Evaluation Results> sentences with a single score from 1 to 10.
+ When merging, for each criterion you must pick the evaluation sentence with the highest rating among the several evaluation sentences for that criterion: merge with the logic of 'or'. The meaning of 'and' or of 'average' is not correct.
+ Put **Table** at the very beginning of the output.
+ Never include quotation marks in the output.
+ Follow the <Output Format> below exactly, and fill each (score) slot with an integer from 1 to 10.
+ 
+ <Example>
+ 
+ **Table**
+ 
+ [["Criterion Keyword_1", "Criterion Keyword_2", "Criterion Keyword_3", "Criterion Keyword_4", "Criterion Keyword_5"], [6, 10, 6, 5, 4]]
+ 
+ <Output Format>
+ 
+ **Table**
+ 
+ [[{rubrics_keyword}], [(English speaking score for Criterion Keyword_1), (English speaking score for Criterion Keyword_2), (English speaking score for Criterion Keyword_3), ...]]
+ 
+ 
+ 
+ ### Task 2
+ Give an overall summary of the <Full Evaluation Results>, and present an overall score from 1 to 10.
+ When merging, for each criterion you must pick the evaluation sentence with the highest rating among the several evaluation sentences for that criterion: merge with the logic of 'or'. The meaning of 'and' or of 'average' is not correct.
+ Never include quotation marks in the output.
+ 
+ <Output Format>
+ 
+ **Overall Score** : 1~10 / 10
+ 
+ **Overall Feedback**
+ (the basis on which the 'Overall Score' was calculated)
+ 
+ ----End of Summary----
+ 
+ """