sguertl commited on
Commit
cca443a
·
verified ·
1 Parent(s): cdffdde

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +370 -0
app.py ADDED
@@ -0,0 +1,370 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import streamlit as st
3
+ from openai import OpenAI
4
+ import base64
5
+ import json
6
+ import requests
7
+ import re
8
+ import pandas as pd
9
+ from huggingface_hub import InferenceClient
10
+
11
+ HF_MODEL_MISTRAL = "mistralai/Mistral-7B-Instruct-v0.3"
12
+ HF_MODEL_LLAMA = "meta-llama/Llama-3.3-70B-Instruct"
13
+
14
+ PROMPTS_DOC_URL = "https://docs.google.com/document/d/17rB_0BGQ4DGT7pwOV8O58sNvzBWgywq6ZDgzyJ9pmjs/export?format=txt"
15
+
16
+ STEP1_SYSTEM_PROMPT = "STEP1 SYSPROMPT"
17
+ STEP1_USER_PROMPT = "STEP1 USERPROMPT"
18
+ STEP2_SYSTEM_PROMPT = "STEP2 SYSPROMPT"
19
+ STEP2_USER_PROMPT = "STEP2 USERPROMPT"
20
+ STEP3A_SYSTEM_PROMPT = "STEP3A SYSPROMPT"
21
+ STEP3A_USER_PROMPT = "STEP3A USERPROMPT"
22
+ STEP3B_SYSTEM_PROMPT = "STEP3B SYSPROMPT"
23
+ STEP3B_USER_PROMPT = "STEP3B USERPROMPT"
24
+
25
+
26
def fetch_prompts_from_google_doc(timeout=30):
    """Download the shared Google Doc and parse the named prompt sections.

    The document is expected to contain sections delimited as
    ``{BEGIN <NAME>} ... {END <NAME>}``.  Each section becomes one entry in
    the returned mapping, keyed by ``<NAME>``, with surrounding whitespace
    stripped from both the key and the content.

    Args:
        timeout: Seconds to wait for the HTTP request before giving up.
            Added so an unreachable Docs endpoint cannot hang the app
            forever (requests has no default timeout).

    Returns:
        dict[str, str]: Prompt name -> prompt text.

    Raises:
        Exception: If the document could not be fetched (non-200 status).
    """
    response = requests.get(PROMPTS_DOC_URL, timeout=timeout)
    if response.status_code != 200:
        # Include the status code so failures are diagnosable from the UI.
        raise Exception(f"Failed to fetch document (HTTP {response.status_code})")

    text = response.text
    prompts = {}

    # Backreference \1 ensures each BEGIN is closed by its matching END.
    pattern = r"\{BEGIN (.*?)\}([\s\S]*?)\{END \1\}"
    matches = re.findall(pattern, text)

    for key, content in matches:
        prompts[key.strip()] = content.strip()

    return prompts
41
+
42
# Step 1: Extract PlantUML Code
def extract_plantuml_code(client_openai, uploaded_file, model_choice, prompts):
    """Transcribe an uploaded UML diagram image into PlantUML code.

    Sends the image (base64-encoded as a data URL) together with the Step 1
    prompts to an OpenAI vision-capable chat model and returns the model's
    raw text reply.

    Args:
        client_openai: An ``openai.OpenAI`` client instance.
        uploaded_file: Streamlit ``UploadedFile`` holding the diagram image.
        model_choice: OpenAI model name to use (e.g. ``"gpt-4o"``).
        prompts: Mapping of prompt names to prompt text.

    Returns:
        str: The model's response content (expected to be PlantUML code).
    """
    st.write("Model: ", model_choice)

    image_b64 = base64.b64encode(uploaded_file.getvalue()).decode("utf-8")

    system_message = {
        "role": "system",
        "content": prompts[STEP1_SYSTEM_PROMPT],
    }
    user_message = {
        "role": "user",
        "content": [
            {"type": "text", "text": prompts[STEP1_USER_PROMPT]},
            {
                "type": "image_url",
                "image_url": {"url": f"data:image/png;base64,{image_b64}"},
            },
        ],
    }

    response = client_openai.chat.completions.create(
        model=model_choice,
        messages=[system_message, user_message],
        temperature=0.2,
        top_p=0.1,
        max_tokens=4096,
    )

    return response.choices[0].message.content
73
+
74
+
75
# Step 2: Compare PlantUML Code
def compare_plantuml(client_openai, client_hf_mistral, client_hf_llama, plantuml_instructor, plantuml_student, model_choice, prompts):
    """Compare instructor and student PlantUML code with the selected model.

    Routes the comparison to a Hugging Face Inference client (Mistral or
    Llama) or falls back to the OpenAI chat completions API for GPT models.

    Args:
        client_openai: ``openai.OpenAI`` client.
        client_hf_mistral: ``InferenceClient`` bound to the Mistral model.
        client_hf_llama: ``InferenceClient`` bound to the Llama model.
        plantuml_instructor: PlantUML code extracted from the instructor diagram.
        plantuml_student: PlantUML code extracted from the student diagram.
        model_choice: Model identifier selected in the UI.
        prompts: Mapping of prompt names to prompt text.

    Returns:
        str: The model's textual description of the differences.
    """
    st.write("Model: ", model_choice)

    user_prompt = f"""
{prompts[STEP2_USER_PROMPT]}

**Instructor's UML:**
{plantuml_instructor}

**Student's UML:**
{plantuml_student}
"""

    messages = [
        {"role": "system", "content": prompts[STEP2_SYSTEM_PROMPT]},
        {"role": "user", "content": user_prompt},
    ]

    # The original Mistral and Llama branches were identical except for the
    # client object, so select the client by model instead of duplicating.
    hf_clients = {
        HF_MODEL_MISTRAL: client_hf_mistral,
        HF_MODEL_LLAMA: client_hf_llama,
    }
    if model_choice in hf_clients:
        response = hf_clients[model_choice].chat_completion(
            messages,
            max_tokens=1024,
            temperature=0.2,
        )
        return response["choices"][0]["message"]["content"]

    # Any other choice is treated as an OpenAI model name.
    response = client_openai.chat.completions.create(
        model=model_choice,
        messages=messages,
        temperature=0.2,
        top_p=0.1,
        max_tokens=4096,
    )
    return response.choices[0].message.content
136
+
137
# Step 3A: Generate Student Feedback
def generate_student_feedback(client_openai, client_hf_mistral, client_hf_llama, differences, model_choice, prompts):
    """Turn the diagram differences into feedback addressed to the student.

    Args:
        client_openai: ``openai.OpenAI`` client.
        client_hf_mistral: ``InferenceClient`` bound to the Mistral model.
        client_hf_llama: ``InferenceClient`` bound to the Llama model.
        differences: Differences produced by :func:`compare_plantuml`
            (serialized into the prompt via ``json.dumps``).
        model_choice: Model identifier selected in the UI.
        prompts: Mapping of prompt names to prompt text.

    Returns:
        str: The generated student-facing feedback text.
    """
    st.write("Model (Student Feedback):", model_choice)

    user_prompt = f"""
{prompts[STEP3A_USER_PROMPT]}
{json.dumps(differences, indent=2)}
"""

    messages = [
        {"role": "system", "content": prompts[STEP3A_SYSTEM_PROMPT]},
        {"role": "user", "content": user_prompt},
    ]

    # The original Mistral and Llama branches were identical except for the
    # client object, so select the client by model instead of duplicating.
    hf_clients = {
        HF_MODEL_MISTRAL: client_hf_mistral,
        HF_MODEL_LLAMA: client_hf_llama,
    }
    if model_choice in hf_clients:
        response = hf_clients[model_choice].chat_completion(
            messages,
            max_tokens=1024,
            temperature=0.2,
        )
        return response["choices"][0]["message"]["content"]

    # Any other choice is treated as an OpenAI model name.
    response = client_openai.chat.completions.create(
        model=model_choice,
        messages=messages,
        temperature=0.2,
        top_p=0.1,
        max_tokens=4096,
    )
    return response.choices[0].message.content
196
+
197
+
198
# Step 3B: Generate Educator Feedback
def generate_educator_feedback(client_openai, client_hf_mistral, client_hf_llama, differences, model_choice, prompts):
    """Turn the diagram differences into feedback addressed to the educator.

    Args:
        client_openai: ``openai.OpenAI`` client.
        client_hf_mistral: ``InferenceClient`` bound to the Mistral model.
        client_hf_llama: ``InferenceClient`` bound to the Llama model.
        differences: Differences produced by :func:`compare_plantuml`
            (serialized into the prompt via ``json.dumps``).
        model_choice: Model identifier selected in the UI.
        prompts: Mapping of prompt names to prompt text.

    Returns:
        str: The generated educator-facing feedback text.
    """
    st.write("Model (Educator Feedback): ", model_choice)

    user_prompt = f"""
{prompts[STEP3B_USER_PROMPT]}
{json.dumps(differences, indent=2)}
"""

    messages = [
        {"role": "system", "content": prompts[STEP3B_SYSTEM_PROMPT]},
        {"role": "user", "content": user_prompt},
    ]

    # The original Mistral and Llama branches were identical except for the
    # client object, so select the client by model instead of duplicating.
    hf_clients = {
        HF_MODEL_MISTRAL: client_hf_mistral,
        HF_MODEL_LLAMA: client_hf_llama,
    }
    if model_choice in hf_clients:
        response = hf_clients[model_choice].chat_completion(
            messages,
            max_tokens=1024,
            temperature=0.2,
        )
        return response["choices"][0]["message"]["content"]

    # Any other choice is treated as an OpenAI model name.
    # NOTE: removed a leftover ``st.write(response)`` debug call that dumped
    # the raw API response to the UI (absent in generate_student_feedback).
    response = client_openai.chat.completions.create(
        model=model_choice,
        messages=messages,
        temperature=0.2,
        top_p=0.1,
        max_tokens=4096,
    )
    return response.choices[0].message.content
259
+
260
+
261
# Streamlit app layout
st.title("LLM-based Analysis and Feedback of a UML Diagram")
st.write("The pipeline consists of three steps:")
st.write("1. Extract PlantUML code from the uploaded UML diagrams using GPT-4o or GPT-4o Mini.")
st.write("2. Compare the extracted PlantUML code.")
st.write("3. Analyse the differences and present them in a structured format.")

# Fetch the prompt document up front; on failure show a UI error and stop
# instead of crashing the whole app with an unhandled traceback.
try:
    prompts = fetch_prompts_from_google_doc()
except Exception as e:
    st.error(f"Error: {e}")
    st.stop()

openai_api_key = st.text_input("OpenAI API key", type="password")
hf_api_key = st.text_input("Hugging Face API key", type="password")

if openai_api_key and hf_api_key:
    # One client per provider/model; the HF clients are bound to a fixed model.
    client_openai = OpenAI(api_key=openai_api_key)
    client_hf_mistral = InferenceClient(model=HF_MODEL_MISTRAL, token=hf_api_key)
    client_hf_llama = InferenceClient(model=HF_MODEL_LLAMA, token=hf_api_key)

    # Step 1 requires a vision-capable model, so only GPT models are offered.
    model_choice_step1 = st.selectbox("Select the model for Step 1", ["gpt-4o", "gpt-4o-mini"])
    model_choice_step2 = st.selectbox("Select the model for Step 2", [HF_MODEL_MISTRAL, HF_MODEL_LLAMA, "gpt-4o", "gpt-4o-mini"])
    model_choice_step3 = st.selectbox("Select the model for Step 3", [HF_MODEL_MISTRAL, HF_MODEL_LLAMA, "gpt-4o", "gpt-4o-mini"])

    st.subheader("Step 1: PlantUML Code Extraction using GPT-4o or GPT-4o Mini")

    col1, col2 = st.columns(2)
    with col1:
        uploaded_instructor_solution = st.file_uploader(
            "Upload Instructor UML Diagram", type=["jpg", "jpeg", "png"]
        )
    with col2:
        uploaded_student_solution = st.file_uploader(
            "Upload Student UML Diagram", type=["jpg", "jpeg", "png"]
        )

    if uploaded_instructor_solution is not None and uploaded_student_solution is not None:
        try:
            with st.spinner(
                "Extracting PlantUML code from the uploaded UML diagrams..."
            ):
                with col1:
                    st.image(
                        uploaded_instructor_solution,
                        caption="Uploaded Instructor UML Diagram",
                        use_container_width=True,
                    )
                    st.write("")
                    plantuml_instructor_solution = extract_plantuml_code(
                        client_openai, uploaded_instructor_solution, model_choice_step1, prompts
                    )
                with col2:
                    st.write("")
                    st.image(
                        uploaded_student_solution,
                        caption="Uploaded Student UML Diagram",
                        use_container_width=True,
                    )
                    st.write("")
                    plantuml_student_solution = extract_plantuml_code(
                        client_openai, uploaded_student_solution, model_choice_step1, prompts
                    )

                st.write("Extracted PlantUML Code")
                col1, col2 = st.columns(2)
                with col1:
                    st.text_area(
                        "PlantUML Code for Instructor Solution",
                        plantuml_instructor_solution,
                        height=600,
                    )
                with col2:
                    st.text_area(
                        "PlantUML Code for Student Solution",
                        plantuml_student_solution,
                        height=600,
                    )

            st.subheader("Step 2: UML Diagram Comparison")
            with st.spinner("Comparing instructor and student UML diagrams..."):
                differences = compare_plantuml(
                    client_openai,
                    client_hf_mistral,
                    client_hf_llama,
                    plantuml_instructor_solution,
                    plantuml_student_solution,
                    model_choice_step2,
                    prompts
                )
                with st.expander("View differences"):
                    for difference in differences.split("\n"):
                        st.write(difference)

            st.subheader("Step 3: Structured Feedback")
            with st.spinner("Preparing structured feedback..."):
                student_feedback = generate_student_feedback(client_openai, client_hf_mistral, client_hf_llama, differences, model_choice_step3, prompts)
                educator_feedback = generate_educator_feedback(client_openai, client_hf_mistral, client_hf_llama, differences, model_choice_step3, prompts)

                col1, col2 = st.columns(2)
                with col1:
                    st.write("Student Feedback")
                    st.markdown(f"{student_feedback}")
                with col2:
                    st.write("Educator Feedback")
                    st.markdown(f"{educator_feedback}")

        except Exception as e:
            # Surface any pipeline failure (API errors, bad prompts) in the UI.
            st.error(f"Error: {e}")
else:
    # Report each missing key independently — the original only showed the
    # OpenAI message even when both keys were missing.
    if not openai_api_key:
        st.error("Please provide a valid OpenAI API key.")
    if not hf_api_key:
        st.error("Please provide a valid Hugging Face API key.")