Add ERD type

app.py CHANGED
@@ -12,6 +12,7 @@ HF_MODEL_MISTRAL = "mistralai/Mistral-7B-Instruct-v0.3"
 HF_MODEL_LLAMA = "meta-llama/Llama-3.3-70B-Instruct"

 UML_PROMPTS_DOC_URL = os.environ['UML_PROMPTS_DOC_URL']
+ERD_PROMPTS_DOC_URL = os.environ['ERD_PROMPTS_DOC_URL']

 STEP1_SYSTEM_PROMPT = "STEP1 SYSPROMPT"
 STEP1_USER_PROMPT = "STEP1 USERPROMPT"
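
Both document URLs are read with os.environ[...], which raises a bare KeyError at import time if the corresponding Space secret is missing. A minimal hypothetical fail-fast variant; the _require_env helper is illustrative and not part of this commit:

    import os

    def _require_env(name: str) -> str:
        # Illustrative helper: fail with a readable message instead of a bare KeyError.
        value = os.environ.get(name)
        if not value:
            raise RuntimeError(f"Missing required environment variable: {name}")
        return value

    UML_PROMPTS_DOC_URL = _require_env("UML_PROMPTS_DOC_URL")
    ERD_PROMPTS_DOC_URL = _require_env("ERD_PROMPTS_DOC_URL")
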
@@ -23,8 +24,11 @@ STEP3B_SYSTEM_PROMPT = "STEP3B SYSPROMPT"
 STEP3B_USER_PROMPT = "STEP3B USERPROMPT"


-def fetch_prompts_from_google_doc():
-    response = requests.get(UML_PROMPTS_DOC_URL)
+def fetch_prompts_from_google_doc(diagram_type="UML"):
+    if diagram_type == "UML":
+        response = requests.get(UML_PROMPTS_DOC_URL)
+    elif diagram_type == "ERD":
+        response = requests.get(ERD_PROMPTS_DOC_URL)
     if response.status_code != 200:
         raise Exception("Failed to fetch document")

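
With the if/elif above, any diagram_type other than "UML" or "ERD" leaves response unbound, so the status check then fails with an UnboundLocalError. A hypothetical sketch of a mapping-based lookup that fails with a clearer error; the PROMPTS_DOC_URLS name and the timeout value are illustrative, not part of this commit:

    import os
    import requests

    # Illustrative mapping; the URLs are the same environment-backed constants app.py defines.
    PROMPTS_DOC_URLS = {
        "UML": os.environ['UML_PROMPTS_DOC_URL'],
        "ERD": os.environ['ERD_PROMPTS_DOC_URL'],
    }

    def fetch_prompts_from_google_doc(diagram_type="UML"):
        if diagram_type not in PROMPTS_DOC_URLS:
            raise ValueError(f"Unsupported diagram type: {diagram_type!r}")
        response = requests.get(PROMPTS_DOC_URLS[diagram_type], timeout=30)
        if response.status_code != 200:
            raise Exception("Failed to fetch document")
        # Parsing of the document into the prompts dict stays as in app.py.
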
@@ -73,17 +77,17 @@ def extract_plantuml_code(client_openai, uploaded_file, model_choice, prompts):


 # Step 2: Compare PlantUML Code
-def compare_plantuml(client_openai, client_hf_mistral, client_hf_llama, plantuml_instructor, plantuml_student, model_choice, prompts):
+def compare_plantuml(client_openai, client_hf_mistral, client_hf_llama, plantuml_instructor, plantuml_student, model_choice, prompts, diagram_type="UML"):

     st.write("Model: ", model_choice)

     user_prompt=f"""
     {prompts[STEP2_USER_PROMPT]}

-    **Instructor's UML Diagram:**
+    **Instructor's {diagram_type} Diagram:**
     {plantuml_instructor}

-    **Student's UML Diagram:**
+    **Student's {diagram_type} Diagram:**
     {plantuml_student}
     """

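
For reference, a small illustrative snippet showing what the Step 2 user prompt looks like once diagram_type is interpolated; all values below are invented stand-ins, not the real prompts or diagrams:

    # Illustrative only: stand-in values for prompts[STEP2_USER_PROMPT] and the two diagrams.
    diagram_type = "ERD"
    step2_user_prompt = "Compare the two diagrams and list every difference."
    plantuml_instructor = "@startuml\nentity Customer\n@enduml"
    plantuml_student = "@startuml\nentity Client\n@enduml"

    user_prompt = f"""
    {step2_user_prompt}

    **Instructor's {diagram_type} Diagram:**
    {plantuml_instructor}

    **Student's {diagram_type} Diagram:**
    {plantuml_student}
    """
    print(user_prompt)
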
@@ -257,13 +261,20 @@ def generate_educator_feedback(client_openai, client_hf_mistral, client_hf_llama


 # Streamlit app layout
-st.title("LLM-based Analysis and Feedback of a UML Diagram")
+st.set_page_config(
+    page_title="LLM-based Analysis and Feedback of a UML or ER Diagram",
+    page_icon="📝",
+    initial_sidebar_state="expanded",
+)
+
+st.title("LLM-based Analysis and Feedback of a UML or ERD Diagram")
 st.write("The pipeline consists of three steps:")
-st.write("1. Extract PlantUML code from the uploaded UML diagrams using GPT-4o or GPT-4o Mini.")
+st.write("1. Extract PlantUML code from the uploaded UML or ER diagrams using GPT-4o or GPT-4o Mini.")
 st.write("2. Compare the extracted PlantUML code.")
 st.write("3. Analyse the differences and present them in a structured format.")

-prompts = fetch_prompts_from_google_doc()
+diagram_type = st.selectbox("Select the diagram type", ["UML", "ERD"])
+prompts = fetch_prompts_from_google_doc(diagram_type)

 openai_api_key = st.text_input("OpenAI API key", type="password")
 hf_api_key = st.text_input("Hugging Face API key", type="password")
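
Two notes on this hunk. Streamlit requires st.set_page_config to be the first Streamlit command the script executes, so this placement works only if no other st.* call runs at module level before it. Also, fetch_prompts_from_google_doc now runs on every rerun, i.e. on every widget interaction. A hypothetical sketch that caches the fetched prompts per diagram type with st.cache_data; the cached_prompts wrapper name and the ttl value are illustrative, not part of this commit:

    import streamlit as st

    @st.cache_data(ttl=600)  # cache per diagram type; the 10-minute ttl is arbitrary
    def cached_prompts(diagram_type: str):
        return fetch_prompts_from_google_doc(diagram_type)

    diagram_type = st.selectbox("Select the diagram type", ["UML", "ERD"])
    prompts = cached_prompts(diagram_type)
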
@@ -282,22 +293,22 @@ if openai_api_key and hf_api_key:
     col1, col2 = st.columns(2)
     with col1:
         uploaded_instructor_solution = st.file_uploader(
-            "Upload Instructor UML Diagram", type=["jpg", "jpeg", "png"]
+            "Upload Instructor " + ("UML" if diagram_type == 'UML' else "ER") + " Diagram", type=["jpg", "jpeg", "png"]
         )
     with col2:
         uploaded_student_solution = st.file_uploader(
-            "Upload Student UML Diagram", type=["jpg", "jpeg", "png"]
+            "Upload Student " + ("UML" if diagram_type == 'UML' else "ER") + " Diagram", type=["jpg", "jpeg", "png"]
         )

     if (uploaded_instructor_solution is not None and uploaded_student_solution is not None):
         try:
             with st.spinner(
-                "Extracting PlantUML code from the uploaded UML diagrams..."
+                "Extracting PlantUML code from the uploaded " + ("UML" if diagram_type == 'UML' else "ER") + " diagrams..."
             ):
                 with col1:
                     st.image(
                         uploaded_instructor_solution,
-                        caption="Uploaded Instructor UML Diagram",
+                        caption="Uploaded Instructor " + ("UML" if diagram_type == 'UML' else "ER") + " Diagram",
                         use_container_width=True,
                     )
                     st.write("")
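
The ternary ("UML" if diagram_type == 'UML' else "ER") is repeated in every uploader label, spinner message, and caption in this and the following hunks. A hypothetical tidy-up, not in this commit, that computes the label once, shown here for the two uploaders:

    import streamlit as st

    diagram_type = st.selectbox("Select the diagram type", ["UML", "ERD"])
    # Compute the label once instead of repeating the ternary in every string.
    diagram_label = "UML" if diagram_type == "UML" else "ER"

    uploaded_instructor_solution = st.file_uploader(
        f"Upload Instructor {diagram_label} Diagram", type=["jpg", "jpeg", "png"]
    )
    uploaded_student_solution = st.file_uploader(
        f"Upload Student {diagram_label} Diagram", type=["jpg", "jpeg", "png"]
    )
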
@@ -308,7 +319,7 @@ if openai_api_key and hf_api_key:
                     st.write("")
                     st.image(
                         uploaded_student_solution,
-                        caption="Uploaded Student UML Diagram",
+                        caption="Uploaded Student " + ("UML" if diagram_type == 'UML' else "ER") + " Diagram",
                         use_container_width=True,
                     )
                     st.write("")
@@ -331,8 +342,8 @@ if openai_api_key and hf_api_key:
                     height=600,
                 )

-            st.subheader("Step 2: UML Diagram Comparison")
-            with st.spinner("Comparing instructor and student UML diagrams..."):
+            st.subheader("Step 2: " + ("UML" if diagram_type == 'UML' else "ER") + " Diagram Comparison")
+            with st.spinner("Comparing instructor and student " + ("UML" if diagram_type == 'UML' else "ER") + " diagrams..."):
                 differences = compare_plantuml(
                     client_openai,
                     client_hf_mistral,
@@ -340,7 +351,8 @@ if openai_api_key and hf_api_key:
                     plantuml_instructor_solution,
                     plantuml_student_solution,
                     model_choice_step2,
-                    prompts
+                    prompts,
+                    diagram_type
                 )
                 with st.expander("View differences"):
                     for difference in differences.split("\n"):
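
A small hypothetical sketch of the same call with the trailing arguments passed by keyword, which keeps the long positional list easier to audit. The earlier positional arguments are reconstructed from the compare_plantuml signature rather than shown in this hunk, so treat them as illustrative:

    differences = compare_plantuml(
        client_openai,
        client_hf_mistral,
        client_hf_llama,
        plantuml_instructor_solution,
        plantuml_student_solution,
        model_choice_step2,
        prompts=prompts,
        diagram_type=diagram_type,
    )
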