Spaces:
Sleeping
Sleeping
import os | |
import random as rand | |
import pandas as pd | |
import time | |
import datetime | |
import base64 | |
import streamlit as st | |
#Importing variables, objects, templates from "common" | |
from common import llm, precise, random, wikipedia #language models | |
from common import standard_definition_dict, question_or_task_dict, rubric_dict #dictionaries | |
from common import prompt_answer_good, prompt_answer_bad, prompt_qc_run, prompt_qc_grade #prompt templates | |
from common import context_chain, frq_chain, evaluation_chain, topic_rand, number #prompting chains | |
from common import qc_answer_good_chain, qc_answer_bad_chain, qc_run_chain, qc_grade_chain #prompting chains | |
from common import trim_text #custom function | |
#script: | |
st.set_page_config(page_title="QC Test run FQR Generator", page_icon="⚙️", | |
menu_items={"About":"Version 1.0 \n\n Not for commercial use.", | |
"Get help":"https://www.linkedin.com/in/alex-c-fischer"}) | |
st.title("Automatized QC Testing Script for Common Core FRQ Generator") | |
with st.sidebar: | |
st.title("Menu") | |
st.link_button(label="Student", url="https://huggingface.co/spaces/AlexCasF/ForRealQuiz") | |
st.link_button(label="Contact", url="https://www.linkedin.com/in/alex-c-fischer/") | |
st.write("The original Test will now be taken by GPT-4.") | |
mode = st.radio("Choose Mode", ["Single Launch (+live generating)", "Serial Launch (+CSV-Download)"]) | |
launch_qc = st.button("Launch") | |
if mode=="Single Launch (+live generating)" and launch_qc: | |
topic_qc = topic_rand.run(number=number) | |
numb_qc = rand.randint(1, 10) | |
standard_qc = "CCSS.ELA-LITERACY.W.4."+str(numb_qc) | |
st.divider() | |
st.subheader("Random topic:") | |
st.write(topic_qc) | |
st.subheader("Random CC standard:") | |
st.write(standard_qc) | |
standard_definition_qc = standard_definition_dict[standard_qc] | |
question_or_task_qc = question_or_task_dict[standard_qc] | |
wikitext_qc = trim_text(wikipedia.run(topic_qc)) | |
st.divider() | |
st.subheader("Context:") | |
context_qc = context_chain.run(chosen_topic=topic_qc, wikitext=wikitext_qc) | |
st.write(context_qc) | |
st.divider() | |
st.subheader("Free Response Question:") | |
frq_qc = frq_chain.run(context=context_qc, standard_definition=standard_definition_qc, question_or_task=question_or_task_qc) | |
st.write(frq_qc) | |
st.divider() | |
st.subheader("Good Answer, according to GPT-4:") | |
answer_good_qc = qc_answer_good_chain.run(context=context_qc, frq=frq_qc, standard=standard_definition_qc) | |
st.write(answer_good_qc) | |
st.divider() | |
st.subheader("Evaluation on 'Good Answer':") | |
evaluation_good_qc = evaluation_chain.run( | |
context=context_qc, rubric=rubric_dict[standard_qc], | |
frq=frq_qc, chosen_answer=answer_good_qc | |
) | |
st.write(evaluation_good_qc) | |
st.divider() | |
st.subheader("Bad Answer, according to GPT-4:") | |
answer_bad_qc = qc_answer_bad_chain.run(context=context_qc, frq=frq_qc, standard=standard_definition_qc) | |
st.write(answer_bad_qc) | |
st.divider() | |
st.subheader("Evaluation on 'Bad Answer':") | |
evaluation_bad_qc = evaluation_chain.run( | |
context=context_qc, rubric=rubric_dict[standard_qc], | |
frq=frq_qc, chosen_answer=answer_bad_qc | |
) | |
st.write(evaluation_bad_qc) | |
st.divider() | |
st.subheader("Quality Control Report:") | |
qc_report = qc_run_chain.run( | |
context=context_qc, frq=frq_qc, rubric=rubric_dict[standard_qc], | |
answer_good=answer_good_qc, evaluation_good=evaluation_good_qc, | |
answer_bad=answer_bad_qc, evaluation_bad=evaluation_bad_qc) | |
st.write(qc_report) | |
with st.form("Overall Accuracy"): | |
st.header("Overall grading of generated content:") | |
qc_grade = qc_grade_chain.run(qc_report=qc_report) | |
st.header(qc_grade) | |
st.write("Want to save this run?") | |
st.write("Menu in upper right corner > Print > PDF") | |
st.form_submit_button("Clear All & Rerun") | |
if mode=="Serial Launch (+CSV-Download)": | |
batch = st.number_input("Number of reruns", min_value=1, max_value=20, value=1, step=1) | |
comment = st.text_input("Comment - note your prompt fine tunings here, to track and analyse their effects") | |
if launch_qc: | |
df = pd.DataFrame(columns=["Round", "Comment", "Standard", "Topic", "Context", "FRQ", "Good Answer", "Good Evaluation", "Bad Answer", "Bad Evaluation", "Quality Control Report", "Overall Accurancy"]) | |
progress = st.progress(0) | |
for i in range(batch): | |
progress.progress((i + 1) / batch) | |
topic_qc = topic_rand.run(number=number) | |
numb_qc = rand.randint(1, 10) | |
standard_qc = "CCSS.ELA-LITERACY.W.4."+str(numb_qc) | |
standard_definition_qc = standard_definition_dict[standard_qc] | |
question_or_task_qc = question_or_task_dict[standard_qc] | |
wikitext_qc = trim_text(wikipedia.run(topic_qc)) | |
context_qc = context_chain.run(chosen_topic=topic_qc, wikitext=wikitext_qc) | |
frq_qc = frq_chain.run(context=context_qc, standard_definition=standard_definition_qc, question_or_task=question_or_task_qc) | |
answer_good_qc = qc_answer_good_chain.run(context=context_qc, frq=frq_qc, standard=standard_definition_qc) | |
evaluation_good_qc = evaluation_chain.run(context=context_qc, rubric=rubric_dict[standard_qc], frq=frq_qc, chosen_answer=answer_good_qc) | |
answer_bad_qc = qc_answer_bad_chain.run(context=context_qc, frq=frq_qc, standard=standard_definition_qc) | |
evaluation_bad_qc = evaluation_chain.run(context=context_qc, rubric=rubric_dict[standard_qc], frq=frq_qc, chosen_answer=answer_bad_qc) | |
qc_report = qc_run_chain.run(context=context_qc, frq=frq_qc, rubric=rubric_dict[standard_qc], answer_good=answer_good_qc, evaluation_good=evaluation_good_qc, answer_bad=answer_bad_qc, evaluation_bad=evaluation_bad_qc) | |
qc_grade = qc_grade_chain.run(qc_report=qc_report) | |
df.loc[len(df.index)] = {"Round":i+1, "Comment":comment, "Standard":standard_qc, | |
"Topic":topic_qc, "Context":context_qc, "FRQ":frq_qc, | |
"Good Answer":answer_good_qc, "Good Evaluation":evaluation_good_qc, | |
"Bad Answer":answer_bad_qc, "Bad Evaluation":evaluation_bad_qc, | |
"Quality Control Report":qc_report, "Overall Accurancy":qc_grade} | |
time.sleep(0.1) | |
progress.empty() | |
csv = df.to_csv(index=False) | |
b64 = base64.b64encode(csv.encode()).decode() | |
now = datetime.datetime.now() | |
timestamp_str = now.strftime("%Y-%m-%d_%H-%M-%S") | |
filename = f"{timestamp_str}_testruns_{batch}_rows.csv" | |
href = f'<a href="data:file/csv;base64,{b64}" download="{filename}">Download Results CSV</a>' | |
st.markdown(href, unsafe_allow_html=True) | |