Update app.py
app.py CHANGED
@@ -2,7 +2,6 @@ import streamlit as st
 import openai
 from dotenv import load_dotenv
 import os
-from fpdf import FPDF
 
 # Load the OpenAI API Key
 api_key = st.text_input('Enter your OpenAI API Key', type="password")
@@ -71,41 +70,6 @@ def classic_mbti_weighted(responses):
         mbti_type += trait2
     return mbti_type
 
-# Function to generate PDF report
-def generate_pdf_report(participant_name, responses, mbti_type_classic, mbti_type_llm):
-    pdf = FPDF()
-    pdf.set_auto_page_break(auto=True, margin=15)
-    pdf.add_page()
-
-    # Add a title
-    pdf.set_font("Arial", 'B', 16)
-    pdf.cell(200, 10, txt="FlexTemp Personality Test Report", ln=True, align="C")
-
-    # Add participant name
-    pdf.set_font("Arial", size=12)
-    pdf.ln(10)
-    pdf.cell(200, 10, txt=f"Name: {participant_name}", ln=True)
-
-    # Add the responses
-    pdf.ln(10)
-    pdf.cell(200, 10, txt="Your Answers:", ln=True)
-    for i, question in enumerate(questions):
-        response = responses[i]
-        pdf.cell(200, 10, txt=f"Q{i+1}: {question['text']} - {response}", ln=True)
-
-    # Add MBTI results
-    pdf.ln(10)
-    pdf.cell(200, 10, txt=f"Your MBTI Type (Weighted): {mbti_type_classic}", ln=True)
-    pdf.cell(200, 10, txt=f"Your MBTI Type (AI Prediction): {mbti_type_llm}", ln=True)
-
-    # Save PDF to a memory buffer
-    from io import BytesIO
-    pdf_output = BytesIO()
-    pdf.output(pdf_output)
-    pdf_output.seek(0)  # Reset pointer to the start of the buffer
-
-    return pdf_output
-
 # Streamlit component to display the quiz and handle responses
 def show_mbti_quiz():
     st.title('FlexTemp Personality Test')
@@ -133,7 +97,6 @@ def show_mbti_quiz():
             st.write(f"Your MBTI type based on weighted answers: {mbti_type_classic}")
 
             # You can add LLM-based prediction if needed here (example OpenAI-based model)
-            mbti_type_llm = ""
             if api_key:
                 # Run the LLM (GPT-4, for example) model to generate a personality type.
                 prompt = f"""
@@ -144,7 +107,7 @@ def show_mbti_quiz():
                 """
                 try:
                     response = openai.ChatCompletion.create(
-                        model="gpt-
+                        model="gpt-4o",
                         messages=[{"role": "system", "content": "You are a helpful assistant."},
                                   {"role": "user", "content": prompt}]
                     )
@@ -152,18 +115,6 @@ def show_mbti_quiz():
                     st.write(f"Your MBTI type according to AI: {mbti_type_llm}")
                 except Exception as e:
                     st.error(f"Error occurred: {e}")
-
-            # Generate PDF Report
-            pdf_output = generate_pdf_report(participant_name, responses, mbti_type_classic, mbti_type_llm)
-
-            # Create a download button for the PDF
-            st.download_button(
-                label="Download your MBTI report",
-                data=pdf_output,
-                file_name=f"{participant_name}_MBTI_report.pdf",
-                mime="application/pdf"
-            )
-
         else:
             st.warning("Please answer all the questions!")
 
@@ -175,4 +126,4 @@ def main():
         st.info("Please enter your OpenAI API Key to begin the quiz.")
 
 if __name__ == "__main__":
-    main()
+    main()
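Note: the openai.ChatCompletion.create call kept by this diff is the pre-1.0 interface of the openai Python package and no longer works on openai>=1.0. The snippet below is an illustration only, not part of this commit: a minimal sketch of the equivalent request with the 1.x client, reusing api_key, prompt, and the "gpt-4o" model name from the diff above.

# Illustration only (not part of this commit): the same chat-completion request
# written against the openai>=1.0 client API.
from openai import OpenAI

client = OpenAI(api_key=api_key)  # api_key comes from the Streamlit text input
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": prompt},  # prompt is the quiz-based prompt built in show_mbti_quiz
    ],
)
mbti_type_llm = response.choices[0].message.content.strip()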