File size: 7,050 Bytes
2d38629 ba4ff10 161ae7c ba4ff10 161ae7c 2d38629 ba4ff10 2d38629 ba4ff10 161ae7c ba4ff10 9efae50 ba4ff10 2d38629 ba4ff10 161ae7c 2d38629 fc6a751 2d38629 161ae7c ba4ff10 161ae7c |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 |
# Import necessary libraries
import streamlit as st
from langchain_community.document_loaders import PyPDFLoader
import openai
from langchain.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain.chat_models import ChatOpenAI
from fpdf import FPDF
import os
from datetime import datetime, timedelta
# --- Streamlit page layout ---
# Page chrome plus the sidebar widgets that collect every input the app needs.
st.title('Educational Assistant')
st.header('Summary, Quiz Generator, Q&A, and Topics to be Covered')
st.sidebar.title('Drop your PDF here')
# API key is typed per session and masked; it is only held in this variable.
openai_api_key = st.sidebar.text_input("Enter your OpenAI API Key", type="password")
# Restrict uploads to PDF — the loader further down can only parse PDFs.
user_file_upload = st.sidebar.file_uploader(label='', type='pdf')
# Selected mode decides which chain runs in the main body of the script.
option = st.sidebar.radio("Choose an option", ('Generate Summary', 'Generate Quiz', 'Ask a Question', 'Topics to be Covered'))
# The question box is shown only in Q&A mode; it stays None otherwise so the
# main body can distinguish "Q&A selected but no question entered yet".
question_input = None
if option == 'Ask a Question':
    question_input = st.text_input("Enter your question about the document:")
# Function to generate a PDF and allow download
def generate_pdf(response, filename="response.pdf"):
    """Write *response* into a PDF file and return the file path.

    A Unicode TrueType font is tried first so non-Latin characters survive;
    if the font file ('arialuni.ttf') is not present on this system — the
    original crashed here — fall back to the built-in Helvetica core font
    and degrade characters it cannot encode instead of failing.

    :param response: text to render into the PDF
    :param filename: output path for the generated file
    :return: the path the PDF was written to
    """
    pdf = FPDF()
    pdf.add_page()
    try:
        # Unicode-capable font; requires 'arialuni.ttf' next to the script
        # or on fpdf's font search path.
        pdf.add_font('ArialUnicode', '', 'arialuni.ttf', uni=True)
        pdf.set_font('ArialUnicode', '', 12)
        text = response
    except (RuntimeError, FileNotFoundError, OSError):
        # Core fonts are latin-1 only: replace unencodable characters so
        # multi_cell does not raise on them.
        pdf.set_font('Helvetica', '', 12)
        text = response.encode('latin-1', 'replace').decode('latin-1')
    pdf.multi_cell(0, 10, text)
    # Persist to disk; Streamlit reads the file back for the download button.
    pdf.output(filename)
    return filename
def _ordinal_date(d):
    """Format a date like '1st Jan 2025' / '22nd Mar 2025'.

    Replaces the original hard-coded '%dth' format, which produced
    '01th', '02th', '03th', ... for every day of the month.
    """
    if 10 <= d.day % 100 <= 20:
        suffix = "th"
    else:
        suffix = {1: "st", 2: "nd", 3: "rd"}.get(d.day % 10, "th")
    return f"{d.day}{suffix} {d.strftime('%b %Y')}"


def _offer_pdf_download(label, text, filename):
    """Render *text* as a PDF and attach a Streamlit download button.

    Opens the generated file with a context manager so the handle is closed
    deterministically (the original leaked an open file object per rerun).
    """
    pdf_path = generate_pdf(text, filename=filename)
    with open(pdf_path, "rb") as fh:
        st.download_button(label, data=fh.read(), file_name=pdf_path, mime="application/pdf")


if openai_api_key:
    # Make the key available to the bare `openai` client as well as LangChain.
    openai.api_key = openai_api_key

    if user_file_upload:
        # PyPDFLoader reads from a path on disk, so persist the upload first.
        pdf_data = user_file_upload.read()
        with open("temp_pdf_file.pdf", "wb") as f:
            f.write(pdf_data)

        # Load the PDF and split it into per-page documents.
        loader = PyPDFLoader("temp_pdf_file.pdf")
        data = loader.load_and_split()

        # All four chains use an identically-configured model and parser, so
        # build them once instead of four times (the original instantiated
        # four separate ChatOpenAI objects with the same settings).
        llm = ChatOpenAI(model="gpt-4o-mini", openai_api_key=openai_api_key)
        output_parser = StrOutputParser()

        ## Prompt template for the summary chain
        prompt_1 = ChatPromptTemplate.from_messages(
            [
                ("system", "You are a smart assistant. Give a summary of the user's PDF. Be polite."),
                ("user", "{data}")
            ]
        )
        chain_1 = prompt_1 | llm | output_parser

        ## Prompt template for the quiz chain
        prompt_2 = ChatPromptTemplate.from_messages(
            [
                ("system", "You are a smart assistant. Generate 10 multiple-choice quiz questions with 4 options each (including correct and incorrect options) from the user's PDF. Please also include the correct answer in your response. Be polite."),
                ("user", "{data}")
            ]
        )
        chain_2 = prompt_2 | llm | output_parser

        ## Prompt template for question answering
        prompt_3 = ChatPromptTemplate.from_messages(
            [
                ("system", "You are a smart assistant. Answer the user's question based on the content of the PDF. Be polite."),
                ("user", "{data}\n\nUser's question: {question}")
            ]
        )
        chain_3 = prompt_3 | llm | output_parser

        ## Prompt template for the 7-day topic plan
        prompt_4 = ChatPromptTemplate.from_messages(
            [
                ("system", "You are a smart assistant. Analyze the user's PDF and generate 7 topics based on the content for the next 7 days. Be polite."),
                ("user", "{data}")
            ]
        )
        chain_4 = prompt_4 | llm | output_parser

        if option == 'Generate Summary':
            # Summarize the whole document and offer the result as a PDF.
            summary_response = chain_1.invoke({'data': data})
            st.write(summary_response)
            _offer_pdf_download("Download Summary as PDF", summary_response, "summary_response.pdf")
        elif option == 'Generate Quiz':
            # Generate the 10-question quiz and offer it as a PDF.
            quiz_response = chain_2.invoke({'data': data})
            st.write(quiz_response)
            _offer_pdf_download("Download Quiz as PDF", quiz_response, "quiz_response.pdf")
        elif option == 'Ask a Question' and question_input:
            # Only call the model after an explicit click, so typing alone
            # does not trigger billable API requests on every rerun.
            if st.button("Generate Answer"):
                question_answer_response = chain_3.invoke({'data': data, 'question': question_input})
                st.write(question_answer_response)
                _offer_pdf_download("Download Answer as PDF", question_answer_response, "question_answer_response.pdf")
        elif option == 'Topics to be Covered':
            # Ask for 7 topics; the model is expected to emit one per line.
            topics_response = chain_4.invoke({'data': data})
            topics = topics_response.split("\n")
            # Build a 7-row (date, topic) table starting from today.
            start_date = datetime.today()
            table_data = []
            for i in range(7):
                day_date = start_date + timedelta(days=i)
                topic = topics[i] if i < len(topics) else "Topic not available"
                table_data.append([_ordinal_date(day_date), topic])
            st.write("### Topics to be Covered in the Next 7 Days")
            st.table(table_data)
else:
    st.sidebar.warning("Please enter your OpenAI API Key to proceed.")
|