LuckRafly committed on
Commit
489cc5d
1 Parent(s): 3a03fa8

Upload 3 files

Browse files
Files changed (3) hide show
  1. app.py +62 -0
  2. function.py +50 -0
  3. requirements.txt +7 -0
app.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from function import GetLLMResponse
3
+
4
+ # List of math topics and difficulty levels
5
+ math_topics = {
6
+ "Elementary School Level": ["Basic Arithmetic", "Place Value", "Fraction", "Decimals", "Geomerty"],
7
+ "Middle School Level": ["Algebra", "Ratio and Proportion", "Percentages", "Geometry", "Integers and Rational Numbers"],
8
+ "High School Level": ["Algebra II", "Trigonometry", "Pre-Calculus", "Calculus", "Statistics and Probability"]
9
+ }
10
+
11
+ # Page configuration
12
+ st.set_page_config(page_title="Generate Math Quizzes",
13
+ page_icon="🧮",
14
+ layout="centered",
15
+ initial_sidebar_state="collapsed")
16
+
17
+ # Header and description
18
+ st.title("Generate Math Quizzes 🧮")
19
+ st.text("Choose the difficulty level and topic for your math quizzes.")
20
+
21
+ # User input for quiz generation
22
+ ## Layout in columns
23
+ col1, col2, col3 = st.columns([1, 1, 1])
24
+
25
+ with col1:
26
+ selected_topic_level = st.selectbox('Select Topic Level', list(math_topics.keys()))
27
+
28
+ with col2:
29
+ selected_topic = st.selectbox('Select Topic', math_topics[selected_topic_level])
30
+
31
+ with col3:
32
+ num_quizzes = st.slider('Number Quizzes', min_value=1, max_value= 5, value=1)
33
+
34
+ submit = st.button('Generate Quizzes')
35
+
36
+
37
+ # Final Response
38
+ if submit:
39
+ with st.spinner("Generating Quizzes..."):
40
+ response = GetLLMResponse(selected_topic_level, selected_topic, num_quizzes)
41
+ st.success("Quizzes Generated!")
42
+
43
+ # Display questions and answers in a table
44
+ if response:
45
+ st.subheader("Quiz Questions and Answers:")
46
+ # Prepare data for the table
47
+ col1, col2 = st.columns(2)
48
+ with col1:
49
+ st.subheader("Questions")
50
+ questions = response.get('questions')
51
+ st.write(questions)
52
+
53
+ with col2:
54
+ st.subheader("Answers")
55
+ answers = response.get('answer')
56
+ st.write(answers)
57
+
58
+ else:
59
+ st.warning("No Quiz Questions and Answers")
60
+
61
+ else:
62
+ st.warning("Click the 'Generate Quizzes' button to create quizzes.")
function.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain.prompts import PromptTemplate
2
+ from langchain.llms import CTransformers
3
+ from langchain.chains import LLMChain
4
+ from langchain.chains import SequentialChain
5
+
6
+ config = {'max_new_tokens': 256, 'temperature': 0.1}
7
+
8
+ # Create function for app
9
+ def GetLLMResponse(selected_topic_level,
10
+ selected_topic,
11
+ num_quizzes):
12
+
13
+ # Calling llama model
14
+ llm = CTransformers(model="D:\Code Workspace\DL Model\llama-2-7b-chat.ggmlv3.q8_0.bin",
15
+ model_type = 'llama',
16
+ config = config)
17
+
18
+ # llm = CTransformers(model='TheBloke/Llama-2-7B-Chat-GGML',
19
+ # model_file = 'llama-2-7b-chat.ggmlv3.q8_0.bin',
20
+ # model_type = 'llama',
21
+ # config = config)
22
+
23
+ ## Create LLM Chaining
24
+ questions_template = "Generate a {selected_topic_level} math quiz on the topic of {selected_topic}. Include {num_quizzes} questions without providing answers."
25
+ questions_prompt = PromptTemplate(input_variables=["selected_topic_level", "selected_topic", "num_quizzes"],
26
+ template=questions_template)
27
+ questions_chain = LLMChain(llm= llm,
28
+ prompt = questions_prompt,
29
+ output_key = "questions")
30
+
31
+
32
+ answer_template = "From this Question:\n {questions}\n\n gave me answer to each one of them"
33
+ answer_prompt = PromptTemplate(input_variables = ["questions"],
34
+ template = answer_template)
35
+ answer_chain = LLMChain(llm = llm,
36
+ prompt = answer_prompt,
37
+ output_key = "answer")
38
+
39
+ ## Create Sequential Chaining
40
+ seq_chain = SequentialChain(chains = [questions_chain, answer_chain],
41
+ input_variables = ['selected_topic_level', 'selected_topic', 'num_quizzes'],
42
+ output_variables = ['questions', 'answer'])
43
+
44
+ response = seq_chain({'selected_topic_level': selected_topic_level,
45
+ 'selected_topic': selected_topic,
46
+ 'num_quizzes' : num_quizzes})
47
+
48
+ ## Generate the response from the llama 2 model
49
+ print(response)
50
+ return response
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ sentence-transformers
2
+ uvicorn
3
+ ctransformers
4
+ langchain
5
+ python-box
6
+ streamlit
7
+ pandas