Update app.py
app.py CHANGED
@@ -4,25 +4,25 @@ import streamlit as st
 from langchain_community.llms import OpenAI
 from langchain_google_genai import ChatGoogleGenerativeAI

-
-
+def get_answers(questions,model):
+    st.write("running get answers function answering following questions",questions)

-
+    answer_prompt = (f"Answer the following questions {questions}")

-
-
-
-#
+    if model == "Open AI":
+        llm = OpenAI(temperature=0.7, openai_api_key=st.secrets["OPENAI_API_KEY"])
+        answers = llm(answer_prompt)
+        # return questions

-
-
-
-
-#
+    elif model == "Gemini":
+        llm = ChatGoogleGenerativeAI(model="gemini-pro", google_api_key=st.secrets["GOOGLE_API_KEY"])
+        answers = llm.invoke(answer_prompt)
+        answers = answers.content
+        # return questions.content

-
+    return(answers)
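The hunk above adds get_answers, a second LLM round trip that feeds the generated questions back to the selected model. Note the two call styles the commit uses: the "Open AI" branch calls the langchain_community OpenAI LLM directly as a callable, while the "Gemini" branch goes through invoke() and unwraps .content. A minimal driver sketch, assuming st.secrets holds OPENAI_API_KEY / GOOGLE_API_KEY and that app.py is importable (both assumptions, not part of the commit):

import streamlit as st
from app import get_answers  # assumes app.py is importable; the commit defines get_answers there

# Made-up question text standing in for GetLLMResponse output.
sample_questions = "1. What is 7 x 8?\n2. Simplify 3x + 2x."

# Routes to OpenAI or Gemini depending on the model string chosen in the UI.
answers = get_answers(sample_questions, "Gemini")
st.write(answers)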
@@ -35,7 +35,7 @@ def GetLLMResponse(selected_topic_level, selected_topic,num_quizzes, model):

-    question_prompt = (f'I want you to just generate question with this specification: Generate a {selected_topic_level} math quiz on the topic of
+    question_prompt = (f'I want you to just generate question with this specification: Generate a {selected_topic_level} math quiz on the topic of {selected_topic}. Generate only {num_quizzes} questions not more and without providing answers.')

     st.write("running get llm response and print question prompt",question_prompt)
     if model == "Open AI":
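This hunk rewrites the question prompt as one fully specified instruction (the old line is cut off in this view). With illustrative values plugged in, the f-string renders like this:

# Purely illustrative values; the real ones come from the Streamlit selectors.
selected_topic_level, selected_topic, num_quizzes = "easy", "Algebra", 3
question_prompt = (f'I want you to just generate question with this specification: Generate a {selected_topic_level} math quiz on the topic of {selected_topic}. Generate only {num_quizzes} questions not more and without providing answers.')
print(question_prompt)
# -> I want you to just generate question with this specification: Generate a easy math quiz
#    on the topic of Algebra. Generate only 3 questions not more and without providing answers.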
@@ -51,10 +51,10 @@ def GetLLMResponse(selected_topic_level, selected_topic,num_quizzes, model):

     st.write("print questions",questions)
-
+    answers = get_answers(questions,model)

     # st.write(questions,answers)
-    return(questions)
+    return(questions,answers)
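GetLLMResponse now chains into get_answers and returns a (questions, answers) pair, so every caller has to unpack two values. A sketch of consuming the new return shape (the display code is an assumption, not in this commit):

import streamlit as st
from app import GetLLMResponse  # assumes app.py is importable

questions, answers = GetLLMResponse("easy", "Algebra", 3, "Gemini")

# Both values arrive as plain strings; rendering them per question
# would require parsing the model output first.
st.subheader("Questions")
st.write(questions)
st.subheader("Answers")
st.write(answers)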
@@ -103,7 +103,7 @@ def main():

     # Final Response
     if submit:
-        questions = GetLLMResponse(selected_topic_level, selected_topic, num_quizzes, model)
+        questions,answers = GetLLMResponse(selected_topic_level, selected_topic, num_quizzes, model)
     # st.write("printing the response",questions,answers)
     # with st.spinner("Generating Quizzes..."):
     #     questions,answers = GetLLMResponse(selected_topic_level, selected_topic, num_quizzes, model)
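Taken together, the commit turns the one-shot flow into a two-step chain: main() -> GetLLMResponse() -> get_answers(). A condensed, offline-runnable sketch of that chain with the LLM calls stubbed out (fake_llm is a stand-in, not the app's code):

def fake_llm(prompt):
    # Stand-in for llm(question_prompt) / llm.invoke(answer_prompt).
    return f"<model output for: {prompt[:40]}...>"

def get_answers(questions, model):
    # Second step added by this commit.
    return fake_llm(f"Answer the following questions {questions}")

def GetLLMResponse(level, topic, n, model):
    questions = fake_llm(f"Generate a {level} math quiz on the topic of {topic}. "
                         f"Generate only {n} questions not more and without providing answers.")
    answers = get_answers(questions, model)  # new call added by this commit
    return questions, answers                # was: return questions

questions, answers = GetLLMResponse("easy", "Algebra", 3, "Gemini")
print(questions)
print(answers)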