Spaces: Runtime error
from langchain_community.llms import OpenAI
from langchain_google_genai import ChatGoogleGenerativeAI
import streamlit as st

def get_answers(questions, model):
    st.write("running get_answers function, answering the following questions", questions)
    # Use an f-string so the questions are actually interpolated into the prompt
    answer_prompt = (
        f"I want you to become a teacher and answer this specific question: {questions}. "
        "You should give me a straightforward and concise explanation and answer to each one of them."
    )
    if model == "Open AI":
        llm = OpenAI(temperature=0.7, openai_api_key=st.secrets["OPENAI_API_KEY"])
        answers = llm(answer_prompt)
    elif model == "Gemini":
        llm = ChatGoogleGenerativeAI(model="gemini-pro", google_api_key=st.secrets["GOOGLE_API_KEY"])
        answers = llm.invoke(answer_prompt)
        answers = answers.content
    return answers
def GetLLMResponse(selected_topic_level, selected_topic, num_quizzes, model):
    # f-string so the level, topic, and question count are interpolated into the prompt
    question_prompt = (
        f"I want you to just generate questions with this specification: "
        f"Generate a {selected_topic_level} math quiz on the topic of {selected_topic}. "
        f"Generate only {num_quizzes} questions, not more, and without providing answers."
    )
    st.write("running GetLLMResponse, question prompt:", question_prompt)
    if model == "Open AI":
        llm = OpenAI(temperature=0.7, openai_api_key=st.secrets["OPENAI_API_KEY"])
        questions = llm(question_prompt)
    elif model == "Gemini":
        llm = ChatGoogleGenerativeAI(model="gemini-pro", google_api_key=st.secrets["GOOGLE_API_KEY"])
        questions = llm.invoke(question_prompt)
        questions = questions.content
    st.write("questions:", questions)
    answers = get_answers(questions, model)
    st.write(questions, answers)
    return questions, answers
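
For context, here is a minimal sketch of how these functions might be wired into the Streamlit UI; the widget labels, topic options, and button flow below are assumptions for illustration, not part of the original app:

# Hypothetical Streamlit front end for the functions above (assumed wiring).
st.title("Math Quiz Generator")

selected_topic = st.text_input("Topic", value="Algebra")
selected_topic_level = st.selectbox("Level", ["Beginner", "Intermediate", "Advanced"])
num_quizzes = st.number_input("Number of questions", min_value=1, max_value=10, value=3)
model = st.selectbox("Model", ["Open AI", "Gemini"])

if st.button("Generate quiz"):
    questions, answers = GetLLMResponse(selected_topic_level, selected_topic, num_quizzes, model)
    st.subheader("Questions")
    st.write(questions)
    st.subheader("Answers")
    st.write(answers)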