from langchain_openai import ChatOpenAI
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from dotenv import load_dotenv
import os

load_dotenv()  # load variables from a local .env file into the process environment
openai_key = os.getenv(
    "OPENAI_API_KEY"
)  # NOTE(review): may be None — no validation here. A missing key only
# surfaces later as an exception inside get_response(), which returns it as
# an error string. Consider failing fast at startup instead:
#     if not openai_key:
#         raise ValueError("OpenAI API key not found in environment variables.")

def get_response(user_query, chat_history, context):
    """Generate a model answer to ``user_query`` via an OpenAI chat chain.

    Args:
        user_query: The user's current question.
        chat_history: Prior conversation turns, interpolated into the prompt.
        context: Background information made available to the model.

    Returns:
        The model's answer as a single string; a fallback message when the
        model produced no output; or an error description string if the
        call failed (this function never raises).
    """
    template = """
    You are a helpful assistant. Answer the following questions considering the background information of the conversation:

    Chat History: {chat_history}

    Background Information: {context}

    User question: {user_question}
    """

    try:
        prompt = ChatPromptTemplate.from_template(template)
        llm = ChatOpenAI(api_key=openai_key)  # single instantiation (was built twice)
        chain = prompt | llm | StrOutputParser()

        # Stream chunks are partial token fragments; concatenate them
        # directly — joining with " " would insert spurious spaces
        # inside words.
        response = "".join(
            chain.stream(
                {
                    "chat_history": chat_history,
                    "context": context,
                    "user_question": user_query,
                }
            )
        )
        # A generator is always truthy, so the previous `if value:` check
        # could never detect an empty result; test the joined text instead.
        return response if response else "No response received from model."
    except Exception as e:
        # Surface failures as text rather than raising, preserving the
        # function's existing error-string contract for callers.
        return f"Error in generating response: {str(e)}"