# originws-app / query_executor.py
# Author: Maurizio Dipierro
# Branch: origin working
# Commit: cd65ba5
import logging
from langchain_openai import ChatOpenAI
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_anthropic import ChatAnthropic
class QuestionAnsweringAssistant:
    """Retrieval-augmented question-answering assistant for OriginWS.

    Wraps a LangChain chat model to (1) condense a follow-up question into a
    standalone question given the chat history, and (2) answer a question
    using context documents retrieved from a vector store.
    """

    # Prompt for the final RAG answer; expects {context} and {question}.
    RAG_TEMPLATE = """
You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise but friendly.
If the question is about yourself, answer you're the digital assistant coach of OriginWS.
<context>
{context}
</context>
Answer the following question:
{question}"""

    # Prompt for rewriting a follow-up into a standalone question;
    # expects {chat_history} and {question}.
    CONDENSE_PROMPT = """
Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.
Here's some example:
user: jungle up... di cosa si tratta?
assistant: Jungle Up è una nuova tipologia di allenamento. Combina tecniche di allenamento ispirate ai movimenti animali.
user: è possibile iscriversi?
Follow Up Input: è possibile iscriversi?
Standalone question: è possibile iscriversi al corso Jungle Up?
----
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:
"""

    def __init__(self, logger: logging.Logger, model_name="gpt-4o-mini", temperature=0):
        """Create the assistant.

        Args:
            logger: Logger used for diagnostics.
            model_name: OpenAI chat model identifier.
            temperature: Sampling temperature passed to the model.
        """
        self.logger = logger
        self.llm = ChatOpenAI(model=model_name, temperature=temperature)
        # Build the RAG prompt once here instead of on every execute_query call.
        self._rag_prompt = ChatPromptTemplate.from_template(self.RAG_TEMPLATE)
        self.logger.info("QuestionAnsweringAssistant initialized with model: %s", model_name)

    def format_docs(self, docs):
        """Join retrieved documents' page content into a single context string.

        Args:
            docs: Iterable of objects exposing a ``page_content`` attribute.

        Returns:
            The documents' contents separated by blank lines ("" if no docs).
        """
        formatted_docs = "\n\n".join(doc.page_content for doc in docs)
        self.logger.debug("Formatted documents for context: %s", formatted_docs)
        return formatted_docs

    def condense_query(self, messages, message):
        """Rephrase the follow-up question to be a standalone question.

        Args:
            messages: Chat history as dicts with ``role`` and ``content`` keys.
            message: The latest user message to rephrase.

        Returns:
            The standalone question text produced by the LLM.
        """
        self.logger.debug("Condensing query. History: %s, Current message: %s", messages, message)
        # Flatten the chat history into "role: content" lines for the prompt.
        chat_history = "\n".join(f"{msg['role']}: {msg['content']}" for msg in messages)
        prompt_input = self.CONDENSE_PROMPT.format(chat_history=chat_history, question=message)
        # The full prompt can be large and may contain user data; log at debug.
        self.logger.debug("Prompt condense: %s", prompt_input)
        response = self.llm.invoke(prompt_input)
        return response.content

    def execute_query(self, question, vectorstore):
        """Run the query against the vectorstore and return a response.

        Args:
            question: Standalone question to answer.
            vectorstore: Store exposing ``similarity_search(query, k=...)``.

        Returns:
            The model's answer as a plain string.
        """
        self.logger.info("Searching for condensed question: %s", question)
        docs = vectorstore.similarity_search(question, k=10)
        self.logger.info("Found %d relevant documents for the query.", len(docs))
        # Inject the formatted documents as {context}, then prompt -> LLM -> str.
        chain = (
            RunnablePassthrough.assign(context=lambda input: self.format_docs(input["context"]))
            | self._rag_prompt
            | self.llm
            | StrOutputParser()
        )
        return chain.invoke({"context": docs, "question": question})