Spaces:
Sleeping
Sleeping
File size: 1,694 Bytes
4db208a 8514dc9 4db208a 8514dc9 4db208a 8514dc9 4db208a 8514dc9 4db208a 8514dc9 4db208a 8514dc9 4db208a 8514dc9 4db208a 8514dc9 4db208a 8514dc9 4db208a |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 |
from langchain_core.runnables import RunnableLambda
from langchain.schema.runnable import RunnablePassthrough
from data.retriever import Retriever
from langchain_google_genai import ChatGoogleGenerativeAI
from llm.gemini import Gemini
class GenerateQuestionsService:
    """Generate questions for a query via a RAG pipeline (retriever + Gemini).

    `handle` runs two paths side by side: the full RAG chain (retrieved
    context formatted into a prompt and sent to the model) and the bare
    document retriever, returning both results for comparison.
    """

    # NOTE(review): class-level attributes — a single Retriever/Gemini is
    # built at import time and shared by every instance of this service.
    _retrieve = Retriever()
    _model = Gemini()

    # Maximum number of LLM call attempts before giving up.
    _MAX_TRIES = 3

    def handle(self, query: str) -> dict:
        """Run the RAG chain and the plain retriever for *query*.

        Returns:
            dict with keys ``rag_result`` (parsed model output, or the
            fallback message on repeated failure) and ``retriever_result``
            (raw documents from the retriever).
        """
        # LCEL chain: fan out query into {context, question}, then generate.
        rag_chain = {
            "context": self._retrieve.retriever | RunnableLambda(self._format_docs),
            "question": RunnablePassthrough(),
        } | RunnableLambda(self._get_questions)

        rag_result = rag_chain.invoke(query)
        retriever_result = self._retrieve.docs_retriever.invoke(query)
        print("RAG result:", rag_result)
        print("Retriever result:", retriever_result)
        return {"rag_result": rag_result, "retriever_result": retriever_result}

    def _get_questions(self, _dict):
        """Build the prompt from ``_dict['context']``/``_dict['question']``
        and call Gemini, retrying up to ``_MAX_TRIES`` times.

        Returns the parsed questions on success, or a user-facing fallback
        string after all attempts fail.
        """
        messages = self._model.template.format_messages(
            context=_dict["context"],
            question=_dict["question"],
            format_questions_instructions=self._model._format_questions_instructions,
        )
        for _attempt in range(self._MAX_TRIES):
            try:
                # Client is constructed inside the try so a failing
                # construction is also retried, matching prior behavior.
                chat = ChatGoogleGenerativeAI(model="gemini-pro")
                response = chat.invoke(messages)
                return self._model.parser.parse(response.content)
            except Exception as e:
                # Best-effort: log the error and retry the whole call.
                print(e)
        return "Não foi possível gerar as questões."

    def _format_docs(self, docs) -> str:
        """Join retrieved documents' ``page_content`` into one context string."""
        return "\n\n".join(doc.page_content for doc in docs)
|