# Databricks notebook source
from src.retriever import init_vectorDB_from_doc, retriever
from transformers import AutoTokenizer, pipeline
from typing import List, Optional, Tuple
from langchain.docstore.document import Document as LangchainDocument
from langchain_community.vectorstores import FAISS


def prompt_template(query: str, READER_MODEL_NAME: str, context: str):
    """Build the RAG prompt by rendering the chat messages with the reader model's chat template."""
    prompt_in_chat_format = [
        {
            "role": "system",
            "content": """Using the information contained in the context,
give a comprehensive answer to the question.
Respond only to the question asked; the response should be concise and relevant to the question.
Provide the number of the source document when relevant. If the answer cannot be deduced from the context, do not give an answer. Please answer in French.""",
        },
        {
            "role": "user",
            "content": """Context:
{context}
---
Now here is the question you need to answer.

Question: {query}""",
        },
    ]
    tokenizer = AutoTokenizer.from_pretrained(READER_MODEL_NAME)
    RAG_PROMPT_TEMPLATE = tokenizer.apply_chat_template(
        prompt_in_chat_format, tokenize=False, add_generation_prompt=True
    )
    return RAG_PROMPT_TEMPLATE


def answer_with_rag(
    query: str,
    embedding_model,
    vectorDB: FAISS,
    READER_MODEL_NAME: str,
    reranker,
    llm: pipeline,
    num_doc_before_rerank: int = 5,
    num_final_relevant_docs: int = 5,
    rerank: bool = True,
) -> Tuple[str, List[LangchainDocument]]:
    # Retrieve (and optionally rerank) the documents most relevant to the query
    relevant_docs = retriever(query, vectorDB, reranker, num_doc_before_rerank, num_final_relevant_docs, rerank)

    # Concatenate the retrieved passages into a single context block
    context = "\nExtracted documents:\n"
    context += "".join(
        f"Document {i}:::\n" + (doc.page_content if isinstance(doc, LangchainDocument) else str(doc))
        for i, doc in enumerate(relevant_docs)
    )
    # print("=> Context:")
    # print(context)

    # Build the final prompt from the chat template
    RAG_PROMPT_TEMPLATE = prompt_template(query, READER_MODEL_NAME, context)
    final_prompt = RAG_PROMPT_TEMPLATE.format(query=query, context=context)
    print("=> Final prompt:")
    # print(final_prompt)
    # Generate the answer with the reader LLM
    print("=> Generating answer...")
    answer = llm(final_prompt)[0]["generated_text"]

    return answer, relevant_docs
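
# COMMAND ----------

# Illustrative usage sketch (not part of the original notebook): the model names,
# the embedding wrapper, and the assumed signature of init_vectorDB_from_doc below
# are placeholders chosen for the example; adapt them to the actual helpers in
# src.retriever before running.
from langchain_community.embeddings import HuggingFaceEmbeddings

READER_MODEL_NAME = "HuggingFaceH4/zephyr-7b-beta"  # hypothetical reader model
embedding_model = HuggingFaceEmbeddings(model_name="thenlper/gte-small")  # hypothetical embedder

# Build a tiny FAISS index from one document (call signature of init_vectorDB_from_doc assumed)
docs = [LangchainDocument(page_content="Paris est la capitale de la France.")]
vectorDB = init_vectorDB_from_doc(docs, embedding_model)

# Reader pipeline returning only the newly generated text, as expected by answer_with_rag
reader_llm = pipeline(
    "text-generation",
    model=READER_MODEL_NAME,
    max_new_tokens=256,
    return_full_text=False,
)

answer, sources = answer_with_rag(
    query="Quelle est la capitale de la France ?",
    embedding_model=embedding_model,
    vectorDB=vectorDB,
    READER_MODEL_NAME=READER_MODEL_NAME,
    reranker=None,  # reranking skipped in this sketch
    llm=reader_llm,
    rerank=False,
)
print(answer)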