# Databricks notebook source
from typing import List, Tuple

from langchain.chains import create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.docstore.document import Document as LangchainDocument
from langchain_core.prompts import ChatPromptTemplate
def prompt_template():
    """Build the chat prompt used by the retrieval chain."""
    system_message = """Using the information contained in the given context, give a comprehensive answer to the question.
            Respond only to the question asked; the response should be concise and relevant to the question.
            Provide the number of the source document when relevant. If the answer cannot be deduced from the context, do not give an answer. Please answer in French.
            {context}
            """

    # "{context}" is filled in by create_stuff_documents_chain; "{input}" is the
    # key under which create_retrieval_chain passes the user query by default.
    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", system_message),
            ("human", "{input}"),
        ]
    )
    return prompt
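
# COMMAND ----------

# Quick illustrative check (not from the original notebook): render the prompt
# with dummy values to inspect the exact messages that will be sent to the model.
messages = prompt_template().format_messages(
    context="Document 0::: texte d'exemple",
    input="Quelle est la capitale de la France ?",
)
print(messages)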

def answer_with_rag(
    query: str, retriever, llm
) -> Tuple[str, List[LangchainDocument]]:
    """Run the RAG chain: retrieve documents for `query`, then generate an answer.

    Returns the generated answer and the retrieved source documents.
    """
    prompt = prompt_template()
    # Stuff the retrieved documents into the prompt's {context} slot
    document_chain = create_stuff_documents_chain(llm, prompt)
    retrieval_chain = create_retrieval_chain(retriever, document_chain)

    # create_retrieval_chain expects a dict with an "input" key and returns
    # "answer" plus "context" (the documents the retriever selected)
    print("=> Generating answer...")
    response = retrieval_chain.invoke({"input": query})

    return response["answer"], response["context"]
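
# COMMAND ----------

# Usage sketch, not part of the original notebook: wires answer_with_rag to a
# tiny in-memory FAISS index and a local HuggingFace pipeline. The embedding
# and generation model ids below are illustrative assumptions, not this
# repo's actual choices.
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.llms import HuggingFacePipeline
from langchain_community.vectorstores import FAISS

docs = [
    LangchainDocument(page_content="Paris est la capitale de la France.", metadata={"source": 1}),
    LangchainDocument(page_content="Berlin est la capitale de l'Allemagne.", metadata={"source": 2}),
]
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
vector_db = FAISS.from_documents(docs, embeddings)
demo_retriever = vector_db.as_retriever(search_kwargs={"k": 2})

demo_llm = HuggingFacePipeline.from_model_id(
    model_id="mistralai/Mistral-7B-Instruct-v0.2",  # assumption: any instruct model works here
    task="text-generation",
    pipeline_kwargs={"max_new_tokens": 256},
)

answer, sources = answer_with_rag("Quelle est la capitale de la France ?", demo_retriever, demo_llm)
print(answer)
for i, doc in enumerate(sources):
    print(f"Document {i}: {doc.metadata}")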