"""Gradio front-end wiring for the Spinoza multi-source QA assistant.

Recovered from a whitespace-mangled paste: code newlines had been collapsed
and HTML markup inside template strings appears to have been stripped by a
sanitizer.  Every span that could not be recovered verbatim is flagged with a
NOTE(review) comment — confirm those against version-control history.
"""

import time
import logging

import gradio as gr
import yaml
from huggingface_hub import hf_hub_download, whoami
from langchain.prompts.chat import ChatPromptTemplate

from app.source.backend.document_store import pickle_to_document_store
from app.source.backend.get_prompts import get_qa_prompts
from app.source.backend.llm_utils import get_llm
from app.source.backend.prompt_utils import SpecialTokens, to_chat_instruction
from app.source.frontend.utils import (
    init_env,
    make_html_presse_source,
    make_html_source,
)

init_env()

# Global app configuration; config["prompt_naming"] maps each tab to the
# prompt YAML file and vector database used for that tab.
with open("./app/config.yaml") as f:
    config = yaml.full_load(f)

prompts = {}
for source in config["prompt_naming"]:
    with open(f"./app/prompt_{source}.yaml") as f:
        prompts[source] = yaml.full_load(f)

## Building LLM
print("Building LLM")
model = "gpt35turbo"  # NOTE(review): unused — get_llm() takes no arguments here.
llm = get_llm()

## Loading tools
print("Loading Databases")
# One document store per tab, fetched from the HF dataset hub and unpickled.
# NOTE(review): unpickling downloaded data executes arbitrary code; the
# "SpinozaProject/spinoza-database" repo must be trusted.
qdrants = {
    tab: pickle_to_document_store(
        hf_hub_download(
            repo_id="SpinozaProject/spinoza-database",
            filename=f"database_{tab}.pickle",
            repo_type="dataset",
        )
    )
    for tab in config["prompt_naming"]
}

## Load Prompts
print("Loading Prompts")
# chat_summarize_memory_prompts is declared but never filled in this module;
# kept for interface compatibility with any external importer.
chat_qa_prompts, chat_reformulation_prompts, chat_summarize_memory_prompts = {}, {}, {}
for source, prompt in prompts.items():
    chat_qa_prompt, chat_reformulation_prompt = get_qa_prompts(config, prompt)
    chat_qa_prompts[source] = chat_qa_prompt
    chat_reformulation_prompts[source] = chat_reformulation_prompt

with open("./assets/style.css", "r") as f:
    css = f.read()


def update_tabs(outil, visible_tabs):
    """Return the checked tools as the new set of visible tabs."""
    visible_tabs = outil
    return visible_tabs


special_tokens = SpecialTokens(config)

# Prompt merging the per-tab expert answers into one French synthesis.
# Typos of the recovered template fixed; {question}/{answers} placeholders
# unchanged.
synthesis_template = """You are a factual journalist that summarizes the specialized answers from technical sources.

Based on the following question:
{question}

And the following expert answers:
{answers}

Answer the question, in French.
When using legal answers, keep track of the names of the articles.
When using ADEME answers, name the sources that are mainly used.
List the different elements mentioned, and highlight the agreement points between the sources, as well as the contradictions or differences.
Generate the answer as markdown, with an aerated layout, and headlines in bold.
Start with a general summary, agreements and contradictions, and then go into detail without paraphrasing the experts' answers.
"""

synthesis_prompt = to_chat_instruction(synthesis_template, special_tokens)
synthesis_prompt_template = ChatPromptTemplate.from_messages([synthesis_prompt])


def zip_longest_fill(*args, fillvalue=None):
    """Zip streaming iterators, repeating the last value of exhausted ones.

    Unlike itertools.zip_longest, an iterator that runs out keeps re-yielding
    its most recent value; iteration stops after a full round in which no
    iterator produced a new value (everyone is exhausted and repeating).

    fillvalue is the stand-in used before an iterator has yielded anything.
    (Bug fix: the original accepted this parameter but always used None.)
    """
    iterators = [iter(it) for it in args]
    if not iterators:
        return
    previous = [fillvalue] * len(iterators)
    changed = True
    while changed:
        values = []
        for i, it in enumerate(iterators):
            try:
                values.append(next(it))
            except StopIteration:
                # Exhausted: repeat the last value seen from this stream.
                values.append(previous[i])
        # Stop once a whole round is identical to the previous one.
        changed = any(v != p for v, p in zip(values, previous))
        previous = values.copy()
        yield tuple(values)


def build_data_dict(config):
    """Build, per tab, the field->slot mapping consumed by init_gradio().

    Each slot is {"init_value", "component", "elem_id"}; the "component"
    entries are populated later.  Insertion order matters: it is the order
    in which init_gradio() lays the widgets out.
    """
    data_dict = {}
    for tab, description in config["tabs"].items():
        data_dict[tab] = {
            "tab": {"init_value": tab, "component": None, "elem_id": "tab"},
            "description": {
                "init_value": description,
                "component": None,
                "elem_id": "desc",
            },
            "question": {
                "init_value": None,
                "component": None,
                "elem_id": "question",
            },
            "answer": {"init_value": None, "component": None, "elem_id": "answer"},
            "sources": {"init_value": None, "component": None, "elem_id": "src"},
        }
    return data_dict


def init_gradio(data, config=config):
    """Instantiate (in place) the Gradio components declared in `data`.

    The "question" field becomes an editable Textbox; every other non-tab
    field is rendered as Markdown.  Returns the same dict for chaining.
    """
    for t in data:
        data[t]["tab"]["component"] = gr.Tab(
            data[t]["tab"]["init_value"], elem_id="tab"
        )
        with data[t]["tab"]["component"]:
            for field in data[t]:
                if field == "question":
                    data[t][field]["component"] = gr.Textbox(
                        elem_id=data[t][field]["elem_id"],
                        show_label=False,
                        interactive=True,
                        placeholder="",
                    )
                elif field != "tab":
                    data[t][field]["component"] = gr.Markdown(
                        data[t][field]["init_value"],
                        elem_id=data[t][field]["elem_id"],
                    )
    return data


def add_warning():
    """Static notice shown while the per-tab answers are being generated."""
    return "*Les éléments cochés ont commencé à être généré dans les onglets spécifiques, la synthèse ne sera disponible qu'après la mise à disposition de ces derniers.*"


def format_question(question):
    """Identity formatting hook (kept for interface stability)."""
    return f"{question}"


def parse_question(question):
    """Strip newlines from the question and drop a leading "### " heading."""
    # NOTE(review): in the recovered source the replace() patterns contained
    # literal newline characters; after removing "\n" the second replace is
    # a no-op, kept for fidelity.
    x = question.replace("\n", "").replace("\n\n", "")
    if "### " in x:
        return x.split("### ")[1]
    return x


def reformulate(outils, question, tab, config=config):
    """Stream a tab-specific reformulation of the question.

    Unchecked tabs get a 5-None stream so downstream zips stay aligned.
    """
    if tab in outils:
        return llm.stream(
            chat_reformulation_prompts[config["source_mapping"][tab]],
            {"question": parse_question(question)},
        )
    return iter([None] * 5)


def reformulate_single_question(outils, question, tab, config=config):
    """Streaming wrapper for one tab, throttled for smooth UI updates."""
    for elt in reformulate(outils, question, tab, config=config):
        time.sleep(0.02)
        yield elt


def reformulate_questions(outils, question, config=config):
    """Stream reformulations for all tabs in lockstep."""
    for elt in zip_longest_fill(
        *[reformulate(outils, question, tab, config=config) for tab in config["tabs"]]
    ):
        time.sleep(0.02)
        yield elt


def add_question(question):
    """Identity passthrough used as a Gradio event target."""
    return question


def answer(question, source, outils, tab, config=config):
    """Stream the tab's expert answer grounded on the retrieved `source` text."""
    if tab not in outils:
        return iter([None] * 5)
    if len(source) < 10:  # too short to be a real source dump
        return iter(["Aucune source trouvée, veuillez reformuler votre question"])
    return llm.stream(
        chat_qa_prompts[config["source_mapping"][tab]],
        {
            "question": parse_question(question),
            # NOTE(review): the first replace() pattern was stripped during
            # recovery (likely an HTML tag); as written it is a no-op.
            "sources": source.replace("", "").replace("\n\n", ""),
        },
    )


def answer_single_question(outils, source, question, tab, config=config):
    """Streaming wrapper for one tab, throttled for smooth UI updates."""
    for elt in answer(question, source, outils, tab, config=config):
        time.sleep(0.02)
        yield elt


def answer_questions(outils, *questions_sources, config=config):
    """Stream answers for all tabs.

    `questions_sources` is the per-tab questions followed by the per-tab
    sources, concatenated (first half / second half).
    """
    half = len(questions_sources) // 2
    questions = list(questions_sources[:half])
    sources = list(questions_sources[half:])
    for elt in zip_longest_fill(
        *[
            answer(question, source, outils, tab, config=config)
            for question, source, tab in zip(questions, sources, config["tabs"])
        ]
    ):
        time.sleep(0.02)
        yield elt


def get_source_link(metadata):
    """Deep-link to the source document page (viewer pages are 1-based)."""
    return metadata["file_url"] + f"#page={metadata['content_page_number'] + 1}"


def get_button(i, tag):
    # NOTE(review): the HTML markup of this button was stripped from the
    # recovered source.  get_listener() expects element ids shaped like
    # "btn_{tag}_{i}" toggling a matching "btn_{tag}_{i}_source" div —
    # restore the original <button> markup before shipping.
    return f""""""


def get_html_sources(buttons, cards):
    # NOTE(review): surrounding HTML was stripped from the recovered source;
    # only the caption text survived.  Reconstructed minimally.
    return f"""
Sources utilisées :
{buttons}
{cards}
"""


def get_sources(outils, question, tab, qdrants=qdrants, config=config,
                k=10, min_similarity=0.0):
    """Retrieve the top-k sources for `question` from the tab's vector store.

    Returns (html, text): rendered source cards for the UI and a plain-text
    dump fed to the QA prompt; ("", "") when the tab is unchecked.

    NOTE(review): the original `def` line was lost during recovery — `k` and
    `min_similarity` appeared as free names in the body; they are surfaced
    here as keyword parameters with guessed defaults.  TODO confirm against
    history (they may have come from `config`).
    """
    if tab not in outils:
        return "", ""
    sources = qdrants[tab].similarity_search_with_relevance_scores(
        question.replace("\n", "").replace("\n\n", ""),
        k=k,
        # filter=get_qdrant_filters(filters),
    )
    # Keep only sufficiently relevant documents.
    sources = [(doc, score) for doc, score in sources if score >= min_similarity]
    buttons_ids = list(range(len(sources)))
    buttons = " ".join(get_button(i, tab) for i in buttons_ids)
    # The press tab uses its own card renderer.
    renderer = make_html_presse_source if tab == "Presse" else make_html_source
    formated = "\n\n".join(
        renderer(doc, i, tab, score, config)
        for i, (doc, score) in zip(buttons_ids, sources)
    )
    formated = get_html_sources(buttons, formated) if sources else ""
    text = "\n\n".join(
        f"Doc {str(i)} with source type {elt[0].metadata.get('file_source_type')}:\n"
        + elt[0].page_content
        for i, elt in enumerate(sources)
    )
    return str(formated), str(text)


def retrieve_sources(outils, *questions, qdrants=qdrants, config=config):
    """Fetch sources for every tab; returns all HTML blocks then all texts."""
    results = [
        get_sources(outils, question, tab, qdrants, config)
        for question, tab in zip(questions, config["tabs"])
    ]
    formated_sources = [source[0] for source in results]
    text_sources = [source[1] for source in results]
    return tuple(formated_sources + text_sources)


def get_experts(outils, *answers, config=config):
    """Concatenate the checked tabs' answers, each prefixed by its tab name."""
    return "\n\n".join(
        f"{tab}\n{answers[i]}"
        for i, tab in enumerate(config["tabs"])
        if tab in outils
    )


def get_synthesis(outils, question, *answers, config=config):
    """Stream a French synthesis of the per-tab expert answers.

    Only answers from checked tabs that look substantial (>= 100 chars) are
    merged.  Bug fix: this function is a generator, so the original
    `return "<message>"` in the empty case was swallowed as a StopIteration
    value and the user never saw it — the message is now yielded.  Also uses
    `and` instead of bitwise `&`, and no longer shadows the module-level
    answer() function with a local name.
    """
    selected = []
    for i, tab in enumerate(config["tabs"]):
        if tab in outils and len(str(answers[i])) >= 100:
            # NOTE(review): the first replace() pattern was stripped during
            # recovery (likely an HTML tag); as written it is a no-op.
            selected.append(
                f"{tab}\n{answers[i]}".replace("", "").replace("\n\n", "")
            )
    if not selected:
        yield "Aucune source n'a pu être identifiée pour répondre, veuillez modifier votre question"
        return
    for elt in llm.stream(
        synthesis_prompt_template,
        {
            "question": question.replace("", "").replace("\n\n", ""),
            "answers": "\n\n".join(selected),
        },
    ):
        time.sleep(0.01)
        yield elt


def get_listener():
    """JS snippet toggling a source panel when its button is clicked.

    Button ids are "btn_{tag}_{i}"; the matching panel id is
    "btn_{tag}_{i}_source".  (Original whitespace inside the JS string was
    lost in recovery; tokens are unchanged.)
    """
    return """
    function my_func_body() {
        const body = document.querySelector("body");
        body.addEventListener("click", e => {
            console.log(e)
            const sourceId = "btn_" + e.target.id.split("_")[1] + "_" + e.target.id.split("_")[2] + "_source"
            console.log(sourceId)
            if (document.getElementById(sourceId).style.display === "none") {
                document.getElementById(sourceId).style.display = "";
            } else {
                document.getElementById(sourceId).style.display = "none";
            }
        })
    }
    """


def get_source_template(buttons, divs_source):
    # NOTE(review): truncated in the recovered source (HTML stripped and the
    # string never closed).  Reconstructed minimally after get_html_sources;
    # the original markup presumably interpolated `buttons` and `divs_source`
    # — restore it before shipping.
    return f"""
Sources utilisées :
{buttons}
{divs_source}
"""