import time
import logging
import random
import shutil
from json import JSONDecodeError
from typing import List, Dict, Any, Tuple, Optional
from urllib.parse import unquote

import streamlit as st
from markdown import markdown
from annotated_text import annotation
from haystack.document_stores import FAISSDocumentStore
from haystack.nodes import EmbeddingRetriever, FARMReader
from haystack.pipelines import ExtractiveQAPipeline

# FAISS index directory
INDEX_DIR = 'data/index'
QUESTIONS_PATH = 'data/questions.txt'
RETRIEVER_MODEL = "sentence-transformers/multi-qa-mpnet-base-dot-v1"
RETRIEVER_MODEL_FORMAT = "sentence_transformers"
READER_MODEL = "deepset/roberta-base-squad2"
READER_CONFIG_THRESHOLD = 0.15
RETRIEVER_TOP_K = 10
READER_TOP_K = 5


# the following function is cached so that the index and the models are loaded only at startup
@st.cache(hash_funcs={"builtins.SwigPyObject": lambda _: None},
          allow_output_mutation=True)
def start_haystack():
    """Load document store, retriever and reader, and create the pipeline."""
    shutil.copy(f'{INDEX_DIR}/faiss_document_store.db', '.')
    document_store = FAISSDocumentStore(
        faiss_index_path=f'{INDEX_DIR}/my_faiss_index.faiss',
        faiss_config_path=f'{INDEX_DIR}/my_faiss_index.json')
    print(f'Index size: {document_store.get_document_count()}')

    retriever = EmbeddingRetriever(
        document_store=document_store,
        embedding_model=RETRIEVER_MODEL,
        model_format=RETRIEVER_MODEL_FORMAT
    )

    reader = FARMReader(model_name_or_path=READER_MODEL,
                        use_gpu=False,
                        confidence_threshold=READER_CONFIG_THRESHOLD)

    pipe = ExtractiveQAPipeline(reader, retriever)
    return pipe


@st.cache()
def load_questions():
    with open(QUESTIONS_PATH) as fin:
        questions = [line.strip() for line in fin.readlines()
                     if not line.startswith('#')]
    return questions


def set_state_if_absent(key, value):
    if key not in st.session_state:
        st.session_state[key] = value


pipe = start_haystack()


# the pipeline is not passed as a parameter of the following function,
# because it is difficult to cache
@st.cache(persist=True, allow_output_mutation=True)
def query(question: str, retriever_top_k: int = 10, reader_top_k: int = 5):
    """Run the query and get answers."""
    params = {"Retriever": {"top_k": retriever_top_k},
              "Reader": {"top_k": reader_top_k}}
    results = pipe.run(question, params=params)
    return results


def main():
    questions = load_questions()

    # Persistent state
    set_state_if_absent('question', "Where is Twin Peaks?")
    set_state_if_absent('answer', '')
    set_state_if_absent('results', None)
    set_state_if_absent('raw_json', None)
    set_state_if_absent('random_question_requested', False)

    # Small callback to reset the interface when the text of the question changes
    def reset_results(*args):
        st.session_state.answer = None
        st.session_state.results = None
        st.session_state.raw_json = None

    # sidebar style
    st.markdown("""
        """, unsafe_allow_html=True)

    # spotify webplayer
    st.sidebar.markdown("""
        """, unsafe_allow_html=True)

    # Search bar
    question = st.text_input("",
                             value=st.session_state.question,
                             max_chars=100,
                             on_change=reset_results)

    col1, col2 = st.columns(2)
    col1.markdown("", unsafe_allow_html=True)
    col2.markdown("", unsafe_allow_html=True)

    # Run button
    run_pressed = col1.button("Run")

    # Get the next random question from the questions file
    if col2.button("Random question"):
        reset_results()
        question = random.choice(questions)
        # Avoid picking the same question twice (the change is not visible in the UI)
        while question == st.session_state.question:
            question = random.choice(questions)
        st.session_state.question = question
        st.session_state.random_question_requested = True
        # Re-run the script, setting the random question as the textbox value.
        # Unfortunately necessary, as the Random question button is _below_ the textbox.
        raise st.script_runner.RerunException(
            st.script_request_queue.RerunData(None))
    else:
        st.session_state.random_question_requested = False

    run_query = (run_pressed or question != st.session_state.question) \
        and not st.session_state.random_question_requested

    # Get results for the query
    if run_query and question:
        time_start = time.time()
        reset_results()
        st.session_state.question = question
        with st.spinner("🧠    Performing neural search on documents..."):
            try:
                st.session_state.results = query(
                    question, RETRIEVER_TOP_K, READER_TOP_K)
                time_end = time.time()
                print(f'elapsed time: {time_end - time_start}')
            except JSONDecodeError:
                st.error(
                    "👓    An error occurred reading the results. Is the document store working?")
                return
            except Exception as e:
                logging.exception(e)
                st.error("🐞    An error occurred during the request.")
                return

    if st.session_state.results:
        st.write("## Results:")
        alert_irrelevance = True
        if len(st.session_state.results['answers']) == 0:
            st.info("🤔    Haystack is unsure whether any of the documents contain an answer to your question. Try to reformulate it!")

        for count, result in enumerate(st.session_state.results['answers']):
            result = result.to_dict()
            if result["answer"]:
                if alert_irrelevance and result['score'] < 0.50:
                    alert_irrelevance = False
                    st.write("""

Attention, the following answers have low relevance:
                        """, unsafe_allow_html=True)

                answer, context = result["answer"], result["context"]
                start_idx = context.find(answer)
                end_idx = start_idx + len(answer)
                # Hack due to this bug: https://github.com/streamlit/streamlit/issues/3190
                st.write(markdown("- ..." + context[:start_idx]
                                  + str(annotation(answer, "ANSWER", "#3e1c21"))
                                  + context[end_idx:] + "..."),
                         unsafe_allow_html=True)
                name = unquote(result['meta']['name']).replace('_', ' ')
                url = result['meta']['url']
                source = f"[{name}]({url})"
                st.markdown(
                    f"**Score:** {result['score']:.2f} - **Source:** {source}")


main()