import streamlit as st
from openai import OpenAI
import glob
import pickle
from langchain_community.vectorstores import Chroma
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain.callbacks import get_openai_callback
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableParallel
from documents import documents

docs = documents

OPENAI_API_KEY = st.secrets["OPENAI_API_KEY"]

# Get all the filenames from the docs folder
# files = glob.glob("./docs/*.txt")

# Load files into readable documents
# docs = []
# for file in files:
#     loader = UnstructuredFileLoader(file)
#     docs.append(loader.load()[0])

# Config
# Defaults
def_model = "gpt-3.5-turbo"
def_temperature = 0.0
def_k = 3
def_chunk_size = 500
def_chunk_overlap = 0
directory = "./chroma_db"

st.set_page_config(initial_sidebar_state="collapsed")

with st.sidebar:
    # The "Experimental" toggle unlocks the retrieval settings below.
    disabled = not st.toggle("Experimental")

    model = st.selectbox("Model name", [def_model], disabled=True)
    temperature = st.number_input("Temperature", value=def_temperature, min_value=0.0, step=0.2, max_value=1.0, placeholder=def_temperature, disabled=disabled)
    k = st.number_input("Number of documents to include", value=def_k, min_value=1, step=1, placeholder=def_k, disabled=disabled)

    if st.toggle("Splitting", value=True, disabled=disabled):
        # Chunk size defines the size (in characters) of the chunks into which the documents
        # are split, and therefore how much text is fed into the context.
        chunk_size = st.number_input("Chunk size", value=def_chunk_size, step=250, placeholder=def_chunk_size, disabled=disabled)
        chunk_overlap = st.number_input("Chunk overlap", value=def_chunk_overlap, step=10, placeholder=def_chunk_overlap, disabled=disabled)

        text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
        splits = text_splitter.split_documents(docs)

        if chunk_size != def_chunk_size or chunk_overlap != def_chunk_overlap:
            # Non-default settings: keep this vector store in memory only, so the persisted index stays untouched.
            vectorstore = Chroma.from_documents(documents=splits, embedding=OpenAIEmbeddings())
        else:
            vectorstore = Chroma.from_documents(documents=splits, embedding=OpenAIEmbeddings(), persist_directory=directory)
    else:
        vectorstore = Chroma.from_documents(documents=docs, embedding=OpenAIEmbeddings(), persist_directory=f"{directory}/complete")

prompt_template = """
### Wenn du gefragt wirst wer du bist: Du heißt "Volker". Du bist ein Assistent für die Beantwortung von Fragen zu Long-Covid (Post-Covid synonym verwenden).
### Du weißt nichts über Krankheiten wie 'tumorbedingte Fatigue', 'Multiple Sklerose', 'Hashimoto-Thyreoiditis' oder 'Krebs'. Werden Fragen zu diesen Erkrankungen gestellt, beantworte sie mit "Dazu fehlen mir Informationen".
### Du beantwortest keine Fragen zu 'Tod', 'Suizid', 'Selbstverletzung' oder anderen potenziell schädigenden Themen. Werden Fragen zum 'Tod' gestellt, verweise auf den behandelnden Arzt. Bei Fragen zu Suizid verweise auf die Telefonseelsorge: 0800 1110111
### Du gibst keine Ratschläge zur Diagnose, Behandlung oder Therapie. Wenn du die Antwort nicht weißt, sag einfach, dass du es nicht weißt.
### Wenn du allgemeine unspezifische Fragen gestellt bekommst, antworte oberflächlich und frage nach einer präziseren Fragestellung. Antworte immer in ganzen Sätzen und verwende korrekte Grammatik und Rechtschreibung.
Antworte nur auf Deutsch. Antworte kurz mit maximal fünf Sätzen, außer es wird von dir eine ausführlichere Antwort verlangt. Verwende zur Beantwortung der Frage nur den vorhandenen Kontext.

Frage: {question}

Kontext: {context}

Antwort:
"""
# Source: hub.pull("rlm/rag-prompt")

# (1) Retriever
retriever = vectorstore.as_retriever(search_type="similarity_score_threshold", search_kwargs={"score_threshold": 0.3, "k": k})

# (2) Prompt
prompt = ChatPromptTemplate.from_template(prompt_template)

# (3) LLM
# Define the LLM we want to use. Default is "gpt-3.5-turbo" with temperature 0.
# Temperature is a number between 0 and 1: higher values (e.g. 0.8) produce more varied answers,
# lower values (e.g. 0.2) stay closer to the retrieved content, and 0 makes the output effectively deterministic.
llm = ChatOpenAI(model_name=model, temperature=temperature)


def format_docs(docs):
    return "\n\n".join(doc.page_content for doc in docs)


# rag_chain = (
#     {"context": retriever | format_docs, "question": RunnablePassthrough()}
#     | prompt
#     | llm
#     | StrOutputParser()
# )

rag_chain_from_docs = (
    RunnablePassthrough.assign(context=(lambda x: format_docs(x["context"])))
    | prompt
    | llm
    | StrOutputParser()
)

rag_chain = RunnableParallel(
    {"context": retriever, "question": RunnablePassthrough()}
).assign(answer=rag_chain_from_docs)

st.title("🐔 Volker-Chat")


def click_button(prompt):
    st.session_state.clicked = True
    st.session_state['prompt'] = prompt


c = st.container()
c.write("Beispielfragen")
col1, col2, col3 = c.columns(3)
col1.button("Mehr zu 'Lernen'", on_click=click_button, args=["Was macht die Säule 'Lernen' aus?"])
col1.button("Was macht die Fimo Health App?", on_click=click_button, args=["Was macht die Fimo Health App?"])
col2.button("Mehr zu 'Tracken'", on_click=click_button, args=["Was macht die Säule 'Tracken' aus?"])
col2.button("Was ist Pacing?", on_click=click_button, args=["Was ist Pacing?"])
col3.button("Mehr zu 'Handeln'", on_click=click_button, args=["Was macht die Säule 'Handeln' aus?"])

if 'clicked' not in st.session_state:
    st.session_state.clicked = False

if "messages" not in st.session_state:
    st.session_state["messages"] = [{"role": "assistant", "content": "Ahoi! Ich bin Volker. Wie kann ich dir helfen?"}]

for msg in st.session_state.messages:
    st.chat_message(msg["role"]).write(msg["content"])

if st.session_state.clicked:
    # A sample-question button was clicked: answer the stored prompt.
    prompt = st.session_state['prompt']
    st.chat_message("user").write(prompt)
    with get_openai_callback() as cb:
        response = rag_chain.invoke(prompt)
        st.chat_message("assistant").write(response['answer'])
        with st.expander("Kontext ansehen"):
            for citation in response["context"]:
                st.write("[...] ", str(citation.page_content), " [...]")
                st.write(str(citation.metadata['source']))
                st.write("---" * 20)
    with st.sidebar:
        sidebar_c = st.container()
        sidebar_c.success(cb)

if prompt := st.chat_input():
    st.chat_message("user").write(prompt)
    with get_openai_callback() as cb:
        response = rag_chain.invoke(prompt)
        st.chat_message("assistant").write(response['answer'])
        with st.expander("Kontext ansehen"):
            for citation in response["context"]:
                st.write("[...] ", str(citation.page_content), " [...]")
                st.write(str(citation.metadata['source']))
                st.write("---" * 20)
    with st.sidebar:
        sidebar_c = st.container()
        sidebar_c.success(cb)

# cleanup: reset the click flag and drop the collection so documents are not added twice on the next rerun
st.session_state.clicked = False
vectorstore.delete_collection()