import os
import streamlit as st
from langchain.embeddings import HuggingFaceInstructEmbeddings, HuggingFaceEmbeddings
from langchain.vectorstores.faiss import FAISS
from langchain.chains import ChatVectorDBChain
from huggingface_hub import snapshot_download
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
    ChatPromptTemplate,
    SystemMessagePromptTemplate,
    AIMessagePromptTemplate,
    HumanMessagePromptTemplate,
)
from langchain.schema import (
    AIMessage,
    HumanMessage,
    SystemMessage,
)
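# Streamlit app: conversational Q&A over the CFA Level 1 curriculum.
# Flow: pick an embedding model in the sidebar -> pull a prebuilt FAISS index
# from the Hugging Face Hub -> answer questions with a ChatOpenAI-backed
# ChatVectorDBChain that cites the source pages it used.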
st.set_page_config(page_title="CFA Level 1", page_icon="📖")
#### sidebar section 1 ####
with st.sidebar:
    book = st.radio("Choose an Embedding Model: ",
                    ["Instruct", "Sbert"])
# load embedding models
@st.experimental_singleton(show_spinner=True)
def load_embedding_models(model):
    if model == 'Sbert':
        model_sbert = "sentence-transformers/all-mpnet-base-v2"
        emb = HuggingFaceEmbeddings(model_name=model_sbert)
    elif model == 'Instruct':
        embed_instruction = "Represent the financial paragraph for document retrieval: "
        query_instruction = "Represent the question for retrieving supporting documents: "
        model_instr = "hkunlp/instructor-large"
        emb = HuggingFaceInstructEmbeddings(model_name=model_instr,
                                            embed_instruction=embed_instruction,
                                            query_instruction=query_instruction)
    return emb
st.title(f"Talk to CFA Level 1 Book")
st.markdown(f"#### Have a conversation with the CFA Curriculum by the CFA Institute π")
embeddings = load_embedding_models(book)
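# Streamlit reruns this script on every interaction; the singleton/memo caches
# above and below keep the embedding model, vector store and chain loaded once
# per session instead of rebuilding them on each rerun.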
#### functions ####
@st.experimental_singleton(show_spinner=False)
def load_vectorstore(_embeddings):
    # download the prebuilt FAISS index from the Hugging Face Hub
    cache_dir = "cfa_level_1_cache"
    snapshot_download(repo_id="nickmuchi/CFA_Level_1_Text_Embeddings",
                      repo_type="dataset",
                      revision="main",
                      allow_patterns="CFA_Level_1/*",
                      cache_dir=cache_dir,
                      )
    target_dir = "CFA_Level_1"
    # snapshot_download nests files inside a hashed snapshot folder, so walk
    # the cache tree to locate the target directory
    target_path = None
    for root, dirs, files in os.walk(cache_dir):
        if target_dir in dirs:
            target_path = os.path.join(root, target_dir)
            print(target_path)
            break
    # load the FAISS index with the selected embedding model
    docsearch = FAISS.load_local(folder_path=target_path, embeddings=_embeddings)
    return docsearch
@st.experimental_memo(show_spinner=False)
def load_prompt():
    system_template = """You are an expert in finance, economics, investing, ethics, derivatives and markets.
Use the following pieces of context to answer the user's question. If you don't know the answer,
just say that you don't know, don't try to make up an answer. Provide a source reference.
ALWAYS return a "sources" part in your answer.
The "sources" part should be a reference to the source of the documents from which you got your answer. List all sources used.
The output should be a markdown code snippet formatted in the following schema:
```json
{{
    "answer": "is foo",
    "sources": "xyz"
}}
```
Begin!
----------------
{context}"""
    messages = [
        SystemMessagePromptTemplate.from_template(system_template),
        HumanMessagePromptTemplate.from_template("{question}")
    ]
    prompt = ChatPromptTemplate.from_messages(messages)
    return prompt
@st.experimental_singleton(show_spinner=False)
def load_chain():
    llm = ChatOpenAI(temperature=0)
    qa = ChatVectorDBChain.from_llm(llm,
                                    load_vectorstore(embeddings),
                                    qa_prompt=load_prompt(),
                                    return_source_documents=True)
    return qa
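# get_answer runs the full pipeline for one question and flattens the returned
# source documents into a page list plus quoted extracts for display.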
def get_answer(question):
    chain = load_chain()
    # ChatVectorDBChain takes the question plus a chat history and returns
    # its reply under the "answer" key
    result = chain({"question": question, "chat_history": []})
    answer = result["answer"]
    # pages
    unique_sources = set()
    for item in result['source_documents']:
        unique_sources.add(item.metadata['page'])
    unique_pages = ""
    for item in sorted(unique_sources):
        unique_pages += str(item) + ", "
    # will look like 1, 2, 3,
    pages = unique_pages[:-2]  # removes the last comma and space
    # source text
    full_source = ""
    for item in result['source_documents']:
        full_source += f"- **Page: {item.metadata['page']}**" + "\n" + item.page_content + "\n\n"
    # will look like:
    # - Page: {number}
    #   {extracted text from book}
    extract = full_source
    return answer, pages, extract
#### OpenAI API key ####
api_key = os.environ["OPENAI_API_KEY"]  # ChatOpenAI reads this same environment variable
##### main ####
user_input = st.text_input("Your question", "What is an MBS and who are the main issuer and investors of the MBS market?", key="input")
col1, col2 = st.columns([10, 1])
# show question
col1.write(f"**You:** {user_input}")
# ask button to the right of the displayed question
ask = col2.button("Ask", type="primary")
if ask:
    with st.spinner("this can take about a minute for your first question because some models have to be downloaded 🥺🙏🏻🙏🏻"):
        try:
            answer, pages, extract = get_answer(question=user_input)
        except Exception as e:
            st.write(f"Error with Download: {e}")
            st.stop()
    st.write(f"{answer}")
    # sources
    with st.expander(label=f"From pages: {pages}", expanded=False):
        st.markdown(extract)
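# To run this app locally (assumed setup; package list inferred from the imports above):
#   pip install streamlit "langchain<0.1" openai faiss-cpu sentence-transformers InstructorEmbedding huggingface_hub
#   export OPENAI_API_KEY=...
#   streamlit run app.py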