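"""LegisQA: a Streamlit app for asking questions about US congressional
legislation (113th-118th congresses) using retrieval augmented generation
over a Pinecone index of bill text."""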
from collections import defaultdict
import json
import os
import re

from langchain.tools.retriever import create_retriever_tool
from langchain.agents import AgentExecutor
from langchain.agents import create_openai_tools_agent
from langchain.agents.format_scratchpad.openai_tools import format_to_openai_tool_messages
from langchain.agents.output_parsers.openai_tools import OpenAIToolsAgentOutputParser
from langchain_core.documents import Document
from langchain_core.prompts import PromptTemplate
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.prompts import MessagesPlaceholder
from langchain_core.messages import AIMessage
from langchain_core.messages import HumanMessage
from langchain_core.runnables import RunnableParallel
from langchain_core.runnables import RunnablePassthrough
from langchain_core.output_parsers import StrOutputParser
from langchain_community.callbacks import get_openai_callback
from langchain_community.callbacks import StreamlitCallbackHandler
from langchain_community.embeddings import HuggingFaceBgeEmbeddings
from langchain_community.vectorstores.utils import DistanceStrategy
from langchain_openai import ChatOpenAI
from langchain_anthropic import ChatAnthropic
from langchain_pinecone import PineconeVectorStore
from pinecone import Pinecone
import streamlit as st
st.set_page_config(layout="wide", page_title="LegisQA")

os.environ["LANGCHAIN_API_KEY"] = st.secrets["langchain_api_key"]
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_PROJECT"] = st.secrets["langchain_project"]
os.environ["TOKENIZERS_PARALLELISM"] = "false"
SS = st.session_state
SEED = 292764
CONGRESS_NUMBERS = [113, 114, 115, 116, 117, 118]
SPONSOR_PARTIES = ["D", "R", "L", "I"]

CONGRESS_GOV_TYPE_MAP = {
    "hconres": "house-concurrent-resolution",
    "hjres": "house-joint-resolution",
    "hr": "house-bill",
    "hres": "house-resolution",
    "s": "senate-bill",
    "sconres": "senate-concurrent-resolution",
    "sjres": "senate-joint-resolution",
    "sres": "senate-resolution",
}

OPENAI_CHAT_MODELS = [
    "gpt-3.5-turbo-0125",
    "gpt-4-0125-preview",
]
ANTHROPIC_CHAT_MODELS = [
    "claude-3-opus-20240229",
    "claude-3-sonnet-20240229",
    "claude-3-haiku-20240307",
]
CHAT_MODELS = OPENAI_CHAT_MODELS + ANTHROPIC_CHAT_MODELS
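# link helpers: bioguide pages for sponsors and congress.gov pages for bills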
def get_sponsor_url(bioguide_id: str) -> str:
    return f"https://bioguide.congress.gov/search/bio/{bioguide_id}"


def get_congress_gov_url(congress_num: int, legis_type: str, legis_num: int) -> str:
    lt = CONGRESS_GOV_TYPE_MAP[legis_type]
    return f"https://www.congress.gov/bill/{int(congress_num)}th-congress/{lt}/{int(legis_num)}"
def load_bge_embeddings():
    model_name = "BAAI/bge-small-en-v1.5"
    model_kwargs = {"device": "cpu"}
    encode_kwargs = {"normalize_embeddings": True}
    emb_fn = HuggingFaceBgeEmbeddings(
        model_name=model_name,
        model_kwargs=model_kwargs,
        encode_kwargs=encode_kwargs,
        query_instruction="Represent this question for searching relevant passages: ",
    )
    return emb_fn


def load_pinecone_vectorstore():
    emb_fn = load_bge_embeddings()
    vectorstore = PineconeVectorStore(
        embedding=emb_fn,
        text_key="text",
        distance_strategy=DistanceStrategy.COSINE,
        pinecone_api_key=st.secrets["pinecone_api_key"],
        index_name=st.secrets["pinecone_index_name"],
    )
    return vectorstore
def render_outreach_links():
    nomic_base_url = "https://atlas.nomic.ai/data/gabrielhyperdemocracy"
    nomic_map_name = "us-congressional-legislation-s1024o256nomic"
    nomic_url = f"{nomic_base_url}/{nomic_map_name}/map"
    hf_url = "https://huggingface.co/hyperdemocracy"
    pc_url = "https://www.pinecone.io/blog/serverless"
    st.subheader(":brain: About [hyperdemocracy](https://hyperdemocracy.us)")
    st.subheader(f":world_map: Visualize [nomic atlas]({nomic_url})")
    st.subheader(f":hugging_face: Raw [huggingface datasets]({hf_url})")
    st.subheader(f":evergreen_tree: Index [pinecone serverless]({pc_url})")
def group_docs(docs) -> list[tuple[str, list[Document]]]:
    doc_grps = defaultdict(list)
    # create legis_id groups
    for doc in docs:
        doc_grps[doc.metadata["legis_id"]].append(doc)
    # sort docs in each group by start index
    for legis_id in doc_grps.keys():
        doc_grps[legis_id] = sorted(
            doc_grps[legis_id],
            key=lambda x: x.metadata["start_index"],
        )
    # sort groups by number of docs
    doc_grps = sorted(
        tuple(doc_grps.items()),
        key=lambda x: -len(x[1]),
    )
    return doc_grps
def format_docs(docs):
    """Group docs by bill and format them as a JSON string for the prompt context."""
    doc_grps = group_docs(docs)
    out = []
    for legis_id, doc_grp in doc_grps:
        dd = {
            "legis_id": doc_grp[0].metadata["legis_id"],
            "title": doc_grp[0].metadata["title"],
            "sponsor": doc_grp[0].metadata["sponsor_full_name"],
            "snippets": [doc.page_content for doc in doc_grp],
        }
        out.append(dd)
    return json.dumps(out, indent=4)


def escape_markdown(text):
    MD_SPECIAL_CHARS = r"\`*_{}[]()#+-.!$"
    for char in MD_SPECIAL_CHARS:
        text = text.replace(char, "\\" + char)
    return text
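# build a Pinecone metadata filter from the sidebar widgets (bill id, sponsor,
# congress numbers, sponsor party)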
def get_vectorstore_filter():
    vs_filter = {}
    if SS["filter_legis_id"] != "":
        vs_filter["legis_id"] = SS["filter_legis_id"]
    if SS["filter_bioguide_id"] != "":
        vs_filter["sponsor_bioguide_id"] = SS["filter_bioguide_id"]
    vs_filter = {**vs_filter, "congress_num": {"$in": SS["filter_congress_nums"]}}
    vs_filter = {**vs_filter, "sponsor_party": {"$in": SS["filter_sponsor_parties"]}}
    return vs_filter
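# render one bill's retrieved chunks in an expander with links to congress.gov
# and the sponsor's bioguide page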
def render_doc_grp(legis_id: str, doc_grp: list[Document]):
    first_doc = doc_grp[0]
    congress_gov_url = get_congress_gov_url(
        first_doc.metadata["congress_num"],
        first_doc.metadata["legis_type"],
        first_doc.metadata["legis_num"],
    )
    congress_gov_link = f"[congress.gov]({congress_gov_url})"
    ref = "{} chunks from {}\n\n{}\n\n{}\n\n[{} ({}) ]({})".format(
        len(doc_grp),
        first_doc.metadata["legis_id"],
        first_doc.metadata["title"],
        congress_gov_link,
        first_doc.metadata["sponsor_full_name"],
        first_doc.metadata["sponsor_bioguide_id"],
        get_sponsor_url(first_doc.metadata["sponsor_bioguide_id"]),
    )
    doc_contents = [
        "[start_index={}] ".format(int(doc.metadata["start_index"])) + doc.page_content
        for doc in doc_grp
    ]
    with st.expander(ref):
        st.write(escape_markdown("\n\n...\n\n".join(doc_contents)))
def legis_id_to_link(legis_id: str) -> str:
    congress_num, legis_type, legis_num = legis_id.split("-")
    return get_congress_gov_url(congress_num, legis_type, legis_num)


def legis_id_match_to_link(matchobj):
    mstring = matchobj.string[matchobj.start() : matchobj.end()]
    url = legis_id_to_link(mstring)
    link = f"[{mstring}]({url})"
    return link


def replace_legis_ids_with_urls(text):
    # match legis_ids like "118-s-2293" and wrap them in markdown links
    pattern = r"11[345678]-[a-z]+-\d{1,5}"
    rtext = re.sub(pattern, legis_id_match_to_link, text)
    return rtext
def render_guide():

    st.write(
        """
When you send a query to LegisQA, it will attempt to retrieve relevant content from the past six congresses ([113th-118th](https://en.wikipedia.org/wiki/List_of_United_States_Congresses)), covering 2013 to the present, pass it to a [large language model (LLM)](https://en.wikipedia.org/wiki/Large_language_model), and generate a response. This technique is known as Retrieval Augmented Generation (RAG). You can read [an academic paper](https://proceedings.neurips.cc/paper/2020/hash/6b493230205f780e1bc26945df7481e5-Abstract.html) or [a high level summary](https://research.ibm.com/blog/retrieval-augmented-generation-RAG) for more details. Once the response is generated, the retrieved content is available for inspection, with links to the bills and their sponsors.

## Disclaimer

This is a research project. The RAG technique helps ground the LLM response in context from a trusted source, but it does not guarantee a high quality response. We encourage you to experiment: find questions that work well and questions that fail. A small monthly budget is dedicated to the OpenAI endpoints; once it is used up, queries will stop working until the next month.

## Sidebar Config

Use the `Generative Config` to change LLM parameters.
Use the `Retrieval Config` to change the number of chunks retrieved from our congress corpus and to apply filters to the content before it is retrieved (e.g. restrict to a specific set of congresses). Use the `Prompt Config` to try out different document formatting and prompting strategies.
"""
    )
def render_example_queries():

    with st.expander("Example Queries"):
        st.write(
            """
```
What are the themes around artificial intelligence?
```

```
Write a well cited 3 paragraph essay on food insecurity.
```

```
Create a table summarizing major climate change ideas with columns legis_id, title, idea.
```

```
Write an action plan to keep social security solvent.
```

```
Suggest reforms that would benefit the Medicaid program.
```
"""
        )
def render_sidebar():

    with st.container(border=True):
        render_outreach_links()

    st.checkbox("escape markdown in answer", key="response_escape_markdown")
    st.checkbox("add legis urls in answer", value=True, key="response_add_legis_urls")

    with st.expander("Generative Config"):
        st.selectbox(label="model name", options=CHAT_MODELS, key="model_name")
        st.slider(
            "temperature", min_value=0.0, max_value=2.0, value=0.0, key="temperature"
        )
        st.slider(
            "max_output_tokens", min_value=512, max_value=1024, key="max_output_tokens"
        )
        st.slider("top_p", min_value=0.0, max_value=1.0, value=1.0, key="top_p")

    with st.expander("Retrieval Config"):
        st.slider(
            "Number of chunks to retrieve",
            min_value=1,
            max_value=32,
            value=8,
            key="n_ret_docs",
        )
        st.text_input("Bill ID (e.g. 118-s-2293)", key="filter_legis_id")
        st.text_input("Bioguide ID (e.g. R000595)", key="filter_bioguide_id")
        st.multiselect(
            "Congress Numbers",
            CONGRESS_NUMBERS,
            default=CONGRESS_NUMBERS,
            key="filter_congress_nums",
        )
        st.multiselect(
            "Sponsor Party",
            SPONSOR_PARTIES,
            default=SPONSOR_PARTIES,
            key="filter_sponsor_parties",
        )
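# single turn RAG tab: retrieve chunks using the sidebar filters, format them as
# JSON context, and ask the LLM to answer with citations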
def render_query_rag_tab():

    render_example_queries()

    QUERY_TEMPLATE = """Use the following excerpts from US congressional legislation to respond to the user's query. The excerpts are formatted as a JSON list. Each JSON object has "legis_id", "title", "sponsor", and "snippets" keys. If a snippet is useful in writing part of your response, then cite the "title", "legis_id", and "sponsor" in the response. If you don't know how to respond, just tell the user.
---
Congressional Legislation Excerpts:
{context}
---
Query: {query}"""

    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", "You are an expert legislative analyst."),
            ("human", QUERY_TEMPLATE),
        ]
    )

    with st.form("query_form"):
        st.text_area("Enter query:", key="query")
        query_submitted = st.form_submit_button("Submit")

    if query_submitted:
        vs_filter = get_vectorstore_filter()
        retriever = vectorstore.as_retriever(
            search_kwargs={"k": SS["n_ret_docs"], "filter": vs_filter},
        )
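        # LCEL chain: run retrieval and pass the raw query through in parallel,
        # then add the JSON formatted context and the generated answer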
        rag_chain = (
            RunnableParallel(
                {
                    "docs": retriever,  # list of docs
                    "query": RunnablePassthrough(),  # str
                }
            )
            .assign(context=(lambda x: format_docs(x["docs"])))
            .assign(output=prompt | llm | StrOutputParser())
        )
        if SS["model_name"] in OPENAI_CHAT_MODELS:
            with get_openai_callback() as cb:
                SS["out"] = rag_chain.invoke(SS["query"])
                SS["cb"] = cb
        else:
            SS["out"] = rag_chain.invoke(SS["query"])

    if "out" in SS:
        out_display = SS["out"]["output"]
        if SS["response_escape_markdown"]:
            out_display = escape_markdown(out_display)
        if SS["response_add_legis_urls"]:
            out_display = replace_legis_ids_with_urls(out_display)

        with st.container(border=True):
            st.write("Response")
            st.info(out_display)

        if "cb" in SS:
            with st.container(border=True):
                st.write("API Usage")
                st.warning(SS["cb"])

        with st.container(border=True):
            doc_grps = group_docs(SS["out"]["docs"])
            st.write(
                "Retrieved Chunks (note that you may need to 'right click' on links in the expanders to follow them)"
            )
            for legis_id, doc_grp in doc_grps:
                render_doc_grp(legis_id, doc_grp)

        with st.expander("Debug"):
            st.write(SS["out"])
def render_query_agent_tab():

    from custom_tools import get_retriever_tool
    from langchain_community.tools import WikipediaQueryRun
    from langchain_community.utilities import WikipediaAPIWrapper
    from langchain.agents import load_tools
    from langchain.agents import create_react_agent
    from langchain import hub

    if SS["model_name"] not in OPENAI_CHAT_MODELS:
        st.write("only supported with OpenAI for now")
        return

    vs_filter = get_vectorstore_filter()
    retriever = vectorstore.as_retriever(
        search_kwargs={"k": SS["n_ret_docs"], "filter": vs_filter},
    )
    legis_retrieval_tool = get_retriever_tool(
        retriever,
        "search_legislation",
        "Searches and returns excerpts from congressional legislation. Always call this tool first.",
        format_docs,
    )
    api_wrapper = WikipediaAPIWrapper(top_k_results=4, doc_content_chars_max=800)
    wiki_search_tool = WikipediaQueryRun(api_wrapper=api_wrapper)
    ddg_tool = load_tools(["ddg-search"])[0]
    avatars = {"human": "user", "ai": "assistant"}
    tools = [legis_retrieval_tool, wiki_search_tool, ddg_tool]
    llm_with_tools = llm.bind_tools(tools)
    agent_prompt = ChatPromptTemplate.from_messages(
        [
            ("system", "You are a helpful assistant."),
            ("human", "{input}"),
            MessagesPlaceholder(variable_name="agent_scratchpad"),
        ]
    )
    # tool-calling agent: intermediate steps are formatted into tool messages
    # and the model's tool calls are parsed from each response
    agent = (
        {
            "input": lambda x: x["input"],
            "agent_scratchpad": lambda x: format_to_openai_tool_messages(
                x["intermediate_steps"]
            ),
        }
        | agent_prompt
        | llm_with_tools
        | OpenAIToolsAgentOutputParser()
    )
    agent_executor = AgentExecutor(
        agent=agent,
        tools=tools,
        return_intermediate_steps=True,
        handle_parsing_errors=True,
        verbose=True,
    )

    if user_input := st.chat_input(key="single_query_agent_input"):
        st.chat_message("user").write(user_input)
        with st.chat_message("assistant"):
            st_callback = StreamlitCallbackHandler(st.container())
            response = agent_executor.invoke(
                {"input": user_input}, {"callbacks": [st_callback]}
            )
            st.write(response["output"])
def render_chat_agent_tab():
    st.write("Coming Soon")


##################
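# main script: build the sidebar, construct the selected LLM, load the vector
# store, and lay out the tabs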
st.title(
    ":classical_building: LegisQA - Chat With Congressional Bills :classical_building:"
)

with st.sidebar:
    render_sidebar()

if SS["model_name"] in OPENAI_CHAT_MODELS:
    llm = ChatOpenAI(
        model_name=SS["model_name"],
        temperature=SS["temperature"],
        openai_api_key=st.secrets["openai_api_key"],
        model_kwargs={"top_p": SS["top_p"], "seed": SEED},
        max_tokens=SS["max_output_tokens"],
    )
elif SS["model_name"] in ANTHROPIC_CHAT_MODELS:
    llm = ChatAnthropic(
        model_name=SS["model_name"],
        temperature=SS["temperature"],
        anthropic_api_key=st.secrets["anthropic_api_key"],
        top_p=SS["top_p"],
        max_tokens_to_sample=SS["max_output_tokens"],
    )
else:
    raise ValueError(f"unrecognized model name: {SS['model_name']}")
vectorstore = load_pinecone_vectorstore()

query_rag_tab, query_agent_tab, chat_agent_tab, guide_tab = st.tabs([
    "query_rag",
    "query_agent",
    "chat_agent",
    "guide",
])

with query_rag_tab:
    render_query_rag_tab()

with query_agent_tab:
    render_query_agent_tab()

with chat_agent_tab:
    render_chat_agent_tab()

with guide_tab:
    render_guide()