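# Streamlit Q&A app for national climate policy documents (Southern Africa focus).
# Retrieval: Pinecone vector store with HuggingFace sentence embeddings;
# generation: OpenAI chat completions streamed into the UI, with source references
# extracted from the model output and displayed below the answer.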
import streamlit as st
import os
import pkg_resources
# # Using this wacky hack to get around the massively ridiculous managed env loading order
# def is_installed(package_name, version):
#     try:
#         pkg = pkg_resources.get_distribution(package_name)
#         return pkg.version == version
#     except pkg_resources.DistributionNotFound:
#         return False
# @st.cache_resource
# def install_packages():
#     install_commands = []
#     if not is_installed("spaces", "0.12.0"):
#         install_commands.append("pip install spaces==0.12.0")
#     if not is_installed("pydantic", "1.8.2"):
#         install_commands.append("pip install pydantic==1.8.2")
#     if install_commands:
#         os.system(" && ".join(install_commands))
# # install packages if necessary
# # install_packages()
import re
import json
import numpy as np
import pandas as pd
import getpass
from dotenv import load_dotenv, find_dotenv
from pinecone import Pinecone, ServerlessSpec
from langchain_pinecone import PineconeVectorStore
from langchain_huggingface import HuggingFaceEmbeddings
# from langchain_core.output_parsers import StrOutputParser
# from langchain_core.runnables import RunnablePassthrough
# from langchain_openai import ChatOpenAI
from langchain.docstore.document import Document
from openai import OpenAI
# Load credentials from the environment (.env locally, secrets in managed envs);
# never hard-code API keys in source
load_dotenv(find_dotenv())
client = OpenAI(
    organization='org-x0YBcOjkdPyf6ExxWCkmFHAj',
    project='proj_40oH22n9XudeKL2rgka1IQ5B',
    api_key=os.environ.get("OPENAI_API_KEY"),
)
pinecone_api_key = os.environ.get("PINECONE_API_KEY")
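# Cached resources: the embedding model and the Pinecone-backed vector store are
# created once and reused across reruns via st.cache_resource.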
@st.cache_resource
def initialize_embeddings(model_name: str = "all-mpnet-base-v2"):
    embeddings = HuggingFaceEmbeddings(model_name=model_name)
    return embeddings
@st.cache_resource
def initialize_vector_store(pinecone_api_key: str, index_name: str):
    # Initialize Pinecone
    pc = Pinecone(api_key=pinecone_api_key)
    # Access the index
    index = pc.Index(index_name)
    # Use the cached embeddings
    embeddings = initialize_embeddings()
    # Create the vector store
    vector_store = PineconeVectorStore(index=index, embedding=embeddings, text_key='content')
    return vector_store, embeddings
# Unpack the tuple into both vector_store and embeddings
vector_store, embeddings = initialize_vector_store(pinecone_api_key, index_name="cpv-full-southern-africa-test")
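# Retriever: embed the query, run a top-k similarity search against the vector store
# with optional country / vulnerability-category metadata filters, and rebuild the hits
# as Document objects carrying a 1-based 'ref_id' used for citations in the prompt.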
def get_docs(query, country = [], vulnerability_cat = []):
    if not country:
        country = "All Countries"
    if not vulnerability_cat:
        if country == "All Countries":
            filters = None
        else:
            filters = {'country': {'$in': country}}
    else:
        if country == "All Countries":
            filters = {'vulnerability_cat': {'$in': vulnerability_cat}}
        else:
            filters = {'country': {'$in': country}, 'vulnerability_cat': {'$in': vulnerability_cat}}
    docs = vector_store.similarity_search_by_vector_with_score(
        embeddings.embed_query(query),
        k=20,
        filter=filters,
    )
    # Break out the key fields and convert to pandas for filtering
    docs_dict = [{**x[0].metadata, "score": x[1], "content": x[0].page_content} for x in docs]
    df_docs = pd.DataFrame(docs_dict)
    # Reset the index so the row position can serve as the source reference number
    # (used in the prompt and for matching references afterwards)
    df_docs = df_docs.reset_index()
    df_docs['ref_id'] = df_docs.index + 1  # start the reference ids at 1
    # Convert back to Document format
    ls_dict = []
    # Iterate over the dataframe and add the relevant fields to each Document
    for index, row in df_docs.iterrows():
        # Create a Document object for each row
        doc = Document(
            page_content=row['content'],
            metadata={'country': row['country'], 'document': row['document'], 'page': row['page'], 'file_name': row['file_name'], 'ref_id': row['ref_id'], 'vulnerability_cat': row['vulnerability_cat'], 'score': row['score']}
        )
        # Append the Document object to the documents list
        ls_dict.append(doc)
    return ls_dict
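# Prompt template: instructs the model to answer in the style of an academic report and
# to cite sources with '[ref. #]' ids, which get_prompt() embeds in the context using the
# '- &&& [ref. #] document_name &&&:' pattern and get_refs() matches afterwards.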
prompt_template="Answer the given question using the following documents. \
Formulate your answer in the style of an academic report. \
Provide example quotes and citations using extracted text from the documents. \
Use facts and numbers from the documents in your answer. \
ALWAYS include references for information used from documents at the end of each applicable sentence using the format: '[ref. #]', where '[ref. #]' is included in the text provided at the start of each document (demarcated by the pattern '- &&& [ref. #] document_name &&&:'). \
Do not include page numbers in the references. \
If no relevant information to answer the question is present in the documents, just say you don't have enough information to answer."
# Create a list of options for the dropdown
# model_options = ['chatGPT','Llama2']
# Create a list of options for the dropdown
country_options = ['All Countries','Angola','Botswana','Lesotho','Kenya','Malawi','Mozambique','Namibia','Rwanda','South Africa','Zambia','Zimbabwe']
# Create a list of options for the dropdown
vulnerability_options = ['All Categories','Agricultural communities', 'Children', 'Coastal communities', 'Ethnic, racial or other minorities', 'Fishery communities', 'Informal sector workers', 'Members of indigenous and local communities', 'Migrants and displaced persons', 'Older persons', 'Persons living in poverty', 'Persons with disabilities', 'Persons with pre-existing health conditions', 'Residents of drought-prone regions', 'Rural populations', 'Sexual minorities (LGBTQI+)', 'Urban populations', 'Women and other genders','Other']
# List of examples
examples = [
    "-",
    "What specific initiatives are presented in the context to address the needs of groups such as women and children to the effects of climate change?",
    "In addition to gender, children, and youth, is there any mention of other groups facing disproportionate impacts from climate change due to their geographic location, socio-economic status, age, gender, health, and occupation?"
]
def get_refs(docs, res):
    '''
    Parse the response for engineered reference ids (refer to prompt template)
    and extract the corresponding documents.
    '''
    res = res.lower()  # Convert to lowercase for matching
    # This pattern should be returned by the model (see prompt template)
    # pattern = r'ref\. (\d+)\]\.'
    pattern = r'ref\. (\d+)'
    ref_ids = [int(match) for match in re.findall(pattern, res)]
    # Build a formatted string of the referenced source passages
    result_str = ""  # Initialize an empty string to store the result
    for i in range(len(docs)):
        ref_id = docs[i].metadata['ref_id']
        if ref_id in ref_ids:
            if docs[i].metadata['document'] == "Supplementary":
                result_str += "**Ref. " + str(ref_id) + " [" + docs[i].metadata['country'] + " " + docs[i].metadata['document'] + ':' + docs[i].metadata['file_name'] + ' p' + str(docs[i].metadata['page']) + '; vulnerabilities: ' + docs[i].metadata['vulnerability_cat'] + "]:** " + "*'" + docs[i].page_content + "'*<br> <br>"  # Add <br> for a line break
            else:
                result_str += "**Ref. " + str(ref_id) + " [" + docs[i].metadata['country'] + " " + docs[i].metadata['document'] + ' p' + str(docs[i].metadata['page']) + '; vulnerabilities: ' + docs[i].metadata['vulnerability_cat'] + "]:** " + "*'" + docs[i].page_content + "'*<br> <br>"  # Add <br> for a line break
    return result_str
# define a special function for putting the prompt together (as we can't use haystack)
def get_prompt(docs, input_query):
    base_prompt = prompt_template
    # Add the reference metadata to each document in the context
    context = ' - '.join(['&&& [ref. ' + str(d.metadata['ref_id']) + '] ' + d.metadata['document'] + ' &&&: ' + d.page_content for d in docs])
    prompt = base_prompt + "; Context: " + context + "; Question: " + input_query + "; Answer:"
    return prompt
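# Generation: retrieve the filtered documents, build the prompt, stream the chat
# completion into the response box, then render the cited source passages underneath.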
def run_query(query, country, model_sel):
    # First call the retriever function using the selected filters
    # (vulnerabilities_cat comes from the sidebar multiselect defined below)
    docs = get_docs(query, country=country, vulnerability_cat=vulnerabilities_cat)
    # model selector (not currently being used)
    if model_sel == "chatGPT":
        # Create the chat completion as a streamed generator (stream=True)
        # response = openai.ChatCompletion.create(model="gpt-4o-mini-2024-07-18", messages=[{"role": "user", "content": get_prompt(docs, query)}], stream=True)
        stream = client.chat.completions.create(
            model="gpt-4o-mini-2024-07-18",
            messages=[{"role": "user", "content": get_prompt(docs, query)}],
            stream=True,
        )
        # Iterate through the streamed output, updating the response box as chunks arrive
        report = []
        for chunk in stream:
            if chunk.choices[0].delta.content is not None:
                # print(chunk.choices[0].delta.content, end="")
                report.append(chunk.choices[0].delta.content)
                result = "".join(report).strip()
                res_box.success(result)  # output to response text box
        references = get_refs(docs, result)  # extract references from the generated text
    # Llama2 selection (was running on HF)
    # else:
    #     res = client.text_generation(get_prompt(docs, query=input_query), max_new_tokens=4000, temperature=0.01, model=model)
    #     output = res
    #     references = get_refs(docs, res)
    st.markdown("----")
    st.markdown('**REFERENCES:**')
    st.markdown('References are based on text automatically extracted from climate policy documents. These extracts may contain non-legible characters or disjointed text as an artifact of the extraction procedure.')
    st.markdown(references, unsafe_allow_html=True)
#___________________________________________________________________________________________________________
# Sidebar (filters)
with st.sidebar:
    country = st.sidebar.multiselect('Filter by country:', country_options)
    vulnerabilities_cat = st.sidebar.multiselect('Filter by vulnerabilities category:', vulnerability_options)
    with st.expander("ℹ️ - About filters", expanded=False):
        st.markdown(
            """
* *These selections will filter the data matched against your query*
* *For a comparative analysis of multiple countries or vulnerability categories, select the items you require, or select **'All Countries'** or **'All Categories'***
* *Be careful when using the vulnerabilities category filter: many of the categories are not well represented in the documents, so selecting them will severely limit the data available for analysis*
"""
        )
# Main window title
with st.container():
    st.markdown("<h2 style='text-align: center;'> Climate Policy Documents: Vulnerabilities Analysis Q&A </h2>", unsafe_allow_html=True)
    st.write(' ')
# Main window instructions
with st.expander("ℹ️ - About this app", expanded=False):
    st.write(
        """
This tool seeks to provide an interface for querying national climate policy documents (NDCs, LTS, etc.). The current version is powered by OpenAI's chatGPT models. The document store is limited to 10 Southern African countries (Angola, Botswana, Eswatini, Lesotho, Malawi, Mozambique, Namibia, South Africa, Zambia, Zimbabwe), as well as Kenya and Rwanda. The intended use case is to allow users to interact with the documents and obtain valuable insights on various vulnerable groups affected by climate change.

**DISCLAIMER:** *This prototype tool based on LLMs (Large Language Models) is provided "as is" for experimental and exploratory purposes only, and should not be used for critical or production applications. Users are advised that the tool may contain errors, bugs, or limitations and should be used with caution and awareness of potential risks. The developers make no warranties or guarantees regarding its performance, reliability, or suitability for any specific purpose.*
"""
    )
# Display the text passages as radio buttons
selected_example = st.radio("Example questions", examples)
st.write(
    """
You can request comparative analyses between countries by filtering by country and using more advanced prompts. For example:

*Provide a comparative analysis between Angola and Kenya with regard to specific initiatives presented in the context to address the needs of groups such as women and children to the effects of climate change.*

Make sure your filters match the countries you have specified for the analysis!
"""
)
# Dropdown selectbox: model (currently not used)
# model_sel = st.selectbox('Select an LLM:', model_options)
model_sel = "chatGPT"
#----Model Select logic-------
if model_sel == "chatGPT":
    model_name = "gpt-3.5-turbo"
# else:
#     model = "meta-llama/Llama-2-70b-chat-hf"
#     # Instantiate the inference client
#     client = InferenceClient()
# get prompt from user or example prompt
if selected_example == "-":  # hyphen used as a workaround (st won't allow a null selection)
    text = st.text_area('Enter your question in the text box below using natural language or select an example from above:')
else:
    text = st.text_area('Enter your question in the text box below using natural language or select an example from above:', value=selected_example)
if st.button('Submit'):
    st.markdown("----")
    st.markdown('**RESPONSE:**')
    res_box = st.empty()
    run_query(text, country=country, model_sel=model_sel)