ruisp committed
Commit 16abd01 · 1 Parent(s): de81dc4

Added Error Check


App raises error when it does not detect date elements.
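For orientation, a minimal sketch (not the app's actual code) of the Gradio pattern this commit relies on: raising gr.Error inside the handler makes the interface surface the message to the user instead of quietly answering without a date filter. The answer function and its crude has_date check below are hypothetical stand-ins for the app's date_extractor chain.

import gradio as gr

def answer(query):
    # Hypothetical stand-in for the app's date extraction step.
    has_date = any(token.isdigit() for token in query.replace('?', ' ').split())
    if not has_date:
        # Raising gr.Error aborts the call and shows the message in the UI.
        raise gr.Error('No date elements found. Please include temporal references in your query.')
    return f'Would run the filtered QA chain for: {query}'

demo = gr.Interface(fn=answer, inputs='text', outputs='text')

if __name__ == '__main__':
    demo.launch()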

Files changed (2)
  1. filterminutes.py +16 -6
  2. public_app.py +25 -19
filterminutes.py CHANGED
@@ -1,4 +1,6 @@
 import logging
+import gradio as gr
+import numpy as np
 
 log = logging.getLogger('filter methods')
 logging.basicConfig(level=logging.INFO)
@@ -57,15 +59,23 @@ def search_with_filter(vector_store, query, filter_dict, target_k=5, init_k=100,
     step : int
         The size of the step when enlarging the search.
 
-    Returns: List of at least target_k Documents for post-processing
+    Returns: List of at least target_k Documents for post-processing.
 
     """
     context = filter_docs_by_meta(vector_store.similarity_search(query, k=init_k), filter_dict)
-    while len(context) < target_k:
+    len_docs_begin = len(context)
+    if len_docs_begin >= target_k:
+        log.info(f'Initial search contains {len_docs_begin} Documents. Expansion not required. ')
+        return context
+    CUT_THE_LOOP_N = 10
+    for top_k_docs in np.arange(init_k, CUT_THE_LOOP_N * init_k, step):
         log.info(f'Context contains {len(context)} documents')
-        log.info(f'Expanding search with k={init_k}')
-        init_k += step
-        context = filter_docs_by_meta(vector_store.similarity_search(query, k=init_k), filter_dict)
-    log.info(f'Done. Context contains {len(context)} Documents matching the filtering criteria')
+        log.info(f'Expanding search with k={top_k_docs}')
+        context = filter_docs_by_meta(vector_store.similarity_search(query, k=int(top_k_docs)), filter_dict)
+        if len(context) == target_k:
+            log.info(f'Success. Context contains {len(context)} Documents matching the filtering criteria')
+            return context
+    log.info(f'Failed to reach target number of documents after {CUT_THE_LOOP_N} loops,'
+             f' context contains {len(context)} Documents matching the filtering criteria')
     return context
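As a quick sanity check on the new expansion loop (a sketch only, plugging in the init_k=200 and step=300 that public_app.py passes to search_with_filter, and the CUT_THE_LOOP_N = 10 cap above), these are the candidate k values the loop will try; note the first iteration re-runs the initial k before enlarging.

import numpy as np

# Values taken from the diff: public_app.py calls search_with_filter(..., init_k=200, step=300)
# and the new loop caps the expansion at CUT_THE_LOOP_N * init_k.
init_k, step, CUT_THE_LOOP_N = 200, 300, 10
candidate_ks = np.arange(init_k, CUT_THE_LOOP_N * init_k, step)
print(candidate_ks.tolist())  # [200, 500, 800, 1100, 1400, 1700]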
 
public_app.py CHANGED
@@ -1,4 +1,5 @@
 import logging
+import os
 
 from langchain import PromptTemplate, LLMChain
 from langchain.chains.question_answering import load_qa_chain
@@ -32,7 +33,7 @@ def load_chains(open_ai_key):
     return date_extractor, fed_chain
 
 
-def get_chain(query, api_key):
+def get_chain(query, api_key=os.environ['OPENAI_API_KEY']):
     """
     Detects the date, computes similarity, and answers the query using
     only documents corresponding to the date requested.
@@ -51,34 +52,39 @@ def get_chain(query, api_key):
     date_extractor, fed_chain = load_chains(api_key)
     logging.info('Extracting the date in numeric format..')
     date_response = date_extractor.run(query)
-    if date_response != 'False':
+    if date_response == 'False':
+        logging.info('No date elements found. Running the qa without filtering can output incorrect results.')
+        raise gr.Error('No date elements found. Please include temporal references in your query.')
+    else:
         filter_date = json.loads(date_response)
-
         logging.info(f'Date parameters retrieved: {filter_date}')
         logging.info('Running the qa with filtered context..')
         filtered_context = search_with_filter(vs, query, init_k=200, step=300, target_k=7, filter_dict=filter_date)
-
         logging.info(20 * '-' + 'Metadata for the documents to be used' + 20 * '-')
         for doc in filtered_context:
             logging.info(doc.metadata)
-    else:
-        logging.info('No date elements found. Running the qa without filtering can output incorrect results.')
-        filtered_context = vs.similarity_search(query, k=7)
     return fed_chain({'input_documents': filtered_context[:7], 'question': query})['output_text']
 
 
 if __name__ == '__main__':
     app = gr.Interface(fn=get_chain,
-                       inputs=[gr.Textbox(lines=2, placeholder="Enter your query", label='Your query'),
-                               gr.Textbox(lines=1, placeholder="Your OpenAI API key here", label='OpenAI Key')],
-                       description='Query the public database in FRED from 1936-2023',
-                       outputs=gr.Textbox(lines=1, label='Answer'),
-                       title='Chat with the FOMC meeting minutes',
-                       examples=[['What was the economic outlook from the staff presented in the meeting '
-                                  'of April 2009 with respect to labour market developments and industrial production?'],
-                                 ['Who were the voting members present in the meeting on March 2010?'],
-                                 ['How important was the pandemic of Covid-19 in the discussions during 2020?'],
-                                 ['What was the impact of the oil crisis for the economic outlook during 1973?']],
-                       cache_examples=False
-                       )
+                       inputs=[gr.Textbox(lines=2, placeholder="Enter your query", label='Your query'),
+                               gr.Textbox(lines=1, placeholder="Your OpenAI API key here", label='OpenAI Key')],
+                       description='Here, you can query the [minutes](www.federalreserve.gov) of the Federal '
+                                   'Open Market Committee meetings from March 1936 to May 2023. Click the examples'
+                                   ' below to see an illustration of the tool in action.',
+                       article='**Disclaimer**: This app is for demonstration purposes only, and it may take some '
+                               'time to load '
+                               'during periods of heavy load',
+                       analytics_enabled=True,
+                       outputs=gr.Textbox(lines=1, label='Answer'),
+                       title='Chat with the FOMC meeting minutes',
+                       examples=[['What was the economic outlook from the staff presented in the meeting '
+                                  'of April 2009 with respect to labour market developments and industrial production?'],
+                                 ['Who were the voting members present in the meeting on March 2010?'],
+                                 ['How important was the pandemic of Covid-19 in the discussions during 2020?'],
+                                 ['What was the impact of the oil crisis for the economic outlook during 1973?']],
+                       cache_examples=True
+                       )
+    app.queue()
     app.launch()
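A side note on the new get_chain signature, sketched below with a hypothetical stub: because api_key=os.environ['OPENAI_API_KEY'] is a default argument, the lookup happens once when the module is imported, so a missing variable raises KeyError at startup and later changes to the environment are not picked up; calling os.environ.get inside the function body would defer that check.

import os

os.environ.setdefault('OPENAI_API_KEY', 'sk-placeholder')  # placeholder so this sketch runs standalone

def get_chain_stub(query, api_key=os.environ['OPENAI_API_KEY']):
    # Hypothetical stand-in for public_app.get_chain: only reports which key it would use.
    return f'would answer {query!r} using key {api_key[:6]}...'

os.environ['OPENAI_API_KEY'] = 'sk-rotated'
print(get_chain_stub('Who were the voting members in March 2010?'))
# -> still reports 'sk-pla...': the default captured the value at definition time.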