Geraldine committed
Commit 12fb790 · 1 Parent(s): 901aafa

Update app.py

Files changed (1)
  1. app.py +1 -4
app.py CHANGED
@@ -67,9 +67,8 @@ def llm_response(openai_key, message, chat_history):
     model_kwargs={"max_length":512,"do_sample":True,
     "temperature":0.2})
     qa_chain = RetrievalQA.from_chain_type(llm = llm,
-    chain_type = "stuff", # map_reduce, map_rerank, stuff, refine
+    chain_type = "stuff",
     retriever = vectordb.as_retriever(search_kwargs = {"k": 10}),
-    #chain_type_kwargs = {"prompt": PROMPT},
     return_source_documents = False,
     verbose = True)
     result = qa_chain(message)["result"]
@@ -94,8 +93,6 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
     with gr.Row():
     load_docs = gr.Button("Load documents and urls", variant="primary", scale=1)
     loading_status = gr.Textbox(label="Loading status", placeholder="", interactive=False, scale=0)
-    if loading_status == "loaded":
-    gr.Info("Documents loaded")
     with gr.Row():
     with gr.Column(scale=1):
     msg = gr.Textbox(label="User message")
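The first hunk only trims comments from the RetrievalQA call: the inline list of alternative chain types and the commented-out chain_type_kwargs prompt are dropped, while chain_type stays "stuff". For reference, a minimal sketch of the same LangChain construction, assuming llm and vectordb are built earlier in app.py as in the unchanged lines:

    # Minimal sketch, assuming `llm`, `vectordb` and `message` already exist elsewhere in app.py
    from langchain.chains import RetrievalQA

    qa_chain = RetrievalQA.from_chain_type(
        llm=llm,
        chain_type="stuff",  # concatenates the retrieved documents into a single prompt
        retriever=vectordb.as_retriever(search_kwargs={"k": 10}),  # top-10 chunks
        return_source_documents=False,
        verbose=True,
    )
    result = qa_chain(message)["result"]  # the chain returns a dict; the answer is under "result"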
 
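The second hunk removes a build-time check that could not work as intended: inside the gr.Blocks context, loading_status is the gr.Textbox component object, not its runtime value, so comparing it to the string "loaded" while the layout is being declared never reflects the loading result, and gr.Info is meant to be raised while an event handler runs. A hypothetical sketch of how the toast could be shown from the click handler instead (the handler name and body are assumptions, not part of this commit):

    # Hypothetical sketch, not from this commit: raise the toast inside the event handler
    import gradio as gr

    def load_documents():
        # ... load the files / urls and build the vector store here ...
        gr.Info("Documents loaded")  # shows a toast notification while the handler runs
        return "loaded"              # value written into the loading_status textbox

    with gr.Blocks(theme=gr.themes.Soft()) as demo:
        with gr.Row():
            load_docs = gr.Button("Load documents and urls", variant="primary", scale=1)
            loading_status = gr.Textbox(label="Loading status", interactive=False, scale=0)
        load_docs.click(load_documents, inputs=None, outputs=loading_status)

    demo.launch()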