extract result from llm response
app.py CHANGED

@@ -76,7 +76,8 @@ class RAGDemo(object):
             chain_type_kwargs={"prompt": QA_CHAIN_PROMPT},
             verbose=True,
         )
-
+        resp = basic_qa.invoke(input_text)
+        return resp['result']
 
     def __call__(self):
         with gr.Blocks() as demo:
@@ -87,7 +88,7 @@ class RAGDemo(object):
             with gr.Column():
                 model_name = gr.Dropdown(
                     choices=['gemini-1.0-pro'],
-                    value='
+                    value='gemini-1.0-pro',
                     label="model"
                 )
                 api_key = gr.Textbox(placeholder="your api key for LLM", label="api key")
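The first hunk makes the handler return only the generated answer: LangChain's RetrievalQA responds to invoke() with a dict (typically {"query": ..., "result": ...}), so returning the raw response would dump the whole mapping into the UI. A minimal sketch of the pattern, with the chain construction elided and the function name run_query assumed (only basic_qa and input_text come from the diff):

```python
from langchain.chains import RetrievalQA


def run_query(basic_qa: RetrievalQA, input_text: str) -> str:
    # invoke() accepts a plain string when the chain has a single input key
    # and returns a dict, e.g. {"query": ..., "result": ...}.
    resp = basic_qa.invoke(input_text)
    # Hand back only the answer text, not the whole response mapping.
    return resp['result']
```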
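The second hunk gives the model dropdown an explicit default so it no longer starts unselected (a Gradio Dropdown without a value begins as None, which the callback would then receive until the user picks a model). A minimal sketch of that corner of the UI, assuming the surrounding Blocks/Column layout from the diff; demo.launch() is added only to make the snippet runnable:

```python
import gradio as gr

with gr.Blocks() as demo:
    with gr.Column():
        model_name = gr.Dropdown(
            choices=['gemini-1.0-pro'],
            # Pre-select the only available model so the form is valid
            # before the user touches the dropdown.
            value='gemini-1.0-pro',
            label="model",
        )
        api_key = gr.Textbox(placeholder="your api key for LLM", label="api key")

demo.launch()
```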