allenpark committed on
Commit
5aab811
·
verified ·
1 Parent(s): 68fe1ac

Refactor deconstruct interface to columns with textbox

Browse files
Files changed (1) hide show
  1. app.py +11 -2
app.py CHANGED
@@ -149,14 +149,23 @@ outputs = [
149
 
150
  with gr.Blocks() as demo:
151
  gr.Markdown(HEADER)
152
- gr.Interface(fn=model_call, inputs=inputs, outputs=outputs)
 
153
  # tokenizer_state = gr.State()
154
  # model_state = gr.State()
 
 
 
 
 
 
 
 
155
 
156
  # model_dropdown = gr.Dropdown(choices=["Patronus Lynx 8B", "Patronus Lynx 70B"], value="Patronus Lynx 8B", label="Model")
157
  # model_dropdown.change(fn=update_model, inputs=[model_dropdown, tokenizer_state, model_state], outputs=[tokenizer_state, model_state])
158
 
159
- # submit_button.click(fn=model_call, inputs=inputs, outputs=output)
160
 
161
  # initial_tokenizer, initial_model = load_model_and_tokenizer("Patronus Lynx 8B")
162
  # demo.load(fn=lambda: (initial_tokenizer, initial_model), outputs=[tokenizer_state, model_state])
 
# Build the Gradio UI: two side-by-side columns — inputs (question/document/
# answer) on the left, model outputs (reasoning/score) on the right.
with gr.Blocks() as demo:
    gr.Markdown(HEADER)
    # gr.Interface(fn=model_call, inputs=inputs, outputs=outputs)

    # tokenizer_state = gr.State()
    # model_state = gr.State()
    with gr.Column(scale=1):
        question = gr.Textbox(label="Question")
        document = gr.Textbox(label="Document")
        answer = gr.Textbox(label="Answer")
        # BUG FIX: this was gr.Textbox(label="Submit"), but Textbox has no
        # .click event — the .click(...) wiring below would raise
        # AttributeError. A submit control must be a gr.Button.
        submit_button = gr.Button("Submit")
    with gr.Column(scale=1):
        reasoning = gr.Textbox(label="Reasoning")
        score = gr.Textbox(label="Score")

    # model_dropdown = gr.Dropdown(choices=["Patronus Lynx 8B", "Patronus Lynx 70B"], value="Patronus Lynx 8B", label="Model")
    # model_dropdown.change(fn=update_model, inputs=[model_dropdown, tokenizer_state, model_state], outputs=[tokenizer_state, model_state])

    # Wire the button to the inference function: three text inputs in,
    # reasoning + score out. model_call is defined earlier in app.py.
    submit_button.click(fn=model_call, inputs=[question, document, answer], outputs=[reasoning, score])

    # initial_tokenizer, initial_model = load_model_and_tokenizer("Patronus Lynx 8B")
    # demo.load(fn=lambda: (initial_tokenizer, initial_model), outputs=[tokenizer_state, model_state])