Space: elibrowne (Sleeping)
elibrowne committed · Commit 61aa64b · 1 parent: 70590fe
Commit message: State?

app.py CHANGED
@@ -8,13 +8,18 @@ from huggingface_hub import hf_hub_download, file_exists, HfApi
 from random import shuffle
 from markdown import markdown
 
-#
+# Read-only reference variables
+qIDs = ["mbe_46", "mbe_132", "mbe_287", "mbe_326", "mbe_334", "mbe_389", "mbe_563", "mbe_614", "mbe_642", "mbe_747", "mbe_779", "mbe_826", "mbe_845", "mbe_1042", "mbe_1134"]
+mode_options = ["e5", "colbert"]
+with open("question_data.json", "r") as f:
+    all_questions = json.load(f)
+
+"""
+# State variables which interact with loading and unloading
 user_data = {}
 current_response = {}
 current_question = {} # read-only within gradio blocks
 user_id = "no_id"
-qIDs = ["mbe_46", "mbe_132", "mbe_287", "mbe_326", "mbe_334", "mbe_389", "mbe_563", "mbe_614", "mbe_642", "mbe_747", "mbe_779", "mbe_826", "mbe_845", "mbe_1042", "mbe_1134"]
-mode_options = ["e5", "colbert"]
 # Control global variables
 step = 0
 mode = 1
@@ -99,6 +104,8 @@ def load_current_question():
     current_question = all_questions[qid]
     reset_current_response(user_data["order"][q_index])
 
+"""
+
 # THEMING: colors and styles (Gradio native)
 
 theme = gr.themes.Soft(
@@ -111,6 +118,92 @@ theme = gr.themes.Soft(
 # BLOCKS: main user interface
 
 with gr.Blocks(theme = theme) as user_eval:
+    # ALL VARIABLES AND LOADING
+
+    # State variables which interact with loading and unloading
+    user_data = gr.State({})
+    current_response = gr.State({})
+    current_question = gr.State({}) # read-only within gradio blocks
+    user_id = gr.State("no_id")
+    # Control global variables
+    step = gr.State(0)
+    mode = 1 # mode is always 1 for now
+
+    def load_user_data(id):
+        filename = id.replace('@', '_AT_').replace('.', '_DOT_')
+        if file_exists(filename = "users/" + filename + ".json", repo_id = "ebrowne/test-data", repo_type = "dataset", token = os.getenv("HF_TOKEN")):
+            print("File exists, downloading data.")
+            # If the ID exists, download the file from HuggingFace
+            path = hf_hub_download(repo_id = "ebrowne/test-data", token = os.getenv("HF_TOKEN"), filename = "users/" + filename + ".json", repo_type = "dataset")
+            # Add their current status to user_data
+            with open(path, "r") as f:
+                return json.load(f)
+        else:
+            # If the ID doesn't exist, create a format for the file and upload it to HuggingFace
+            print("File does not exist, creating user.")
+            shuffle(qIDs)
+            modes = []
+            for i in range(len(qIDs)):
+                temp = mode_options[:]
+                shuffle(temp)
+                modes.append(temp)
+            # This is the format for a user's file on HuggingFace
+            return {
+                "user_id": id, # original in email format, which was passed here
+                "order": qIDs, # randomized order for each user
+                "modes": modes, # randomized order for each user
+                "current": 0, # user starts on first question
+                "responses": [] # formatted as a list of current_responses
+            }
+        # No longer uploading after first creation: user must answer question for that.
+
+    def update_huggingface(id, data):
+        print("Updating data...")
+        filename = id.replace('@', '_AT_').replace('.', '_DOT_')
+        # Create a local file that will be uploaded to HuggingFace
+        with open(filename + ".json", "w") as f:
+            json.dump(data, f)
+        # Upload to hub (overwriting existing files...)
+        api = HfApi()
+        api.upload_file(
+            path_or_fileobj=filename + ".json",
+            path_in_repo="users/" + filename + ".json",
+            repo_id="ebrowne/test-data",
+            repo_type="dataset",
+            token = os.getenv("HF_TOKEN")
+        )
+
+    def reset_current_response(qid, user_id):
+        return {
+            current_response : {
+                "user_id": user_id,
+                "question_id": qid,
+                "user_answer": 0,
+                "e5_scores": [], # list of ten [score, score, score, score]
+                "e5_set": [], # two values
+                "e5_generation": [], # two values
+                "colbert_scores": [],
+                "colbert_set": [],
+                "colbert_generation": [],
+                "gold_set": [],
+                "gold_generation": []
+            }
+        }
+
+    # Loads the user's current question — this is the first question that the user has not made any progress on.
+    def load_current_question():
+        q_index = user_data["current"]
+        if q_index >= len(all_questions):
+            print("Done")
+            gr.Info("You've finished — thank you so much! There are no more questions. :)")
+            reset_current_response("USER FINISHED")
+            return {"question": "You're done! Thanks so much for your help.", "answers": ["I want to log out now.", "I want to keep answering questions.", "I want to keep answering questions.", "I want to keep answering questions."], "correct_answer_index": 0, "top10_e5": ["You're done; thank you!", "You're done; thank you!", "You're done; thank you!", "You're done; thank you!", "You're done; thank you!", "You're done; thank you!", "You're done; thank you!", "You're done; thank you!", "You're done; thank you!", "You're done; thank you!"], "generation_e5": "I don't know how to exit this code right now, so you're in an endless loop of this question until you quit.", "top10_colbert": ["You're done; thank you!", "You're done; thank you!", "You're done; thank you!", "You're done; thank you!", "You're done; thank you!", "You're done; thank you!", "You're done; thank you!", "You're done; thank you!", "You're done; thank you!", "You're done; thank you!"], "generation_colbert": "I don't know how to exit this code right now, so you're in an endless loop of this question until you quit.", "top10_contains_gold_passage": False, "gold_passage": "GOLD PASSAGE: LOG OFF!", "gold_passage_generation": "what do you gain"}
+        else:
+            qid = user_data["order"][q_index]
+            reset_current_response(user_data["order"][q_index])
+            return all_questions[qid]
+
+
     # Title text introducing study
     forward_btn = gr.Textbox("unchanged", visible = False, elem_id = "togglebutton") # used for toggling windows
     gr.HTML("""
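For context on the pattern this hunk adopts: gr.State gives every browser session its own copy of a value, whereas module-level globals are shared by all visitors to the Space. The snippet below is a minimal, self-contained sketch of that pattern, not code from app.py; the names advance, label and next_btn are illustrative.

import gradio as gr

with gr.Blocks() as demo:
    step = gr.State(0)                       # per-session value, not shared across users
    label = gr.Textbox(label="Current step")
    next_btn = gr.Button("Next")

    def advance(cur_step):
        # a gr.State input arrives as a plain Python value; returning a value writes it back
        new_step = cur_step + 1
        return new_step, "step = " + str(new_step)

    # the State must be listed in inputs to be read and in outputs to be updated
    next_btn.click(fn=advance, inputs=[step], outputs=[step, label])

demo.launch()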
@@ -165,15 +258,12 @@ with gr.Blocks(theme = theme) as user_eval:
         eval_satisfied = gr.Slider(1, 5, step = 0.5, label = "User Satisfaction", value = 3)
         btn_g = gr.Button("Next")
 
-    def next_p(e0, e1, e2, e3):
-        global step
-        global mode
-        global current_response
-        step += 1
+    def next_p(e0, e1, e2, e3, cur_step, mode, current_response):
+        step = cur_step + 1
         # Add user data to the current response
         current_response["e5_scores"].append([e0, e1, e2, e3])
         # Next item
-        if step
+        if step >= len(current_question["top10_e5"]): # should always be 10 (DEBUG: >= to avoid out of bounds)
             # Step 10: all sources
             collapsible_string = "<h2> Set of Passages </h2>\n"
             for i, passage in enumerate(current_question["top10_e5"]):
@@ -188,7 +278,10 @@ with gr.Blocks(theme = theme) as user_eval:
                 eval_0: gr.Radio(value = None),
                 eval_1: gr.Slider(value = 3),
                 eval_2: gr.Slider(value = 3),
-                eval_3: gr.Slider(value = 0)
+                eval_3: gr.Slider(value = 0),
+                step: step,
+                mode: 1,
+                current_response: current_response
             }
         else:
             return {
@@ -198,15 +291,14 @@ with gr.Blocks(theme = theme) as user_eval:
                 eval_0: gr.Radio(value = None),
                 eval_1: gr.Slider(value = 3),
                 eval_2: gr.Slider(value = 3),
-                eval_3: gr.Slider(value = 0)
+                eval_3: gr.Slider(value = 0),
+                step: step,
+                mode: 1,
+                current_response: current_response
             }
 
-    def next_g(e_h, e_s):
-        global step
-        global mode
-        global user_data
-        global current_response
-        step += 1
+    def next_g(e_h, e_s, cur_step, mode, user_data, current_response):
+        step = cur_step + 1
 
         if step == 11:
             # Step 11: guaranteed to be generation
@@ -218,22 +310,28 @@ with gr.Blocks(theme = theme) as user_eval:
                 <h2> Autogenerated Response </h2>
                 <p>""" + markdown(current_question["generation_e5"]) + "</p>"),
                 eval_helps: gr.Slider(value = 0),
-                eval_satisfied: gr.Slider(value = 3)
+                eval_satisfied: gr.Slider(value = 3),
+                step: step,
+                mode: mode,
+                user_data: user_data,
+                current_response: current_response
             }
         # Steps 12 and 13 are gold passage + gold passage generation IF it is applicable
         if step > 11: # and not current_question["top10_contains_gold_passage"]
             # When mode is 0 -> reset with mode = 1
+            """
             if mode == 0:
                 # The user just evaluated a generation for mode 0
                 current_response[user_data["modes"][user_data["current"]][mode] + "_generation"] = [e_h, e_s]
                 return {
-                    selection: gr.HTML("""
+                    selection: gr.HTML(\"""
                     <h2> Retrieved Passage </h2>
-                    <p> """ + current_question["top10_" + user_data["modes"][user_data["current"]][1]][0] + "</p>"), # hard coded: first passage (0) of mode 2 (1),
+                    <p> \""" + current_question["top10_" + user_data["modes"][user_data["current"]][1]][0] + "</p>"), # hard coded: first passage (0) of mode 2 (1),
                     forward_btn: gr.Textbox("load new data"),
                     eval_helps: gr.Slider(value = 0),
                     eval_satisfied: gr.Slider(value = 3)
                 }
+            """
             # When mode is 1 -> display GP and GP generation, then switch
             if step == 12:
                 # The user just evaluated a generation for mode 1
@@ -245,7 +343,11 @@ with gr.Blocks(theme = theme) as user_eval:
                     <p> """ + current_question["gold_passage"] + "</p>"),
                     forward_btn: gr.Textbox(),
                     eval_helps: gr.Slider(value = 0),
-                    eval_satisfied: gr.Slider(value = 3)
+                    eval_satisfied: gr.Slider(value = 3),
+                    step: step,
+                    mode: mode,
+                    user_data: user_data,
+                    current_response: current_response
                 }
             elif step == 13:
                 # The user just evaluated the gold passage
@@ -256,7 +358,11 @@ with gr.Blocks(theme = theme) as user_eval:
                     <p> """ + markdown(current_question["gold_passage_generation"]) + "</p>"),
                     forward_btn: gr.Textbox(),
                     eval_helps: gr.Slider(value = 0),
-                    eval_satisfied: gr.Slider(value = 3)
+                    eval_satisfied: gr.Slider(value = 3),
+                    step: step,
+                    mode: mode,
+                    user_data: user_data,
+                    current_response: current_response
                 }
             else: # step = 14
                 # The user just evaluated the gold passage generation
@@ -269,37 +375,15 @@ with gr.Blocks(theme = theme) as user_eval:
                     selection: gr.Markdown("Advancing to the next question..."),
                     forward_btn: gr.Textbox("changed" + str(user_data["current"])), # current forces event to trigger always
                     eval_helps: gr.Slider(value = 0),
-                    eval_satisfied: gr.Slider(value = 3)
+                    eval_satisfied: gr.Slider(value = 3),
+                    step: step,
+                    mode: mode,
+                    user_data: user_data,
+                    current_response: current_response
                 }
 
-
-
-        else:
-            # When mode is 0 -> reset with mode = 1
-            if mode == 0:
-                return {
-                    selection: gr.HTML(\"""
-                    <h2> Retrieved Passage </h2>
-                    <p> \""" + current_question["top10_" + user_data["modes"][user_data["current"]][1]][0] + "</p>"), # hard coded: first passage (0) of mode 2 (1)
-                    forward_btn: gr.Textbox("load new data"),
-                    eval_helps: gr.Slider(value = 1),
-                    eval_satisfied: gr.Slider(value = 1)
-                }
-            # When mode is 1 -> change question
-            user_data["current"] += 1
-            user_data["responses"].append(current_response) # adds new answers to current list of responses
-            # Update stored data with new current, additional data
-            update_huggingface(user_id)
-            load_current_question()
-            return {
-                selection: gr.Markdown("Advancing to the next question..."),
-                forward_btn: gr.Textbox("changed"),
-                eval_helps: gr.Slider(value = 1),
-                eval_satisfied: gr.Slider(value = 1)
-            }
-    """
-    btn_p.click(fn = next_p, inputs = [eval_0, eval_1, eval_2, eval_3], outputs = [selection, scores_p, scores_g, eval_0, eval_1, eval_2, eval_3])
-    btn_g.click(fn = next_g, inputs = [eval_helps, eval_satisfied], outputs = [selection, forward_btn, eval_helps, eval_satisfied])
+    btn_p.click(fn = next_p, inputs = [eval_0, eval_1, eval_2, eval_3, step, mode, current_response], outputs = [selection, scores_p, scores_g, eval_0, eval_1, eval_2, eval_3, step, mode, current_response])
+    btn_g.click(fn = next_g, inputs = [eval_helps, eval_satisfied, step, mode, user_data, current_response], outputs = [selection, forward_btn, eval_helps, eval_satisfied, step, mode, user_data, current_response])
 
     # Question and answering dynamics
     with gr.Row(equal_height = False, visible = False) as question:
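The rewired btn_p.click and btn_g.click calls above pass the gr.State objects through both inputs and outputs, and the handlers return a dict keyed by components so that only the named outputs change. A minimal sketch of that wiring under the same assumptions as before; panel, score, responses and next_item are illustrative names, not from app.py.

import gradio as gr

with gr.Blocks() as demo:
    step = gr.State(0)
    responses = gr.State([])                       # stands in for current_response
    score = gr.Slider(1, 5, value=3, label="Score")
    panel = gr.HTML("<p>Passage goes here</p>")
    btn = gr.Button("Next")

    def next_item(score_value, cur_step, cur_responses):
        cur_responses = cur_responses + [score_value]
        # returning a dict keyed by components updates only the listed outputs
        return {
            panel: gr.HTML("<p>Item " + str(cur_step + 1) + "</p>"),
            score: gr.Slider(value=3),             # reset the slider for the next item
            step: cur_step + 1,
            responses: cur_responses,
        }

    btn.click(fn=next_item, inputs=[score, step, responses],
              outputs=[panel, score, step, responses])

demo.launch()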
@@ -346,9 +430,7 @@ with gr.Blocks(theme = theme) as user_eval:
     c.click(fn = answer_c, outputs = [question, evals])
     d.click(fn = answer_d, outputs = [question, evals])
 
-    def toggle():
-        global step
-        global mode
+    def toggle(step, mode):
         step = 0
         if mode == 0: # temporarily disabled — will never be mode 0
             mode = 1 # update mode to 1, will restart with same Q, next set of Ps
@@ -358,6 +440,8 @@ with gr.Blocks(theme = theme) as user_eval:
                 scores_g: gr.Column(visible = False),
                 evals: gr.Row(visible = True),
                 question: gr.Row(visible = False),
+                step: step,
+                mode: mode
             }
         else:
             # reset mode to 0, will restart with new Q (set up new Q), first set of Ps
@@ -388,10 +472,12 @@ with gr.Blocks(theme = theme) as user_eval:
                 + """ + new_answers[3]),
                 selection: gr.HTML("""
                 <h2> Retrieved Passage </h2>
-                <p> """ + current_question["top10_e5"][0] + "</p>")
+                <p> """ + current_question["top10_e5"][0] + "</p>"),
+                step: step,
+                mode: mode
             } # note change from "top10_" + user_data["modes"][user_data["current"]][mode]][0]
 
-    forward_btn.change(fn = toggle, inputs =
+    forward_btn.change(fn = toggle, inputs = [step, mode], outputs = [scores_p, scores_g, evals, question, q_text, a, b, c, d, passage_display, selection, step, mode])
 
     with gr.Row() as login:
         with gr.Column():
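forward_btn in this hunk is an invisible Textbox whose .change event drives the view switch: handlers write a new value into it, which fires toggle(). A small sketch of that idea follows; question_row, eval_row and ask are hypothetical stand-ins for the app's rows and buttons.

import gradio as gr

with gr.Blocks() as demo:
    forward = gr.Textbox("unchanged", visible=False)   # hidden trigger, like forward_btn
    with gr.Row(visible=True) as question_row:
        ask = gr.Button("Answer question")
    with gr.Row(visible=False) as eval_row:
        gr.Markdown("Evaluation view")

    def toggle():
        # swap which row is visible, mirroring the app's toggle()
        return {question_row: gr.Row(visible=False), eval_row: gr.Row(visible=True)}

    # clicking writes a new value into the hidden textbox, which fires its .change event
    ask.click(fn=lambda: "changed", outputs=[forward])
    forward.change(fn=toggle, outputs=[question_row, eval_row])

demo.launch()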
@@ -408,11 +494,10 @@ with gr.Blocks(theme = theme) as user_eval:
         email.change(fn = sanitize_login, inputs = [email], outputs = [s])
 
         def submit_email(email):
-            global user_id
             user_id = email
-            load_user_data(user_id) # calls login, downloads data, initializes session
+            user_data = load_user_data(user_id) # calls login, downloads data, initializes session
             # After loading user data, update with current question
-            load_current_question()
+            current_question = load_current_question()
             new_answers = current_question["answers"].copy()
             new_answers[current_question["correct_answer_index"]] = "**" + current_question["answers"][current_question["correct_answer_index"]] + "** ✅"
             return {
@@ -437,9 +522,12 @@ with gr.Blocks(theme = theme) as user_eval:
                 a: gr.Button(current_question["answers"][0]),
                 b: gr.Button(current_question["answers"][1]),
                 c: gr.Button(current_question["answers"][2]),
-                d: gr.Button(current_question["answers"][3])
+                d: gr.Button(current_question["answers"][3]),
+                user_id: user_id,
+                user_data: user_data,
+                current_question: current_question
             }
-        s.click(fn = submit_email, inputs = [email], outputs = [question, login, selection, passage_display, q_text, a, b, c, d])
+        s.click(fn = submit_email, inputs = [email], outputs = [question, login, selection, passage_display, q_text, a, b, c, d, user_id, user_data, current_question])
 
 # Starts on question, switches to evaluation after the user answers
 user_eval.launch()
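For reference, the per-user persistence that load_user_data and update_huggingface implement in this commit boils down to one JSON file per user in a Hugging Face dataset repo. A simplified sketch, assuming the same repo id and HF_TOKEN handling as the diff; save_user, load_user and the record fields shown here are illustrative, not the app's exact record format.

import json
import os
from huggingface_hub import HfApi, file_exists, hf_hub_download

REPO = "ebrowne/test-data"
TOKEN = os.getenv("HF_TOKEN")

def save_user(record: dict) -> None:
    # write the record locally, then overwrite the matching file in the dataset repo
    filename = record["user_id"].replace("@", "_AT_").replace(".", "_DOT_")
    with open(filename + ".json", "w") as f:
        json.dump(record, f)
    HfApi().upload_file(
        path_or_fileobj=filename + ".json",
        path_in_repo="users/" + filename + ".json",
        repo_id=REPO,
        repo_type="dataset",
        token=TOKEN,
    )

def load_user(user_id: str) -> dict:
    # fetch the user's file if it exists, otherwise start a fresh record
    filename = user_id.replace("@", "_AT_").replace(".", "_DOT_")
    path_in_repo = "users/" + filename + ".json"
    if file_exists(repo_id=REPO, filename=path_in_repo, repo_type="dataset", token=TOKEN):
        path = hf_hub_download(repo_id=REPO, filename=path_in_repo,
                               repo_type="dataset", token=TOKEN)
        with open(path) as f:
            return json.load(f)
    # new user: the real app also shuffles question order and retrieval modes here
    return {"user_id": user_id, "current": 0, "responses": []}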