Spaces:
Running
Running
mylibrar
committed on
Commit
·
6713f3f
1
Parent(s):
9868b53
Add help messages to interface
Browse files
app.py
CHANGED
@@ -16,26 +16,33 @@ with st.sidebar:
|
|
16 |
st.markdown(html, unsafe_allow_html=True)
|
17 |
|
18 |
metric = st.radio(
|
19 |
-
"Choose a metric", options=os.listdir(os.path.join(EVAL_DIR))
|
|
|
20 |
)
|
21 |
|
22 |
n_shot = st.radio(
|
23 |
-
"Selece an n-shot number", os.listdir(os.path.join(EVAL_DIR, metric))
|
|
|
24 |
)
|
25 |
|
26 |
col1, col2 = st.columns(2)
|
27 |
|
28 |
def render_column(col_label):
|
29 |
st.header(f"Checkpoint {col_label}")
|
30 |
-
ckpt = st.select_slider('Select a checkpoint', sorted(os.listdir(os.path.join(EVAL_DIR, metric, n_shot))), key=col_label + '1')
|
31 |
st.write(f'Veiwing Evaluation Results for Checkpoint: `{ckpt}`')
|
32 |
-
|
33 |
-
|
|
|
|
|
|
|
|
|
|
|
34 |
with tarfile.open(os.path.join(EVAL_DIR, metric, n_shot, ckpt, file), "r:gz") as tar:
|
35 |
f = tar.extractfile(tar.next())
|
36 |
eval_json = json.load(f)
|
37 |
if isinstance(eval_json, list):
|
38 |
-
doc_id = st.slider("Select a document id", 0, len(eval_json) - 1, 0, 1, key=col_label + '3')
|
39 |
st.json(eval_json[doc_id])
|
40 |
else:
|
41 |
st.json(eval_json)
|
|
|
16 |
st.markdown(html, unsafe_allow_html=True)
|
17 |
|
18 |
metric = st.radio(
|
19 |
+
"Choose a metric", options=os.listdir(os.path.join(EVAL_DIR)),
|
20 |
+
help="type of evaluation benchmark task"
|
21 |
)
|
22 |
|
23 |
n_shot = st.radio(
|
24 |
+
"Selece an n-shot number", os.listdir(os.path.join(EVAL_DIR, metric)),
|
25 |
+
help="number of examples included in few-shot prompting"
|
26 |
)
|
27 |
|
28 |
col1, col2 = st.columns(2)
|
29 |
|
30 |
def render_column(col_label):
    """Render one comparison column: checkpoint picker, file picker, JSON viewer.

    Args:
        col_label: label for this column (e.g. "A"/"B"); also used as a prefix
            to namespace the Streamlit widget ``key``s so the two side-by-side
            columns do not collide.

    Side effects:
        Emits Streamlit widgets; reads evaluation archives from
        ``EVAL_DIR/metric/n_shot/<ckpt>/`` (module-level globals).
    """
    st.header(f"Checkpoint {col_label}")
    ckpt = st.select_slider(
        'Select a checkpoint',
        sorted(os.listdir(os.path.join(EVAL_DIR, metric, n_shot))),
        key=col_label + '1',
        help="checkpoint index from 3 to 360",
    )
    # Fixed user-facing typo: "Veiwing" -> "Viewing".
    st.write(f'Viewing Evaluation Results for Checkpoint: `{ckpt}`')
    suffix, result_file = ".tar.gz", "results.json"
    # Strip the archive suffix so the selectbox shows bare file names.
    file_list = sorted(
        f_name[:-len(suffix)]
        for f_name in os.listdir(os.path.join(EVAL_DIR, metric, n_shot, ckpt))
    )
    # Pin the summary file ("results.json") to the top of the list when present.
    if result_file in file_list:
        file_list.remove(result_file)
        file_list = [result_file] + file_list
    file = st.selectbox(
        "Select a file",
        file_list,
        key=col_label + '2',
        help="a list of raw output files from evaluation results",
    )
    file += suffix
    with tarfile.open(os.path.join(EVAL_DIR, metric, n_shot, ckpt, file), "r:gz") as tar:
        # NOTE(review): assumes each archive holds exactly one member — confirm.
        f = tar.extractfile(tar.next())
        eval_json = json.load(f)
    if isinstance(eval_json, list):
        doc_id = st.slider(
            "Select a document id", 0, len(eval_json) - 1, 0, 1,
            key=col_label + '3',
            help="index of a specific question/task in current file",
        )
        st.json(eval_json[doc_id])
    else:
        st.json(eval_json)
|