add comments
app.py CHANGED

@@ -1,9 +1,19 @@
-from datasets import load_dataset
 import streamlit as st
+from datasets import load_dataset
+
+st.set_page_config(
+    page_icon="🧊",
+    layout="wide",
+)
+
+st.write(
+    "This is an application for viewing different generations for the same prompt. The generations vary depending on the checkpoint used and also the parameters used for the generation."
+)

 HF_API_TOKEN = st.secrets["HF_API_TOKEN"]
 PROMPT_COLOR = "#CA437E"

+
 def safe_text(text):
     text = text.replace("\n", "<br>")
     return f"<pre>{text}</pre>"
@@ -16,20 +26,44 @@ def prompt_markup_format(text):
 def generation_markup_format(text):
     return f"<font color={PROMPT_COLOR}>{text}</pre></font>"

-
+
+ds = load_dataset("SaulLu/bloom-generations", use_auth_token=HF_API_TOKEN)
 ds = ds["train"]

 possible_prompts = ds.unique("prompt")
-chosen_prompt = st.selectbox("Chose a prompt", possible_prompts)
-st.markdown(safe_text(chosen_prompt), unsafe_allow_html=True)

-
+col_1, col_2 = st.columns(2)
+with col_1:
+    st.markdown("<h1 style='text-align: center'>Prompt</h1>", unsafe_allow_html=True)
+    chosen_prompt = st.selectbox("Chose a prompt", possible_prompts)
+    st.markdown(safe_text(chosen_prompt), unsafe_allow_html=True)

+sub_ds = ds.filter(
+    lambda exs: [prompt == chosen_prompt for prompt in exs["prompt"]], batched=True
+)

-index_sample = st.number_input("Index of the chosen example", min_value=0, max_value=len(sub_ds) - 1, value=0, step=1)
-sample = sub_ds[index_sample]
-markdown_text = generation_markup_format(safe_text(sample['generation']))

-
-
-
+with col_2:
+    st.markdown(
+        "<h1 style='text-align: center'>Generation</h1>", unsafe_allow_html=True
+    )
+    index_sample = st.number_input(
+        "Index of the chosen example",
+        min_value=0,
+        max_value=len(sub_ds) - 1,
+        value=0,
+        step=1,
+    )
+    sample = sub_ds[index_sample]
+    markdown_text = generation_markup_format(safe_text(sample["generation"]))
+    st.markdown(markdown_text, unsafe_allow_html=True)
+    st.markdown(
+        "<h2 style='text-align: center'>Generation configuration</h2>",
+        unsafe_allow_html=True,
+    )
+    config = {
+        key: value
+        for key, value in sample.items()
+        if key not in ["prompt", "generation"]
+    }
+    config
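
Aside on the filtering step added above: with batched=True, datasets.Dataset.filter passes the callable a dict mapping column names to lists of values for a whole batch and expects one boolean per row back. A minimal runnable sketch of that behavior on a small made-up in-memory dataset (the rows below are illustrative; only the "prompt" and "generation" column names mirror the app):

# Minimal sketch of the batched filter used in the diff above.
# The dataset here is made up; it is not SaulLu/bloom-generations.
from datasets import Dataset

ds = Dataset.from_dict(
    {
        "prompt": ["a cat sat", "a cat sat", "hello world"],
        "generation": ["generation 1", "generation 2", "generation 3"],
    }
)

chosen_prompt = "a cat sat"

# With batched=True, the callable receives a dict of column name -> list of
# values and must return one boolean per row of the batch.
sub_ds = ds.filter(
    lambda exs: [prompt == chosen_prompt for prompt in exs["prompt"]], batched=True
)

print(len(sub_ds))  # 2
print(sub_ds[0])    # {'prompt': 'a cat sat', 'generation': 'generation 1'}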
|