mariagrandury committed
Commit 08d4272 • 1 Parent(s): c068c68
Update application

Files changed:
- app.py +12 -9
- prompts.py +0 -5
app.py CHANGED

@@ -5,10 +5,6 @@ import requests
 from mtranslate import translate
 import streamlit as st
 
-from prompts import PROMPT_LIST
-
-
-headers = {}
 
 LOGO = "https://raw.githubusercontent.com/nlp-en-es/assets/main/logo.png"
 
@@ -21,12 +17,18 @@ MODELS = {
     },
 }
 
+PROMPT_LIST = {
+    "Érase una vez...": ["Érase una vez "],
+    "¡Hola!": ["¡Hola! Me llamo "],
+    "¿Ser o no ser?": ["En mi opinión, 'ser' es "],
+}
+
 
 def query(payload, model_name):
     data = json.dumps(payload)
     print("model url:", MODELS[model_name]["url"])
     response = requests.request(
-        "POST", MODELS[model_name]["url"], headers=headers, data=data
+        "POST", MODELS[model_name]["url"], headers={}, data=data
     )
     return json.loads(response.content.decode("utf-8"))
 
@@ -50,9 +52,11 @@ def process(
     return query(payload, model_name)
 
 
+# Page
 st.set_page_config(page_title="Spanish GPT-2 Demo", page_icon=LOGO)
 st.title("Spanish GPT-2")
 
+
 # Sidebar
 st.sidebar.image(LOGO)
 st.sidebar.subheader("Configurable parameters")
@@ -89,6 +93,8 @@ do_sample = st.sidebar.selectbox(
     help="Whether or not to use sampling; use greedy decoding otherwise.",
 )
 
+
+# Body
 st.markdown(
     """
     Spanish GPT-2 models trained from scratch on two different datasets. One
@@ -104,10 +110,7 @@ st.markdown(
     """
 )
 
-model_name = st.selectbox(
-    "Model",
-    (["Model trained on OSCAR", "Model trained on the Large Spanish Corpus"]),
-)
+model_name = st.selectbox("Model", (list(MODELS.keys())))
 
 ALL_PROMPTS = list(PROMPT_LIST.keys()) + ["Custom"]
 prompt = st.selectbox("Prompt", ALL_PROMPTS, index=len(ALL_PROMPTS) - 1)
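Note on the change above: app.py now inlines PROMPT_LIST, drops the module-level headers dict, and builds the model selector from MODELS.keys(). Below is a minimal standalone sketch of the same request pattern; the MODELS entry, the model repository URL, and the {"inputs": ..., "parameters": {...}} payload shape are assumptions based on the usual Hugging Face Inference API convention, not taken from this diff.

import json

import requests

# Hypothetical MODELS entry; the real dict in app.py is not shown in this diff.
MODELS = {
    "Model trained on OSCAR": {
        "url": "https://api-inference.huggingface.co/models/flax-community/gpt-2-spanish"  # assumed URL
    },
}

def query(payload, model_name):
    # Same pattern as the updated app.py: POST the JSON-encoded payload to the model URL.
    data = json.dumps(payload)
    response = requests.request("POST", MODELS[model_name]["url"], headers={}, data=data)
    return json.loads(response.content.decode("utf-8"))

# Assumed text-generation payload; parameter names follow the public Inference API.
print(query({"inputs": "Érase una vez ", "parameters": {"max_new_tokens": 40}}, "Model trained on OSCAR"))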
prompts.py DELETED

@@ -1,5 +0,0 @@
-PROMPT_LIST = {
-    "Érase una vez...": ["Érase una vez "],
-    "¡Hola!": ["¡Hola! Me llamo "],
-    "¿Ser o no ser?": ["En mi opinión, 'ser' es "],
-}
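The prompt presets that previously lived in prompts.py are now defined directly in app.py (see the added PROMPT_LIST block above). A minimal sketch of how the selector shown in the diff can feed a prompt into the app follows; the "Custom" text box and the choice of the first preset entry are assumptions, since the rest of app.py is not part of this diff.

import streamlit as st

# Same presets as the inlined PROMPT_LIST in app.py.
PROMPT_LIST = {
    "Érase una vez...": ["Érase una vez "],
    "¡Hola!": ["¡Hola! Me llamo "],
    "¿Ser o no ser?": ["En mi opinión, 'ser' es "],
}

# As in the diff: preset names plus a "Custom" option, with "Custom" preselected.
ALL_PROMPTS = list(PROMPT_LIST.keys()) + ["Custom"]
prompt = st.selectbox("Prompt", ALL_PROMPTS, index=len(ALL_PROMPTS) - 1)

if prompt == "Custom":
    prompt_text = st.text_area("Enter text here", "")  # hypothetical widget, not shown in this diff
else:
    prompt_text = PROMPT_LIST[prompt][0]  # presets are lists; the real app may pick an entry differently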