Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -3,17 +3,25 @@ import random
|
|
3 |
import requests
|
4 |
from mtranslate import translate
|
5 |
import streamlit as st
|
6 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
7 |
PROMPT_LIST = {
|
8 |
-
"
|
9 |
-
"
|
10 |
-
"
|
11 |
}
|
12 |
-
def query(payload,
|
13 |
data = json.dumps(payload)
|
14 |
-
print("model url:",
|
15 |
response = requests.request(
|
16 |
-
"POST",
|
17 |
)
|
18 |
return json.loads(response.content.decode("utf-8"))
|
19 |
def process(
|
@@ -34,9 +42,10 @@ def process(
|
|
34 |
}
|
35 |
return query(payload, model_name)
|
36 |
# Page
|
37 |
-
st.set_page_config(page_title="
|
38 |
-
st.title("
|
39 |
# Sidebar
|
|
|
40 |
st.sidebar.subheader("Configurable parameters")
|
41 |
max_len = st.sidebar.number_input(
|
42 |
"Maximum length",
|
@@ -68,12 +77,19 @@ do_sample = st.sidebar.selectbox(
|
|
68 |
# Body
|
69 |
st.markdown(
|
70 |
"""
|
71 |
-
|
|
|
|
|
|
|
|
|
|
|
72 |
|
73 |
-
The models
|
|
|
|
|
74 |
"""
|
75 |
)
|
76 |
-
model_name =
|
77 |
ALL_PROMPTS = list(PROMPT_LIST.keys()) + ["Custom"]
|
78 |
prompt = st.selectbox("Prompt", ALL_PROMPTS, index=len(ALL_PROMPTS) - 1)
|
79 |
if prompt == "Custom":
|
@@ -107,10 +123,24 @@ if st.button("Run"):
|
|
107 |
st.write(f"{error}")
|
108 |
else:
|
109 |
result = result[0]["generated_text"]
|
110 |
-
st.write(result.replace("\
|
111 |
-
", " \
|
112 |
-
"))
|
113 |
st.text("English translation")
|
114 |
-
st.write(translate(result, "en", "es").replace("\
|
115 |
-
|
116 |
-
"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
3 |
import requests
|
4 |
from mtranslate import translate
|
5 |
import streamlit as st
|
# Logo shown as the page icon and in the sidebar.
LOGO = "https://raw.githubusercontent.com/nlp-en-es/assets/main/logo.png"

# Selectable Spanish GPT-2 checkpoints, keyed by the label shown in the UI.
# NOTE: insertion order matters — it drives the order of the model selectbox.
MODELS = {
    "Model trained on OSCAR": {
        "url": "https://api-inference.huggingface.co/models/flax-community/gpt-2-spanish"
    },
    "Model trained on the Large Spanish Corpus": {
        "url": "https://api-inference.huggingface.co/models/mrm8488/spanish-gpt2"
    },
}

# Canned prompts offered in the UI; each label maps to a list of seed texts.
PROMPT_LIST = {
    "Érase una vez...": ["Érase una vez "],
    "¡Hola!": ["¡Hola! Me llamo "],
    "¿Ser o no ser?": ["En mi opinión, 'ser' es "],
}
|
20 |
+
def query(payload, model_name):
    """POST ``payload`` as JSON to the inference endpoint for ``model_name``.

    Args:
        payload: JSON-serializable request body for the HF Inference API.
        model_name: key into the module-level ``MODELS`` mapping.

    Returns:
        The decoded JSON response body (list or dict, as returned by the API).
    """
    data = json.dumps(payload)
    # Hoist the lookup: the original resolved MODELS[model_name]["url"] twice.
    url = MODELS[model_name]["url"]
    print("model url:", url)  # debug trace of the chosen endpoint
    # The original call had no timeout, which can hang the app indefinitely
    # if the inference API stalls; bound the wait explicitly.
    response = requests.post(url, headers={}, data=data, timeout=60)
    return json.loads(response.content.decode("utf-8"))
|
27 |
def process(
|
|
|
42 |
}
|
43 |
return query(payload, model_name)
|
44 |
# Page
|
45 |
+
st.set_page_config(page_title="Spanish GPT-2 Demo", page_icon=LOGO)
|
46 |
+
st.title("Spanish GPT-2")
|
47 |
# Sidebar
|
48 |
+
st.sidebar.image(LOGO)
|
49 |
st.sidebar.subheader("Configurable parameters")
|
50 |
max_len = st.sidebar.number_input(
|
51 |
"Maximum length",
|
|
|
77 |
# Body
|
78 |
st.markdown(
|
79 |
"""
|
80 |
+
Spanish GPT-2 models trained from scratch on two different datasets. One
|
81 |
+
model is trained on the Spanish portion of
|
82 |
+
[OSCAR](https://huggingface.co/datasets/viewer/?dataset=oscar)
|
83 |
+
and the other on the
|
84 |
+
[large_spanish_corpus](https://huggingface.co/datasets/viewer/?dataset=large_spanish_corpus)
|
85 |
+
aka BETO's corpus.
|
86 |
|
87 |
+
The models are trained with Flax and using TPUs sponsored by Google since this is part of the
|
88 |
+
[Flax/Jax Community Week](https://discuss.huggingface.co/t/open-to-the-community-community-week-using-jax-flax-for-nlp-cv/7104)
|
89 |
+
organised by HuggingFace.
|
90 |
"""
|
91 |
)
|
92 |
+
model_name = st.selectbox("Model", (list(MODELS.keys())))
|
93 |
ALL_PROMPTS = list(PROMPT_LIST.keys()) + ["Custom"]
|
94 |
prompt = st.selectbox("Prompt", ALL_PROMPTS, index=len(ALL_PROMPTS) - 1)
|
95 |
if prompt == "Custom":
|
|
|
123 |
st.write(f"{error}")
|
124 |
else:
|
125 |
result = result[0]["generated_text"]
|
126 |
+
st.write(result.replace("\n", " \n"))
|
|
|
|
|
127 |
st.text("English translation")
|
128 |
+
st.write(translate(result, "en", "es").replace("\n", " \n"))
|
129 |
+
st.markdown(
|
130 |
+
"""
|
131 |
+
### Team members
|
132 |
+
- Manuel Romero ([mrm8488](https://huggingface.co/mrm8488))
|
133 |
+
- María Grandury ([mariagrandury](https://huggingface.co/mariagrandury))
|
134 |
+
- Pablo González de Prado ([Pablogps](https://huggingface.co/Pablogps))
|
135 |
+
- Daniel Vera ([daveni](https://huggingface.co/daveni))
|
136 |
+
- Sri Lakshmi ([srisweet](https://huggingface.co/srisweet))
|
137 |
+
- José Posada ([jdposa](https://huggingface.co/jdposa))
|
138 |
+
- Santiago Hincapie ([shpotes](https://huggingface.co/shpotes))
|
139 |
+
- Jorge ([jorgealro](https://huggingface.co/jorgealro))
|
140 |
+
|
141 |
+
### More information
|
142 |
+
You can find more information about these models in their cards:
|
143 |
+
- [Model trained on OSCAR](https://huggingface.co/models/flax-community/gpt-2-spanish)
|
144 |
+
- [Model trained on the Large Spanish Corpus](https://huggingface.co/mrm8488/spanish-gpt2)
|
145 |
+
"""
|
146 |
+
)
|