Modify eng/gl interface
Files changed:
- README.md (+1 -1)
- app.py (+27 -53)
- interface_texts.csv (+1 -0)
README.md
CHANGED
@@ -10,4 +10,4 @@ pinned: false
 license: mit
 ---
 
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
CHANGED
@@ -3,45 +3,35 @@ import gradio as gr
 from gradio.components import Slider
 import torch
 from transformers import pipeline
-import pandas as pd
 
 # Model, information and examples ----------------------------------------------
 MODEL_NAMES = ["FLOR-1.3B-GL","Cerebras-1.3B-GL"]
-markdown_description_en = """
-# Galician LLMs
-
-
-This space contains the Galician language models developed by [Proxecto Nós](https://nos.gal/en/proxecto-nos).
-
-
-💐 **[FLOR-1.3B-GL](https://huggingface.co/proxectonos/FLOR-1.3B-GL)** is a 1.3B parameters model which is a Continual pretraining from [FLOR-1.3B](https://huggingface.co/projecte-aina/FLOR-1.3B), which is based in [Bloom 1.7B](https://huggingface.co/bigscience/bloom-1b7).
-
-👀 **Learn more about FLOR-1.3B-GL:** [HF official model card](https://huggingface.co/proxectonos/FLOR-1.3B-GL).
-
-
-🧠 **[Cerebras-1.3B-GL](https://huggingface.co/proxectonos/Cerebras-1.3B-GL)** is a 1.3B parameters model based in [Cerebras-GPT 1.3B](https://huggingface.co/cerebras/Cerebras-GPT-1.3B).
-
-👀 **Learn more about Cerebras-1.3B-GL:** [HF official model card](https://huggingface.co/proxectonos/Cerebras-1.3B-GL)
-"""
-
-markdown_description_gl = """
-# LLMs de galego
+markdown_description = """
+# LLMs de galego / Galician LLMs
 
 
 Este espazo contén diferentes Grandes Modelos da Linguaxe feitos para o galego desenvolvidos polo [Proxecto Nós](https://nos.gal/en/proxecto-nos).
 
+*This space contains the Galician language models developed by [Proxecto Nós](https://nos.gal/en/proxecto-nos).*
 
 💐 **[FLOR-1.3B-GL](https://huggingface.co/proxectonos/FLOR-1.3B-GL)** é un modelo de parámetros 1.3B que é un preadestramento continuo de [FLOR-1.3B]( https://huggingface.co/projecte-aina/FLOR-1.3B), baseado a súa vez en [Bloom 1.7B](https://huggingface.co/bigscience/bloom-1b7).
 
+*💐 **[FLOR-1.3B-GL](https://huggingface.co/proxectonos/FLOR-1.3B-GL)** is a 1.3B parameters model which is a Continual pretraining from [FLOR-1.3B](https://huggingface.co/projecte-aina/FLOR-1.3B), which is based in [Bloom 1.7B](https://huggingface.co/bigscience/bloom-1b7).*
+
 👀 **Máis información sobre FLOR-1.3B-GL:** [tarxeta modelo oficial HF](https://huggingface.co/proxectonos/FLOR-1.3B-GL).
 
+*👀 **Learn more about FLOR-1.3B-GL:** [HF official model card](https://huggingface.co/proxectonos/FLOR-1.3B-GL).*
+
+
+🧠 **[Cerebras-1.3B-GL](https://huggingface.co/proxectonos/Cerebras-1.3B-GL)** é un modelo de parámetros 1.3B baseado en [Cerebras-GPT 1.3B](https://huggingface.co/cerebras/Cerebras-GPT-1.3B).
 
-
+*🧠 **[Cerebras-1.3B-GL](https://huggingface.co/proxectonos/Cerebras-1.3B-GL)** is a 1.3B parameters model based in [Cerebras-GPT 1.3B](https://huggingface.co/cerebras/Cerebras-GPT-1.3B).*
 
 👀 **Máis información sobre Cerebras-1.3B-GL:** [tarxeta modelo oficial HF](https://huggingface.co/proxectonos/Cerebras-1.3B-GL)
+
+*👀 **Learn more about Cerebras-1.3B-GL:** [HF official model card](https://huggingface.co/proxectonos/Cerebras-1.3B-GL)*
 """
 
-markdown_description ={"en": markdown_description_en,"gl": markdown_description_gl}
 short_prompts_examples = [
     ["A receita tradicional das filloas é"],
     ["O neno vivía preto de"]
@@ -60,10 +50,6 @@ generator_model_flor = pipeline("text-generation", model=model_id_flor)
 model_id_cerebras = "proxectonos/Cerebras-1.3B-GL"
 generator_model_cerebras = pipeline("text-generation", model=model_id_cerebras, token=os.environ['TOKEN_HF'])
 
-# Load language texts ---------------------------------------------------------
-df_interface = pd.read_csv("interface_texts.csv")
-language = "gl"
-
 # Generation functions ---------------------------------------------------------
 def get_model(model_selection):
     if model_selection == "FLOR-1.3B-GL":
@@ -98,16 +84,6 @@ def predict(prompt, model_select, max_length, repetition_penalty, temperature):
     return generated_sequence
 
 # Gradio app ---------------------------------------------------------
-def get_text_lang(variable):
-    return df_interface.loc[df_interface['variable'] == variable, language].values[0]
-
-def change_language(demo):
-    if language == "gl":
-        language = "en"
-    else:
-        language = "gl"
-    demo.launch()
-
 def clear():
     return (
         None,
@@ -140,62 +116,60 @@ def gradio_app():
     with gr.Blocks(theme=fronted_theme) as demo:
         with gr.Row():
            with gr.Column(scale=0.1):
-                change_lang = gr.Button(value=get_text_lang("change_lang"))
                gr.HTML('<img src="https://huggingface.co/spaces/proxectonos/README/resolve/main/title-card.png" width="100%" style="border-radius: 0.75rem;">')
            with gr.Column():
-                gr.Markdown(markdown_description
+                gr.Markdown(markdown_description)
        with gr.Row(equal_height=True):
            model_select = gr.Dropdown(
-                label=
+                label="Selecione un modelo / Select a model",
                choices=MODEL_NAMES,
                value=MODEL_NAMES[0],
                interactive=True
            )
        with gr.Row(equal_height=True):
            with gr.Column():
-                text_gl = gr.Textbox(label=
+                text_gl = gr.Textbox(label="Entrada / Input",
                    lines=6, placeholder="e.g. O neno vai a escola con ")
                with gr.Row(variant="panel"):
-                    with gr.Accordion(
+                    with gr.Accordion("Parámetros do modelo / Model parameters", open=False):
                        max_length = Slider(
                            minimum=1,
                            maximum=200,
                            step=1,
                            value=30,
-                            label=
+                            label="Max tokens"
                        )
                        repetition_penalty = Slider(
                            minimum=0.1,
                            maximum=4,
                            step=0.1,
                            value=1.3,
-                            label=
+                            label="Penalización por repetición / Repetition penalty"
                        )
                        temperature = Slider(
                            minimum=0,
                            maximum=1,
                            value=0.5,
-                            label=
+                            label="Temperatura / Temperature"
                        )
-                generator_btn = gr.Button(value=
+                generator_btn = gr.Button(value="Xerar / Generate",variant='primary')
            with gr.Column():
-                generated_gl = gr.Textbox(label=
+                generated_gl = gr.Textbox(label="Saída / Output",
                    lines=6,
-                    placeholder=
+                    placeholder="O texto xerado aparecerá aquí...",
                    interactive=False,
                    show_copy_button=True)
-                pass_btn = gr.Button(value=
-                clean_btn = gr.Button(value=
+                pass_btn = gr.Button(value="Pasar texto xerado á entrada / Pass generated text to input",variant='secondary')
+                clean_btn = gr.Button(value="Limpar / Clear",variant='secondary')
 
        generator_btn.click(predict, inputs=[text_gl, model_select, max_length, repetition_penalty, temperature], outputs=generated_gl, api_name="generate-flor-gl")
        clean_btn.click(fn=clear, inputs=[], outputs=[text_gl, generated_gl, max_length, repetition_penalty, temperature], queue=False, api_name=False)
        pass_btn.click(fn=pass_to_input, inputs=[generated_gl], outputs=[text_gl,generated_gl], queue=False, api_name=False)
-
-
+
        with gr.Row():
            with gr.Column(scale=0.5):
                gr.Examples(
-                    label =
+                    label = "Prompts curtos / Short prompts",
                    examples = short_prompts_examples,
                    inputs = [text_gl],
                    outputs = [max_length, repetition_penalty, temperature],
@@ -203,7 +177,7 @@ def gradio_app():
                    run_on_click = True
                )
                gr.Examples(
-                    label =
+                    label = "Prompts con poucos exemplos / Few-shot prompts",
                    examples = few_shot_prompts_examples,
                    inputs = [text_gl],
                    outputs = [max_length, repetition_penalty, temperature],
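The predict() function wired to these controls sits outside the changed hunks, so the diff does not show how the slider values reach the model. As a rough, hypothetical sketch only (not the Space's actual predict() implementation), this is how such values are commonly forwarded to a transformers text-generation pipeline like the generator_model_flor created above:

```python
# Hypothetical sketch -- the Space's real predict() is not part of this diff.
# It illustrates how the three slider values (max tokens, repetition penalty,
# temperature) are typically forwarded to a transformers text-generation pipeline.
from transformers import pipeline

generator = pipeline("text-generation", model="proxectonos/FLOR-1.3B-GL")

def generate(prompt, max_length=30, repetition_penalty=1.3, temperature=0.5):
    # do_sample=True is assumed so that temperature actually influences decoding;
    # the keyword arguments below are passed through to model.generate().
    outputs = generator(
        prompt,
        max_length=max_length,
        repetition_penalty=repetition_penalty,
        temperature=temperature,
        do_sample=True,
    )
    return outputs[0]["generated_text"]

print(generate("A receita tradicional das filloas é"))
```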
interface_texts.csv
CHANGED
@@ -1,5 +1,6 @@
 variable,en,gl
 change_lang, Cambiar a Galego, Switch to English
+change_lang_url,
 model_select,Model selection,Seleccione un modelo
 text_gl,Input,Entrada
 accordion_parameters,Model parameters,Parámetros do modelo
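For context on why interface_texts.csv exists: before this commit, app.py resolved its UI labels from this file through the now-deleted get_text_lang() helper, while the new code hard-codes bilingual strings directly in the Gradio layout. The following self-contained sketch reassembles that removed lookup from the deleted lines in the app.py diff above, assuming the CSV sits in the working directory:

```python
# Reassembled from the lines this commit deletes from app.py: the CSV-driven
# label lookup that the hard-coded bilingual labels now replace.
import pandas as pd

df_interface = pd.read_csv("interface_texts.csv")  # columns: variable, en, gl
language = "gl"

def get_text_lang(variable):
    # Return the text registered for `variable` in the currently selected language.
    return df_interface.loc[df_interface["variable"] == variable, language].values[0]

print(get_text_lang("model_select"))  # -> "Seleccione un modelo"
```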