Spaces · Runtime error
menouar committed · 6674f1f
1 Parent(s): e2f32aa
Update UI

Files changed:
- app.py (+52 -9)
- utils/components_creator.py (+7 -7)
app.py CHANGED

@@ -31,11 +31,24 @@ css = """
     align-items: center;
     transition: var(--button-transition);
     box-shadow: var(--button-shadow);
-    text-align: center
+    text-align: center;
+    cursor: pointer;
+}
+
+.a_custom:hover {
+    border-color: var(--button-primary-border-color-hover);
+    background: var(--button-primary-background-fill-hover);
+    color: var(--button-primary-text-color-hover);
 }
+
 .a_custom a {
     text-decoration: none;
     color: white;
+    display: block;
+}
+
+.dashed_row {
+    border: 1px dashed #60a5fa;
 }
 """
 
@@ -44,6 +57,10 @@ def centered_column():
     return gr.Column(elem_classes=["container"])
 
 
+def dashed_row():
+    return gr.Row(elem_classes=["dashed_row"])
+
+
 def should_login_to_hf_model(model_id: str):
     return model_id == gemma.name or model_id == llama.name
 
@@ -213,21 +230,47 @@ def generate_code(components: dict[Component, Any]):
 
 with gr.Blocks(css=css, theme=gr.themes.Soft(text_size='lg', font=["monospace"],
                                              primary_hue=gr.themes.colors.blue)) as demo:
-    gr.Label("UI-Guided LLM
-
-
-
-
+    gr.Label("UI-Guided LLM Fine-Tuning Jupyter Notebook Generator π οΈπ§ ", show_label=False)
+
+    gr.Markdown('''
+    This space generates a **Jupyter Notebook file (.ipynb)** πβοΈ that guides you through the
+    entire process of **supervised fine-tuning** of a raw Large Language Model (**LLM**) π§  on a chosen dataset in
+    the **Conversational format**. The process is facilitated by an intuitive **User Interface (UI)** ππ»:
+    ''', elem_classes=["center_text"])
+    with dashed_row():
+        with centered_column():
+            with gr.Accordion("1. No Coding Required", open=False):
+                gr.Markdown("The UI guides you through the entire process, eliminating the need for manual coding.")
+            with gr.Accordion("2. Customizable Parameters", open=False):
+                gr.Markdown(
+                    "You can customize the most commonly used parameters for supervised fine-tuning to suit your needs.")
+        with centered_column():
+            with gr.Accordion("3. Comprehensive Notebook", open=False):
+                gr.Markdown("The generated .ipynb contains all steps, from installing libraries and writing a "
+                            "README.md, "
+                            "to pushing the final model to the Hugging Face Hub.")
+            with gr.Accordion("4. Preview Before Download", open=False):
+                gr.Markdown("You can preview the generated .ipynb before downloading it to ensure it "
+                            "meets "
+                            "your requirements.")
+        with centered_column():
+            with gr.Accordion("5. User-Friendly", open=False):
+                gr.Markdown("The UI is designed to be easy to use and understand, making the fine-tuning process "
+                            "accessible "
+                            "to everyone.")
+            with gr.Accordion("6. Open-Source", open=False):
+                gr.Markdown(
+                    "This space is open source, so you can collaborate to improve it and make it more powerful.")
 
     all_components: Set[Component] = set()
 
-    gr.HTML("<h2 style='text-align: center;'>
+    gr.HTML("<h2 style='text-align: center;'>Model π§ </h2>")
     with gr.Row():
         model_selection = gr.Dropdown(
             [model.name for model in models],
             elem_id=MODEL_SELECTION_ID,
-            label="Select a
-            info="Select a Large Language Model (LLM) to
+            label="Select a raw LLM",
+            info="Select a raw Large Language Model (LLM) to fine-tune."
         )
         version_selection = gr.Dropdown(
             choices=[], label="Select a Model Version π", info="", visible=False, elem_id=MODEL_VERSION_SELECTION_ID
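For orientation, here is a minimal, self-contained sketch (not part of the commit) of how the new `.dashed_row` CSS rule and the `dashed_row()` / `centered_column()` helpers introduced above are expected to work together through Gradio's `elem_classes`. The `.dashed_row` rule and class names come from the diff; the `.container` rule and the accordion content are assumptions for illustration only.

# Illustrative only: elem_classes hooks a Gradio layout element to the custom
# CSS added in this commit. Assumes `pip install gradio`.
import gradio as gr

css = """
.container { display: flex; flex-direction: column; align-items: center; }  /* assumed definition */
.dashed_row { border: 1px dashed #60a5fa; }                                 /* from the diff above */
"""


def centered_column():
    return gr.Column(elem_classes=["container"])


def dashed_row():
    return gr.Row(elem_classes=["dashed_row"])


with gr.Blocks(css=css) as demo:
    with dashed_row():                       # row receives the dashed border
        with centered_column():              # column is centered by .container
            with gr.Accordion("Example section", open=False):
                gr.Markdown("Accordion content rendered inside the dashed row.")

if __name__ == "__main__":
    demo.launch()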
utils/components_creator.py CHANGED

@@ -51,12 +51,12 @@ def add_quantization_components1() -> Set[Component]:
 
 
 def add_dataset_components() -> Set[Component]:
-    dataset_selection = gr.Dropdown(
-
-
-
-
-
+    dataset_selection = gr.Dropdown([dt.path for dt in ft_datasets],
+                                    elem_id=DATASET_SELECTION_ID,
+                                    label="Select a Dataset",
+                                    info="Choose a dataset to finetune the model in the ChatML format."
+                                    )
+
     seed = gr.Slider(0, 256, step=1, value=42, elem_id=DATASET_SHUFFLING_SEED, label="Random Seed",
                      info="Set a random seed for shuffling the dataset.", interactive=True)
 
@@ -170,7 +170,7 @@ def add_training_args_3() -> Set[Component]:
 def add_outputs() -> (Component, Component):
     output_dir = gr.Textbox(interactive=True,
                             label="output_dir",
-                            info='The output directory where the model
+                            info='The output directory where the model and checkpoints will be saved.',
                             elem_id=OUTPUT_DIR_ID)
 
     push_to_hub = gr.Checkbox(label="push_to_hub", value=False, interactive=True,
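As a quick sanity check of the reworked `add_dataset_components`, the sketch below reconstructs the function around a hypothetical `ft_datasets` list. The real `ft_datasets`, `DATASET_SELECTION_ID`, and `DATASET_SHUFFLING_SEED` are defined elsewhere in the repository, so every value shown here is a placeholder, not the Space's actual configuration.

# Hypothetical sketch: the real ft_datasets and *_ID constants live elsewhere
# in the repo; names and values below are placeholders for illustration.
from dataclasses import dataclass
from typing import List, Set

import gradio as gr
from gradio.components import Component

DATASET_SELECTION_ID = "dataset_selection"   # assumed value
DATASET_SHUFFLING_SEED = "dataset_seed"      # assumed value


@dataclass
class FTDataset:
    path: str  # Hugging Face Hub dataset id


# Placeholder entry; the Space ships its own curated list.
ft_datasets: List[FTDataset] = [FTDataset("HuggingFaceH4/ultrachat_200k")]


def add_dataset_components() -> Set[Component]:
    # Meant to be called inside a `with gr.Blocks():` context so the
    # components are rendered as part of the page.
    dataset_selection = gr.Dropdown([dt.path for dt in ft_datasets],
                                    elem_id=DATASET_SELECTION_ID,
                                    label="Select a Dataset",
                                    info="Choose a dataset to finetune the model in the ChatML format.")
    seed = gr.Slider(0, 256, step=1, value=42, elem_id=DATASET_SHUFFLING_SEED,
                     label="Random Seed",
                     info="Set a random seed for shuffling the dataset.",
                     interactive=True)
    return {dataset_selection, seed}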