Improve comments
menouar committed · Commit 0e9e537 · 1 parent: c7b9e3f

Files changed:
- app.py (+1 -1)
- utils/notebook_generator.py (+2 -2)
app.py
CHANGED
@@ -304,7 +304,7 @@ with gr.Blocks(css=css, theme=gr.themes.Soft(text_size='lg', font=["monospace"],
     with gr.Row():
         all_components.update(add_pad_tokens())

-    gr.HTML("<h2 style='text-align: center;'>
+    gr.HTML("<h2 style='text-align: center;'>LoRA Configuration</h2>")
     with gr.Row():
         with centered_column():
             all_components.update(add_lora_components1())
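The single changed line in app.py completes a centered section heading, rendered with `gr.HTML`, placed between the pad-token row and the LoRA component rows. Below is a minimal sketch of the same layout pattern; the textboxes are placeholders standing in for the app's real component groups (`add_pad_tokens` and `add_lora_components1` are not reproduced here).

```python
import gradio as gr

# Minimal sketch of the layout pattern touched in app.py: an HTML heading
# separating two rows inside a gr.Blocks layout. The textboxes are
# placeholders for the app's real component groups.
with gr.Blocks() as demo:
    with gr.Row():
        gr.Textbox(label="Pad token options (placeholder)")

    # Centered section title, as added by this commit.
    gr.HTML("<h2 style='text-align: center;'>LoRA Configuration</h2>")

    with gr.Row():
        gr.Textbox(label="LoRA options (placeholder)")

if __name__ == "__main__":
    demo.launch()
```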
utils/notebook_generator.py
CHANGED
@@ -219,7 +219,7 @@ a 24GB GPU for fine-tuning.


 def create_lora_config_cells(cells: list, r: int, alpha: int, dropout: float, bias: str):
-    text_cell = nbf.v4.new_markdown_cell("## Setting
+    text_cell = nbf.v4.new_markdown_cell("## Setting LoRA Config")
     code = f"""
 from peft import LoraConfig

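For context, `create_lora_config_cells` appends a markdown heading cell and an f-string code cell to the generated notebook. The sketch below shows one plausible shape for that helper; only the function signature, the heading text, and the `from peft import LoraConfig` line appear in this diff, so the remaining `LoraConfig` fields (e.g. `task_type`) are assumptions rather than the app's actual generated code.

```python
import nbformat as nbf


def create_lora_config_cells(cells: list, r: int, alpha: int, dropout: float, bias: str):
    # Markdown heading cell, using the title introduced by this commit.
    text_cell = nbf.v4.new_markdown_cell("## Setting LoRA Config")

    # Illustrative code-cell body: builds a peft LoraConfig from the
    # user-selected hyperparameters. Field names beyond the import line
    # are assumptions, not taken from the diff.
    code = f"""
from peft import LoraConfig

peft_config = LoraConfig(
    r={r},
    lora_alpha={alpha},
    lora_dropout={dropout},
    bias="{bias}",
    task_type="CAUSAL_LM",
)
"""
    code_cell = nbf.v4.new_code_cell(code)
    cells.extend([text_cell, code_cell])
```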
@@ -379,7 +379,7 @@ torch.cuda.empty_cache()

 def create_merge_lora_cells(cells: list, output_dir):
     text_cell = nbf.v4.new_markdown_cell(
-        """## Merging
+        """## Merging LoRA Adapters into the Original Model

 While utilizing `LoRA`, we focus on training the adapters rather than the entire model. Consequently, during the
 model saving process, only the `adapter weights` are preserved, not the complete model. If we wish to save the
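The renamed markdown cell introduces the merge step that `create_merge_lora_cells` documents: since LoRA training saves only adapter weights, the adapters must be folded back into the base model to obtain a standalone checkpoint. A generic sketch of that step using peft's `merge_and_unload` is shown below; the model name and directories are placeholders, and this is not necessarily the exact code the notebook generator emits.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

# Placeholder names; in the generated notebook these come from earlier cells.
base_model_name = "base-model-name"
adapter_dir = "output_dir"   # where the trained LoRA adapters were saved
merged_dir = "merged_model"

# Load the original base model and attach the trained LoRA adapters.
base_model = AutoModelForCausalLM.from_pretrained(base_model_name)
model = PeftModel.from_pretrained(base_model, adapter_dir)

# Fold the adapter weights into the base weights so the full model can be
# saved and later loaded without peft.
merged_model = model.merge_and_unload()
merged_model.save_pretrained(merged_dir)

# Save the tokenizer alongside the merged weights.
tokenizer = AutoTokenizer.from_pretrained(base_model_name)
tokenizer.save_pretrained(merged_dir)
```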