import os
import subprocess
from typing import Union
from huggingface_hub import whoami, HfApi
from fastapi import FastAPI
from starlette.middleware.sessions import SessionMiddleware
import sys

# Install ai-toolkit if it is not already present
if not os.path.exists("ai-toolkit"):
    subprocess.run("git clone https://github.com/ostris/ai-toolkit.git", shell=True)
    subprocess.run("cd ai-toolkit && git submodule update --init --recursive", shell=True)

# Add the ai-toolkit checkout to the Python path
toolkit_path = os.path.join(os.getcwd(), "ai-toolkit")
sys.path.append(toolkit_path)

# Install the packages ai-toolkit needs
subprocess.run("pip install -r ai-toolkit/requirements.txt", shell=True)
is_spaces = bool(os.environ.get("SPACE_ID"))
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"

from dotenv import load_dotenv

load_dotenv()

# Add the current working directory to the Python path
sys.path.insert(0, os.getcwd())
import gradio as gr
from PIL import Image
import torch
import uuid
import shutil
import json
import yaml
from slugify import slugify
from transformers import AutoProcessor, AutoModelForCausalLM

# FastAPI app with session middleware (currently unused; the Gradio demo below
# is launched standalone rather than mounted on this app)
app = FastAPI()
app.add_middleware(SessionMiddleware, secret_key="your-secret-key")

if not is_spaces:
    sys.path.insert(0, "ai-toolkit")
    from toolkit.job import get_job

# Disable Gradio's OAuth types; this app authenticates with HF_TOKEN instead
gr.OAuthProfile = None
gr.OAuthToken = None
MAX_IMAGES = 150

# Hugging Face token setup
HF_TOKEN = os.getenv("HF_TOKEN")
if not HF_TOKEN:
    raise ValueError("HF_TOKEN environment variable is not set")

if is_spaces:
    # {**os.environ, ...} keeps PATH etc. intact; passing only the one variable
    # would hand pip an otherwise empty environment
    subprocess.run(
        "pip install flash-attn --no-build-isolation",
        env={**os.environ, "FLASH_ATTENTION_SKIP_CUDA_BUILD": "TRUE"},
        shell=True,
    )
    import spaces

os.environ["HUGGING_FACE_HUB_TOKEN"] = HF_TOKEN

# Initialize the HF API client
api = HfApi(token=HF_TOKEN)
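# whoami() in start_training() below reads HUGGING_FACE_HUB_TOKEN from the
# environment, so the explicit HfApi client and the implicit hub calls should
# authenticate as the same account.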
def load_captioning(uploaded_files, concept_sentence):
    uploaded_images = [file for file in uploaded_files if not file.endswith(".txt")]
    txt_files = [file for file in uploaded_files if file.endswith(".txt")]
    txt_files_dict = {
        os.path.splitext(os.path.basename(txt_file))[0]: txt_file for txt_file in txt_files
    }
    updates = []
    if len(uploaded_images) <= 1:
        raise gr.Error(
            "Please upload at least 2 images to train your model (the ideal number with default settings is between 4-30)"
        )
    elif len(uploaded_images) > MAX_IMAGES:
        raise gr.Error(f"For now, at most {MAX_IMAGES} images are allowed for training")
    # Show the captioning area
    updates.append(gr.update(visible=True))
    # Update visibility and image for each captioning row and image
    for i in range(1, MAX_IMAGES + 1):
        # Determine if the current row and image should be visible
        visible = i <= len(uploaded_images)
        # Update visibility of the captioning row
        updates.append(gr.update(visible=visible))
        # Update for image component - display image if available, otherwise hide
        image_value = uploaded_images[i - 1] if visible else None
        updates.append(gr.update(value=image_value, visible=visible))
        # If a .txt file with the same basename was uploaded, use it as the caption
        corresponding_caption = None
        if image_value:
            base_name = os.path.splitext(os.path.basename(image_value))[0]
            if base_name in txt_files_dict:
                with open(txt_files_dict[base_name], "r") as file:
                    corresponding_caption = file.read()
        # Update value of the captioning area
        text_value = (
            corresponding_caption
            if visible and corresponding_caption
            else "[trigger]" if visible and concept_sentence else None
        )
        updates.append(gr.update(value=text_value, visible=visible))
    # Show the sample caption area
    updates.append(gr.update(visible=True))
    # Update prompt samples
    updates.append(
        gr.update(
            placeholder=f"A portrait of a person in a bustling cafe {concept_sentence}",
            value=f"A person in a bustling cafe {concept_sentence}",
        )
    )
    updates.append(gr.update(placeholder=f"A mountainous landscape in the style of {concept_sentence}"))
    updates.append(gr.update(placeholder=f"A {concept_sentence} in a mall"))
    return updates


def hide_captioning():
    return gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)
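# The order of `updates` returned by load_captioning must match the
# `output_components` list built in the UI section below:
# [captioning_area] + MAX_IMAGES * [row, image, caption] + [sample, sample_1,
# sample_2, sample_3]. Gradio maps return values to outputs by position.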
def create_dataset(*inputs):
    print("Creating dataset")
    images = inputs[0]
    destination_folder = f"datasets/{uuid.uuid4()}"
    os.makedirs(destination_folder, exist_ok=True)

    jsonl_file_path = os.path.join(destination_folder, "metadata.jsonl")
    with open(jsonl_file_path, "a") as jsonl_file:
        for index, image in enumerate(images):
            new_image_path = shutil.copy(image, destination_folder)
            # inputs[1:] are the caption textboxes, in the same order as the images
            original_caption = inputs[index + 1]
            file_name = os.path.basename(new_image_path)
            data = {"file_name": file_name, "prompt": original_caption}
            jsonl_file.write(json.dumps(data) + "\n")
    return destination_folder
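# Each metadata.jsonl line pairs an image with its caption; an illustrative
# (hypothetical) line:
#   {"file_name": "photo_01.png", "prompt": "A person in a bustling cafe [trigger]"}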
def run_captioning(images, concept_sentence, *captions):
    device = "cuda" if torch.cuda.is_available() else "cpu"
    # fp16 needs a GPU; fall back to fp32 on CPU
    torch_dtype = torch.float16 if device == "cuda" else torch.float32
    model = AutoModelForCausalLM.from_pretrained(
        "microsoft/Florence-2-large", torch_dtype=torch_dtype, trust_remote_code=True
    ).to(device)
    processor = AutoProcessor.from_pretrained("microsoft/Florence-2-large", trust_remote_code=True)

    captions = list(captions)
    for i, image_path in enumerate(images):
        if isinstance(image_path, str):  # If image is a file path
            image = Image.open(image_path).convert("RGB")

        prompt = "<DETAILED_CAPTION>"
        inputs = processor(text=prompt, images=image, return_tensors="pt").to(device, torch_dtype)
        generated_ids = model.generate(
            input_ids=inputs["input_ids"], pixel_values=inputs["pixel_values"], max_new_tokens=1024, num_beams=3
        )
        generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
        parsed_answer = processor.post_process_generation(
            generated_text, task=prompt, image_size=(image.width, image.height)
        )
        caption_text = parsed_answer["<DETAILED_CAPTION>"].replace("The image shows ", "")
        if concept_sentence:
            caption_text = f"{caption_text} [trigger]"
        captions[i] = caption_text
        # Yield after every image so the UI updates captions incrementally
        yield captions

    # Free the model once all images are captioned
    model.to("cpu")
    del model
    del processor


if is_spaces:
    run_captioning = spaces.GPU()(run_captioning)
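# spaces.GPU() is the ZeroGPU decorator: on Spaces it allocates a GPU only for
# the duration of the call. run_captioning is wrapped here rather than decorated
# at definition time because the `spaces` import only exists when is_spaces is set.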
def recursive_update(d, u):
    for k, v in u.items():
        if isinstance(v, dict) and v:
            d[k] = recursive_update(d.get(k, {}), v)
        else:
            d[k] = v
    return d
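# recursive_update deep-merges `u` into `d`, e.g.:
#   recursive_update({"train": {"lr": 4e-4, "steps": 1000}}, {"train": {"steps": 2000}})
#   -> {"train": {"lr": 4e-4, "steps": 2000}}
# It is used below to overlay the "advanced options" YAML onto the base config.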
def start_training(
    lora_name,
    concept_sentence,
    which_model,
    steps,
    lr,
    rank,
    dataset_folder,
    sample_1,
    sample_2,
    sample_3,
    use_more_advanced_options,
    more_advanced_options,
):
    if not lora_name:
        raise gr.Error("You forgot to insert your LoRA name! This name has to be unique.")
    try:
        username = whoami()["name"]
    except Exception:
        raise gr.Error("Failed to get username. Please check your HF_TOKEN.")

    print("Started training")
    slugged_lora_name = slugify(lora_name)

    # Load the default config
    with open("train_lora_flux_24gb.yaml", "r") as f:
        config = yaml.safe_load(f)

    # Configure the FLUX.1-dev model
    config["config"]["name"] = slugged_lora_name
    config["config"]["process"][0]["model"]["name_or_path"] = "black-forest-labs/FLUX.1-dev"
    config["config"]["process"][0]["model"]["assistant_lora_path"] = None  # train without an assistant adapter
    config["config"]["process"][0]["model"]["low_vram"] = False
    config["config"]["process"][0]["train"]["skip_first_sample"] = True
    config["config"]["process"][0]["train"]["steps"] = int(steps)
    config["config"]["process"][0]["train"]["lr"] = float(lr)
    config["config"]["process"][0]["network"]["linear"] = int(rank)
    config["config"]["process"][0]["network"]["linear_alpha"] = int(rank)
    config["config"]["process"][0]["datasets"][0]["folder_path"] = dataset_folder
    config["config"]["process"][0]["save"]["push_to_hub"] = True
    config["config"]["process"][0]["save"]["hf_repo_id"] = f"{username}/{slugged_lora_name}"
    config["config"]["process"][0]["save"]["hf_private"] = True
    config["config"]["process"][0]["save"]["hf_token"] = HF_TOKEN
    config["config"]["process"][0]["sample"]["sample_steps"] = 28

    if concept_sentence:
        config["config"]["process"][0]["trigger_word"] = concept_sentence

    if sample_1 or sample_2 or sample_3:
        config["config"]["process"][0]["train"]["disable_sampling"] = False
        config["config"]["process"][0]["sample"]["sample_every"] = int(steps)
        config["config"]["process"][0]["sample"]["prompts"] = []
        if sample_1:
            config["config"]["process"][0]["sample"]["prompts"].append(sample_1)
        if sample_2:
            config["config"]["process"][0]["sample"]["prompts"].append(sample_2)
        if sample_3:
            config["config"]["process"][0]["sample"]["prompts"].append(sample_3)
    else:
        config["config"]["process"][0]["train"]["disable_sampling"] = True

    if use_more_advanced_options:
        more_advanced_options_dict = yaml.safe_load(more_advanced_options)
        config["config"]["process"][0] = recursive_update(config["config"]["process"][0], more_advanced_options_dict)

    # Log the config with the token redacted so HF_TOKEN never lands in the logs
    redacted = yaml.safe_load(yaml.dump(config))
    redacted["config"]["process"][0]["save"]["hf_token"] = "***"
    print(redacted)

    try:
        # Save the updated config
        random_config_name = str(uuid.uuid4())
        os.makedirs("tmp", exist_ok=True)
        config_path = f"tmp/{random_config_name}-{slugged_lora_name}.yaml"
        with open(config_path, "w") as f:
            yaml.dump(config, f)
        # Run training directly on the local GPU
        from toolkit.job import get_job
        job = get_job(config_path)
        job.run()
        job.cleanup()
    except Exception as e:
        raise gr.Error(f"Training failed: {str(e)}")

    return f"""# Training completed successfully!
## Your model is available at: <a href='https://huggingface.co/{username}/{slugged_lora_name}'>{username}/{slugged_lora_name}</a>"""
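# For reference, the keys mutated above imply train_lora_flux_24gb.yaml has
# roughly this shape (a sketch inferred from the accesses, not the full file):
#
#   config:
#     name: ...
#     process:
#       - model: {name_or_path: ..., assistant_lora_path: ..., low_vram: ...}
#         train: {steps: ..., lr: ..., skip_first_sample: ..., disable_sampling: ...}
#         network: {linear: ..., linear_alpha: ...}
#         datasets: [{folder_path: ...}]
#         save: {push_to_hub: ..., hf_repo_id: ..., hf_private: ..., hf_token: ...}
#         sample: {sample_steps: ..., sample_every: ..., prompts: [...]}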
def update_pricing(steps):
    try:
        seconds_per_iteration = 7.54
        total_seconds = (steps * seconds_per_iteration) + 240  # +240 s of fixed overhead
        cost_per_second = 0.80 / 60 / 60  # L4 GPU at US$0.80/hour
        cost = round(cost_per_second * total_seconds, 2)
        cost_preview = f'''To train this LoRA, a paid L4 GPU will be hooked under the hood during training and then removed once finished.
### Estimated to cost <b>< US$ {str(cost)}</b> for {round(int(total_seconds)/60, 2)} minutes with your current training settings <small>({int(steps)} iterations at {seconds_per_iteration}s/it)</small>'''
        return gr.update(visible=True), cost_preview, gr.update(visible=False), gr.update(visible=True)
    except Exception:
        return gr.update(visible=False), "", gr.update(visible=False), gr.update(visible=True)
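# Worked example: steps=1000 -> 1000 * 7.54 + 240 = 7780 s ≈ 129.67 min,
# and 7780 s * (0.80 / 3600) $/s ≈ US$ 1.73.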
def swap_base_model(model):
    # Note: defined for a dev/schnell model toggle but not wired to any event below
    return gr.update(visible=True) if model == "[dev] (high quality model, non-commercial license)" else gr.update(visible=False)
config_yaml = '''
device: cuda:0
model:
  is_flux: true
  quantize: true
network:
  linear: 16 # overrides the 'rank' parameter above
  linear_alpha: 16 # the alpha may differ from the rank if you'd like
  type: lora
sample:
  guidance_scale: 3.5
  height: 1024
  neg: '' # negative prompts don't work for FLUX
  sample_every: 1000
  sample_steps: 28
  sampler: flowmatch
  seed: 42
  walk_seed: true
  width: 1024
save:
  dtype: float16
  hf_private: true
  max_step_saves_to_keep: 4
  push_to_hub: true
  save_every: 10000
train:
  batch_size: 1
  dtype: bf16
  ema_config:
    ema_decay: 0.99
    use_ema: true
  gradient_accumulation_steps: 1
  gradient_checkpointing: true
  noise_scheduler: flowmatch
  optimizer: adamw8bit # options: prodigy, dadaptation, adamw, adamw8bit, lion, lion8bit
  train_text_encoder: false # probably doesn't work for flux
  train_unet: true
'''
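# This YAML pre-fills the "Even more advanced options" code box. When the
# checkbox is ticked, start_training() deep-merges it into process[0] via
# recursive_update(), so any key here overrides the matching UI/default value.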
custom_theme = gr.themes.Base(
    primary_hue="indigo",
    secondary_hue="slate",
    neutral_hue="slate",
).set(
    # Base background and borders
    background_fill_primary="#1a1a1a",
    background_fill_secondary="#2d2d2d",
    border_color_primary="#404040",
    # Button styles
    button_primary_background_fill="#4F46E5",
    button_primary_background_fill_dark="#4338CA",
    button_primary_background_fill_hover="#6366F1",
    button_primary_border_color="#4F46E5",
    button_primary_border_color_dark="#4338CA",
    button_primary_text_color="white",
    button_primary_text_color_dark="white",
    button_secondary_background_fill="#374151",
    button_secondary_background_fill_dark="#1F2937",
    button_secondary_background_fill_hover="#4B5563",
    button_secondary_text_color="white",
    button_secondary_text_color_dark="white",
    # Block styles
    block_background_fill="#2d2d2d",
    block_background_fill_dark="#1F2937",
    block_label_background_fill="#4F46E5",
    block_label_background_fill_dark="#4338CA",
    block_label_text_color="white",
    block_label_text_color_dark="white",
    block_title_text_color="white",
    block_title_text_color_dark="white",
    # Input field styles
    input_background_fill="#374151",
    input_background_fill_dark="#1F2937",
    input_border_color="#4B5563",
    input_border_color_dark="#374151",
    input_placeholder_color="#9CA3AF",
    input_placeholder_color_dark="#6B7280",
    # Shadow effects
    shadow_spread="8px",
    shadow_inset="0px 2px 4px 0px rgba(0,0,0,0.1)",
    # Panel styles
    panel_background_fill="#2d2d2d",
    panel_background_fill_dark="#1F2937",
    # Accent border styles
    border_color_accent="#4F46E5",
    border_color_accent_dark="#4338CA"
)
css = '''
/* Base styles */
h1 {
    font-size: 3em;
    text-align: center;
    margin-bottom: 0.5em;
    color: white !important;
}
h3 {
    margin-top: 0;
    font-size: 1.2em;
    color: white !important;
}
/* Markdown text styles */
.markdown {
    color: white !important;
}
.markdown h1,
.markdown h2,
.markdown h3,
.markdown h4,
.markdown h5,
.markdown h6,
.markdown p {
    color: white !important;
}
/* Component styles */
.container {
    max-width: 1200px;
    margin: 0 auto;
    padding: 20px;
}
/* Input field styles */
.input-group {
    background: var(--block-background-fill);
    padding: 15px;
    border-radius: 12px;
    margin-bottom: 20px;
    box-shadow: 0 2px 4px rgba(0,0,0,0.1);
}
/* Text color for all input fields */
input, textarea, .gradio-textbox input, .gradio-textbox textarea, .gradio-number input {
    color: white !important;
}
/* Label text styles */
label, .label-text {
    color: white !important;
}
/* Radio button text */
.gradio-radio label span {
    color: white !important;
}
/* Checkbox text */
.gradio-checkbox label span {
    color: white !important;
}
/* Button styles */
.button {
    height: 40px;
    border-radius: 8px;
    transition: all 0.3s ease;
    color: white !important;
}
.button:hover {
    transform: translateY(-2px);
    box-shadow: 0 4px 6px rgba(0,0,0,0.1);
}
/* Image upload area */
.image-upload-area {
    border: 2px dashed var(--input-border-color);
    border-radius: 12px;
    padding: 20px;
    text-align: center;
    margin-bottom: 20px;
    color: white !important;
}
/* Caption area */
.caption-area {
    background: var(--block-background-fill);
    padding: 15px;
    border-radius: 12px;
    margin-top: 20px;
    color: white !important;
}
.caption-row {
    display: flex;
    align-items: center;
    margin-bottom: 10px;
    gap: 10px;
}
/* Advanced options area */
.advanced-options {
    background: var(--block-background-fill);
    padding: 15px;
    border-radius: 12px;
    margin-top: 20px;
    color: white !important;
}
/* Progress status display */
.progress-area {
    background: var(--block-background-fill);
    padding: 15px;
    border-radius: 12px;
    margin-top: 20px;
    text-align: center;
    color: white !important;
}
/* Placeholder text */
::placeholder {
    color: rgba(255, 255, 255, 0.5) !important;
}
/* Code editor text */
.gradio-code {
    color: white !important;
}
/* Accordion text */
.gradio-accordion .label-wrap {
    color: white !important;
}
/* Responsive design */
@media (max-width: 768px) {
    .caption-row {
        flex-direction: column;
    }
}
/* Scrollbar styles */
::-webkit-scrollbar {
    width: 8px;
}
::-webkit-scrollbar-track {
    background: var(--background-fill-primary);
    border-radius: 4px;
}
::-webkit-scrollbar-thumb {
    background: var(--primary-500);
    border-radius: 4px;
}
::-webkit-scrollbar-thumb:hover {
    background: var(--primary-600);
}
/* Text color for all text inputs */
.gradio-container input[type="text"],
.gradio-container textarea,
.gradio-container .input-text,
.gradio-container .input-textarea {
    color: white !important;
}
/* Dropdown text */
select, option {
    color: white !important;
}
/* Button text */
button {
    color: white !important;
}
'''
# Gradio app
with gr.Blocks(theme=custom_theme, css=css) as demo:
    gr.Markdown(
        """# Gini LoRA Training
### 1) Enter a LoRA name (in English) 2) Enter a trigger word (in English) 3) Select the base model 4) Upload images (at least 2, fewer than 150) 5) Click the vision LLM auto-labeling button 6) Click START""",
        elem_classes=["markdown"]
    )
    with gr.Tab("Train"):
        with gr.Column(elem_classes="container"):
            # LoRA settings group
            with gr.Group(elem_classes="input-group"):
                with gr.Row():
                    lora_name = gr.Textbox(
                        label="LoRA name",
                        info="Must be a unique name",
                        placeholder="e.g.: Persian Miniature Painting style, Cat Toy"
                    )
                    concept_sentence = gr.Textbox(
                        label="Trigger word/sentence",
                        info="The trigger word or sentence to use",
                        placeholder="an uncommon word like p3rs0n or trtcrd, or a sentence like 'in the style of CNSTLL'"
                    )
            model_warning = gr.Markdown(visible=False)
            which_model = gr.Radio(
                ["High-quality custom training model"],
                label="Base model",
                value="High-quality custom training model",  # value must be one of the choices
            )

            # Image upload area
            with gr.Group(visible=True, elem_classes="image-upload-area") as image_upload:
                with gr.Row():
                    images = gr.File(
                        file_types=["image", ".txt"],
                        label="Upload your images",
                        file_count="multiple",
                        interactive=True,
                        visible=True,
                        scale=1,
                    )
                    with gr.Column(scale=3, visible=False) as captioning_area:
                        with gr.Column():
                            gr.Markdown(
                                """# Image Labeling
<p style="margin-top:0">A vision LLM inspects each image and writes its caption automatically (captions are required for training). [trigger] is the unique trigger word that invokes the trained model.</p>
""", elem_classes="group_padding")
                            do_captioning = gr.Button("Auto-label with vision LLM")
                            output_components = [captioning_area]
                            caption_list = []
                            for i in range(1, MAX_IMAGES + 1):
                                # At module level locals() is globals(), so assigning
                                # into locals() here really does create the variables
                                locals()[f"captioning_row_{i}"] = gr.Row(visible=False)
                                with locals()[f"captioning_row_{i}"]:
                                    locals()[f"image_{i}"] = gr.Image(
                                        type="filepath",
                                        width=111,
                                        height=111,
                                        min_width=111,
                                        interactive=False,
                                        scale=2,
                                        show_label=False,
                                        show_share_button=False,
                                        show_download_button=False,
                                    )
                                    locals()[f"caption_{i}"] = gr.Textbox(
                                        label=f"Caption {i}", scale=15, interactive=True
                                    )
                                output_components.append(locals()[f"captioning_row_{i}"])
                                output_components.append(locals()[f"image_{i}"])
                                output_components.append(locals()[f"caption_{i}"])
                                caption_list.append(locals()[f"caption_{i}"])
with gr.Accordion("Advanced options", open=False): | |
steps = gr.Number(label="Steps", value=1000, minimum=1, maximum=10000, step=1) | |
lr = gr.Number(label="Learning Rate", value=4e-4, minimum=1e-6, maximum=1e-3, step=1e-6) | |
rank = gr.Number(label="LoRA Rank", value=16, minimum=4, maximum=128, step=4) | |
with gr.Accordion("Even more advanced options", open=False): | |
if(is_spaces): | |
gr.Markdown("Attention: changing this parameters may make your training fail or go out-of-memory if training on Spaces. Only change settings here it if you know what you are doing. Beware that training is done in an L4 GPU with 24GB of RAM") | |
use_more_advanced_options = gr.Checkbox(label="Use more advanced options", value=False) | |
more_advanced_options = gr.Code(config_yaml, language="yaml") | |
with gr.Accordion("Sample prompts (optional)", visible=False) as sample: | |
gr.Markdown( | |
"Include sample prompts to test out your trained model. Don't forget to include your trigger word/sentence (optional)" | |
) | |
sample_1 = gr.Textbox(label="Test prompt 1") | |
sample_2 = gr.Textbox(label="Test prompt 2") | |
sample_3 = gr.Textbox(label="Test prompt 3") | |
with gr.Group(visible=False) as cost_preview: | |
cost_preview_info = gr.Markdown(elem_id="cost_preview_info", elem_classes="group_padding") | |
payment_update = gr.Button("I have set up a payment method", visible=False) | |
output_components.append(sample) | |
output_components.append(sample_1) | |
output_components.append(sample_2) | |
output_components.append(sample_3) | |
start = gr.Button("START ํด๋ฆญ('์ฝ 25~30๋ถ ํ ํ์ต์ด ์ข ๋ฃ๋๊ณ ์๋ฃ ๋ฉ์์ง๊ฐ ์ถ๋ ฅ๋ฉ๋๋ค.)'", visible=False) | |
progress_area = gr.Markdown("") | |
dataset_folder = gr.State() | |
    images.upload(
        load_captioning,
        inputs=[images, concept_sentence],
        outputs=output_components
    ).then(
        update_pricing,
        inputs=[steps],
        outputs=[cost_preview, cost_preview_info, payment_update, start]
    )

    images.clear(
        hide_captioning,
        outputs=[captioning_area, cost_preview, sample, start]
    )

    images.delete(
        load_captioning,
        inputs=[images, concept_sentence],
        outputs=output_components
    ).then(
        update_pricing,
        inputs=[steps],
        outputs=[cost_preview, cost_preview_info, payment_update, start]
    )

    gr.on(
        triggers=[steps.change],
        fn=update_pricing,
        inputs=[steps],
        outputs=[cost_preview, cost_preview_info, payment_update, start]
    )
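    # Event flow: uploading (or deleting) files re-runs load_captioning and then
    # chains update_pricing via .then(); changing the step count re-prices on its
    # own. hide_captioning returns exactly four updates, matching its outputs list.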
    start.click(fn=create_dataset, inputs=[images] + caption_list, outputs=dataset_folder).then(
        fn=start_training,
        inputs=[
            lora_name,
            concept_sentence,
            which_model,
            steps,
            lr,
            rank,
            dataset_folder,
            sample_1,
            sample_2,
            sample_3,
            use_more_advanced_options,
            more_advanced_options
        ],
        outputs=progress_area,
    )

    do_captioning.click(fn=run_captioning, inputs=[images, concept_sentence] + caption_list, outputs=caption_list)

if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860, show_error=True)