HoKa committed
Commit f79c13a · verified · 1 Parent(s): 5700475

Upload folder using huggingface_hub

README.md ADDED
@@ -0,0 +1,31 @@
+ ---
+ tags:
+ - text-to-image
+ - flux
+ - lora
+ - diffusers
+ - template:sd-lora
+ - fluxgym
+
+
+ base_model: black-forest-labs/FLUX.1-dev
+ instance_prompt: Amin Zendegani
+ license: other
+ license_name: flux-1-dev-non-commercial-license
+ license_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md
+ ---
+
+ # Amin Zendegani
+
+ A Flux LoRA trained on a local computer with [Fluxgym](https://github.com/cocktailpeanut/fluxgym)
+
+ <Gallery />
+
+ ## Trigger words
+
+ You should use `Amin Zendegani` to trigger the image generation.
+
+ ## Download model and use it with ComfyUI, AUTOMATIC1111, SD.Next, Invoke AI, Forge, etc.
+
+ Weights for this model are available in Safetensors format.
+
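Not part of the uploaded files, but as a pointer for diffusers users: a minimal sketch of loading one of the LoRA checkpoints from this commit. The repo id `HoKa/amin-zendegani`, the CUDA device, and the sampling settings are assumptions; `weight_name` can be swapped for one of the intermediate checkpoints (`amin-zendegani-000004/000008/000012.safetensors`) added below in this commit.

```python
# Sketch only -- assumes a diffusers build with Flux support, a CUDA GPU,
# and that this upload lives at the (assumed) repo id "HoKa/amin-zendegani".
import torch
from diffusers import FluxPipeline

pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",  # base model from the README metadata
    torch_dtype=torch.bfloat16,
)
pipe.load_lora_weights(
    "HoKa/amin-zendegani",                     # assumed repo id
    weight_name="amin-zendegani.safetensors",  # final checkpoint in this commit
)
pipe.to("cuda")

# "Amin Zendegani" is the trigger word / instance prompt from the README.
image = pipe(
    "Amin Zendegani",
    num_inference_steps=28,
    guidance_scale=3.5,
).images[0]
image.save("amin-zendegani.png")
```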
amin-zendegani-000004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e47cd30a1453f7e208dcce5d1b41e876c75e88dfbfc86266267b0ac2ad3b29aa
+ size 39761000
amin-zendegani-000008.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:61955a4656131ee09201cc3c9a5b48d743b471bdb2acad19fa6cc8e004a394c3
+ size 39761000
amin-zendegani-000012.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e4de8ac12e46f462e61b03f53226ec48fb9f93662e8fc5710e2e672242d2d634
+ size 39761000
amin-zendegani.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:11448448f7fce586466801d19a6f6d68b9f2dc75b35713eaf920440850b41d39
+ size 39761000
dataset.toml ADDED
@@ -0,0 +1,14 @@
+ [general]
+ shuffle_caption = false
+ caption_extension = '.txt'
+ keep_tokens = 1
+
+ [[datasets]]
+ resolution = 512
+ batch_size = 1
+ keep_tokens = 1
+
+ [[datasets.subsets]]
+ image_dir = '/home/ubuntu/fluxgym/datasets/amin-zendegani'
+ class_tokens = 'Amin Zendegani'
+ num_repeats = 10
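The config above tells sd-scripts to read images from `/home/ubuntu/fluxgym/datasets/amin-zendegani`, caption each one from a sibling `.txt` file, and repeat every image 10 times per epoch. A small sketch (not part of the upload) for checking that layout before training; the directory is copied from the config and will differ on other machines:

```python
# Sketch: verify the layout dataset.toml expects -- one .txt caption per image.
# The path below is taken from the config; adjust it for your own setup.
from pathlib import Path

dataset_dir = Path("/home/ubuntu/fluxgym/datasets/amin-zendegani")
image_exts = {".jpg", ".jpeg", ".png", ".webp"}

for image in sorted(p for p in dataset_dir.iterdir() if p.suffix.lower() in image_exts):
    caption = image.with_suffix(".txt")
    text = caption.read_text().strip() if caption.exists() else "MISSING CAPTION FILE"
    print(f"{image.name}: {text}")
```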
sample_prompts.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ Amin Zendegani
train.sh ADDED
@@ -0,0 +1,34 @@
+ accelerate launch \
+ --mixed_precision bf16 \
+ --num_cpu_threads_per_process 1 \
+ sd-scripts/flux_train_network.py \
+ --pretrained_model_name_or_path "/home/ubuntu/fluxgym/models/unet/flux1-dev.sft" \
+ --clip_l "/home/ubuntu/fluxgym/models/clip/clip_l.safetensors" \
+ --t5xxl "/home/ubuntu/fluxgym/models/clip/t5xxl_fp16.safetensors" \
+ --ae "/home/ubuntu/fluxgym/models/vae/ae.sft" \
+ --cache_latents_to_disk \
+ --save_model_as safetensors \
+ --sdpa --persistent_data_loader_workers \
+ --max_data_loader_n_workers 2 \
+ --seed 42 \
+ --gradient_checkpointing \
+ --mixed_precision bf16 \
+ --save_precision bf16 \
+ --network_module networks.lora_flux \
+ --network_dim 4 \
+ --optimizer_type adamw8bit \
+ --learning_rate 8e-4 \
+ --cache_text_encoder_outputs \
+ --cache_text_encoder_outputs_to_disk \
+ --fp8_base \
+ --highvram \
+ --max_train_epochs 16 \
+ --save_every_n_epochs 4 \
+ --dataset_config "/home/ubuntu/fluxgym/outputs/amin-zendegani/dataset.toml" \
+ --output_dir "/home/ubuntu/fluxgym/outputs/amin-zendegani" \
+ --output_name amin-zendegani \
+ --timestep_sampling shift \
+ --discrete_flow_shift 3.1582 \
+ --model_prediction_type raw \
+ --guidance_scale 1 \
+ --loss_type l2 \