Upload folder using huggingface_hub
- README.md +6 -11
- dataset.toml +1 -1
- q-avatar-000004.safetensors +2 -2
- q-avatar-000008.safetensors +2 -2
- q-avatar-000012.safetensors +2 -2
- q-avatar.safetensors +2 -2
- sample/q-avatar_000100_00_20241018073646.png +0 -0
- sample/q-avatar_000200_00_20241018073818.png +0 -0
- sample/q-avatar_000300_00_20241018073948.png +0 -0
- sample/q-avatar_000400_00_20241018074119.png +0 -0
- sample/q-avatar_000500_00_20241018074252.png +0 -0
- sample/q-avatar_000600_00_20241018074425.png +0 -0
- sample/q-avatar_000700_00_20241018074556.png +0 -0
- sample/q-avatar_000800_00_20241018074726.png +0 -0
- sample/q-avatar_000900_00_20241018074858.png +0 -0
- sample/q-avatar_001000_00_20241018075030.png +0 -0
- sample/q-avatar_001100_00_20241018075200.png +0 -0
- sample/q-avatar_001200_00_20241018075331.png +0 -0
- sample_prompts.txt +1 -2
- train.sh +7 -23
README.md
CHANGED
@@ -8,16 +8,11 @@ tags:
 - fluxgym
 widget:
 - output:
-    url: sample/q-
-  text: a Q avatar
-
-
-
-base_model: black-forest-labs/FLUX.1-dev
-instance_prompt: a Q avatar of
-license: other
-license_name: flux-1-dev-non-commercial-license
-license_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md
+    url: sample/q-avatar_001200_00_20241018075331.png
+  text: a Q avatar
+base_model: stabilityai/stable-diffusion-xl-base-1.0
+instance_prompt: a Q avatar
+license: openrail++
 ---
 
 # q_avatar
@@ -28,7 +23,7 @@ A Flux LoRA trained on a local computer with [Fluxgym](https://github.com/cockta
 
 ## Trigger words
 
-You should use `a Q avatar
+You should use `a Q avatar` to trigger the image generation.
 
 ## Download model and use it with ComfyUI, AUTOMATIC1111, SD.Next, Invoke AI, Forge, etc.
 
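For reference, a minimal sketch (not part of this card) of applying the LoRA with the diffusers library on the SDXL base declared in the updated front matter. It assumes `q-avatar.safetensors` has been downloaded into the current directory; the prompt is the trigger phrase from the new `instance_prompt`.

```python
# Minimal sketch: load q-avatar.safetensors on top of the SDXL base model
# named in the new README front matter. Assumes the LoRA file is in the
# current working directory.
import torch
from diffusers import StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
).to("cuda")

# q-avatar.safetensors is the final checkpoint; q-avatar-000004/000008/000012
# are the intermediate saves produced by --save_every_n_epochs 4.
pipe.load_lora_weights(".", weight_name="q-avatar.safetensors")

# "a Q avatar" is the trigger phrase from the updated README.
image = pipe("a Q avatar", num_inference_steps=30).images[0]
image.save("q-avatar-test.png")
```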
dataset.toml
CHANGED
@@ -10,5 +10,5 @@ keep_tokens = 1
 
 [[datasets.subsets]]
 image_dir = '/app/fluxgym/datasets/q-avatar'
-class_tokens = 'a Q avatar
+class_tokens = 'a Q avatar'
 num_repeats = 10
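A quick sanity check, sketched here rather than taken from the repo, that the corrected `class_tokens` value is what sd-scripts will read. The path follows the fluxgym layout above; the `[[datasets]]`/`[[datasets.subsets]]` nesting is the usual sd-scripts config shape and is an assumption, since only the subset block appears in this diff. Requires Python 3.11+ for `tomllib`.

```python
# Hypothetical check: confirm the fixed caption settings in dataset.toml.
# Path and the [[datasets]]/[[datasets.subsets]] nesting are assumptions.
import tomllib

with open("/app/fluxgym/outputs/q-avatar/dataset.toml", "rb") as f:
    cfg = tomllib.load(f)

for dataset in cfg.get("datasets", []):
    for subset in dataset.get("subsets", []):
        # Expected after this commit: class_tokens = 'a Q avatar', num_repeats = 10
        print(subset["image_dir"], repr(subset["class_tokens"]), subset["num_repeats"])
```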
q-avatar-000004.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:6adb4c46e22678dc09b72e7151e88fb7ab8b8e963a42b1da87582abd45ebe941
+size 25635684
q-avatar-000008.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:b2bec8785973c44e1cd4a5d486c6e43dfb1833f236ce4541cfe4cfaa8843e675
+size 25635684
q-avatar-000012.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:7ded7d821cf86f975c343e4fee3afb74dc539aceebf2b88a32e3305878d29745
+size 25635684
q-avatar.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:c19ced7aa4689bab9b7226c5cb9e366d437eced074fa1442188bfd876fda1a95
+size 25635684
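The four `.safetensors` entries above are Git LFS pointer files, so the repository records only the hash and the 25635684-byte size. A short sketch, assuming the real checkpoint has been downloaded next to the script, for confirming a local copy matches the pointer rather than being a truncated file or the pointer stub itself:

```python
# Sketch: verify a downloaded checkpoint against the sha256 from its LFS pointer.
import hashlib

EXPECTED = "c19ced7aa4689bab9b7226c5cb9e366d437eced074fa1442188bfd876fda1a95"  # q-avatar.safetensors

h = hashlib.sha256()
with open("q-avatar.safetensors", "rb") as f:          # local path is an assumption
    for chunk in iter(lambda: f.read(1 << 20), b""):   # read in 1 MiB chunks
        h.update(chunk)

assert h.hexdigest() == EXPECTED, "mismatch: file is truncated or still an LFS pointer"
print("sha256 ok")
```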
sample/q-avatar_000100_00_20241018073646.png
ADDED
sample/q-avatar_000200_00_20241018073818.png
ADDED
sample/q-avatar_000300_00_20241018073948.png
ADDED
sample/q-avatar_000400_00_20241018074119.png
ADDED
sample/q-avatar_000500_00_20241018074252.png
ADDED
sample/q-avatar_000600_00_20241018074425.png
ADDED
sample/q-avatar_000700_00_20241018074556.png
ADDED
sample/q-avatar_000800_00_20241018074726.png
ADDED
sample/q-avatar_000900_00_20241018074858.png
ADDED
sample/q-avatar_001000_00_20241018075030.png
ADDED
sample/q-avatar_001100_00_20241018075200.png
ADDED
sample/q-avatar_001200_00_20241018075331.png
ADDED
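The twelve PNGs above are the periodic previews written by `--sample_every_n_steps 100` in train.sh; the step count (000100 through 001200) is encoded in each filename. Purely as an illustration, and assuming the repo's `sample/` folder is checked out locally, they can be tiled into one contact sheet with Pillow:

```python
# Illustration only: tile the 12 training previews (steps 100-1200) into a 4x3 grid.
# Assumes this repo's sample/ directory exists in the working directory.
from pathlib import Path
from PIL import Image

paths = sorted(Path("sample").glob("q-avatar_0*.png"))  # zero-padded names sort by step
thumbs = [Image.open(p).convert("RGB").resize((256, 256)) for p in paths]

cols, rows = 4, 3
sheet = Image.new("RGB", (cols * 256, rows * 256))
for i, thumb in enumerate(thumbs):
    sheet.paste(thumb, ((i % cols) * 256, (i // cols) * 256))
sheet.save("samples_grid.png")
```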
sample_prompts.txt
CHANGED
@@ -1,2 +1 @@
-a Q avatar
-in style with Q avatar
+a Q avatar
train.sh
CHANGED
@@ -1,34 +1,18 @@
 accelerate launch \
-  --mixed_precision bf16 \
   --num_cpu_threads_per_process 1 \
-  sd-scripts/
-  --pretrained_model_name_or_path "/app/fluxgym/models/unet/
-  --clip_l "/app/fluxgym/models/clip/clip_l.safetensors" \
-  --t5xxl "/app/fluxgym/models/clip/t5xxl_fp16.safetensors" \
-  --ae "/app/fluxgym/models/vae/ae.sft" \
-  --cache_latents_to_disk \
+  sd-scripts/sdxl_train_network.py \
+  --pretrained_model_name_or_path "/app/fluxgym/models/unet/stabilityai/stable-diffusion-xl-base-1.0/sd_xl_base_1.0.safetensors" \
   --save_model_as safetensors \
-  --
-  --
-  --
-  --gradient_checkpointing \
-  --mixed_precision bf16 \
-  --save_precision bf16 \
-  --network_module networks.lora_flux \
+  --mixed_precision fp16 \
+  --save_precision fp16 \
+  --network_module networks.lora \
   --network_dim 4 \
   --optimizer_type adamw8bit \--sample_prompts="/app/fluxgym/outputs/q-avatar/sample_prompts.txt" --sample_every_n_steps="100" \
   --learning_rate 8e-4 \
-  --cache_text_encoder_outputs \
-  --cache_text_encoder_outputs_to_disk \
-  --fp8_base \
-  --highvram \
   --max_train_epochs 16 \
   --save_every_n_epochs 4 \
   --dataset_config "/app/fluxgym/outputs/q-avatar/dataset.toml" \
   --output_dir "/app/fluxgym/outputs/q-avatar" \
   --output_name q-avatar \
-  --
-
-  --model_prediction_type raw \
-  --guidance_scale 1 \
-  --loss_type l2 \
+  --xformers --no_half_vae \
+
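Not from the repo, but a handy follow-up to the new command line: the trained file can be opened with the `safetensors` library to confirm it was saved at the rank requested by `--network_dim 4`. The `lora_down`/`lora_up` key naming is the kohya sd-scripts LoRA convention and is assumed here rather than documented by this commit.

```python
# Sketch: inspect the trained LoRA and confirm it matches --network_dim 4.
# Key names follow the kohya sd-scripts LoRA convention (an assumption).
from safetensors import safe_open

with safe_open("q-avatar.safetensors", framework="pt", device="cpu") as f:
    for key in sorted(f.keys()):
        if key.endswith("lora_down.weight"):
            shape = tuple(f.get_tensor(key).shape)
            print(key, shape)   # first dimension should be 4 (the LoRA rank)
            break
```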