John6666 committed on
Commit 7e78e4c · verified · 1 Parent(s): c6f2919

Upload 3 files

Files changed (3)
  1. README.md +13 -12
  2. app.py +49 -0
  3. requirements.txt +14 -0
README.md CHANGED
@@ -1,12 +1,13 @@
- ---
- title: Flux To Diffusers Zero Test
- emoji: 🚀
- colorFrom: gray
- colorTo: yellow
- sdk: gradio
- sdk_version: 5.22.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ ---
+ title: test flux safetensors to diffusers for Zero GPU
+ emoji: 🙄
+ colorFrom: indigo
+ colorTo: purple
+ sdk: gradio
+ sdk_version: 4.44.0
+ app_file: app.py
+ pinned: false
+ license: mit
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,49 @@
+ import spaces
+
+ @spaces.GPU
+ def dummy_gpu():
+     pass
+
+ import gradio as gr
+ import torch
+ from pathlib import Path
+ from diffusers import FluxPipeline, FluxTransformer2DModel
+ from huggingface_hub import hf_hub_download, HfApi
+
+ IS_TURBO = False
+ TEMP_DIR = "./temp"
+ repo_id = "camenduru/FLUX.1-dev-diffusers"
+ #repo_id = "black-forest-labs/FLUX.1-schnell" # if schnell
+ #repo_id = "aoxo/flux.1dev-abliteratedv2" # if dev.abl
+ dtype = torch.bfloat16
+ #cp = hf_hub_download("John6666/flux1-backup-202502", "ultrarealFineTune_v1.safetensors", repo_type="dataset")
+ cp = hf_hub_download("John6666/flux1-backup-202502", "jibMixFlux_v8AccentueightNSFW.safetensors", repo_type="dataset")
+ transformer = FluxTransformer2DModel.from_single_file(cp, subfolder="transformer", torch_dtype=dtype, config=repo_id)
+ pipe = FluxPipeline.from_pretrained(repo_id, transformer=transformer, torch_dtype=dtype)
+
+ if IS_TURBO:
+     pipe.to("cuda")
+     pipe.load_lora_weights(hf_hub_download("ByteDance/Hyper-SD", "Hyper-FLUX.1-dev-8steps-lora.safetensors"), adapter_name="hyper-sd")
+     pipe.set_adapters(["hyper-sd"], adapter_weights=[0.125])
+     pipe.fuse_lora()
+     pipe.unload_lora_weights()
+     pipe.to("cpu")
+
+ def upload_model(repo_id: str="", token: str="", progress=gr.Progress(track_tqdm=True)):
+     if not token: return "Token not found."
+     pipe.save_pretrained(TEMP_DIR)
+     api = HfApi(token=token if token else False)
+     api.create_repo(repo_id=repo_id, token=token, private=True, exist_ok=True)
+     api.upload_folder(repo_id=repo_id, repo_type="model", folder_path=TEMP_DIR, path_in_repo=".")
+     api.upload_file(repo_id=repo_id, repo_type="model", path_or_fileobj=cp, path_in_repo=Path(cp).name)
+     return ""
+
+ with gr.Blocks() as demo:
+     repo_id = gr.Textbox(label="Repo ID", value="")
+     hf_token = gr.Textbox(label="Your HF write token", value="")
+     run_button = gr.Button("Submit", variant="primary")
+     info_md = gr.Markdown("<br><br><br>")
+
+     run_button.click(upload_model, [repo_id, hf_token], [info_md])
+
+ demo.launch()
requirements.txt ADDED
@@ -0,0 +1,14 @@
+ huggingface_hub
+ #torch==2.4.0
+ #torchvision
+ #torchaudio
+ torchao==0.9.0
+ diffusers==0.32.2
+ peft
+ transformers==4.48.3
+ numpy
+ scipy
+ Pillow
+ sentencepiece
+ protobuf
+ triton