jokerbit committed 6bde6be (verified) · 1 parent: daa6aa5

Upload folder using huggingface_hub

README.md ADDED
@@ -0,0 +1,19 @@
+ # flux-schnell-edge-inference
+
+ This repository holds the baseline for the FLUX.1-schnell NVIDIA GeForce RTX 4090 contest. It can be forked freely and optimized.
+
+ Some recommendations are as follows:
+ - Dependencies, including git dependencies, should be declared in `pyproject.toml`
+ - HuggingFace models should be specified in the `models` array in the `pyproject.toml` file; they will be downloaded before benchmarking
+ - The pipeline does **not** have internet access, so all dependencies and models must be included in `pyproject.toml`
+ - Compiled models should be hosted on HuggingFace and included in the `models` array in `pyproject.toml` (rather than compiled during loading); loading time matters far more than file size
+ - Avoid changing `src/main.py`, as it contains mostly protocol logic; most changes should go in `models` and `src/pipeline.py`
+ - Ensure the entire repository (excluding dependencies and HuggingFace models) is under 16 MB
+
+ For testing, you need a Docker container with PyTorch and Ubuntu 22.04.
+ You can download your listed dependencies with `uv`, installed with:
+ ```bash
+ pipx ensurepath
+ pipx install uv
+ ```
+ You can then relock with `uv lock` and run with `uv run start_inference`.
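A full local test cycle, once `uv` is installed as above, is then a minimal two-command loop (a sketch of the steps the README names):

```bash
# Re-resolve and lock dependencies after editing pyproject.toml,
# then launch the inference socket server defined in [project.scripts]
uv lock
uv run start_inference
```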
pyproject.toml ADDED
@@ -0,0 +1,35 @@
+ [build-system]
+ requires = ["setuptools >= 75.0"]
+ build-backend = "setuptools.build_meta"
+
+ [project]
+ name = "flux-schnell-edge-inference"
+ description = "An edge-maxxing model submission for the 4090 Flux contest"
+ requires-python = ">=3.10,<3.13"
+ version = "8"
+ dependencies = [
+     "diffusers==0.31.0",
+     "transformers==4.46.2",
+     "accelerate==1.1.0",
+     "omegaconf==2.3.0",
+     "torch==2.5.1",
+     "protobuf==5.28.3",
+     "sentencepiece==0.2.0",
+     "torchao==0.6.1",
+     "hf_transfer==0.1.8",
+     "edge-maxxing-pipelines @ git+https://github.com/womboai/edge-maxxing@7c760ac54f6052803dadb3ade8ebfc9679a94589#subdirectory=pipelines",
+     "bitsandbytes>=0.45.0",
+     "setuptools>=75.3.0",
+ ]
+
+ [[tool.edge-maxxing.models]]
+ repository = "black-forest-labs/FLUX.1-schnell"
+ revision = "741f7c3ce8b383c54771c7003378a50191e9efe9"
+ exclude = ["transformer"]
+
+ [[tool.edge-maxxing.models]]
+ repository = "RobertML/FLUX.1-schnell-int8wo"
+ revision = "307e0777d92df966a3c0f99f31a6ee8957a9857a"
+
+ [project.scripts]
+ start_inference = "main:main"
src/flux_schnell_edge_inference.egg-info/PKG-INFO ADDED
@@ -0,0 +1,15 @@
+ Metadata-Version: 2.1
+ Name: flux-schnell-edge-inference
+ Version: 8
+ Summary: An edge-maxxing model submission for the 4090 Flux contest
+ Requires-Python: <3.13,>=3.10
+ Requires-Dist: diffusers==0.31.0
+ Requires-Dist: transformers==4.46.2
+ Requires-Dist: accelerate==1.1.0
+ Requires-Dist: omegaconf==2.3.0
+ Requires-Dist: torch==2.5.1
+ Requires-Dist: protobuf==5.28.3
+ Requires-Dist: sentencepiece==0.2.0
+ Requires-Dist: torchao==0.6.1
+ Requires-Dist: hf_transfer==0.1.8
+ Requires-Dist: edge-maxxing-pipelines@ git+https://github.com/womboai/edge-maxxing@7c760ac54f6052803dadb3ade8ebfc9679a94589#subdirectory=pipelines
src/flux_schnell_edge_inference.egg-info/SOURCES.txt ADDED
@@ -0,0 +1,10 @@
+ README.md
+ pyproject.toml
+ src/main.py
+ src/pipeline.py
+ src/flux_schnell_edge_inference.egg-info/PKG-INFO
+ src/flux_schnell_edge_inference.egg-info/SOURCES.txt
+ src/flux_schnell_edge_inference.egg-info/dependency_links.txt
+ src/flux_schnell_edge_inference.egg-info/entry_points.txt
+ src/flux_schnell_edge_inference.egg-info/requires.txt
+ src/flux_schnell_edge_inference.egg-info/top_level.txt
src/flux_schnell_edge_inference.egg-info/dependency_links.txt ADDED
@@ -0,0 +1 @@
+
src/flux_schnell_edge_inference.egg-info/entry_points.txt ADDED
@@ -0,0 +1,2 @@
+ [console_scripts]
+ start_inference = main:main
src/flux_schnell_edge_inference.egg-info/requires.txt ADDED
@@ -0,0 +1,10 @@
+ diffusers==0.31.0
+ transformers==4.46.2
+ accelerate==1.1.0
+ omegaconf==2.3.0
+ torch==2.5.1
+ protobuf==5.28.3
+ sentencepiece==0.2.0
+ torchao==0.6.1
+ hf_transfer==0.1.8
+ edge-maxxing-pipelines@ git+https://github.com/womboai/edge-maxxing@7c760ac54f6052803dadb3ade8ebfc9679a94589#subdirectory=pipelines
src/flux_schnell_edge_inference.egg-info/top_level.txt ADDED
@@ -0,0 +1,2 @@
+ main
+ pipeline
src/main.py ADDED
@@ -0,0 +1,50 @@
+ from io import BytesIO
+ from multiprocessing.connection import Listener
+ from os import chmod, remove
+ from os.path import abspath, exists
+ from pathlib import Path
+
+ from PIL.JpegImagePlugin import JpegImageFile
+ from pipelines.models import TextToImageRequest
+
+ from pipeline import load_pipeline, infer
+
+ SOCKET = abspath(Path(__file__).parent.parent / "inferences.sock")
+
+
+ def main():
+     print("Loading pipeline")
+     pipeline = load_pipeline()
+
+     print(f"Pipeline loaded, creating socket at '{SOCKET}'")
+
+     if exists(SOCKET):
+         remove(SOCKET)
+
+     with Listener(SOCKET) as listener:
+         chmod(SOCKET, 0o777)
+
+         print("Awaiting connections")
+         with listener.accept() as connection:
+             print("Connected")
+
+             while True:
+                 try:
+                     request = TextToImageRequest.model_validate_json(connection.recv_bytes().decode("utf-8"))
+                 except EOFError:
+                     print("Inference socket exiting")
+
+                     return
+
+                 image = infer(request, pipeline)
+
+                 data = BytesIO()
+                 image.save(data, format=JpegImageFile.format)
+
+                 packet = data.getvalue()
+
+                 connection.send_bytes(packet)
+
+
+ if __name__ == '__main__':
+     main()
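For local testing against this server, a minimal client sketch might look like the following. The wire format (UTF-8 JSON bytes of a `TextToImageRequest` in, JPEG bytes out) is taken from `main.py` above; the request field names are assumed from their use in `src/pipeline.py`, and this script is not part of the contest harness:

```python
# Hypothetical test client for the inference socket above; not part of
# the contest harness. It sends a JSON-encoded TextToImageRequest and
# receives the JPEG-encoded image bytes produced by main().
import json
from multiprocessing.connection import Client

SOCKET = "inferences.sock"  # socket path created by src/main.py

with Client(SOCKET) as connection:
    # Field names assumed from how the request is used in src/pipeline.py
    request = {
        "prompt": "a photo of an astronaut riding a horse on the moon",
        "seed": 0,
        "height": 1024,
        "width": 1024,
    }
    connection.send_bytes(json.dumps(request).encode("utf-8"))

    with open("output.jpg", "wb") as file:
        file.write(connection.recv_bytes())
```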
src/pipeline.py ADDED
@@ -0,0 +1,83 @@
+ import gc
+ import os
+ from typing import TypeAlias
+
+ import torch
+ from PIL.Image import Image
+ from diffusers import FluxPipeline, FluxTransformer2DModel, AutoencoderKL
+ from huggingface_hub.constants import HF_HUB_CACHE
+ from pipelines.models import TextToImageRequest
+ from torch import Generator
+ from transformers import T5EncoderModel, CLIPTextModel
+
+ Pipeline: TypeAlias = FluxPipeline
+
+ CHECKPOINT = "black-forest-labs/FLUX.1-schnell"
+ REVISION = "741f7c3ce8b383c54771c7003378a50191e9efe9"
+
+
+ def load_pipeline() -> Pipeline:
+     text_encoder = CLIPTextModel.from_pretrained(
+         CHECKPOINT,
+         revision=REVISION,
+         subfolder="text_encoder",
+         local_files_only=True,
+         torch_dtype=torch.bfloat16,
+     )
+
+     text_encoder_2 = T5EncoderModel.from_pretrained(
+         CHECKPOINT,
+         revision=REVISION,
+         subfolder="text_encoder_2",
+         local_files_only=True,
+         torch_dtype=torch.bfloat16,
+     )
+
+     vae = AutoencoderKL.from_pretrained(
+         CHECKPOINT,
+         revision=REVISION,
+         subfolder="vae",
+         local_files_only=True,
+         torch_dtype=torch.bfloat16,
+     )
+
+     # Pre-quantized int8 weight-only transformer snapshot, already in the HF cache
+     path = os.path.join(HF_HUB_CACHE, "models--RobertML--FLUX.1-schnell-int8wo/snapshots/307e0777d92df966a3c0f99f31a6ee8957a9857a")
+
+     transformer = FluxTransformer2DModel.from_pretrained(
+         path,
+         torch_dtype=torch.bfloat16,
+         use_safetensors=False,
+     )
+
+     pipeline = FluxPipeline.from_pretrained(
+         CHECKPOINT,
+         revision=REVISION,
+         local_files_only=True,
+         text_encoder=text_encoder,
+         text_encoder_2=text_encoder_2,
+         transformer=transformer,
+         vae=vae,
+         torch_dtype=torch.bfloat16,
+     ).to("cuda")
+     pipeline.vae = torch.compile(pipeline.vae, mode="reduce-overhead")
+     pipeline("")  # warm-up run so compilation happens at load time, not while benchmarking
+
+     return pipeline
+
+ def infer(request: TextToImageRequest, pipeline: Pipeline) -> Image:
+     gc.collect()
+     torch.cuda.empty_cache()
+     torch.cuda.reset_peak_memory_stats()
+
+     generator = Generator(pipeline.device).manual_seed(request.seed)
+
+     return pipeline(
+         request.prompt,
+         generator=generator,
+         guidance_scale=0.0,
+         num_inference_steps=4,
+         max_sequence_length=256,
+         height=request.height,
+         width=request.width,
+     ).images[0]
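The transformer here is loaded from the pre-quantized `RobertML/FLUX.1-schnell-int8wo` snapshot rather than quantized at load time, matching the README's advice to host compiled models on HuggingFace. As a rough sketch only (the actual recipe behind that repository is not shown anywhere in this commit), such a checkpoint could be produced once offline with torchao's int8 weight-only quantization and then uploaded:

```python
# Hypothetical one-off export script; the real recipe used to build
# RobertML/FLUX.1-schnell-int8wo is an assumption. It quantizes the
# FLUX.1-schnell transformer weights to int8 (weight-only) with torchao
# and saves a snapshot that from_pretrained can later load directly.
import torch
from diffusers import FluxTransformer2DModel
from torchao.quantization import quantize_, int8_weight_only

transformer = FluxTransformer2DModel.from_pretrained(
    "black-forest-labs/FLUX.1-schnell",
    subfolder="transformer",
    torch_dtype=torch.bfloat16,
)

# Swap linear-layer weights for int8 weight-only quantized tensors in place
quantize_(transformer, int8_weight_only())

# The quantized tensor subclasses do not round-trip through safetensors,
# hence pickle serialization here and use_safetensors=False in load_pipeline
transformer.save_pretrained("FLUX.1-schnell-int8wo", safe_serialization=False)
```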
uv.lock ADDED
The diff for this file is too large to render.