silencer107 committed
Commit 22a3153 · verified · 1 parent: b20aa95

Upload folder using huggingface_hub

Files changed (5)
  1. README.md +19 -0
  2. pyproject.toml +25 -0
  3. src/main.py +59 -0
  4. src/pipeline.py +116 -0
  5. uv.lock +0 -0
README.md ADDED
@@ -0,0 +1,19 @@
+ # flux-schnell-edge-inference
+
+ This holds the baseline for the FLUX.1-schnell NVIDIA GeForce RTX 4090 contest, which can be forked freely and optimized.
+
+ Some recommendations are as follows:
+ - Installing dependencies should be done in `pyproject.toml`, including git dependencies
+ - HuggingFace models should be specified in the `models` array in the `pyproject.toml` file, and will be downloaded before benchmarking
+ - The pipeline does **not** have internet access, so all dependencies and models must be included in the `pyproject.toml`
+ - Compiled models should be hosted on HuggingFace and included in the `models` array in the `pyproject.toml` (rather than compiling during loading). Loading time matters far more than file sizes
+ - Avoid changing `src/main.py`, as that contains mostly protocol logic. Most changes should be in `models` and `src/pipeline.py`
+ - Ensure the entire repository (excluding dependencies and HuggingFace models) is under 16MB
+
+ For testing, you need a Docker container with PyTorch and Ubuntu 22.04.
+ You can download your listed dependencies with `uv`, installed with:
+ ```bash
+ pipx ensurepath
+ pipx install uv
+ ```
+ You can then relock with `uv lock` and run with `uv run start_inference`.
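Because the benchmark environment has no internet access, it can help to confirm locally that the checkpoint listed in the `models` array is fully cached before an offline run. Below is a minimal sketch using `huggingface_hub.snapshot_download` (available transitively via `diffusers`/`transformers`); it is only a local sanity check, not the contest harness's own download step.

```python
# Local sanity check (not the contest harness): pre-fetch the checkpoint listed in
# [tool.edge-maxxing].models so a later offline run finds everything in the HF cache.
from huggingface_hub import snapshot_download

MODELS = ["black-forest-labs/FLUX.1-schnell"]  # mirrors the models array in pyproject.toml

for repo_id in MODELS:
    path = snapshot_download(repo_id)
    print(f"{repo_id} cached at {path}")
```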
pyproject.toml ADDED
@@ -0,0 +1,25 @@
+ [build-system]
+ requires = ["setuptools >= 75.0"]
+ build-backend = "setuptools.build_meta"
+
+ [project]
+ name = "flux-schnell-edge-inference"
+ description = "An edge-maxxing model submission for the 4090 Flux contest"
+ requires-python = ">=3.10,<3.13"
+ version = "7"
+ dependencies = [
+     "diffusers==0.31.0",
+     "transformers==4.46.2",
+     "accelerate==1.1.0",
+     "omegaconf==2.3.0",
+     "torch==2.5.1",
+     "protobuf==5.28.3",
+     "sentencepiece==0.2.0",
+     "edge-maxxing-pipelines @ git+https://github.com/womboai/edge-maxxing@7c760ac54f6052803dadb3ade8ebfc9679a94589#subdirectory=pipelines",
+ ]
+
+ [tool.edge-maxxing]
+ models = ["black-forest-labs/FLUX.1-schnell"]
+
+ [project.scripts]
+ start_inference = "main:main"
src/main.py ADDED
@@ -0,0 +1,59 @@
+ import atexit
+ from io import BytesIO
+ from multiprocessing.connection import Listener
+ from os import chmod, remove
+ from os.path import abspath, exists
+ from pathlib import Path
+
+ import torch
+
+ from PIL.JpegImagePlugin import JpegImageFile
+ from pipelines.models import TextToImageRequest
+
+ from pipeline import load_pipeline, infer
+
+ SOCKET = abspath(Path(__file__).parent.parent / "inferences.sock")
+
+
+ def at_exit():
+     torch.cuda.empty_cache()
+
+
+ def main():
+     atexit.register(at_exit)
+
+     print(f"Loading pipeline")
+     pipeline = load_pipeline()
+
+     print(f"Pipeline loaded, creating socket at '{SOCKET}'")
+
+     if exists(SOCKET):
+         remove(SOCKET)
+
+     with Listener(SOCKET) as listener:
+         chmod(SOCKET, 0o777)
+
+         print(f"Awaiting connections")
+         with listener.accept() as connection:
+             print(f"Connected")
+
+             while True:
+                 try:
+                     request = TextToImageRequest.model_validate_json(connection.recv_bytes().decode("utf-8"))
+                 except EOFError:
+                     print(f"Inference socket exiting")
+
+                     return
+
+                 image = infer(request, pipeline)
+
+                 data = BytesIO()
+                 image.save(data, format=JpegImageFile.format)
+
+                 packet = data.getvalue()
+
+                 connection.send_bytes(packet)
+
+
+ if __name__ == '__main__':
+     main()
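`src/main.py` implements a simple request/response protocol over a Unix socket: it receives UTF-8 encoded JSON for a `TextToImageRequest` and replies with raw JPEG bytes. As a rough illustration only (not the contest's benchmark harness), a local test client might look like the sketch below; it assumes `TextToImageRequest` is a pydantic model exposing `model_dump_json()`, which matches the `model_validate_json` call in `main.py`.

```python
# Hypothetical local test client for the inferences.sock protocol (not the official harness).
from io import BytesIO
from multiprocessing.connection import Client

from PIL import Image
from pipelines.models import TextToImageRequest

SOCKET = "inferences.sock"  # created next to the repo root by src/main.py


def request_image(prompt: str) -> Image.Image:
    request = TextToImageRequest(prompt=prompt)  # width/height/seed fall back to their defaults

    with Client(SOCKET) as connection:
        # main.py expects UTF-8 encoded JSON and answers with raw JPEG bytes
        connection.send_bytes(request.model_dump_json().encode("utf-8"))

        return Image.open(BytesIO(connection.recv_bytes()))


if __name__ == "__main__":
    request_image("a red bicycle leaning against a brick wall").save("sample.jpg")
```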
src/pipeline.py ADDED
@@ -0,0 +1,116 @@
+ from diffusers import FluxPipeline, AutoencoderKL
+ from diffusers.image_processor import VaeImageProcessor
+ from transformers import T5EncoderModel, T5TokenizerFast, CLIPTokenizer, CLIPTextModel
+ import torch
+ import gc
+ from PIL.Image import Image
+ from pipelines.models import TextToImageRequest
+ from torch import Generator
+
+ Pipeline = None
+
+ CHECKPOINT = "black-forest-labs/FLUX.1-schnell"
+
+ def empty_cache():
+     gc.collect()
+     torch.cuda.empty_cache()
+     torch.cuda.reset_max_memory_allocated()
+     torch.cuda.reset_peak_memory_stats()
+
+ def load_pipeline() -> Pipeline:
+     infer(TextToImageRequest(prompt=""), Pipeline)
+
+     return Pipeline
+
+
+ def encode_prompt(prompt: str):
+     text_encoder = CLIPTextModel.from_pretrained(
+         CHECKPOINT,
+         subfolder="text_encoder",
+         torch_dtype=torch.bfloat16,
+     )
+
+     text_encoder_2 = T5EncoderModel.from_pretrained(
+         CHECKPOINT,
+         subfolder="text_encoder_2",
+         torch_dtype=torch.bfloat16,
+     )
+
+     tokenizer = CLIPTokenizer.from_pretrained(CHECKPOINT, subfolder="tokenizer")
+     tokenizer_2 = T5TokenizerFast.from_pretrained(CHECKPOINT, subfolder="tokenizer_2")
+
+     pipeline = FluxPipeline.from_pretrained(
+         CHECKPOINT,
+         text_encoder=text_encoder,
+         text_encoder_2=text_encoder_2,
+         tokenizer=tokenizer,
+         tokenizer_2=tokenizer_2,
+         transformer=None,
+         vae=None,
+     ).to("cuda")
+
+     with torch.no_grad():
+         return pipeline.encode_prompt(
+             prompt=prompt,
+             prompt_2=None,
+             max_sequence_length=256,
+         )
+
+
+ def infer_latents(prompt_embeds, pooled_prompt_embeds, width: int | None, height: int | None, seed: int | None):
+     pipeline = FluxPipeline.from_pretrained(
+         CHECKPOINT,
+         text_encoder=None,
+         text_encoder_2=None,
+         tokenizer=None,
+         tokenizer_2=None,
+         vae=None,
+         torch_dtype=torch.bfloat16,
+     ).to("cuda")
+
+     if seed is None:
+         generator = None
+     else:
+         generator = Generator(pipeline.device).manual_seed(seed)
+
+     return pipeline(
+         prompt_embeds=prompt_embeds,
+         pooled_prompt_embeds=pooled_prompt_embeds,
+         num_inference_steps=4,
+         guidance_scale=0.0,
+         width=width,
+         height=height,
+         generator=generator,
+         output_type="latent",
+     ).images
+
+
+ def infer(request: TextToImageRequest, _pipeline: Pipeline) -> Image:
+     empty_cache()
+
+     prompt_embeds, pooled_prompt_embeds, text_ids = encode_prompt(request.prompt)
+
+     empty_cache()
+
+     latents = infer_latents(prompt_embeds, pooled_prompt_embeds, request.width, request.height, request.seed)
+
+     empty_cache()
+
+     vae = AutoencoderKL.from_pretrained(
+         CHECKPOINT,
+         subfolder="vae",
+         torch_dtype=torch.bfloat16,
+     ).to("cuda")
+
+     vae_scale_factor = 2 ** (len(vae.config.block_out_channels))
+     image_processor = VaeImageProcessor(vae_scale_factor=vae_scale_factor)
+
+     height = request.height or 64 * vae_scale_factor
+     width = request.width or 64 * vae_scale_factor
+
+     with torch.no_grad():
+         latents = FluxPipeline._unpack_latents(latents, height, width, vae_scale_factor)
+         latents = (latents / vae.config.scaling_factor) + vae.config.shift_factor
+
+         image = vae.decode(latents, return_dict=False)[0]
+         return image_processor.postprocess(image, output_type="pil")[0]
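The baseline pipeline trades speed for memory: each call to `infer` loads the text encoders, the transformer, and the VAE in separate stages, emptying the CUDA cache in between, so no persistent pipeline object is kept (`load_pipeline` only performs a warm-up run and returns `None`). For local experimentation outside the socket protocol, a minimal invocation might look like the sketch below; it assumes the script is run from `src/` so that `pipeline` and `pipelines.models` are importable, and that `TextToImageRequest` accepts the `width`, `height`, and `seed` fields read in `infer`.

```python
# Minimal direct use of src/pipeline.py, bypassing the socket protocol in src/main.py.
from pipelines.models import TextToImageRequest
from pipeline import load_pipeline, infer

pipeline = load_pipeline()  # warm-up generation with an empty prompt; returns None in this baseline

request = TextToImageRequest(
    prompt="a watercolor fox in a snowy forest",
    width=1024,
    height=1024,
    seed=42,
)

image = infer(request, pipeline)
image.save("fox.jpg")
```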
uv.lock ADDED
The diff for this file is too large to render. See raw diff