saddam213 committed on
Commit
5fc6833
·
verified ·
1 Parent(s): f048b38

Upload 23 files

Browse files
.gitattributes CHANGED
@@ -33,3 +33,12 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ Sample.png filter=lfs diff=lfs merge=lfs -text
37
+ Sample2.png filter=lfs diff=lfs merge=lfs -text
38
+ Sample3.png filter=lfs diff=lfs merge=lfs -text
39
+ Sample4.png filter=lfs diff=lfs merge=lfs -text
40
+ text_encoder_2/model.onnx.data filter=lfs diff=lfs merge=lfs -text
41
+ text_encoder/model.onnx.data filter=lfs diff=lfs merge=lfs -text
42
+ transformer/model.onnx.data filter=lfs diff=lfs merge=lfs -text
43
+ vae_decoder/model.onnx.data filter=lfs diff=lfs merge=lfs -text
44
+ vae_encoder/model.onnx.data filter=lfs diff=lfs merge=lfs -text
Icon.png ADDED
README.md ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ pipeline_tag: image-to-image
3
+ ---
4
+ # FLUX.1-Kontext - Onnx Olive DirectML Optimized
5
+
6
+ ## Original Model
7
+ https://huggingface.co/black-forest-labs/FLUX.1-Kontext-dev
8
+
9
+
10
+ ## C# Inference Demo
11
+ https://github.com/TensorStack-AI/OnnxStack
12
+
13
+ ```csharp
14
+ // Create Pipeline
15
+ var pipeline = FluxPipeline.CreatePipeline("D:\\Models\\FLUX.1-Kontext-amuse", ModelType.Instruct);
16
+
17
+ // Prompt
18
+ var promptOptions = new PromptOptions
19
+ {
20
+ Prompt = "Add sunglasses and a hat to the woman"
21
+ };
22
+
23
+ // Scheduler Options
24
+ var schedulerOptions = pipeline.DefaultSchedulerOptions with
25
+ {
26
+ InferenceSteps = 28,
27
+ GuidanceScale = 2.5f,
28
+ SchedulerType = SchedulerType.FlowMatchEulerDiscrete,
29
+ };
30
+
31
+ // Run pipeline
32
+ var result = await pipeline.GenerateImageAsync(promptOptions, schedulerOptions);
33
+
34
+ // Save Image Result
35
+ await result.SaveAsync("Result.png");
36
+ ```
37
+ ## Inference Result
38
+ ![Intro Image](Sample.png)
Sample.png ADDED

Git LFS Details

  • SHA256: a056ee65ea7e95f122f93200b99df2268d8a761c7c69acd018edea6348e0cbc5
  • Pointer size: 132 Bytes
  • Size of remote file: 1.14 MB
Sample2.png ADDED

Git LFS Details

  • SHA256: 9c27a9951153b84edd3556a6d218420e1ebb4e99c20b6e36bfa81eba544271ae
  • Pointer size: 132 Bytes
  • Size of remote file: 1.54 MB
Sample3.png ADDED

Git LFS Details

  • SHA256: bd9c9c53316bec5cd2a7e5b0ec0896ed074eacd290589bf3e0a5a361951dfb69
  • Pointer size: 132 Bytes
  • Size of remote file: 1.76 MB
Sample4.png ADDED

Git LFS Details

  • SHA256: 2214b4d264202e03bd0f13f538819bef00668f12528d849efdfb98ef51736242
  • Pointer size: 132 Bytes
  • Size of remote file: 1.74 MB
amuse_template.json ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "Id": "B839B0CA-97B8-4603-9097-0BC641EDE391",
3
+ "FileVersion": "1",
4
+ "Created": "2025-03-07T00:00:00",
5
+ "IsProtected": false,
6
+ "Name": "FLUX.1-Kontext",
7
+ "ImageIcon": "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/Icon.png",
8
+ "Author": "BlackForestLabs",
9
+ "Description": "FLUX.1 [Kontext] is a 12-billion parameter rectified flow transformer for instruction-based image editing. Given an input image and a text instruction, it performs targeted, high-quality edits while preserving the character, composition, and style of the rest of the scene.",
10
+ "Rank": 30,
11
+ "Group": "Online",
12
+ "Template": "FluxKontext",
13
+ "Category": "StableDiffusion",
14
+ "StableDiffusionTemplate": {
15
+ "PipelineType": "Flux",
16
+ "ModelType": "Instruct",
17
+ "SampleSize": 1024,
18
+ "TokenizerLength": 768,
19
+ "Tokenizer2Limit": 512,
20
+ "Optimization": "None",
21
+ "DiffuserTypes": [
22
+ "ImageToImage"
23
+ ],
24
+ "SchedulerDefaults": {
25
+ "SchedulerType": "FlowMatchEulerDiscrete",
26
+ "Steps": 28,
27
+ "StepsMin": 1,
28
+ "StepsMax": 100,
29
+ "Guidance": 1,
30
+ "GuidanceMin": 1,
31
+ "GuidanceMax": 1,
32
+ "Guidance2": 3.5,
33
+ "Guidance2Min": 0,
34
+ "Guidance2Max": 15,
35
+ "TimestepSpacing": "Linspace",
36
+ "BetaSchedule": "ScaledLinear",
37
+ "BetaStart": 0.00085,
38
+ "BetaEnd": 0.012
39
+ }
40
+ },
41
+ "MemoryMin": 41,
42
+ "MemoryMax": 52,
43
+ "DownloadSize": 34,
44
+ "Website": "https://blackforestlabs.ai",
45
+ "Licence": "https://github.com/black-forest-labs/flux/blob/main/model_licenses/LICENSE-FLUX1-kontext",
46
+ "LicenceType": "NonCommercial",
47
+ "IsLicenceAccepted": false,
48
+ "Repository": "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse",
49
+ "RepositoryFiles": [
50
+ "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/text_encoder/model.onnx",
51
+ "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/text_encoder_2/model.onnx",
52
+ "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/text_encoder_2/model.onnx.data",
53
+ "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/tokenizer/merges.txt",
54
+ "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/tokenizer/special_tokens_map.json",
55
+ "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/tokenizer/vocab.json",
56
+ "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/tokenizer_2/special_tokens_map.json",
57
+ "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/tokenizer_2/spiece.model",
58
+ "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/tokenizer_2/tokenizer.json",
59
+ "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/transformer/model.onnx",
60
+ "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/transformer/model.onnx.data",
61
+ "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/vae_decoder/model.onnx",
62
+ "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/vae_decoder/model.onnx.data",
63
+ "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/vae_encoder/model.onnx",
64
+ "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/vae_encoder/model.onnx.data",
65
+ "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/amuse_template.json",
66
+ "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/README.md"
67
+ ],
68
+ "PreviewImages": [
69
+ "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/Sample.png",
70
+ "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/Sample2.png",
71
+ "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/Sample3.png",
72
+ "https://huggingface.co/TensorStack/FLUX.1-Kontext-amuse/resolve/main/Sample4.png"
73
+ ],
74
+ "Tags": []
75
+ }
text_encoder/model.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4fa553e25a5b293966fd831fbd69f3a391acd59be04b85c416330809e4b07233
3
+ size 313594
text_encoder/model.onnx.data ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d2015263f98ae9f6e0ea233756bd5ea51ddb1f36ec6a2dd862e59d82611d1db7
3
+ size 246120960
text_encoder_2/model.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a88d6fc9d7bc87a371deffdea3038c5c6bd6e990ae0002462b1f81af4389a49d
3
+ size 488164
text_encoder_2/model.onnx.data ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:523a4913b0170e5ac5994a557f65d7fadc893a0392b1d6ecaccc02451b08aa73
3
+ size 11537887232
tokenizer/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<|startoftext|>",
4
+ "lstrip": false,
5
+ "normalized": true,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "<|endoftext|>",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": {
17
+ "content": "<|endoftext|>",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "unk_token": {
24
+ "content": "<|endoftext|>",
25
+ "lstrip": false,
26
+ "normalized": false,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ }
30
+ }
tokenizer/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_2/special_tokens_map.json ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "additional_special_tokens": [
3
+ "<extra_id_0>",
4
+ "<extra_id_1>",
5
+ "<extra_id_2>",
6
+ "<extra_id_3>",
7
+ "<extra_id_4>",
8
+ "<extra_id_5>",
9
+ "<extra_id_6>",
10
+ "<extra_id_7>",
11
+ "<extra_id_8>",
12
+ "<extra_id_9>",
13
+ "<extra_id_10>",
14
+ "<extra_id_11>",
15
+ "<extra_id_12>",
16
+ "<extra_id_13>",
17
+ "<extra_id_14>",
18
+ "<extra_id_15>",
19
+ "<extra_id_16>",
20
+ "<extra_id_17>",
21
+ "<extra_id_18>",
22
+ "<extra_id_19>",
23
+ "<extra_id_20>",
24
+ "<extra_id_21>",
25
+ "<extra_id_22>",
26
+ "<extra_id_23>",
27
+ "<extra_id_24>",
28
+ "<extra_id_25>",
29
+ "<extra_id_26>",
30
+ "<extra_id_27>",
31
+ "<extra_id_28>",
32
+ "<extra_id_29>",
33
+ "<extra_id_30>",
34
+ "<extra_id_31>",
35
+ "<extra_id_32>",
36
+ "<extra_id_33>",
37
+ "<extra_id_34>",
38
+ "<extra_id_35>",
39
+ "<extra_id_36>",
40
+ "<extra_id_37>",
41
+ "<extra_id_38>",
42
+ "<extra_id_39>",
43
+ "<extra_id_40>",
44
+ "<extra_id_41>",
45
+ "<extra_id_42>",
46
+ "<extra_id_43>",
47
+ "<extra_id_44>",
48
+ "<extra_id_45>",
49
+ "<extra_id_46>",
50
+ "<extra_id_47>",
51
+ "<extra_id_48>",
52
+ "<extra_id_49>",
53
+ "<extra_id_50>",
54
+ "<extra_id_51>",
55
+ "<extra_id_52>",
56
+ "<extra_id_53>",
57
+ "<extra_id_54>",
58
+ "<extra_id_55>",
59
+ "<extra_id_56>",
60
+ "<extra_id_57>",
61
+ "<extra_id_58>",
62
+ "<extra_id_59>",
63
+ "<extra_id_60>",
64
+ "<extra_id_61>",
65
+ "<extra_id_62>",
66
+ "<extra_id_63>",
67
+ "<extra_id_64>",
68
+ "<extra_id_65>",
69
+ "<extra_id_66>",
70
+ "<extra_id_67>",
71
+ "<extra_id_68>",
72
+ "<extra_id_69>",
73
+ "<extra_id_70>",
74
+ "<extra_id_71>",
75
+ "<extra_id_72>",
76
+ "<extra_id_73>",
77
+ "<extra_id_74>",
78
+ "<extra_id_75>",
79
+ "<extra_id_76>",
80
+ "<extra_id_77>",
81
+ "<extra_id_78>",
82
+ "<extra_id_79>",
83
+ "<extra_id_80>",
84
+ "<extra_id_81>",
85
+ "<extra_id_82>",
86
+ "<extra_id_83>",
87
+ "<extra_id_84>",
88
+ "<extra_id_85>",
89
+ "<extra_id_86>",
90
+ "<extra_id_87>",
91
+ "<extra_id_88>",
92
+ "<extra_id_89>",
93
+ "<extra_id_90>",
94
+ "<extra_id_91>",
95
+ "<extra_id_92>",
96
+ "<extra_id_93>",
97
+ "<extra_id_94>",
98
+ "<extra_id_95>",
99
+ "<extra_id_96>",
100
+ "<extra_id_97>",
101
+ "<extra_id_98>",
102
+ "<extra_id_99>"
103
+ ],
104
+ "eos_token": {
105
+ "content": "</s>",
106
+ "lstrip": false,
107
+ "normalized": false,
108
+ "rstrip": false,
109
+ "single_word": false
110
+ },
111
+ "pad_token": {
112
+ "content": "<pad>",
113
+ "lstrip": false,
114
+ "normalized": false,
115
+ "rstrip": false,
116
+ "single_word": false
117
+ },
118
+ "unk_token": {
119
+ "content": "<unk>",
120
+ "lstrip": false,
121
+ "normalized": false,
122
+ "rstrip": false,
123
+ "single_word": false
124
+ }
125
+ }
tokenizer_2/spiece.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d60acb128cf7b7f2536e8f38a5b18a05535c9e14c7a355904270e15b0945ea86
3
+ size 791656
tokenizer_2/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
transformer/model.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:58c9dc1822d77a3e021955f8889b661057bf629270859dc14fd24008c695f20d
3
+ size 6969394
transformer/model.onnx.data ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0a5e8f2e00db706e0bafa11532c588e144f91b25e84b447e14234b4b670c655b
3
+ size 23806758912
vae_decoder/model.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5d9921d5ceab77f8ce59a7b077c71dba9652722e36610c0ccd4aa817796e26b5
3
+ size 134332
vae_decoder/model.onnx.data ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b7851af2be3f720d2e383974f0421a9a0ea7d5c6863cb23c1f001d5847d31c40
3
+ size 198172160
vae_encoder/model.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9049b13e7fad9cea48da83d9425cf68389431c57b6ec97b9fe33caabd58f27a7
3
+ size 119375
vae_encoder/model.onnx.data ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8c64a1d631cba8cc459b8c35b172a308e3de308f921091b1d05982acbec8c6db
3
+ size 137088512