John6666 committed on
Commit
7a48bc8
1 Parent(s): 2c1ed05

Upload 4 files

Browse files
Files changed (4) hide show
  1. README.md +4 -5
  2. all_models.py +121 -64
  3. app.py +163 -81
  4. externalmod.py +105 -24
README.md CHANGED
@@ -1,14 +1,13 @@
1
  ---
2
- title: 866 AI Art Models 6 Outputs (Gradio 4.x)
3
  emoji: 🛕🛕
4
  colorFrom: green
5
  colorTo: blue
6
  sdk: gradio
7
- sdk_version: 4.41.0
8
  app_file: app.py
9
- pinned: false
10
- duplicated_from: Yntec/Diffusion80XX
11
- short_description: Compare up to 6 image models!
12
  ---
13
 
14
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: Huggingface Diffusion
3
  emoji: 🛕🛕
4
  colorFrom: green
5
  colorTo: blue
6
  sdk: gradio
7
+ sdk_version: 5.0.1
8
  app_file: app.py
9
+ pinned: true
10
+ short_description: Compare 909+ AI Art Models 6 at a time!
 
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
all_models.py CHANGED
@@ -1,14 +1,75 @@
1
  models = [
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
  "Yntec/beLIEve", #865
3
  "digiplay/MilkyWonderland_v1", #866
4
  "Yntec/Hyperlink", #861
5
  "digiplay/AnalogMadness-realistic-model-v5", #864
6
  "Yntec/HyperRemix", #859
7
- "digiplay/ZHMix-Dramatic-v2.0",
8
  "Yntec/realisticStockPhoto3", #863
9
  "digiplay/fishmix_other_v1",
10
  "Yntec/HyperPhotoGASM", #858
11
- "digiplay/Gap_2.6",
12
  "Yntec/CrystalReality", #855
13
  "John6666/meinamix-meinav11-sd15", #857
14
  "Yntec/Roleplay", #850
@@ -25,8 +86,11 @@ models = [
25
  "digiplay/rRealism_v1.0_riiwa", #848
26
  "Yntec/ClayStyle", #838
27
  "digiplay/TWingshadow_v1.0", #847
 
28
  "digiplay/TWingshadow_v1.0_finetune", #846
 
29
  "digiplay/V3_by_Hans_Asian",
 
30
  "digiplay/Maji5PlusCCTV", #843
31
  "Yntec/3DKX2",
32
  "digiplay/cosfMix_v1", #223
@@ -73,7 +137,7 @@ models = [
73
  "Yntec/AbsoluteReality", #15K
74
  "digiplay/PikasAnimatedMix_v1", #805
75
  "Yntec/BeautyFoolRemix", #800
76
- "digiplay/dosmixVAE-mangled", #804
77
  "Yntec/DisneyPixarCartoon768", #803
78
  "digiplay/AgainMix_v2.0", #802
79
  "Yntec/mistoonRuby3", #801
@@ -81,7 +145,7 @@ models = [
81
  "Yntec/AtoZ", #799
82
  "digiplay/fantasticmix_k1", #797
83
  "Yntec/FilmGirlRemix", #795
84
- "digiplay/fCAnimeMix_v5", #798
85
  "Yntec/Vintage", #794
86
  "digiplay/BeautyFoolReality_4", #796
87
  "Yntec/UltraHighDefinition", #791
@@ -101,19 +165,19 @@ models = [
101
  "Yntec/ArthemyComics", #781
102
  "digiplay/EtherRealMix_LUX2",
103
  "Yntec/ReVAnimatedRemix", #780
104
- "digiplay/CampurSari_Gen1",
105
  "Yntec/AnythingNostalgic", #775
106
  "digiplay/WhiteDreamyHillMix_v1_VAE", #713
107
  "Yntec/AnyLoRa-768", #778
108
- "digiplay/CCTV2.5d_v1", #219
109
  "Yntec/AnythingV5-768", #777
110
  "digiplay/LEAU",
111
  "Yntec/Moistalgia", #773
112
- "digiplay/Colorful_v1.3", #774
113
  "Yntec/DeleteThis", #770
114
  "digiplay/LemonTea2.5D",
115
  "Yntec/ModernDisney", #764
116
- "digiplay/HadrianDelice_DeliceV1", #772
117
  "Yntec/IsThisDisney", #767
118
  "digiplay/MRMD_0505", #769
119
  "Yntec/RetroArt", #760
@@ -121,7 +185,7 @@ models = [
121
  "Yntec/Fanatic", #758
122
  "digiplay/fantastel_V1", #759
123
  "Yntec/Mo-Di-Diffusion-768", #768
124
- "digiplay/bluePencil_v09b", #766
125
  "Yntec/AnythingV4.5.6.7.8", #763
126
  "digiplay/majicMIX_realistic_v7", #761
127
  "Yntec/ElldrethsRetroMix", #4K
@@ -133,7 +197,7 @@ models = [
133
  "Yntec/DucHaitenAIart-beta", #3K
134
  "digiplay/Acorn_Photo_v1", #757
135
  "Yntec/a-ZovyaRPGV4", #756
136
- "digiplay/EdisonNilMix_v1", # Added 7.10 Updated 12.5
137
  "Yntec/KrazyGlue", #745
138
  "digiplay/AnyPastel", #752
139
  "Yntec/EpicDiffusion", #753
@@ -148,7 +212,7 @@ models = [
148
  "digiplay/MixTape_RocknRoll_v3punk_bake_fp16",
149
  "Yntec/AbyssOrangeMix", #740
150
  "digiplay/PerfectDeliberate-Anime_v2", #734
151
- "digiplay/SomethingPhenomenal_vivacityV2", #735
152
  "Yntec/CocaCola", #733
153
  "digiplay/majicMixHorror_v1", #738
154
  "digiplay/Noosphere_v4.2", #737
@@ -165,25 +229,21 @@ models = [
165
  "digiplay/pan04", #725
166
  "Yntec/Voxel", #715
167
  "digiplay/FishMix_v1.1",
168
- "digiplay/chrysanthemumMix_v1",
169
  "Yntec/DreamlikeDiffusion", #722
170
- "digiplay/mothmix_v1.41", #720
171
- "digiplay/BreakDro_i1464", #719
172
  "Yntec/HyperRealism", #721
173
  "digiplay/aurorafantasy_v1", #717
174
  "digiplay/CoharuMix_real",
175
  "Yntec/Prodigy", #712
176
  "digiplay/ya3_xt", #711
177
- "digiplay/XtReMixUltimateMerge_v1.5", #710
178
  "Yntec/Protogen_Unofficial_Release", #709
179
- "digiplay/majicMIX_lux_v3", #708
180
- "digiplay/richyrichmix_V2Fp16", #707
181
- "Yntec/Timeless", #703
182
- "digiplay/seizamix_v2", #706
183
  "digiplay/AnalogMadness-realistic-model-v4", #705
 
 
184
  "Yntec/Fabulous", #700
185
  "digiplay/realspice_v2", #702
186
- "digiplay/supashymix_v30Lite", #701
187
  "Yntec/DucHaitenRetro2", #704
188
  "digiplay/BeautifulFantasyRealMix_diffusers", #698
189
  "Yntec/IncredibleLife", #699
@@ -191,17 +251,17 @@ models = [
191
  "Yntec/AnythingV3.1", #699
192
  "digiplay/AstrAnime_v6", #698
193
  "Yntec/TimelessDiffusion768", #699
194
- "digiplay/Koji_v2.1_diffusers",
195
  "Yntec/RetroLife", #691
196
- "digiplay/CamelliaMIx_2.5D_v1_VAE", #696
197
  "Yntec/CuteFurry",
198
- "digiplay/MengX_Mix_Fantasy_v4", #689
199
  "Yntec/theallysMixIIChurned", #694
200
  "digiplay/MengX_Mix_Real_v3",
201
  "Yntec/BrainDance", #693
202
  "digiplay/ARRealVX1.1",
203
  "Yntec/Remedy", #697
204
- "digiplay/PlanetBumix_v1",
205
  "Yntec/Paramount",
206
  "digiplay/Yuzu_v1.1", #688
207
  "Yntec/Playground", #690
@@ -259,39 +319,23 @@ models = [
259
  "Yntec/Infinite80s",
260
  "digiplay/AI-infinity-V1-fp16",
261
  "Yntec/InfiniteLiberty",
262
- "digiplay/XXMix_9realistic_v1",
263
  "Yntec/aBagOfChips",
264
- "digiplay/Dolka_Rusalka_v0.5.1",
265
  "Yntec/IsThisArt",
266
- "digiplay/YutaMix_realistic_v11",
267
  "Yntec/Jackpot",
268
- "digiplay/XRYCJ_RealisticModel",
269
  "Yntec/C-.-_-.-Aravaggio",
270
- "digiplay/RunDiffusionFX2.5D_v1_diffusers",
271
  "Yntec/Stuff",
272
- "digiplay/NightmareShaper_v2DarkageLobotomy",
273
  "Yntec/LiberteRedmond",
274
- "digiplay/nk15_diffusers", #230
275
  "Yntec/Emoticons",
276
- "digiplay/xxgSl526_v1",
277
  "Yntec/BabeBae",
278
- "digiplay/2-KWI", #213
279
  "Yntec/SinkOrSwim",
280
- "digiplay/majicMIXfantasy_v1",
281
  "Yntec/Nostalgic",
282
- "digiplay/yiSMix2.8D_v1",
283
  "Yntec/mixRealisticFantasy",
284
- "digiplay/hellopure_v2.23",
285
  "Yntec/Astro_-_-Gemu",
286
- "digiplay/YabaLMixTrue25D_V1.0",
287
  "Yntec/StorybookRedmond",
288
- "digiplay/AnalogMadness-realistic-model-v7", #842
289
  "Yntec/Cheesecake",
290
- "digiplay/CuriousMerge2.5D_v40E", #771
291
  "Yntec/GimmeDatDing",
292
- "John6666/cookie-run-character-style-v1-sd15-lora", #853
293
  "Yntec/GenerateMe",
294
- "digiplay/Hassaku_1.3", #830
295
  "Yntec/Gacha",
296
  "Yntec/incha_re_zoro",
297
  "Yntec/GodMode",
@@ -355,7 +399,6 @@ models = [
355
  "Yntec/Abased", #2k
356
  "Yntec/SCMix", #2k
357
  "Yntec/Hassaku", #2k
358
- "Yntec/m0nst3rfy3", #2k
359
  "Yntec/PotaytoPotahto", #2K
360
  "Yntec/3DCute", #2K
361
  "Yntec/SuperCuteRemix", #2K
@@ -396,7 +439,6 @@ models = [
396
  "Yntec/vividicAnime", #2K
397
  "Yntec/WoopWoopRemix", #2K
398
  "Yntec/ArcticFowl", #2K
399
- "Yntec/CrystalClearRemix", #5k
400
  "Yntec/CrystalClear", #3k
401
  "Yntec/Reanimate", #5k
402
  "Yntec/Deliberate2", #5k
@@ -507,26 +549,21 @@ models = [
507
  "Yntec/Hiten",
508
  "digiplay/fCAnimeMix_v6", #776
509
  "digiplay/YabaLMixAnimeRealistic_V1.0", #754
510
- "digiplay/WhiteDreamyHillMix_v1", #220
511
- "digiplay/fCAnimeMix_v4", #747
512
- "digiplay/HadrianDelice_DeliceV1.5", #743
513
- "digiplay/fCAnimeMix_v2", #716
514
- "digiplay/HadrianDelice_BYC1.0", #728
515
  "digiplay/majicMIX_realistic_v6",
516
  "digiplay/Noosphere_v4", #723
517
- "digiplay/PerfectDeliberate-Anime_v1", #724
518
  "digiplay/CamelliaMIx_2.5D_v1", #695
519
  "digiplay/CamelliaMIx_2.5D_diffusers", #692
520
  "digiplay/fantasticmix2.5D_v4.0",
521
- "digiplay/majicMIX_realistic_v1",
522
  "digiplay/RunDiffusionFXPhotorealistic_v1",
523
-
 
 
 
524
  "digiplay/DucHaiten-Real3D-NSFW-V1",
525
  "digiplay/kencanmix_v1.5",
526
- "digiplay/ZHMix-Dramatic-v3.0",
527
- "digiplay/Gap",
528
  "digiplay/ya3_VAE",
529
- "digiplay/asyncsMIX_v2",
530
  "digiplay/fantasticmix_v65_test",
531
  "digiplay/AingDiffusion8",
532
  "digiplay/AingDiffusion9",
@@ -553,7 +590,23 @@ models = [
553
  "digiplay/OldFish_v1.1mix_hello",
554
  "digiplay/OldFish_v1.1_personal_HDmix",
555
  "digiplay/FishMix_v1",
 
556
  "digiplay/Yntec_Wonder_0508_DDIM", #818
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
557
  "John6666/juggernaut-reborn-sd15", #860
558
  "digiplay/AnalogMadness-realistic-model-v6", #862
559
  "DucHaiten/DucHaitenDreamWorld",
@@ -688,11 +741,9 @@ models = [
688
  "digiplay/Noosphere_v3",
689
  "digiplay/PeachMixsRelistic_R0", #262
690
  "wavymulder/timeless-diffusion",
691
- "digiplay/WhiteDreamyHillMix_v1", #220
692
-
693
  "DucHaiten/DucHaitenAnime",
694
  "DucHaiten/DucHaitenAIart",
695
- "digiplay/BeenYouLiteL11_diffusers",
696
  "Manseo/Colorful-v4.5-Plus", #244
697
  "Guizmus/SDArt_ChaosAndOrder",
698
  "DucHaiten/DH_ClassicAnime",
@@ -700,7 +751,6 @@ models = [
700
  "johnslegers/epic-diffusion-v1.1",
701
  "emilianJR/epiCRealism",
702
  "johnslegers/epic-diffusion",
703
- "digiplay/fantasticAnime_diffusers",
704
  "stablediffusionapi/ghostmix",
705
  "Duskfallcrew/EpicMix_Realism",
706
  "nitrosocke/Nitro-Diffusion",
@@ -748,7 +798,7 @@ models = [
748
  "ogkalu/Comic-Diffusion",
749
  "Guizmus/SDArt_ChaosAndOrder768",
750
  "gsdf/Counterfeit-V2.0",
751
- "dwancin/memoji", #07.11
752
  "nousr/robo-diffusion-2-base",
753
 
754
  ##"hakurei/waifu-diffusion",
@@ -794,7 +844,6 @@ models = [
794
  "WarriorMama777/BloodOrangeMix",
795
  "wavymulder/collage-diffusion",
796
  "stablediffusionapi/camelliamixline",
797
- "digiplay/CiderMix_ciderR", #260
798
  "Johnhex/Clam", #243
799
  "stablediffusionapi/cosmic-babes",
800
  "digiplay/CoffeeDonut_v1",
@@ -887,12 +936,20 @@ models = [
887
  "ItsJayQz/Valorant_Diffusion",
888
  "Fictiverse/Stable_Diffusion_VoxelArt_Model", #204
889
  "wavymulder/wavyfusion",
890
- "Yntec/HassanRemix",
891
- "Yntec/Reddit",
892
- "Yntec/CinematicReality",
893
- "runwayml/stable-diffusion-v1-5", #555
894
  "CompVis/stable-diffusion-v1-4", #530
895
  "CompVis/stable-diffusion-v1-3", #207
896
  "CompVis/stable-diffusion-v1-2", #208
897
  "CompVis/stable-diffusion-v1-1", #209
898
  ]
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  models = [
2
+ "Yntec/LadyNostalgia", #909
3
+ "digiplay/SomethingPhenomenal_vivacityV2", #735
4
+ "Yntec/darelitesFantasyMix", #908
5
+ "digiplay/chrysanthemumMix_v1",
6
+ "Yntec/handpaintedRPGIcons", #906
7
+ "digiplay/majicMIX_realistic_v1",
8
+ "Yntec/MemojiRemix", #905
9
+ "digiplay/asyncsMIX_v2",
10
+ "Yntec/StaticMVintage", #903
11
+ "digiplay/Koji_v2.1_diffusers",
12
+ "Yntec/animeFIVEHUNDREDTWENTY-SIX", #902
13
+ "digiplay/ZHMix-Dramatic-v3.0",
14
+ "Yntec/3DCartoonVision", #901
15
+ "digiplay/Gap",
16
+ "Yntec/TwoAndAHalfDimensions", #900
17
+ "digiplay/EdisonNilMix_v1", # Added 7.10 Updated 12.5
18
+ "Yntec/RadiantDiversions", #899
19
+ "digiplay/MengX_Mix_Fantasy_v4", #689
20
+ "Yntec/RetroRetro", #897
21
+ "digiplay/PlanetBumix_v1",
22
+ "Yntec/ClassicToons", #897
23
+ "digiplay/CamelliaMIx_2.5D_v1_VAE", #696
24
+ "Yntec/Disneyify", #896
25
+ "black-forest-labs/FLUX.1-schnell", #876
26
+ "digiplay/supashymix_v30Lite", #701
27
+ "Yntec/epiCEpic", #889
28
+ "LostMedia/RetroDiffusion", #894
29
+ "Yntec/QGO", #893
30
+ "digiplay/seizamix_v2", #706
31
+ "Yntec/PixelKicks", #895
32
+ "digiplay/majicMIX_lux_v3", #708
33
+ "Yntec/VisionVision", #888
34
+ "LostMedia/0-1982-1084-0065", #892
35
+ "Yntec/m0nst3rfy3",
36
+ "digiplay/fantasticAnime_diffusers",
37
+ "Yntec/photographerAlpha7", #891
38
+ "digiplay/richyrichmix_V2Fp16", #707
39
+ "Yntec/ChunkyCat", #880
40
+ "digiplay/dosmixVAE-mangled", #804
41
+ "Yntec/TickleYourFancy", #879
42
+ "digiplay/fCAnimeMix_v4", #747
43
+ "Yntec/realistic-vision-v13", #886
44
+ "John6666/naclo74models-mih-entrance-sd15", #883
45
+ "Yntec/ZootVision", #884
46
+ "digiplay/HadrianDelice_BYC1.0", #728
47
+ "Yntec/breakdomain", #882
48
+ "digiplay/PerfectDeliberate-Anime_v1", #724
49
+ "Yntec/CrystalClearRemix", ##4
50
+ "digiplay/BreakDro_i1464", #719
51
+ "Yntec/AllRoadsLeadToRetro", #877
52
+ "digiplay/mothmix_v1.41", #720
53
+ "Yntec/AnimephilesAnonymous", #875
54
+ "digiplay/CCTV2.5d_v1", #219
55
+ "Yntec/InsaneSurreality", #873
56
+ "digiplay/bluePencil_v09b", #766
57
+ "Yntec/WinningBlunder", #868
58
+ "digiplay/Colorful_v1.3", #774
59
+ "Yntec/DreamlikePhotoReal2", #872
60
+ "digiplay/BeenYouLiteL11_diffusers",
61
+ "Yntec/Surreality", #870
62
+ "digiplay/xxgSl526_v1", ##400
63
  "Yntec/beLIEve", #865
64
  "digiplay/MilkyWonderland_v1", #866
65
  "Yntec/Hyperlink", #861
66
  "digiplay/AnalogMadness-realistic-model-v5", #864
67
  "Yntec/HyperRemix", #859
68
+ "digiplay/RunDiffusionFX2.5D_v1_diffusers",
69
  "Yntec/realisticStockPhoto3", #863
70
  "digiplay/fishmix_other_v1",
71
  "Yntec/HyperPhotoGASM", #858
72
+ "digiplay/XRYCJ_RealisticModel",
73
  "Yntec/CrystalReality", #855
74
  "John6666/meinamix-meinav11-sd15", #857
75
  "Yntec/Roleplay", #850
 
86
  "digiplay/rRealism_v1.0_riiwa", #848
87
  "Yntec/ClayStyle", #838
88
  "digiplay/TWingshadow_v1.0", #847
89
+ "Yntec/HassanRemix",
90
  "digiplay/TWingshadow_v1.0_finetune", #846
91
+ "Yntec/Reddit",
92
  "digiplay/V3_by_Hans_Asian",
93
+ "Yntec/CinematicReality",
94
  "digiplay/Maji5PlusCCTV", #843
95
  "Yntec/3DKX2",
96
  "digiplay/cosfMix_v1", #223
 
137
  "Yntec/AbsoluteReality", #15K
138
  "digiplay/PikasAnimatedMix_v1", #805
139
  "Yntec/BeautyFoolRemix", #800
140
+ "digiplay/yiSMix2.8D_v1", ##600
141
  "Yntec/DisneyPixarCartoon768", #803
142
  "digiplay/AgainMix_v2.0", #802
143
  "Yntec/mistoonRuby3", #801
 
145
  "Yntec/AtoZ", #799
146
  "digiplay/fantasticmix_k1", #797
147
  "Yntec/FilmGirlRemix", #795
148
+ "digiplay/hellopure_v2.23", ##1K
149
  "Yntec/Vintage", #794
150
  "digiplay/BeautyFoolReality_4", #796
151
  "Yntec/UltraHighDefinition", #791
 
165
  "Yntec/ArthemyComics", #781
166
  "digiplay/EtherRealMix_LUX2",
167
  "Yntec/ReVAnimatedRemix", #780
168
+ "digiplay/majicMIXfantasy_v1", ##700
169
  "Yntec/AnythingNostalgic", #775
170
  "digiplay/WhiteDreamyHillMix_v1_VAE", #713
171
  "Yntec/AnyLoRa-768", #778
172
+ "digiplay/CuriousMerge2.5D_v40E", #771
173
  "Yntec/AnythingV5-768", #777
174
  "digiplay/LEAU",
175
  "Yntec/Moistalgia", #773
176
+ "digiplay/AnalogMadness-realistic-model-v7", #842
177
  "Yntec/DeleteThis", #770
178
  "digiplay/LemonTea2.5D",
179
  "Yntec/ModernDisney", #764
180
+ "John6666/cute-illustration-style-reinforced-model-v61-sd15", #878
181
  "Yntec/IsThisDisney", #767
182
  "digiplay/MRMD_0505", #769
183
  "Yntec/RetroArt", #760
 
185
  "Yntec/Fanatic", #758
186
  "digiplay/fantastel_V1", #759
187
  "Yntec/Mo-Di-Diffusion-768", #768
188
+ "digiplay/WhiteDreamyHillMix_v1", #220
189
  "Yntec/AnythingV4.5.6.7.8", #763
190
  "digiplay/majicMIX_realistic_v7", #761
191
  "Yntec/ElldrethsRetroMix", #4K
 
197
  "Yntec/DucHaitenAIart-beta", #3K
198
  "digiplay/Acorn_Photo_v1", #757
199
  "Yntec/a-ZovyaRPGV4", #756
200
+ "digiplay/Gap_2.6",
201
  "Yntec/KrazyGlue", #745
202
  "digiplay/AnyPastel", #752
203
  "Yntec/EpicDiffusion", #753
 
212
  "digiplay/MixTape_RocknRoll_v3punk_bake_fp16",
213
  "Yntec/AbyssOrangeMix", #740
214
  "digiplay/PerfectDeliberate-Anime_v2", #734
215
+ "digiplay/XXMix_9realistic_v1",
216
  "Yntec/CocaCola", #733
217
  "digiplay/majicMixHorror_v1", #738
218
  "digiplay/Noosphere_v4.2", #737
 
229
  "digiplay/pan04", #725
230
  "Yntec/Voxel", #715
231
  "digiplay/FishMix_v1.1",
232
+ "digiplay/Dolka_Rusalka_v0.5.1",
233
  "Yntec/DreamlikeDiffusion", #722
234
+ "digiplay/XtReMixUltimateMerge_v1.5", #710
235
+ "digiplay/YabaLMixTrue25D_V1.0", ##900
236
  "Yntec/HyperRealism", #721
237
  "digiplay/aurorafantasy_v1", #717
238
  "digiplay/CoharuMix_real",
239
  "Yntec/Prodigy", #712
240
  "digiplay/ya3_xt", #711
 
241
  "Yntec/Protogen_Unofficial_Release", #709
 
 
 
 
242
  "digiplay/AnalogMadness-realistic-model-v4", #705
243
+ "Yntec/Timeless", #703
244
+ "digiplay/2-KWI", #213 ##900
245
  "Yntec/Fabulous", #700
246
  "digiplay/realspice_v2", #702
 
247
  "Yntec/DucHaitenRetro2", #704
248
  "digiplay/BeautifulFantasyRealMix_diffusers", #698
249
  "Yntec/IncredibleLife", #699
 
251
  "Yntec/AnythingV3.1", #699
252
  "digiplay/AstrAnime_v6", #698
253
  "Yntec/TimelessDiffusion768", #699
254
+ "digiplay/YutaMix_realistic_v11",
255
  "Yntec/RetroLife", #691
256
+ "digiplay/nk15_diffusers", #230
257
  "Yntec/CuteFurry",
258
+ "digiplay/NightmareShaper_v2DarkageLobotomy",
259
  "Yntec/theallysMixIIChurned", #694
260
  "digiplay/MengX_Mix_Real_v3",
261
  "Yntec/BrainDance", #693
262
  "digiplay/ARRealVX1.1",
263
  "Yntec/Remedy", #697
264
+ "digiplay/Hassaku_1.3", #830
265
  "Yntec/Paramount",
266
  "digiplay/Yuzu_v1.1", #688
267
  "Yntec/Playground", #690
 
319
  "Yntec/Infinite80s",
320
  "digiplay/AI-infinity-V1-fp16",
321
  "Yntec/InfiniteLiberty",
 
322
  "Yntec/aBagOfChips",
 
323
  "Yntec/IsThisArt",
 
324
  "Yntec/Jackpot",
 
325
  "Yntec/C-.-_-.-Aravaggio",
 
326
  "Yntec/Stuff",
 
327
  "Yntec/LiberteRedmond",
 
328
  "Yntec/Emoticons",
 
329
  "Yntec/BabeBae",
 
330
  "Yntec/SinkOrSwim",
 
331
  "Yntec/Nostalgic",
 
332
  "Yntec/mixRealisticFantasy",
 
333
  "Yntec/Astro_-_-Gemu",
334
+ "John6666/cookie-run-character-style-v1-sd15-lora", #853
335
  "Yntec/StorybookRedmond",
 
336
  "Yntec/Cheesecake",
 
337
  "Yntec/GimmeDatDing",
 
338
  "Yntec/GenerateMe",
 
339
  "Yntec/Gacha",
340
  "Yntec/incha_re_zoro",
341
  "Yntec/GodMode",
 
399
  "Yntec/Abased", #2k
400
  "Yntec/SCMix", #2k
401
  "Yntec/Hassaku", #2k
 
402
  "Yntec/PotaytoPotahto", #2K
403
  "Yntec/3DCute", #2K
404
  "Yntec/SuperCuteRemix", #2K
 
439
  "Yntec/vividicAnime", #2K
440
  "Yntec/WoopWoopRemix", #2K
441
  "Yntec/ArcticFowl", #2K
 
442
  "Yntec/CrystalClear", #3k
443
  "Yntec/Reanimate", #5k
444
  "Yntec/Deliberate2", #5k
 
549
  "Yntec/Hiten",
550
  "digiplay/fCAnimeMix_v6", #776
551
  "digiplay/YabaLMixAnimeRealistic_V1.0", #754
552
+ "digiplay/fCAnimeMix_v5", #798
 
 
 
 
553
  "digiplay/majicMIX_realistic_v6",
554
  "digiplay/Noosphere_v4", #723
 
555
  "digiplay/CamelliaMIx_2.5D_v1", #695
556
  "digiplay/CamelliaMIx_2.5D_diffusers", #692
557
  "digiplay/fantasticmix2.5D_v4.0",
 
558
  "digiplay/RunDiffusionFXPhotorealistic_v1",
559
+ "digiplay/HadrianDelice_DeliceV1", #772
560
+ "digiplay/HadrianDelice_DeliceV1.5", #743
561
+ "digiplay/fCAnimeMix_v2", #716
562
+ "digiplay/ZHMix-Dramatic-v2.0",
563
  "digiplay/DucHaiten-Real3D-NSFW-V1",
564
  "digiplay/kencanmix_v1.5",
565
+
 
566
  "digiplay/ya3_VAE",
 
567
  "digiplay/fantasticmix_v65_test",
568
  "digiplay/AingDiffusion8",
569
  "digiplay/AingDiffusion9",
 
590
  "digiplay/OldFish_v1.1mix_hello",
591
  "digiplay/OldFish_v1.1_personal_HDmix",
592
  "digiplay/FishMix_v1",
593
+ "digiplay/majicMIX_realistic_v5preview", #867
594
  "digiplay/Yntec_Wonder_0508_DDIM", #818
595
+
596
+ "digiplay/PeachMixsRelistic_R0", #907
597
+ "digiplay/PerfectDeliberate_v5", #904
598
+ "digiplay/majicMIX_sombre_v2", #900
599
+ "digiplay/majicMIX_sombre_v1", #899
600
+ "digiplay/GhostMix", #899
601
+ "digiplay/CiderMix_ciderR", #898
602
+ "digiplay/MeinaPastel_v3", #896
603
+ "digiplay/quincemix_v2", #896
604
+ "digiplay/chilled_remixb_v1vae.safetensors", #887
605
+ "digiplay/PerfectDeliberate_v4", #885
606
+ "digiplay/K-main2.1", #881
607
+ "digiplay/YabaLMixTrue25D_V2.0", #874
608
+ "digiplay/realdosmix_diffusers", #873
609
+ "digiplay/fantasticmix2.5D_test", #871
610
  "John6666/juggernaut-reborn-sd15", #860
611
  "digiplay/AnalogMadness-realistic-model-v6", #862
612
  "DucHaiten/DucHaitenDreamWorld",
 
741
  "digiplay/Noosphere_v3",
742
  "digiplay/PeachMixsRelistic_R0", #262
743
  "wavymulder/timeless-diffusion",
744
+
 
745
  "DucHaiten/DucHaitenAnime",
746
  "DucHaiten/DucHaitenAIart",
 
747
  "Manseo/Colorful-v4.5-Plus", #244
748
  "Guizmus/SDArt_ChaosAndOrder",
749
  "DucHaiten/DH_ClassicAnime",
 
751
  "johnslegers/epic-diffusion-v1.1",
752
  "emilianJR/epiCRealism",
753
  "johnslegers/epic-diffusion",
 
754
  "stablediffusionapi/ghostmix",
755
  "Duskfallcrew/EpicMix_Realism",
756
  "nitrosocke/Nitro-Diffusion",
 
798
  "ogkalu/Comic-Diffusion",
799
  "Guizmus/SDArt_ChaosAndOrder768",
800
  "gsdf/Counterfeit-V2.0",
801
+ ##"dwancin/memoji", #07.11
802
  "nousr/robo-diffusion-2-base",
803
 
804
  ##"hakurei/waifu-diffusion",
 
844
  "WarriorMama777/BloodOrangeMix",
845
  "wavymulder/collage-diffusion",
846
  "stablediffusionapi/camelliamixline",
 
847
  "Johnhex/Clam", #243
848
  "stablediffusionapi/cosmic-babes",
849
  "digiplay/CoffeeDonut_v1",
 
936
  "ItsJayQz/Valorant_Diffusion",
937
  "Fictiverse/Stable_Diffusion_VoxelArt_Model", #204
938
  "wavymulder/wavyfusion",
 
 
 
 
939
  "CompVis/stable-diffusion-v1-4", #530
940
  "CompVis/stable-diffusion-v1-3", #207
941
  "CompVis/stable-diffusion-v1-2", #208
942
  "CompVis/stable-diffusion-v1-1", #209
943
  ]
944
+
945
+ #from externalmod import find_model_list
946
+
947
+ #models = find_model_list("Yntec", [], "", "last_modified", 20)
948
+
949
+ # Examples:
950
+ #models = ['yodayo-ai/kivotos-xl-2.0', 'yodayo-ai/holodayo-xl-2.1'] # specific models
951
+ #models = find_model_list("Yntec", [], "", "last_modified", 20) # Yntec's latest 20 models
952
+ #models = find_model_list("Yntec", ["anime"], "", "last_modified", 20) # Yntec's latest 20 models with 'anime' tag
953
+ #models = find_model_list("Yntec", [], "anime", "last_modified", 20) # Yntec's latest 20 models without 'anime' tag
954
+ #models = find_model_list("", [], "", "last_modified", 20) # latest 20 text-to-image models of huggingface
955
+ #models = find_model_list("", [], "", "downloads", 20) # monthly most downloaded 20 text-to-image models of huggingface
app.py CHANGED
@@ -1,18 +1,20 @@
1
  import gradio as gr
2
- from random import randint
3
  from all_models import models
4
- from externalmod import gr_Interface_load
5
  import asyncio
 
 
 
 
6
 
7
 
8
  def load_fn(models):
9
  global models_load
10
  models_load = {}
11
-
12
  for model in models:
13
  if model not in models_load.keys():
14
  try:
15
- m = gr_Interface_load(f'models/{model}')
16
  except Exception as error:
17
  print(error)
18
  m = gr.Interface(lambda: None, ['text'], ['image'])
@@ -23,8 +25,11 @@ load_fn(models)
23
 
24
 
25
  num_models = 6
 
 
26
  default_models = models[:num_models]
27
- timeout = 60
 
28
 
29
  def extend_choices(choices):
30
  return choices[:num_models] + (num_models - len(choices[:num_models])) * ['NA']
@@ -32,106 +37,183 @@ def extend_choices(choices):
32
 
33
  def update_imgbox(choices):
34
  choices_plus = extend_choices(choices[:num_models])
35
- return [gr.Image(None, label = m, visible = (m != 'NA')) for m in choices_plus]
36
-
37
-
38
- def update_imgbox_gallery(choices):
39
- choices_plus = extend_choices(choices[:num_models])
40
- return [gr.Gallery(None, label = m, visible = (m != 'NA')) for m in choices_plus]
41
-
42
-
43
- async def infer(model_str, prompt, timeout):
44
- noise = ""
45
- rand = randint(1, 500)
46
- for i in range(rand):
47
- noise += " "
48
- task = asyncio.create_task(asyncio.to_thread(models_load[model_str], f'{prompt} {noise}'))
 
 
 
 
 
 
 
49
  await asyncio.sleep(0)
50
  try:
51
  result = await asyncio.wait_for(task, timeout=timeout)
52
- except (Exception, asyncio.TimeoutError) as e:
53
  print(e)
54
- print(f"Task timed-out: {model_str}")
55
  if not task.done(): task.cancel()
56
  result = None
57
- return result
 
 
 
 
 
 
 
 
 
 
 
58
 
59
 
60
- def gen_fn(model_str, prompt):
61
- if model_str == 'NA':
62
- return None
63
  try:
64
  loop = asyncio.new_event_loop()
65
- result = loop.run_until_complete(infer(model_str, prompt, timeout))
 
66
  except (Exception, asyncio.CancelledError) as e:
67
  print(e)
68
  print(f"Task aborted: {model_str}")
69
  result = None
 
70
  finally:
71
  loop.close()
72
  return result
73
 
74
 
75
- def gen_fn_gallery(model_str, prompt, gallery):
76
  if gallery is None: gallery = []
77
- if model_str == 'NA':
78
- yield gallery
79
- try:
80
- loop = asyncio.new_event_loop()
81
- result = loop.run_until_complete(infer(model_str, prompt, timeout))
82
- if result: gallery.append(result)
83
- except (Exception, asyncio.CancelledError) as e:
84
- print(e)
85
- print(f"Task aborted: {model_str}")
86
- finally:
87
- loop.close()
88
- yield gallery
89
 
90
 
91
  CSS="""
92
- .output { width=480px; height=480px !important; }
 
 
 
93
  """
94
 
95
- with gr.Blocks(css=CSS) as demo:
96
- with gr.Tab('The Dream'):
97
- txt_input = gr.Textbox(label = 'Your prompt:', lines=4) #.style(container=False,min_width=1200)
98
- gen_button = gr.Button('Generate up to 6 images in up to 3 minutes total')
99
- stop_button = gr.Button('Stop', variant = 'secondary', interactive = False)
100
- gen_button.click(lambda: gr.update(interactive = True), None, stop_button)
101
- gr.HTML(
102
- """
103
- <div style="text-align: center; max-width: 1200px; margin: 0 auto;">
104
- <div>
105
- <body>
106
- <div class="center"><p style="margin-bottom: 10px; color: #000000;">Scroll down to see more images and select models.</p>
107
- </div>
108
- </body>
109
- </div>
110
- </div>
111
- """
112
- )
113
- with gr.Row():
114
- output = [gr.Image(label = m, show_download_button=True, elem_classes="output", show_share_button=True) for m in default_models]
115
- #output = [gr.Gallery(label = m, show_download_button=True, elem_classes="output", interactive=False, show_share_button=True, container=True, format="png", object_fit="contain") for m in default_models]
116
- current_models = [gr.Textbox(m, visible = False) for m in default_models]
117
-
118
- for m, o in zip(current_models, output):
119
- gen_event = gen_button.click(gen_fn, [m, txt_input], o)
120
- #gen_event = gen_button.click(gen_fn_gallery, [m, txt_input, o], o)
121
- stop_button.click(lambda: gr.update(interactive = False), None, stop_button, cancels = [gen_event])
122
- with gr.Accordion('Model selection'):
123
- model_choice = gr.CheckboxGroup(models, label = f'Choose up to {num_models} different models from the 866 available!', value = default_models, interactive = True)
124
- model_choice.change(update_imgbox, model_choice, output)
125
- #model_choice.change(update_imgbox_gallery, model_choice, output)
126
- model_choice.change(extend_choices, model_choice, current_models)
127
- with gr.Row():
128
- gr.HTML(
129
  """
130
- <div class="footer">
131
- <p> Based on the <a href="https://huggingface.co/spaces/derwahnsinn/TestGen">TestGen</a> Space by derwahnsinn, the <a href="https://huggingface.co/spaces/RdnUser77/SpacIO_v1">SpacIO</a> Space by RdnUser77 and Omnibus's Maximum Multiplier!
132
- </p>
133
  """
134
- )
135
-
136
- demo.queue()
137
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
 
2
  from all_models import models
3
+ from externalmod import gr_Interface_load, save_image, randomize_seed
4
  import asyncio
5
+ import os
6
+ from threading import RLock
7
+ lock = RLock()
8
+ HF_TOKEN = os.environ.get("HF_TOKEN") if os.environ.get("HF_TOKEN") else None # If private or gated models aren't used, ENV setting is unnecessary.
9
 
10
 
11
  def load_fn(models):
12
  global models_load
13
  models_load = {}
 
14
  for model in models:
15
  if model not in models_load.keys():
16
  try:
17
+ m = gr_Interface_load(f'models/{model}', hf_token=HF_TOKEN)
18
  except Exception as error:
19
  print(error)
20
  m = gr.Interface(lambda: None, ['text'], ['image'])
 
25
 
26
 
27
  num_models = 6
28
+ max_images = 6
29
+ inference_timeout = 300
30
  default_models = models[:num_models]
31
+ MAX_SEED = 2**32-1
32
+
33
 
34
  def extend_choices(choices):
35
  return choices[:num_models] + (num_models - len(choices[:num_models])) * ['NA']
 
37
 
38
  def update_imgbox(choices):
39
  choices_plus = extend_choices(choices[:num_models])
40
+ return [gr.Image(None, label=m, visible=(m!='NA')) for m in choices_plus]
41
+
42
+
43
+ def random_choices():
44
+ import random
45
+ random.seed()
46
+ return random.choices(models, k=num_models)
47
+
48
+
49
+ # https://huggingface.co/docs/api-inference/detailed_parameters
50
+ # https://huggingface.co/docs/huggingface_hub/package_reference/inference_client
51
+ async def infer(model_str, prompt, nprompt="", height=0, width=0, steps=0, cfg=0, seed=-1, timeout=inference_timeout):
52
+ kwargs = {}
53
+ if height > 0: kwargs["height"] = height
54
+ if width > 0: kwargs["width"] = width
55
+ if steps > 0: kwargs["num_inference_steps"] = steps
56
+ if cfg > 0: cfg = kwargs["guidance_scale"] = cfg
57
+ if seed == -1: kwargs["seed"] = randomize_seed()
58
+ else: kwargs["seed"] = seed
59
+ task = asyncio.create_task(asyncio.to_thread(models_load[model_str].fn,
60
+ prompt=prompt, negative_prompt=nprompt, **kwargs, token=HF_TOKEN))
61
  await asyncio.sleep(0)
62
  try:
63
  result = await asyncio.wait_for(task, timeout=timeout)
64
+ except asyncio.TimeoutError as e:
65
  print(e)
66
+ print(f"Task timed out: {model_str}")
67
  if not task.done(): task.cancel()
68
  result = None
69
+ raise Exception(f"Task timed out: {model_str}") from e
70
+ except Exception as e:
71
+ print(e)
72
+ if not task.done(): task.cancel()
73
+ result = None
74
+ raise Exception() from e
75
+ if task.done() and result is not None and not isinstance(result, tuple):
76
+ with lock:
77
+ png_path = "image.png"
78
+ image = save_image(result, png_path, model_str, prompt, nprompt, height, width, steps, cfg, seed)
79
+ return image
80
+ return None
81
 
82
 
83
+ def gen_fn(model_str, prompt, nprompt="", height=0, width=0, steps=0, cfg=0, seed=-1):
 
 
84
  try:
85
  loop = asyncio.new_event_loop()
86
+ result = loop.run_until_complete(infer(model_str, prompt, nprompt,
87
+ height, width, steps, cfg, seed, inference_timeout))
88
  except (Exception, asyncio.CancelledError) as e:
89
  print(e)
90
  print(f"Task aborted: {model_str}")
91
  result = None
92
+ raise gr.Error(f"Task aborted: {model_str}, Error: {e}")
93
  finally:
94
  loop.close()
95
  return result
96
 
97
 
98
+ def add_gallery(image, model_str, gallery):
99
  if gallery is None: gallery = []
100
+ with lock:
101
+ if image is not None: gallery.insert(0, (image, model_str))
102
+ return gallery
 
 
 
 
 
 
 
 
 
103
 
104
 
105
  CSS="""
106
+ .gradio-container { max-width: 1200px; margin: 0 auto; !important; }
107
+ .output { width=112px; height=112px; max_width=112px; max_height=112px; !important; }
108
+ .gallery { min_width=512px; min_height=512px; max_height=1024px; !important; }
109
+ .guide { text-align: center; !important; }
110
  """
111
 
112
+
113
# Build the Gradio UI: two tabs (multi-model fan-out, single-model batch)
# sharing the gen_fn/add_gallery helpers defined above.
# NOTE(review): nesting reconstructed from a mangled diff — confirm layout
# against the deployed Space.
with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', fill_width=True, css=CSS) as demo:
    gr.HTML(
    """
<div>
<p> <center>For simultaneous generations without hidden queue check out <a href="https://huggingface.co/spaces/Yntec/ToyWorld">Toy World</a>! For more options like single model x6 check out <a href="https://huggingface.co/spaces/John6666/Diffusion80XX4sg">Diffusion80XX4sg</a> by John6666!</center>
</p></div>
    """
    )
    with gr.Tab('Huggingface Diffusion'):
        # --- Tab 1: send one prompt to up to num_models models at once ---
        with gr.Column(scale=2):
            with gr.Group():
                txt_input = gr.Textbox(label='Your prompt:', lines=4)
                neg_input = gr.Textbox(label='Negative prompt:', lines=1)
                with gr.Accordion("Advanced", open=False, visible=True):
                    # 0 means "use the model's default" for every generation parameter.
                    with gr.Row():
                        width = gr.Slider(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
                        height = gr.Slider(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
                    with gr.Row():
                        steps = gr.Slider(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0)
                        cfg = gr.Slider(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0)
                    seed = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
                    seed_rand = gr.Button("Randomize Seed 🎲", size="sm", variant="secondary")
                    seed_rand.click(randomize_seed, None, [seed], queue=False)
            with gr.Row():
                gen_button = gr.Button(f'Generate up to {int(num_models)} images in up to 3 minutes total', variant='primary', scale=3)
                random_button = gr.Button(f'Random {int(num_models)} 🎲', variant='secondary', scale=1)
                #stop_button = gr.Button('Stop', variant='stop', interactive=False, scale=1)
                #gen_button.click(lambda: gr.update(interactive=True), None, stop_button)
            gr.Markdown("Scroll down to see more images and select models.", elem_classes="guide")

        with gr.Column(scale=1):
            with gr.Group():
                with gr.Row():
                    # One small preview image per selected model.
                    output = [gr.Image(label=m, show_download_button=True, elem_classes="output",
                                interactive=False, width=112, height=112, show_share_button=False, format="png",
                                visible=True) for m in default_models]
                # Hidden textboxes carry each slot's current model name into gen_fn.
                current_models = [gr.Textbox(m, visible=False) for m in default_models]

        with gr.Column(scale=2):
            gallery = gr.Gallery(label="Output", show_download_button=True, elem_classes="gallery",
                        interactive=False, show_share_button=True, container=True, format="png",
                        preview=True, object_fit="cover", columns=2, rows=2)

        # Wire every model slot to the generate button / prompt submit; each
        # finished image is also mirrored into the shared gallery.
        for m, o in zip(current_models, output):
            gen_event = gr.on(triggers=[gen_button.click, txt_input.submit], fn=gen_fn,
                            inputs=[m, txt_input, neg_input, height, width, steps, cfg, seed], outputs=[o],
                            concurrency_limit=None, queue=False) # Be sure to delete ", queue=False" when activating the stop button
            o.change(add_gallery, [o, m, gallery], [gallery])
            #stop_button.click(lambda: gr.update(interactive=False), None, stop_button, cancels=[gen_event])

        with gr.Column(scale=4):
            with gr.Accordion('Model selection'):
                model_choice = gr.CheckboxGroup(models, label = f'Choose up to {int(num_models)} different models from the {len(models)} available!', value=default_models, interactive=True)
                # Keep the visible image slots and hidden model names in sync
                # with the checkbox selection.
                model_choice.change(update_imgbox, model_choice, output)
                model_choice.change(extend_choices, model_choice, current_models)
                random_button.click(random_choices, None, model_choice)

    with gr.Tab('Single model'):
        # --- Tab 2: one chosen model, up to max_images images per click ---
        with gr.Column(scale=2):
            model_choice2 = gr.Dropdown(models, label='Choose model', value=models[0])
            with gr.Group():
                txt_input2 = gr.Textbox(label='Your prompt:', lines=4)
                neg_input2 = gr.Textbox(label='Negative prompt:', lines=1)
                with gr.Accordion("Advanced", open=False, visible=True):
                    with gr.Row():
                        width2 = gr.Slider(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
                        height2 = gr.Slider(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
                    with gr.Row():
                        steps2 = gr.Slider(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0)
                        cfg2 = gr.Slider(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0)
                    seed2 = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
                    seed_rand2 = gr.Button("Randomize Seed 🎲", size="sm", variant="secondary")
                    seed_rand2.click(randomize_seed, None, [seed2], queue=False)
            num_images = gr.Slider(1, max_images, value=max_images, step=1, label='Number of images')
            with gr.Row():
                gen_button2 = gr.Button('Generate', variant='primary', scale=2)
                #stop_button2 = gr.Button('Stop', variant='stop', interactive=False, scale=1)
                #gen_button2.click(lambda: gr.update(interactive=True), None, stop_button2)

        with gr.Column(scale=1):
            with gr.Group():
                with gr.Row():
                    output2 = [gr.Image(label='', show_download_button=True, elem_classes="output",
                                interactive=False, width=112, height=112, visible=True, format="png",
                                show_share_button=False, show_label=False) for _ in range(max_images)]

        with gr.Column(scale=2):
            gallery2 = gr.Gallery(label="Output", show_download_button=True, elem_classes="gallery",
                        interactive=False, show_share_button=True, container=True, format="png",
                        preview=True, object_fit="cover", columns=2, rows=2)

        for i, o in enumerate(output2):
            img_i = gr.Number(i, visible=False)
            # Only the first num_images slots stay visible; the lambda receives
            # the slot index and the slider value as component inputs.
            num_images.change(lambda i, n: gr.update(visible = (i < n)), [img_i, num_images], o, queue=False)
            gen_event2 = gr.on(triggers=[gen_button2.click, txt_input2.submit],
                            fn=lambda i, n, m, t1, t2, n1, n2, n3, n4, n5: gen_fn(m, t1, t2, n1, n2, n3, n4, n5) if (i < n) else None,
                            inputs=[img_i, num_images, model_choice2, txt_input2, neg_input2,
                                    height2, width2, steps2, cfg2, seed2], outputs=[o],
                            concurrency_limit=None, queue=False) # Be sure to delete ", queue=False" when activating the stop button
            o.change(add_gallery, [o, model_choice2, gallery2], [gallery2])
            #stop_button2.click(lambda: gr.update(interactive=False), None, stop_button2, cancels=[gen_event2])

    gr.Markdown("Based on the [TestGen](https://huggingface.co/spaces/derwahnsinn/TestGen) Space by derwahnsinn, the [SpacIO](https://huggingface.co/spaces/RdnUser77/SpacIO_v1) Space by RdnUser77 and Omnibus's Maximum Multiplier!")

#demo.queue(default_concurrency_limit=200, max_size=200)
demo.launch(show_api=False, max_threads=400)
# https://github.com/gradio-app/gradio/issues/6339
externalmod.py CHANGED
@@ -9,7 +9,7 @@ import re
9
  import tempfile
10
  import warnings
11
  from pathlib import Path
12
- from typing import TYPE_CHECKING, Callable
13
 
14
  import httpx
15
  import huggingface_hub
@@ -33,11 +33,15 @@ if TYPE_CHECKING:
33
  from gradio.interface import Interface
34
 
35
 
 
 
 
 
36
  @document()
37
  def load(
38
  name: str,
39
  src: str | None = None,
40
- hf_token: str | None = None,
41
  alias: str | None = None,
42
  **kwargs,
43
  ) -> Blocks:
@@ -48,7 +52,7 @@ def load(
48
  Parameters:
49
  name: the name of the model (e.g. "gpt2" or "facebook/bart-base") or space (e.g. "flax-community/spanish-gpt2"), can include the `src` as prefix (e.g. "models/facebook/bart-base")
50
  src: the source of the model: `models` or `spaces` (or leave empty if source is provided as a prefix in `name`)
51
- hf_token: optional access token for loading private Hugging Face Hub models or spaces. Find your token here: https://huggingface.co/settings/tokens. Warning: only provide this if you are loading a trusted private Space as it can be read by the Space you are loading.
52
  alias: optional string used as the name of the loaded model instead of the default name (only applies if loading a Space running Gradio 2.x)
53
  Returns:
54
  a Gradio Blocks object for the given model
@@ -65,7 +69,7 @@ def load(
65
  def load_blocks_from_repo(
66
  name: str,
67
  src: str | None = None,
68
- hf_token: str | None = None,
69
  alias: str | None = None,
70
  **kwargs,
71
  ) -> Blocks:
@@ -89,7 +93,7 @@ def load_blocks_from_repo(
89
  if src.lower() not in factory_methods:
90
  raise ValueError(f"parameter: src must be one of {factory_methods.keys()}")
91
 
92
- if hf_token is not None:
93
  if Context.hf_token is not None and Context.hf_token != hf_token:
94
  warnings.warn(
95
  """You are loading a model/Space with a different access token than the one you used to load a previous model/Space. This is not recommended, as it may cause unexpected behavior."""
@@ -100,12 +104,16 @@ def load_blocks_from_repo(
100
  return blocks
101
 
102
 
103
- def from_model(model_name: str, hf_token: str | None, alias: str | None, **kwargs):
 
 
104
  model_url = f"https://huggingface.co/{model_name}"
105
  api_url = f"https://api-inference.huggingface.co/models/{model_name}"
106
  print(f"Fetching model from: {model_url}")
107
 
108
- headers = {"Authorization": f"Bearer {hf_token}"} if hf_token is not None else {}
 
 
109
  response = httpx.request("GET", api_url, headers=headers)
110
  if response.status_code != 200:
111
  raise ModelNotFoundError(
@@ -115,7 +123,7 @@ def from_model(model_name: str, hf_token: str | None, alias: str | None, **kwarg
115
 
116
  headers["X-Wait-For-Model"] = "true"
117
  client = huggingface_hub.InferenceClient(
118
- model=model_name, headers=headers, token=hf_token, timeout=120,
119
  )
120
 
121
  # For tasks that are not yet supported by the InferenceClient
@@ -365,10 +373,14 @@ def from_model(model_name: str, hf_token: str | None, alias: str | None, **kwarg
365
  else:
366
  raise ValueError(f"Unsupported pipeline type: {p}")
367
 
368
- def query_huggingface_inference_endpoints(*data):
369
  if preprocess is not None:
370
  data = preprocess(*data)
371
- data = fn(*data) # type: ignore
 
 
 
 
372
  if postprocess is not None:
373
  data = postprocess(data) # type: ignore
374
  return data
@@ -380,7 +392,7 @@ def from_model(model_name: str, hf_token: str | None, alias: str | None, **kwarg
380
  "inputs": inputs,
381
  "outputs": outputs,
382
  "title": model_name,
383
- # "examples": examples,
384
  }
385
 
386
  kwargs = dict(interface_info, **kwargs)
@@ -391,19 +403,12 @@ def from_model(model_name: str, hf_token: str | None, alias: str | None, **kwarg
391
  def from_spaces(
392
  space_name: str, hf_token: str | None, alias: str | None, **kwargs
393
  ) -> Blocks:
394
- client = Client(
395
- space_name,
396
- hf_token=hf_token,
397
- download_files=False,
398
- _skip_components=False,
399
- )
400
-
401
  space_url = f"https://huggingface.co/spaces/{space_name}"
402
 
403
  print(f"Fetching Space from: {space_url}")
404
 
405
  headers = {}
406
- if hf_token is not None:
407
  headers["Authorization"] = f"Bearer {hf_token}"
408
 
409
  iframe_url = (
@@ -440,8 +445,7 @@ def from_spaces(
440
  "Blocks or Interface locally. You may find this Guide helpful: "
441
  "https://gradio.app/using_blocks_like_functions/"
442
  )
443
- if client.app_version < version.Version("4.0.0b14"):
444
- return from_spaces_blocks(space=space_name, hf_token=hf_token)
445
 
446
 
447
  def from_spaces_blocks(space: str, hf_token: str | None) -> Blocks:
@@ -486,7 +490,7 @@ def from_spaces_interface(
486
  config = external_utils.streamline_spaces_interface(config)
487
  api_url = f"{iframe_url}/api/predict/"
488
  headers = {"Content-Type": "application/json"}
489
- if hf_token is not None:
490
  headers["Authorization"] = f"Bearer {hf_token}"
491
 
492
  # The function should call the API with preprocessed data
@@ -526,6 +530,83 @@ def gr_Interface_load(
526
  src: str | None = None,
527
  hf_token: str | None = None,
528
  alias: str | None = None,
529
- **kwargs,
530
  ) -> Blocks:
531
- return load_blocks_from_repo(name, src, hf_token, alias)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
  import tempfile
10
  import warnings
11
  from pathlib import Path
12
+ from typing import TYPE_CHECKING, Callable, Literal
13
 
14
  import httpx
15
  import huggingface_hub
 
33
  from gradio.interface import Interface
34
 
35
 
36
# Optional token: only needed for private/gated models. A missing or empty
# HF_TOKEN env var both normalize to None.
HF_TOKEN = os.environ.get("HF_TOKEN") or None
server_timeout = 600  # timeout passed to huggingface_hub.InferenceClient below
38
+
39
+
40
  @document()
41
  def load(
42
  name: str,
43
  src: str | None = None,
44
+ hf_token: str | Literal[False] | None = None,
45
  alias: str | None = None,
46
  **kwargs,
47
  ) -> Blocks:
 
52
  Parameters:
53
  name: the name of the model (e.g. "gpt2" or "facebook/bart-base") or space (e.g. "flax-community/spanish-gpt2"), can include the `src` as prefix (e.g. "models/facebook/bart-base")
54
  src: the source of the model: `models` or `spaces` (or leave empty if source is provided as a prefix in `name`)
55
+ hf_token: optional access token for loading private Hugging Face Hub models or spaces. Will default to the locally saved token if not provided. Pass `token=False` if you don't want to send your token to the server. Find your token here: https://huggingface.co/settings/tokens. Warning: only provide a token if you are loading a trusted private Space as it can be read by the Space you are loading.
56
  alias: optional string used as the name of the loaded model instead of the default name (only applies if loading a Space running Gradio 2.x)
57
  Returns:
58
  a Gradio Blocks object for the given model
 
69
  def load_blocks_from_repo(
70
  name: str,
71
  src: str | None = None,
72
+ hf_token: str | Literal[False] | None = None,
73
  alias: str | None = None,
74
  **kwargs,
75
  ) -> Blocks:
 
93
  if src.lower() not in factory_methods:
94
  raise ValueError(f"parameter: src must be one of {factory_methods.keys()}")
95
 
96
+ if hf_token is not None and hf_token is not False:
97
  if Context.hf_token is not None and Context.hf_token != hf_token:
98
  warnings.warn(
99
  """You are loading a model/Space with a different access token than the one you used to load a previous model/Space. This is not recommended, as it may cause unexpected behavior."""
 
104
  return blocks
105
 
106
 
107
+ def from_model(
108
+ model_name: str, hf_token: str | Literal[False] | None, alias: str | None, **kwargs
109
+ ):
110
  model_url = f"https://huggingface.co/{model_name}"
111
  api_url = f"https://api-inference.huggingface.co/models/{model_name}"
112
  print(f"Fetching model from: {model_url}")
113
 
114
+ headers = (
115
+ {} if hf_token in [False, None] else {"Authorization": f"Bearer {hf_token}"}
116
+ )
117
  response = httpx.request("GET", api_url, headers=headers)
118
  if response.status_code != 200:
119
  raise ModelNotFoundError(
 
123
 
124
  headers["X-Wait-For-Model"] = "true"
125
  client = huggingface_hub.InferenceClient(
126
+ model=model_name, headers=headers, token=hf_token, timeout=server_timeout,
127
  )
128
 
129
  # For tasks that are not yet supported by the InferenceClient
 
373
  else:
374
  raise ValueError(f"Unsupported pipeline type: {p}")
375
 
376
def query_huggingface_inference_endpoints(*data, **kwargs):
    """Run the task pipeline (preprocess -> fn -> postprocess) captured from
    the enclosing scope, mapping HTTP 429 onto TooManyRequestsError.
    """
    if preprocess is not None:
        data = preprocess(*data)
    try:
        data = fn(*data, **kwargs)  # type: ignore
    except huggingface_hub.utils.HfHubHTTPError as e:
        if "429" in str(e):
            raise TooManyRequestsError() from e
        # Fixed: non-429 HTTP errors were silently swallowed, letting the
        # pre-inference `data` fall straight through to postprocess.
        raise
    if postprocess is not None:
        data = postprocess(data)  # type: ignore
    return data
 
392
  "inputs": inputs,
393
  "outputs": outputs,
394
  "title": model_name,
395
+ #"examples": examples,
396
  }
397
 
398
  kwargs = dict(interface_info, **kwargs)
 
403
  def from_spaces(
404
  space_name: str, hf_token: str | None, alias: str | None, **kwargs
405
  ) -> Blocks:
 
 
 
 
 
 
 
406
  space_url = f"https://huggingface.co/spaces/{space_name}"
407
 
408
  print(f"Fetching Space from: {space_url}")
409
 
410
  headers = {}
411
+ if hf_token not in [False, None]:
412
  headers["Authorization"] = f"Bearer {hf_token}"
413
 
414
  iframe_url = (
 
445
  "Blocks or Interface locally. You may find this Guide helpful: "
446
  "https://gradio.app/using_blocks_like_functions/"
447
  )
448
+ return from_spaces_blocks(space=space_name, hf_token=hf_token)
 
449
 
450
 
451
  def from_spaces_blocks(space: str, hf_token: str | None) -> Blocks:
 
490
  config = external_utils.streamline_spaces_interface(config)
491
  api_url = f"{iframe_url}/api/predict/"
492
  headers = {"Content-Type": "application/json"}
493
+ if hf_token not in [False, None]:
494
  headers["Authorization"] = f"Bearer {hf_token}"
495
 
496
  # The function should call the API with preprocessed data
 
530
  src: str | None = None,
531
  hf_token: str | None = None,
532
  alias: str | None = None,
533
+ **kwargs, # ignore
534
  ) -> Blocks:
535
+ try:
536
+ return load_blocks_from_repo(name, src, hf_token, alias)
537
+ except Exception as e:
538
+ print(e)
539
+ return gradio.Interface(lambda: None, ['text'], ['image'])
540
+
541
+
542
def list_uniq(l):
    """Return the unique elements of *l* in first-occurrence order.

    Equivalent to sorted(set(l), key=l.index) but O(n) instead of O(n^2):
    dict preserves insertion order, so fromkeys deduplicates while keeping
    each element's first position.
    """
    return list(dict.fromkeys(l))
544
+
545
+
546
def get_status(model_name: str):
    """Return the serverless Inference API status object for *model_name*.

    Fixed: the original built an AsyncInferenceClient, whose
    get_model_status is a coroutine function — calling it without awaiting
    handed callers (is_loadable) an un-awaited coroutine instead of a
    ModelStatus. Use the synchronous client in this sync call path.
    """
    from huggingface_hub import InferenceClient
    client = InferenceClient(token=HF_TOKEN, timeout=10)
    return client.get_model_status(model_name)
550
+
551
+
552
def is_loadable(model_name: str, force_gpu: bool = False):
    """Return True when the model's inference endpoint is usable
    (state Loadable/Loaded, optionally requiring GPU compute)."""
    try:
        status = get_status(model_name)
    except Exception as e:
        print(e)
        print(f"Couldn't load {model_name}.")
        return False
    # NOTE(review): status is dereferenced here before the None guard below —
    # assumes get_status never returns None; confirm against huggingface_hub.
    on_gpu = isinstance(status.compute_type, dict) and "gpu" in status.compute_type.keys()
    ok = status is not None and status.state in ["Loadable", "Loaded"] and (not force_gpu or on_gpu)
    if not ok:
        print(f"Couldn't load {model_name}. Model state:'{status.state}', GPU:{on_gpu}")
    return ok
563
+
564
+
565
def find_model_list(author: str = "", tags: list[str] | None = None, not_tag="", sort: str = "last_modified", limit: int = 30, force_gpu=False, check_status=False):
    """Search the Hub for diffusers models and return up to *limit* repo ids.

    Parameters:
        author: restrict to one author (empty = any).
        tags: extra tags required in addition to "diffusers".
              (Fixed: was a mutable default argument `[]`.)
        not_tag: exclude models carrying this tag.
        sort: Hub sort key; falls back to "last_modified" when falsy.
        limit: maximum number of repo ids returned.
        force_gpu: with check_status, only accept GPU-backed endpoints.
        check_status: verify each model is loadable before including it.
    Returns:
        list of repo id strings (empty on API failure).
    """
    from huggingface_hub import HfApi
    api = HfApi(token=HF_TOKEN)
    default_tags = ["diffusers"]
    if not sort: sort = "last_modified"
    # Over-fetch: filtering below discards entries, so request more than
    # `limit` from the API. (Fixed: the original reassigned `limit` itself,
    # so the cap check compared against the inflated fetch size and the
    # `limit` parameter never actually capped the result.)
    fetch_limit = limit * 20 if check_status and force_gpu else limit * 5
    models = []
    try:
        model_infos = api.list_models(author=author, #task="text-to-image",
                                      tags=list_uniq(default_tags + (tags or [])), cardData=True, sort=sort, limit=fetch_limit)
    except Exception as e:
        print("Error: Failed to list models.")
        print(e)
        return models
    for model in model_infos:
        # Public models always qualify; with a token, private/gated ones do too.
        if (not model.private and not model.gated) or HF_TOKEN is not None:
            loadable = is_loadable(model.id, force_gpu) if check_status else True
            if (not_tag and not_tag in model.tags) or not loadable: continue
            models.append(model.id)
            if len(models) == limit: break
    return models
586
+
587
+
588
def save_image(image, savefile, modelname, prompt, nprompt, height=0, width=0, steps=0, cfg=0, seed=-1):
    """Save *image* to *savefile* as PNG with generation metadata embedded.

    The prompt, negative prompt, model name and any non-default generation
    parameters are serialized as JSON into a PNG text chunk named "metadata".

    Returns:
        the absolute path of the written file as a string.
    Raises:
        Exception: if writing fails (chained to the underlying error).
    """
    from PIL import PngImagePlugin  # removed unused `Image` import
    import json
    try:
        metadata = {"prompt": prompt, "negative_prompt": nprompt, "Model": {"Model": modelname.split("/")[-1]}}
        # Only record parameters the caller actually set (0 / -1 mean "default").
        if steps > 0: metadata["num_inference_steps"] = steps
        if cfg > 0: metadata["guidance_scale"] = cfg
        if seed != -1: metadata["seed"] = seed
        if width > 0 and height > 0: metadata["resolution"] = f"{width} x {height}"
        metadata_str = json.dumps(metadata)
        info = PngImagePlugin.PngInfo()
        info.add_text("metadata", metadata_str)
        image.save(savefile, "PNG", pnginfo=info)
        return str(Path(savefile).resolve())
    except Exception as e:
        print(f"Failed to save image file: {e}")
        # Fixed: message was a dangling placeholder-less f-string ("...file:").
        raise Exception("Failed to save image file.") from e
605
+
606
+
607
def randomize_seed():
    """Reseed the global RNG from OS entropy and return a fresh 32-bit seed."""
    import random
    random.seed()  # deliberate side effect: reseed the shared RNG, as before
    return random.randint(0, 2**32 - 1)