Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -55,34 +55,30 @@ import gradio as gr
 from transformers import pipeline
 import torch
 
-#
-
+# Maximize CPU usage
+torch.set_num_threads(torch.get_num_threads() * 2)
 
-# Load models using
-model1 = pipeline("text-to-image", model="Jonny001/NSFW_master",
-model2 = pipeline("text-to-image", model="Jonny001/Alita-v1",
-model3 = pipeline("text-to-image", model="lexa862/NSFWmodel",
-model4 = pipeline("text-to-image", model="Keltezaa/flux_pussy_NSFW",
-model5 = pipeline("text-to-image", model="prashanth970/flux-lora-uncensored",
+# Load models using Hugging Face pipelines
+model1 = pipeline("text-to-image", model="Jonny001/NSFW_master", device_map="auto")
+model2 = pipeline("text-to-image", model="Jonny001/Alita-v1", device_map="auto")
+model3 = pipeline("text-to-image", model="lexa862/NSFWmodel", device_map="auto")
+model4 = pipeline("text-to-image", model="Keltezaa/flux_pussy_NSFW", device_map="auto")
+model5 = pipeline("text-to-image", model="prashanth970/flux-lora-uncensored", device_map="auto")
 
 # Function to generate images
 def generate_images(text, selected_model):
-
-
-
-
-
-
-
-
-
-        model = model5
-    else:
-        return "Invalid model selection."
-
-    # Generate images
+    models = {
+        "Model 1 (NSFW Master)": model1,
+        "Model 2 (Alita)": model2,
+        "Model 3 (Lexa NSFW)": model3,
+        "Model 4 (Flux NSFW)": model4,
+        "Model 5 (Lora Uncensored)": model5,
+    }
+
+    model = models.get(selected_model, model1)
     results = []
-
+
+    for i in range(3):
         modified_text = f"{text} variation {i+1}"
         result = model(modified_text)
         results.append(result)
@@ -103,11 +99,14 @@ interface = gr.Interface(
     outputs=[
         gr.Image(label="Generated Image 1"),
         gr.Image(label="Generated Image 2"),
+        gr.Image(label="Generated Image 3"),
     ],
     theme="Yntec/HaleyCH_Theme_Orange",
-    description="⚠
+    description="⚠ Models are running on CPU for optimized performance. Your patience is appreciated!",
     cache_examples=False,
 )
 
 # Launch the interface
 interface.launch()
+
+
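Note: transformers.pipeline does not offer a "text-to-image" task, so the pipeline("text-to-image", ...) calls above would raise as soon as app.py is imported, which is consistent with the Space's runtime-error status. Text-to-image checkpoints on the Hub are normally loaded through diffusers instead. Below is a minimal sketch of that loading step, assuming the listed repos ship diffusers-format weights; the helper name generate_images and the three-variation prompts mirror the diff, everything else is illustrative rather than the Space's actual code.

# Sketch only: text-to-image via diffusers rather than transformers.pipeline.
from diffusers import AutoPipelineForText2Image
import torch

# Assumes this repo hosts diffusers-format weights; float32 keeps it runnable on CPU.
pipe = AutoPipelineForText2Image.from_pretrained(
    "Jonny001/NSFW_master",
    torch_dtype=torch.float32,
)

def generate_images(text, n=3):
    # Same "variation" prompting as the app; returns a list of PIL images.
    return [pipe(f"{text} variation {i+1}").images[0] for i in range(n)]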