# Hugging Face Spaces page header ("Spaces: Runtime error") — extraction
# residue, commented out so this file parses as Python.
# NOTE: A previous CPU-only revision of this app (same wiring, three output
# images, no device selection) was kept here as commented-out code; it has
# been removed in favor of the current implementation below.
import gradio as gr
import torch

# Select the compute device once at startup: prefer CUDA when a GPU is present.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load each hosted Hugging Face model once at import time so requests reuse them.
# NOTE(review): gr.load() proxies hosted models; whether it honors a `device`
# kwarg depends on the installed Gradio version — confirm it is not silently
# ignored (or rejected with a TypeError) by the release this Space runs.
model1 = gr.load("models/Jonny001/NSFW_master", device=device)
model2 = gr.load("models/Jonny001/Alita-v1", device=device)
model3 = gr.load("models/lexa862/NSFWmodel", device=device)
model4 = gr.load("models/Keltezaa/flux_pussy_NSFW", device=device)
model5 = gr.load("models/prashanth970/flux-lora-uncensored", device=device)
def generate_images(text, selected_model):
    """Generate two image variations of a prompt with the chosen model.

    Args:
        text: The user's prompt.
        selected_model: One of the radio-button labels; each maps to a
            model preloaded at module level.

    Returns:
        A list of two generated images (one per prompt variation), or the
        error string ``"Invalid model selection."`` for an unknown label
        (unreachable through the UI, which restricts choices to the radio
        options, but kept as a guard for programmatic callers).
    """
    # Explicit chain (rather than a dict of the models) so an invalid label
    # returns early without ever touching the module-level model globals.
    if selected_model == "Model 1 (NSFW Master)":
        model = model1
    elif selected_model == "Model 2 (Alita)":
        model = model2
    elif selected_model == "Model 3 (Lexa NSFW)":
        model = model3
    elif selected_model == "Model 4 (Flux NSFW)":
        model = model4
    elif selected_model == "Model 5 (Lora Uncensored)":
        model = model5
    else:
        return "Invalid model selection."

    # Two variations: suffix the prompt so each call differs slightly,
    # matching the two gr.Image outputs declared on the interface.
    results = []
    for i in range(2):
        modified_text = f"{text} variation {i + 1}"
        results.append(model(modified_text))
    return results
# Gradio interface | |
# Gradio UI wiring: one prompt box and a model selector in, two images out
# (the two outputs match the two variations generate_images returns).
# The description is derived from `device` so it stays accurate whether the
# models loaded on CPU or CUDA (the old text hard-coded "CPU").
interface = gr.Interface(
    fn=generate_images,
    inputs=[
        gr.Textbox(label="Type here your imagination:", placeholder="Type your prompt..."),
        gr.Radio(
            ["Model 1 (NSFW Master)", "Model 2 (Alita)", "Model 3 (Lexa NSFW)", "Model 4 (Flux NSFW)", "Model 5 (Lora Uncensored)"],
            label="Select Model (Try All Models & Get Different Results)",
            value="Model 1 (NSFW Master)",
        ),
    ],
    outputs=[
        gr.Image(label="Generated Image 1"),
        gr.Image(label="Generated Image 2"),
    ],
    theme="Yntec/HaleyCH_Theme_Orange",
    description=(
        "⚠ Sorry for the inconvenience. The models are currently running on the "
        f"{device.upper()}, which might affect performance. "
        "We appreciate your understanding."
    ),
    cache_examples=False,
)

interface.launch()