# NOTE(review): the three lines that were here ("Spaces:" / "Runtime error" x2)
# were Hugging Face Spaces build-log residue pasted into the source, not code.
import gradio as gr
from transformers import pipeline
import torch

# NOTE(review): doubling the thread count oversubscribes the physical cores —
# torch already defaults to one thread per core, so "maximize CPU usage" here
# is likely counterproductive; confirm with a benchmark before keeping it.
torch.set_num_threads(torch.get_num_threads() * 2)
# Load each Hub model as a callable Gradio interface handle.
# gr.load performs network I/O at import time; a failure here prevents startup.
model1 = gr.load("models/Jonny001/NSFW_master")
model2 = gr.load("models/Jonny001/Alita-v1")
model3 = gr.load("models/lexa862/NSFWmodel")
model4 = gr.load("models/Keltezaa/flux_pussy_NSFW")
model5 = gr.load("models/prashanth970/flux-lora-uncensored")
def generate_images(text, selected_model):
    """Generate three image variations of *text* with the chosen model.

    Args:
        text: The user's prompt.
        selected_model: One of the radio-button labels defined in the UI.

    Returns:
        A list of three generation results (one per prompt variation),
        matching the interface's three Image outputs.

    Raises:
        ValueError: If *selected_model* is not a known label. (The original
            returned a bare string here, which cannot be unpacked into the
            three declared Image outputs.)
    """
    if selected_model == "Model 1 (NSFW Master)":
        model = model1
    elif selected_model == "Model 2 (Alita)":
        model = model2
    elif selected_model == "Model 3 (Lexa NSFW)":
        model = model3
    elif selected_model == "Model 4 (Flux NSFW)":
        model = model4
    elif selected_model == "Model 5 (Lora Uncensored)":
        model = model5
    else:
        raise ValueError(f"Invalid model selection: {selected_model!r}")

    # Vary the prompt slightly per call so the three outputs differ.
    results = []
    for i in range(3):
        results.append(model(f"{text} variation {i + 1}"))
    return results
# Build the UI: one prompt textbox + model selector in, three images out.
# The Radio choices must match the labels tested in generate_images exactly.
interface = gr.Interface(
    fn=generate_images,
    inputs=[
        gr.Textbox(label="Type here your imagination:", placeholder="Type your prompt..."),
        gr.Radio(
            ["Model 1 (NSFW Master)", "Model 2 (Alita)", "Model 3 (Lexa NSFW)", "Model 4 (Flux NSFW)", "Model 5 (Lora Uncensored)"],
            label="Select Model (Try All Models & Get Different Results)",
            value="Model 1 (NSFW Master)",
        ),
    ],
    outputs=[
        gr.Image(label="Generated Image 1"),
        gr.Image(label="Generated Image 2"),
        gr.Image(label="Generated Image 3"),
    ],
    theme="Yntec/HaleyCH_Theme_Orange",
    description="⚠ Sorry for the inconvenience. The models are currently running on the CPU, which might affect performance. We appreciate your understanding.",
    cache_examples=False,
)

interface.launch()
# NOTE(review): the block below is a commented-out alternative implementation
# kept for reference; it duplicates the live code above. It appears untested:
# "text-to-image" is not a supported transformers.pipeline task — image
# generation models of this kind are normally loaded via diffusers — confirm
# before resurrecting it. Prefer deleting dead code in a future cleanup.
# import gradio as gr
# from transformers import pipeline
# import torch
# # Maximize CPU usage
# torch.set_num_threads(torch.get_num_threads() * 2)
# # Load models using Hugging Face pipelines
# model1 = pipeline("text-to-image", model="Jonny001/NSFW_master", device_map="auto")
# model2 = pipeline("text-to-image", model="Jonny001/Alita-v1", device_map="auto")
# model3 = pipeline("text-to-image", model="lexa862/NSFWmodel", device_map="auto")
# model4 = pipeline("text-to-image", model="Keltezaa/flux_pussy_NSFW", device_map="auto")
# model5 = pipeline("text-to-image", model="prashanth970/flux-lora-uncensored", device_map="auto")
# # Function to generate images
# def generate_images(text, selected_model):
#     models = {
#         "Model 1 (NSFW Master)": model1,
#         "Model 2 (Alita)": model2,
#         "Model 3 (Lexa NSFW)": model3,
#         "Model 4 (Flux NSFW)": model4,
#         "Model 5 (Lora Uncensored)": model5,
#     }
#     model = models.get(selected_model, model1)
#     results = []
#     for i in range(3):
#         modified_text = f"{text} variation {i+1}"
#         result = model(modified_text)
#         results.append(result)
#     return results
# # Gradio interface
# interface = gr.Interface(
#     fn=generate_images,
#     inputs=[
#         gr.Textbox(label="Type here your imagination:", placeholder="Type your prompt..."),
#         gr.Radio(
#             ["Model 1 (NSFW Master)", "Model 2 (Alita)", "Model 3 (Lexa NSFW)", "Model 4 (Flux NSFW)", "Model 5 (Lora Uncensored)"],
#             label="Select Model (Try All Models & Get Different Results)",
#             value="Model 1 (NSFW Master)",
#         ),
#     ],
#     outputs=[
#         gr.Image(label="Generated Image 1"),
#         gr.Image(label="Generated Image 2"),
#         gr.Image(label="Generated Image 3"),
#     ],
#     theme="Yntec/HaleyCH_Theme_Orange",
#     description="⚠ Models are running on CPU for optimized performance. Your patience is appreciated!",
#     cache_examples=False,
# )
# # Launch the interface
# interface.launch()