import gradio as gr
import torch

# Maximize CPU usage
torch.set_num_threads(torch.get_num_threads() * 2)
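# Note: get_num_threads() usually already reflects the available cores, so doubling it
# oversubscribes the CPU; treat this as a tuning knob rather than a guaranteed speedup.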

# Load the hosted models (served through the Hugging Face Inference API)
model1 = gr.load("models/Jonny001/NSFW_master")
model2 = gr.load("models/Jonny001/Alita-v1")
model3 = gr.load("models/lexa862/NSFWmodel")
model4 = gr.load("models/Keltezaa/flux_pussy_NSFW")
model5 = gr.load("models/prashanth970/flux-lora-uncensored")
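# Each interface returned by gr.load can be called like a function: passing a prompt
# runs the hosted model and returns the generated image (typically as a file path,
# depending on the Gradio version).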

def generate_images(text, selected_model):
    """Generate three prompt variations with the model selected in the UI."""
    if selected_model == "Model 1 (NSFW Master)":
        model = model1
    elif selected_model == "Model 2 (Alita)":
        model = model2
    elif selected_model == "Model 3 (Lexa NSFW)":
        model = model3
    elif selected_model == "Model 4 (Flux NSFW)":
        model = model4
    elif selected_model == "Model 5 (Lora Uncensored)":
        model = model5
    else:
        # Raise a UI error instead of returning a string to the three Image outputs
        raise gr.Error("Invalid model selection.")
    
    # Produce three loosely varied outputs by appending a variation tag to the prompt
    results = []
    for i in range(3):
        modified_text = f"{text} variation {i+1}"
        result = model(modified_text)
        results.append(result)
    
    return results

interface = gr.Interface(
    fn=generate_images,
    inputs=[
        gr.Textbox(label="Type your imagination here:", placeholder="Type your prompt..."),
        gr.Radio(
            ["Model 1 (NSFW Master)", "Model 2 (Alita)", "Model 3 (Lexa NSFW)", "Model 4 (Flux NSFW)", "Model 5 (Lora Uncensored)"],
            label="Select Model (Try All Models & Get Different Results)",
            value="Model 1 (NSFW Master)",
        ),
    ],
    outputs=[
        gr.Image(label="Generated Image 1"),
        gr.Image(label="Generated Image 2"),
        gr.Image(label="Generated Image 3"),
    ],
    theme="Yntec/HaleyCH_Theme_Orange",
    description="⚠ Sorry for the inconvenience. The models are currently running on the CPU, which might affect performance. We appreciate your understanding.",
    cache_examples=False,
)

interface.launch()

# Disabled alternative: run the models locally instead of through the hosted API.
# Note: transformers.pipeline() has no "text-to-image" task, so this block could not run
# as originally written; local text-to-image inference goes through diffusers instead.
# The sketch below assumes each repo ships a full diffusers pipeline (a model_index.json);
# LoRA-only repos would need load_lora_weights() on a matching base pipeline.

# import gradio as gr
# import torch
# from diffusers import AutoPipelineForText2Image

# # Maximize CPU usage
# torch.set_num_threads(torch.get_num_threads() * 2)

# # Load the pipelines locally (CPU by default; pass torch_dtype / a device as needed)
# model1 = AutoPipelineForText2Image.from_pretrained("Jonny001/NSFW_master")
# model2 = AutoPipelineForText2Image.from_pretrained("Jonny001/Alita-v1")
# model3 = AutoPipelineForText2Image.from_pretrained("lexa862/NSFWmodel")
# model4 = AutoPipelineForText2Image.from_pretrained("Keltezaa/flux_pussy_NSFW")
# model5 = AutoPipelineForText2Image.from_pretrained("prashanth970/flux-lora-uncensored")

# # Function to generate images
# def generate_images(text, selected_model):
#     models = {
#         "Model 1 (NSFW Master)": model1,
#         "Model 2 (Alita)": model2,
#         "Model 3 (Lexa NSFW)": model3,
#         "Model 4 (Flux NSFW)": model4,
#         "Model 5 (Lora Uncensored)": model5,
#     }
    
#     model = models.get(selected_model, model1)
#     results = []
    
#     for i in range(3):
#         modified_text = f"{text} variation {i+1}"
#         result = model(modified_text).images[0]  # diffusers pipelines return an object with .images
#         results.append(result)

#     return results

# # Gradio interface
# interface = gr.Interface(
#     fn=generate_images,
#     inputs=[
#         gr.Textbox(label="Type here your imagination:", placeholder="Type your prompt..."),
#         gr.Radio(
#             ["Model 1 (NSFW Master)", "Model 2 (Alita)", "Model 3 (Lexa NSFW)", "Model 4 (Flux NSFW)", "Model 5 (Lora Uncensored)"],
#             label="Select Model (Try All Models & Get Different Results)",
#             value="Model 1 (NSFW Master)",
#         ),
#     ],
#     outputs=[
#         gr.Image(label="Generated Image 1"),
#         gr.Image(label="Generated Image 2"),
#         gr.Image(label="Generated Image 3"),
#     ],
#     theme="Yntec/HaleyCH_Theme_Orange",
#     description="⚠ Models are currently running on CPU, so generation may be slow. Your patience is appreciated!",
#     cache_examples=False,
# )

# # Launch the interface
# interface.launch()