AI-trainer1 committed on
Commit
5943e10
·
verified ·
1 Parent(s): 869288e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +77 -73
app.py CHANGED
@@ -1,56 +1,3 @@
1
- # import gradio as gr
2
-
3
-
4
- # model1 = gr.load("models/Jonny001/NSFW_master")
5
- # model2 = gr.load("models/Jonny001/Alita-v1")
6
- # model3 = gr.load("models/lexa862/NSFWmodel")
7
- # model4 = gr.load("models/Keltezaa/flux_pussy_NSFW")
8
- # model5 = gr.load("models/prashanth970/flux-lora-uncensored")
9
-
10
- # def generate_images(text, selected_model):
11
- # if selected_model == "Model 1 (NSFW Master)":
12
- # model = model1
13
- # elif selected_model == "Model 2 (Alita)":
14
- # model = model2
15
- # elif selected_model == "Model 3 (Lexa NSFW)":
16
- # model = model3
17
- # elif selected_model == "Model 4 (Flux NSFW)":
18
- # model = model4
19
- # elif selected_model == "Model 5 (Lora Uncensored)":
20
- # model = model5
21
- # else:
22
- # return "Invalid model selection."
23
-
24
- # results = []
25
- # for i in range(3):
26
- # modified_text = f"{text} variation {i+1}"
27
- # result = model(modified_text)
28
- # results.append(result)
29
-
30
- # return results
31
-
32
- # interface = gr.Interface(
33
- # fn=generate_images,
34
- # inputs=[
35
- # gr.Textbox(label="Type here your imagination:", placeholder="Type your prompt..."),
36
- # gr.Radio(
37
- # ["Model 1 (NSFW Master)", "Model 2 (Alita)", "Model 3 (Lexa NSFW)", "Model 4 (Flux NSFW)", "Model 5 (Lora Uncensored)"],
38
- # label="Select Model (Try All Models & Get Different Results)",
39
- # value="Model 1 (NSFW Master)",
40
- # ),
41
- # ],
42
- # outputs=[
43
- # gr.Image(label="Generated Image 1"),
44
- # gr.Image(label="Generated Image 2"),
45
- # gr.Image(label="Generated Image 3"),
46
- # ],
47
- # theme="Yntec/HaleyCH_Theme_Orange",
48
- # description="⚠ Sorry for the inconvenience. The models are currently running on the CPU, which might affect performance. We appreciate your understanding.",
49
- # cache_examples=False,
50
- # )
51
-
52
- # interface.launch()
53
-
54
  import gradio as gr
55
  from transformers import pipeline
56
  import torch
@@ -58,34 +5,34 @@ import torch
58
  # Maximize CPU usage
59
  torch.set_num_threads(torch.get_num_threads() * 2)
60
 
61
- # Load models using Hugging Face pipelines
62
- model1 = pipeline("text-to-image", model="Jonny001/NSFW_master", device_map="auto")
63
- model2 = pipeline("text-to-image", model="Jonny001/Alita-v1", device_map="auto")
64
- model3 = pipeline("text-to-image", model="lexa862/NSFWmodel", device_map="auto")
65
- model4 = pipeline("text-to-image", model="Keltezaa/flux_pussy_NSFW", device_map="auto")
66
- model5 = pipeline("text-to-image", model="prashanth970/flux-lora-uncensored", device_map="auto")
67
 
68
- # Function to generate images
69
  def generate_images(text, selected_model):
70
- models = {
71
- "Model 1 (NSFW Master)": model1,
72
- "Model 2 (Alita)": model2,
73
- "Model 3 (Lexa NSFW)": model3,
74
- "Model 4 (Flux NSFW)": model4,
75
- "Model 5 (Lora Uncensored)": model5,
76
- }
 
 
 
 
 
77
 
78
- model = models.get(selected_model, model1)
79
  results = []
80
-
81
  for i in range(3):
82
  modified_text = f"{text} variation {i+1}"
83
  result = model(modified_text)
84
  results.append(result)
85
-
86
  return results
87
 
88
- # Gradio interface
89
  interface = gr.Interface(
90
  fn=generate_images,
91
  inputs=[
@@ -102,11 +49,68 @@ interface = gr.Interface(
102
  gr.Image(label="Generated Image 3"),
103
  ],
104
  theme="Yntec/HaleyCH_Theme_Orange",
105
- description="⚠ Models are running on CPU for optimized performance. Your patience is appreciated!",
106
  cache_examples=False,
107
  )
108
 
109
- # Launch the interface
110
  interface.launch()
111
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
112
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
  from transformers import pipeline
3
  import torch
 
5
# Use every available CPU core without oversubscribing. The previous
# setting (`torch.get_num_threads() * 2`) doubled the thread count, which
# makes threads contend for the same cores and usually *slows down* CPU
# inference rather than speeding it up.
import os

torch.set_num_threads(os.cpu_count() or 1)
7
 
8
# Remote text-to-image models served from the Hugging Face Hub, loaded
# through Gradio's model-loading helper. Order matters: it fixes which
# label in the UI maps to which model.
_MODEL_REPOS = (
    "models/Jonny001/NSFW_master",
    "models/Jonny001/Alita-v1",
    "models/lexa862/NSFWmodel",
    "models/Keltezaa/flux_pussy_NSFW",
    "models/prashanth970/flux-lora-uncensored",
)
model1, model2, model3, model4, model5 = (gr.load(repo) for repo in _MODEL_REPOS)
 
13
 
 
14
def generate_images(text, selected_model):
    """Generate three images for *text* using the model chosen in the UI.

    Args:
        text: The user's prompt.
        selected_model: Label of the model to use (one of the Radio choices).

    Returns:
        A list of three model outputs (one per prompt variation), or the
        string "Invalid model selection." for an unknown label.
    """
    # Dispatch table replaces the previous five-branch if/elif chain;
    # it is the single place the UI labels are mapped to models.
    models = {
        "Model 1 (NSFW Master)": model1,
        "Model 2 (Alita)": model2,
        "Model 3 (Lexa NSFW)": model3,
        "Model 4 (Flux NSFW)": model4,
        "Model 5 (Lora Uncensored)": model5,
    }

    model = models.get(selected_model)
    if model is None:
        # NOTE(review): the Interface declares three Image outputs, so a
        # bare string here will not render cleanly in Gradio; kept for
        # backward compatibility — consider raising gr.Error(...) instead.
        return "Invalid model selection."

    # Append " variation 1..3" so the three results differ from each other.
    return [model(f"{text} variation {i}") for i in range(1, 4)]
35
 
 
36
  interface = gr.Interface(
37
  fn=generate_images,
38
  inputs=[
 
49
  gr.Image(label="Generated Image 3"),
50
  ],
51
  theme="Yntec/HaleyCH_Theme_Orange",
52
+ description="⚠ Sorry for the inconvenience. The models are currently running on the CPU, which might affect performance. We appreciate your understanding.",
53
  cache_examples=False,
54
  )
55
 
 
56
# Start the Gradio app (blocking call; serves the web UI).
interface.launch()
57
 
58
+ # import gradio as gr
59
+ # from transformers import pipeline
60
+ # import torch
61
+
62
+ # # Maximize CPU usage
63
+ # torch.set_num_threads(torch.get_num_threads() * 2)
64
+
65
+ # # Load models using Hugging Face pipelines
66
+ # model1 = pipeline("text-to-image", model="Jonny001/NSFW_master", device_map="auto")
67
+ # model2 = pipeline("text-to-image", model="Jonny001/Alita-v1", device_map="auto")
68
+ # model3 = pipeline("text-to-image", model="lexa862/NSFWmodel", device_map="auto")
69
+ # model4 = pipeline("text-to-image", model="Keltezaa/flux_pussy_NSFW", device_map="auto")
70
+ # model5 = pipeline("text-to-image", model="prashanth970/flux-lora-uncensored", device_map="auto")
71
+
72
+ # # Function to generate images
73
+ # def generate_images(text, selected_model):
74
+ # models = {
75
+ # "Model 1 (NSFW Master)": model1,
76
+ # "Model 2 (Alita)": model2,
77
+ # "Model 3 (Lexa NSFW)": model3,
78
+ # "Model 4 (Flux NSFW)": model4,
79
+ # "Model 5 (Lora Uncensored)": model5,
80
+ # }
81
+
82
+ # model = models.get(selected_model, model1)
83
+ # results = []
84
+
85
+ # for i in range(3):
86
+ # modified_text = f"{text} variation {i+1}"
87
+ # result = model(modified_text)
88
+ # results.append(result)
89
+
90
+ # return results
91
+
92
+ # # Gradio interface
93
+ # interface = gr.Interface(
94
+ # fn=generate_images,
95
+ # inputs=[
96
+ # gr.Textbox(label="Type here your imagination:", placeholder="Type your prompt..."),
97
+ # gr.Radio(
98
+ # ["Model 1 (NSFW Master)", "Model 2 (Alita)", "Model 3 (Lexa NSFW)", "Model 4 (Flux NSFW)", "Model 5 (Lora Uncensored)"],
99
+ # label="Select Model (Try All Models & Get Different Results)",
100
+ # value="Model 1 (NSFW Master)",
101
+ # ),
102
+ # ],
103
+ # outputs=[
104
+ # gr.Image(label="Generated Image 1"),
105
+ # gr.Image(label="Generated Image 2"),
106
+ # gr.Image(label="Generated Image 3"),
107
+ # ],
108
+ # theme="Yntec/HaleyCH_Theme_Orange",
109
+ # description="⚠ Models are running on CPU for optimized performance. Your patience is appreciated!",
110
+ # cache_examples=False,
111
+ # )
112
+
113
+ # # Launch the interface
114
+ # interface.launch()
115
+
116