AI-trainer1 committed
Commit d047d28 · verified · 1 Parent(s): 9860f3a

Update app.py

Files changed (1)
  1. app.py +67 -8
app.py CHANGED
@@ -1,13 +1,71 @@
+# import gradio as gr
+
+
+# model1 = gr.load("models/Jonny001/NSFW_master")
+# model2 = gr.load("models/Jonny001/Alita-v1")
+# model3 = gr.load("models/lexa862/NSFWmodel")
+# model4 = gr.load("models/Keltezaa/flux_pussy_NSFW")
+# model5 = gr.load("models/prashanth970/flux-lora-uncensored")
+
+# def generate_images(text, selected_model):
+#     if selected_model == "Model 1 (NSFW Master)":
+#         model = model1
+#     elif selected_model == "Model 2 (Alita)":
+#         model = model2
+#     elif selected_model == "Model 3 (Lexa NSFW)":
+#         model = model3
+#     elif selected_model == "Model 4 (Flux NSFW)":
+#         model = model4
+#     elif selected_model == "Model 5 (Lora Uncensored)":
+#         model = model5
+#     else:
+#         return "Invalid model selection."
+
+#     results = []
+#     for i in range(3):
+#         modified_text = f"{text} variation {i+1}"
+#         result = model(modified_text)
+#         results.append(result)
+
+#     return results
+
+# interface = gr.Interface(
+#     fn=generate_images,
+#     inputs=[
+#         gr.Textbox(label="Type here your imagination:", placeholder="Type your prompt..."),
+#         gr.Radio(
+#             ["Model 1 (NSFW Master)", "Model 2 (Alita)", "Model 3 (Lexa NSFW)", "Model 4 (Flux NSFW)", "Model 5 (Lora Uncensored)"],
+#             label="Select Model (Try All Models & Get Different Results)",
+#             value="Model 1 (NSFW Master)",
+#         ),
+#     ],
+#     outputs=[
+#         gr.Image(label="Generated Image 1"),
+#         gr.Image(label="Generated Image 2"),
+#         gr.Image(label="Generated Image 3"),
+#     ],
+#     theme="Yntec/HaleyCH_Theme_Orange",
+#     description="⚠ Sorry for the inconvenience. The models are currently running on the CPU, which might affect performance. We appreciate your understanding.",
+#     cache_examples=False,
+# )
+
+# interface.launch()
+
 import gradio as gr
+import torch
 
+# Check if GPU is available
+device = "cuda" if torch.cuda.is_available() else "cpu"
 
-model1 = gr.load("models/Jonny001/NSFW_master")
-model2 = gr.load("models/Jonny001/Alita-v1")
-model3 = gr.load("models/lexa862/NSFWmodel")
-model4 = gr.load("models/Keltezaa/flux_pussy_NSFW")
-model5 = gr.load("models/prashanth970/flux-lora-uncensored")
+# Load models on GPU if available, otherwise fallback to CPU
+model1 = gr.load("models/Jonny001/NSFW_master", device=device)  # GPU
+model2 = gr.load("models/Jonny001/Alita-v1", device=device)  # GPU
+model3 = gr.load("models/lexa862/NSFWmodel", device=device)  # GPU
+model4 = gr.load("models/Keltezaa/flux_pussy_NSFW", device=device)  # GPU
+model5 = gr.load("models/prashanth970/flux-lora-uncensored", device=device)  # GPU
 
 def generate_images(text, selected_model):
+    # Model selection logic
     if selected_model == "Model 1 (NSFW Master)":
         model = model1
     elif selected_model == "Model 2 (Alita)":
@@ -21,14 +79,16 @@ def generate_images(text, selected_model):
     else:
         return "Invalid model selection."
 
+    # Generate two variations for each input prompt
     results = []
-    for i in range(3):
+    for i in range(2):
         modified_text = f"{text} variation {i+1}"
         result = model(modified_text)
         results.append(result)
 
     return results
 
+# Gradio interface
 interface = gr.Interface(
     fn=generate_images,
     inputs=[
@@ -42,11 +102,10 @@ interface = gr.Interface(
     outputs=[
         gr.Image(label="Generated Image 1"),
         gr.Image(label="Generated Image 2"),
-        gr.Image(label="Generated Image 3"),
     ],
     theme="Yntec/HaleyCH_Theme_Orange",
     description="⚠ Sorry for the inconvenience. The models are currently running on the CPU, which might affect performance. We appreciate your understanding.",
     cache_examples=False,
 )
 
-interface.launch()
+interface.launch()
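
For reference, a minimal standalone sketch of the two patterns this commit introduces: CUDA detection with a CPU fallback, and building the numbered prompt variations that feed the two image outputs. The helper make_variations is hypothetical and only illustrates the loop in app.py; whether gr.load accepts a device keyword is an assumption made by the committed code rather than documented Gradio behavior.

import torch

# Prefer the GPU when PyTorch can see one, otherwise stay on the CPU,
# mirroring the device check added in app.py.
device = "cuda" if torch.cuda.is_available() else "cpu"

def make_variations(prompt: str, n: int = 2) -> list[str]:
    # app.py appends "variation {i+1}" to the prompt once per output slot.
    return [f"{prompt} variation {i + 1}" for i in range(n)]

if __name__ == "__main__":
    print(f"Selected device: {device}")
    for p in make_variations("a lighthouse at dusk"):
        print(p)  # in app.py each of these prompts is sent to the selected model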