AI-trainer1 committed on
Commit
869288e
·
verified ·
1 Parent(s): da33654

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -24
app.py CHANGED
@@ -55,34 +55,30 @@ import gradio as gr
55
  from transformers import pipeline
56
  import torch
57
 
58
- # Check if GPU is available
59
- device = 0 if torch.cuda.is_available() else -1
60
 
61
- # Load models using transformers
62
- model1 = pipeline("text-to-image", model="Jonny001/NSFW_master", device=device)
63
- model2 = pipeline("text-to-image", model="Jonny001/Alita-v1", device=device)
64
- model3 = pipeline("text-to-image", model="lexa862/NSFWmodel", device=device)
65
- model4 = pipeline("text-to-image", model="Keltezaa/flux_pussy_NSFW", device=device)
66
- model5 = pipeline("text-to-image", model="prashanth970/flux-lora-uncensored", device=device)
67
 
68
  # Function to generate images
69
  def generate_images(text, selected_model):
70
- if selected_model == "Model 1 (NSFW Master)":
71
- model = model1
72
- elif selected_model == "Model 2 (Alita)":
73
- model = model2
74
- elif selected_model == "Model 3 (Lexa NSFW)":
75
- model = model3
76
- elif selected_model == "Model 4 (Flux NSFW)":
77
- model = model4
78
- elif selected_model == "Model 5 (Lora Uncensored)":
79
- model = model5
80
- else:
81
- return "Invalid model selection."
82
-
83
- # Generate images
84
  results = []
85
- for i in range(2):
 
86
  modified_text = f"{text} variation {i+1}"
87
  result = model(modified_text)
88
  results.append(result)
@@ -103,11 +99,14 @@ interface = gr.Interface(
103
  outputs=[
104
  gr.Image(label="Generated Image 1"),
105
  gr.Image(label="Generated Image 2"),
 
106
  ],
107
  theme="Yntec/HaleyCH_Theme_Orange",
108
- description="⚠ Sorry for the inconvenience. The models are currently running on the CPU, which might affect performance. We appreciate your understanding.",
109
  cache_examples=False,
110
  )
111
 
112
  # Launch the interface
113
  interface.launch()
 
 
 
55
  from transformers import pipeline
56
  import torch
57
 
58
+ # Maximize CPU usage
59
+ torch.set_num_threads(torch.get_num_threads() * 2)
60
 
61
+ # Load models using Hugging Face pipelines
62
+ model1 = pipeline("text-to-image", model="Jonny001/NSFW_master", device_map="auto")
63
+ model2 = pipeline("text-to-image", model="Jonny001/Alita-v1", device_map="auto")
64
+ model3 = pipeline("text-to-image", model="lexa862/NSFWmodel", device_map="auto")
65
+ model4 = pipeline("text-to-image", model="Keltezaa/flux_pussy_NSFW", device_map="auto")
66
+ model5 = pipeline("text-to-image", model="prashanth970/flux-lora-uncensored", device_map="auto")
67
 
68
  # Function to generate images
69
  def generate_images(text, selected_model):
70
+ models = {
71
+ "Model 1 (NSFW Master)": model1,
72
+ "Model 2 (Alita)": model2,
73
+ "Model 3 (Lexa NSFW)": model3,
74
+ "Model 4 (Flux NSFW)": model4,
75
+ "Model 5 (Lora Uncensored)": model5,
76
+ }
77
+
78
+ model = models.get(selected_model, model1)
 
 
 
 
 
79
  results = []
80
+
81
+ for i in range(3):
82
  modified_text = f"{text} variation {i+1}"
83
  result = model(modified_text)
84
  results.append(result)
 
99
  outputs=[
100
  gr.Image(label="Generated Image 1"),
101
  gr.Image(label="Generated Image 2"),
102
+ gr.Image(label="Generated Image 3"),
103
  ],
104
  theme="Yntec/HaleyCH_Theme_Orange",
105
+ description="⚠ Models are running on CPU for optimized performance. Your patience is appreciated!",
106
  cache_examples=False,
107
  )
108
 
109
  # Launch the interface
110
  interface.launch()
111
+
112
+