Rename rotona.py to app.py
Browse files
app.py
ADDED
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import torch
|
2 |
+
import gradio as gr
|
3 |
+
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
|
4 |
+
|
5 |
+
# Load Model
# ---------------------------------------------------------------------------
# One-time Stable Diffusion v1.5 setup, executed at import time.
# ---------------------------------------------------------------------------
device = "cuda" if torch.cuda.is_available() else "cpu"
model_id = "runwayml/stable-diffusion-v1-5"
# fp16 halves GPU memory use; CPU inference requires fp32.
pipeline = StableDiffusionPipeline.from_pretrained(
    model_id,
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
)
# NOTE(review): the safety checker is deliberately disabled — outputs are
# unfiltered (the UI description warns the user).
pipeline.safety_checker = None
# Swap the default scheduler for the DPM multistep solver.
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
pipeline = pipeline.to(device)
# Reduce peak attention memory; this call existed in the pre-rename
# rotona.py and was dropped in the move to app.py.
pipeline.enable_attention_slicing()
|
12 |
+
|
13 |
+
# Image generation function
def generate_image(prompt):
    """Generate one image for *prompt* with the module-level pipeline.

    A fixed negative prompt plus framing hints ("centered, full body, ...")
    are appended to steer the model away from common artifacts.

    Args:
        prompt: free-text description entered by the user in the UI.

    Returns:
        The first (and only) generated PIL image.
    """
    negative_prompt = "blurry, distorted, bad anatomy, missing fingers, extra limbs, bad proportions, deformed hands, low quality, cropped, out of frame, partial body, cut off, head cut off"
    refined_prompt = f"{prompt}, centered, full body, well-framed, symmetrical"

    # Inference only — no_grad skips autograd bookkeeping and saves memory.
    with torch.no_grad():
        image = pipeline(
            refined_prompt,
            negative_prompt=negative_prompt,
            guidance_scale=7.5,
            num_inference_steps=50,
        ).images[0]

    # Free GPU memory — but only when actually running on CUDA; calling
    # torch.cuda.empty_cache() on a CPU-only install can raise instead of
    # being a no-op.
    if device == "cuda":
        torch.cuda.empty_cache()
    return image
|
23 |
+
|
24 |
+
# Gradio UI
# Build the input/output components first, then wire them into the app.
prompt_input = gr.Textbox(label="Enter Your Prompt")
generated_output = gr.Image(label="Generated Image")

interface = gr.Interface(
    fn=generate_image,
    inputs=prompt_input,
    outputs=generated_output,
    title="Rotona: Unrestricted Image Generator",
    description="Enter a prompt to generate an AI image. This model is unrestricted—use responsibly.",
)

# Launch
# Start the local web server only when run as a script, not on import.
if __name__ == "__main__":
    interface.launch()
|
rotona.py
DELETED
@@ -1,88 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
import tkinter as tk
|
3 |
-
from tkinter import filedialog, messagebox
|
4 |
-
from PIL import Image, ImageTk
|
5 |
-
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
|
6 |
-
|
7 |
-
# Initialize Stable Diffusion
model_id = "runwayml/stable-diffusion-v1-5"
use_cuda = torch.cuda.is_available()
device = "cuda" if use_cuda else "cpu"
dtype = torch.float16 if use_cuda else torch.float32

print(f"Using device: {device.upper()}")

# Assemble the pipeline: load weights, drop the safety checker, install the
# DPM multistep scheduler, move to the target device, and enable attention
# slicing to lower peak memory.
pipeline = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=dtype)
pipeline.safety_checker = None  # WARNING: No safety filters—USE RESPONSIBLY
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
pipeline = pipeline.to(device)
pipeline.enable_attention_slicing()
|
19 |
-
|
20 |
-
# GUI Setup
|
21 |
-
class RotonaGUI:
    """Tkinter front-end for the module-level Stable Diffusion ``pipeline``.

    Lets the user type a prompt, generate an image, preview it on a canvas,
    and save the full-resolution result to disk.
    """

    def __init__(self, root):
        """Build all widgets on *root* (a ``tk.Tk`` instance)."""
        self.root = root
        self.root.title("Rotona - Unrestricted AI Image Generator")
        self.root.geometry("600x700")
        self.root.configure(bg="black")

        # Title Label
        self.title_label = tk.Label(root, text="Rotona AI Generator", fg="red", bg="black", font=("Helvetica", 18, "bold"))
        self.title_label.pack(pady=10)

        # Prompt Entry
        self.prompt_label = tk.Label(root, text="Enter your prompt:", fg="white", bg="black", font=("Helvetica", 12))
        self.prompt_label.pack()
        self.prompt_entry = tk.Entry(root, width=60)
        self.prompt_entry.pack(pady=5)

        # Generate Button
        self.generate_button = tk.Button(root, text="Generate Image", command=self.generate_image, bg="red", fg="white", font=("Helvetica", 12, "bold"))
        self.generate_button.pack(pady=10)

        # Canvas for Displaying Image
        self.canvas = tk.Canvas(root, width=512, height=512, bg="gray")
        self.canvas.pack(pady=10)

        # Save Button — disabled until a generation succeeds.
        self.save_button = tk.Button(root, text="Save Image", command=self.save_image, bg="blue", fg="white", font=("Helvetica", 12, "bold"))
        self.save_button.pack(pady=10)
        self.save_button["state"] = "disabled"

        # Last successfully generated full-resolution PIL image, or None.
        self.generated_image = None

    def generate_image(self):
        """Run the diffusion pipeline on the entered prompt and show the result."""
        user_prompt = self.prompt_entry.get()
        if not user_prompt:
            messagebox.showerror("Error", "Please enter a prompt!")
            return

        try:
            negative_prompt = "blurry, distorted, bad anatomy, missing fingers, extra limbs, bad proportions, deformed hands, low quality"
            # Inference only: no_grad avoids autograd bookkeeping and the
            # extra memory it would otherwise hold during sampling.
            with torch.no_grad():
                image = pipeline(user_prompt, negative_prompt=negative_prompt, guidance_scale=7.5, num_inference_steps=50).images[0]

            self.generated_image = image
            self.display_image(image)
            self.save_button["state"] = "normal"

        except torch.cuda.OutOfMemoryError:
            messagebox.showerror("Error", "Out of GPU memory! Try reducing image size.")
        except Exception as e:
            messagebox.showerror("Error", f"Image generation failed: {e}")

    def display_image(self, image):
        """Render *image* on the preview canvas, downscaled to fit 512x512."""
        # Work on a copy: Image.thumbnail resizes IN PLACE, so shrinking the
        # object stored in self.generated_image would make save_image() write
        # the preview instead of the full-resolution generation.
        preview = image.copy()
        preview.thumbnail((512, 512))
        img_tk = ImageTk.PhotoImage(preview)
        self.canvas.create_image(256, 256, image=img_tk)
        # Keep a reference — Tkinter does not hold one, and the PhotoImage
        # would otherwise be garbage-collected and the canvas go blank.
        self.canvas.image = img_tk

    def save_image(self):
        """Ask for a destination path and save the last generated image as PNG."""
        if self.generated_image:
            file_path = filedialog.asksaveasfilename(defaultextension=".png", filetypes=[("PNG files", "*.png")])
            if file_path:
                self.generated_image.save(file_path)
                messagebox.showinfo("Saved", f"Image saved successfully as {file_path}")
84 |
-
|
85 |
-
# Run GUI
# Guarded so importing this module (e.g. for tests) does not open a window
# or block in the Tk event loop.
if __name__ == "__main__":
    root = tk.Tk()
    app = RotonaGUI(root)
    root.mainloop()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|