Update handler.py
handler.py  +16 -5  CHANGED
```diff
@@ -6,26 +6,35 @@ import base64
 
 class EndpointHandler:
     def __init__(self, path: str = ""):
-        print(f"Initializing
+        print(f"🚀 Initializing Bh0r with Juggernaut-XL v9 as base model...")
 
-        # Load
+        # Load Juggernaut-XL v9 instead of SDXL base
         self.pipe = StableDiffusionXLPipeline.from_pretrained(
-            "
+            "RunDiffusion/Juggernaut-XL-v9",
             torch_dtype=torch.float16,
             variant="fp16"
         )
 
-        print("
+        print("✅ Juggernaut-XL v9 base model loaded successfully.")
+
+        # Load Bh0r LoRA
+        print("🧩 Loading Bh0r LoRA weights...")
         self.pipe.load_lora_weights(
             "Texttra/Bh0r",
             weight_name="Bh0r-10.safetensors",
             adapter_name="bh0r_lora"
         )
         self.pipe.set_adapters(["bh0r_lora"], adapter_weights=[0.9])
+
+        print("✅ Bh0r LoRA loaded with 0.9 weight.")
+
+        # Fuse LoRA into base model
         self.pipe.fuse_lora()
+        print("🔗 Fused LoRA into base model.")
 
+        # Move to GPU if available
         self.pipe.to("cuda" if torch.cuda.is_available() else "cpu")
-        print("Model ready.")
+        print("🎯 Model ready on device:", "cuda" if torch.cuda.is_available() else "cpu")
 
     def __call__(self, data: Dict) -> Dict:
         print("Received data:", data)
@@ -37,6 +46,7 @@ class EndpointHandler:
         if not prompt:
             return {"error": "No prompt provided."}
 
+        # Generate the image
         image = self.pipe(
             prompt,
             num_inference_steps=45,
@@ -45,6 +55,7 @@ class EndpointHandler:
 
         print("Image generated.")
 
+        # Convert to base64
         buffer = BytesIO()
         image.save(buffer, format="PNG")
         base64_image = base64.b64encode(buffer.getvalue()).decode("utf-8")
```
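For context, here is a minimal sketch of what the complete `handler.py` plausibly looks like after this commit. The diff omits the import block, the prompt extraction inside `__call__`, the remaining arguments to `self.pipe(...)`, and the return statement, so those parts (the `inputs`/`prompt` payload keys, the `.images[0]` indexing, and the `{"image": ...}` response shape) are assumptions for illustration, not the author's code:

```python
# Hypothetical reconstruction of the full file after this commit; the parts
# the diff does not show are marked as assumptions below.
import base64
from io import BytesIO
from typing import Dict

import torch
from diffusers import StableDiffusionXLPipeline


class EndpointHandler:
    def __init__(self, path: str = ""):
        print(f"🚀 Initializing Bh0r with Juggernaut-XL v9 as base model...")

        # Load Juggernaut-XL v9 instead of SDXL base
        self.pipe = StableDiffusionXLPipeline.from_pretrained(
            "RunDiffusion/Juggernaut-XL-v9",
            torch_dtype=torch.float16,
            variant="fp16"
        )
        print("✅ Juggernaut-XL v9 base model loaded successfully.")

        # Load Bh0r LoRA
        print("🧩 Loading Bh0r LoRA weights...")
        self.pipe.load_lora_weights(
            "Texttra/Bh0r",
            weight_name="Bh0r-10.safetensors",
            adapter_name="bh0r_lora"
        )
        self.pipe.set_adapters(["bh0r_lora"], adapter_weights=[0.9])
        print("✅ Bh0r LoRA loaded with 0.9 weight.")

        # Fuse LoRA into base model
        self.pipe.fuse_lora()
        print("🔗 Fused LoRA into base model.")

        # Move to GPU if available
        self.pipe.to("cuda" if torch.cuda.is_available() else "cpu")
        print("🎯 Model ready on device:", "cuda" if torch.cuda.is_available() else "cpu")

    def __call__(self, data: Dict) -> Dict:
        print("Received data:", data)

        # Assumption: the prompt arrives under the Inference Endpoints "inputs"
        # key, either as a bare string or as {"prompt": ...}; the real
        # extraction code is elided from the diff.
        inputs = data.get("inputs", data)
        prompt = inputs if isinstance(inputs, str) else inputs.get("prompt", "")

        if not prompt:
            return {"error": "No prompt provided."}

        # Generate the image (further sampler arguments are elided from the
        # diff; .images[0] is how diffusers pipelines expose the result)
        image = self.pipe(
            prompt,
            num_inference_steps=45,
        ).images[0]
        print("Image generated.")

        # Convert to base64
        buffer = BytesIO()
        image.save(buffer, format="PNG")
        base64_image = base64.b64encode(buffer.getvalue()).decode("utf-8")

        # Assumption: the diff does not show the response shape
        return {"image": base64_image}
```

Under that payload assumption, a local smoke test would be `EndpointHandler()({"inputs": {"prompt": "portrait photo, studio lighting"}})`, which should return a dict carrying the generated PNG as a base64 string.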