import gradio as gr
from diffusers import StableDiffusionUpscalePipeline
import torch
from PIL import Image
import base64
from io import BytesIO
# Load model and scheduler
model_id = "stabilityai/stable-diffusion-x4-upscaler"
pipeline = StableDiffusionUpscalePipeline.from_pretrained(model_id)
pipeline = pipeline.to("cpu") # Use CPU instead of GPU

def upscale_image(image, prompt):
    image = image.resize((128, 128))  # Resize to the expected input size
    upscaled_image = pipeline(prompt=prompt, image=image).images[0]
    return upscaled_image

def image_to_base64(image):
    # Serialize a PIL image to a base64-encoded JPEG string
    buffered = BytesIO()
    image.save(buffered, format="JPEG")
    return base64.b64encode(buffered.getvalue()).decode()

def base64_to_image(base64_str):
    # Decode a base64 string back into a PIL image; convert to RGB so the
    # upscaler and the JPEG re-encoding both receive a 3-channel image
    image_data = base64.b64decode(base64_str)
    return Image.open(BytesIO(image_data)).convert("RGB")

def handle_upload(base64_image, prompt):
    # Decode the incoming base64 image, upscale it, and re-encode the result
    image = base64_to_image(base64_image)
    upscaled_image = upscale_image(image, prompt)
    base64_str = image_to_base64(upscaled_image)
    return base64_str

def main():
    with gr.Blocks() as demo:
        gr.Markdown("# Stable Diffusion Upscaler")
        with gr.Row():
            with gr.Column(scale=1):
                image_input = gr.Textbox(label="Base64 Encoded Low-Resolution Image")
                prompt_input = gr.Textbox(label="Prompt", value="a white cat")
                upload_btn = gr.Button("Upload and Upscale")
                base64_output = gr.Textbox(label="Base64 Encoded Upscaled Image")
        upload_btn.click(fn=handle_upload, inputs=[image_input, prompt_input], outputs=[base64_output])
    demo.launch()

if __name__ == "__main__":
    main()
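
A minimal local round-trip sketch of how the base64 helpers fit together, assuming the script above is saved as app.py and a small input image named cat.png exists in the working directory (both filenames are hypothetical): encode the image, run handle_upload, and decode the returned string back into a PIL image.

# Usage sketch (assumed names: app.py for the script above, cat.png as input)
# Importing app loads the upscaler pipeline at module level, which is slow on first run.
from app import handle_upload, image_to_base64, base64_to_image
from PIL import Image

low_res = Image.open("cat.png").convert("RGB")        # small source image
encoded = image_to_base64(low_res)                    # PIL image -> base64 JPEG string
result_b64 = handle_upload(encoded, "a white cat")    # upscale via the pipeline
upscaled = base64_to_image(result_b64)                # base64 string -> PIL image
upscaled.save("cat_upscaled.jpg")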