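"""Gradio demo for the Freedom model.

Submits text-to-image requests to the makeai.run txt2img API and displays the
returned image.
"""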
import gradio as gr
import requests
import json
import PIL.Image
from io import BytesIO
import os
import random


def generate_image(prompt, negative_prompt, scheduler, steps, width, height, cfg, seed, restore_faces):
    # Define the API endpoint
    apiUrl = "https://apiv2.makeai.run/v2/txt2img"

    # Define the request headers
    headers = {
        "Content-Type": "application/json",
        "token": "514f7ecde6a5434dbab5c6579311ad82"
    }
    # Define the request body
    body = {
        "mode": "url",
        "model": "AOM3A1B_orangemixs.safetensors",
        "tiling": False,
        "batch_size": 1,
        "prompt": prompt,
        "negative_prompt": negative_prompt,
        "seed": seed if seed is not None else random.randint(0, 999999999),
        "scheduler": scheduler,
        "n_iter": 1,
        "steps": steps,
        "cfg": cfg,
        "offset_noise": 0.0,
        "width": width,
        "height": height,
        "clip_skip": 1,
        "loras": [{"name": "", "strength": 1.0}],
        "embeddings": [{"name": "", "strength": 1.0}],
        "vae": "vae-ft-mse-840000-ema-pruned.ckpt",
        "restore_faces": restore_faces,
        "fr_model": "CodeFormer",
        "codeformer_weight": 0.5,
        "enable_hr": False,
        "denoising_strength": 0.75,
        "hr_scale": 2,
        "hr_upscale": "None",
        "img2img_ref_img_type": "piece",
        "img2img_resize_mode": 0,
        "img2img_denoising_strength": 0.75,
        "controlnet_enabled": False,
        "controlnet_ref_img_type": "piece",
        "controlnet_guessmode": False,
        "controlnet_module": "canny",
        "controlnet_model": "control_v11p_sd15_softedge",
        "controlnet_weight": 1,
        "controlnet_guidance_start": 0,
        "controlnet_guidance_end": 1,
        "controlnet_ref_img_url": "",
    }
    # Send the request
    response = requests.post(apiUrl, headers=headers, data=json.dumps(body), verify=False)

    # Check the response status
    if response.status_code == 200:
        # Get the image URL from the response
        response_json = response.json()
        if 'results' in response_json and isinstance(response_json['results'], list) and len(response_json['results']) > 0:
            image_url = response_json['results'][0]

            # Get the image from the URL
            image_response = requests.get(image_url)
            image = PIL.Image.open(BytesIO(image_response.content))
            return image
        else:
            raise Exception("Unexpected API response format")
    else:
        raise Exception("API request failed with status code " + str(response.status_code))


# Define the Gradio interface
iface = gr.Interface(
    fn=generate_image,
    inputs=[
        gr.components.Textbox(label="Prompt"),
        gr.components.Textbox(value="ugly, tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of frame, extra limbs, disfigured, deformed, body out of frame, blurry, bad anatomy, blurred, watermark, grainy, signature, cut off, draft", label="Negative Prompt"),
        gr.components.Dropdown(choices=[
            "Euler a",
            "Euler",
            "LMS",
            "Heun",
            "DPM2",
            "DPM2 a",
            "DPM++ 2S a",
            "DPM++ 2M",
            "DPM++ SDE",
            "DPM fast",
            "DPM adaptive",
            "LMS Karras",
            "DPM2 Karras",
            "DPM2 a Karras",
            "DPM++ 2S a Karras",
            "DPM++ 2M Karras",
            "DPM++ SDE Karras",
            "DDIM",
            "PLMS"
        ], label="Scheduler", value="Euler a"),
        gr.components.Slider(minimum=10, maximum=100, step=1, value=30, label="Steps"),
        gr.components.Slider(minimum=512, maximum=1600, value=768, label="Width"),
        gr.components.Slider(minimum=512, maximum=1600, value=768, label="Height"),
        gr.components.Slider(minimum=4, maximum=12, step=0.5, value=7.0, label="CFG"),
        gr.components.Number(label="Seed", value=None),
        gr.components.Checkbox(label="Restore Faces"),
    ],
    outputs=gr.components.Image(),
    title="Freedom Demonstration",
    description="""
    Testing environment for the Freedom Model, a fine-tuned SD 2.1 768 model produced by <a href='https://twitter.com/artificialguybr' target='_blank'>@artificialguybr</a>.<br>
    The weights were released <a href='LINK_TO_WEIGHTS' target='_blank'>here</a>.<br>
    You can find example prompts <a href='LINK_TO_EXAMPLE_PROMPTS' target='_blank'>here</a>.<br>
    Demonstration running on the <a href='LINK_TO_MAKEAI.RUN_API' target='_blank'>makeai.run API</a>.<br>
    Thanks to <a href='LINK_TO_REDMOND.AI' target='_blank'>Redmond.ai</a> for providing GPU time and sponsoring this model.
    """,
    allow_flagging='never'
)
# Launch the app
iface.launch(debug=True)