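"""Gradio demo app for text-to-image generation.

Sends a txt2img request to a hosted Stable Diffusion API and displays the
returned image. The endpoint URL and authentication token are read from the
API_URL and API_TOKEN environment variables.
"""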
import gradio as gr
import requests
import json
import PIL.Image
from io import BytesIO
import os
import random

def generate_image(prompt, negative_prompt, scheduler, steps, width, height, restore_faces, seed, cfg):
    restore_faces = bool(restore_faces)
    print(f"restore_faces: {restore_faces}, type: {type(restore_faces)}")
    # Define the API endpoint
    apiUrl = os.getenv("API_URL")
    # Define the request headers
    headers = {
        "Content-Type": "application/json",
        "token": os.getenv("API_TOKEN")
    }

    # Define the request body
    body = {
        "mode": "url",
        "model": "AOM3A1B_orangemixs.safetensors",
        "tiling": False,
        "batch_size": 1,
        "prompt": prompt,
        "negative_prompt": negative_prompt,
        "seed": seed if seed is not None else random.randint(0, 999999999),
        "scheduler": scheduler,
        "n_iter": 1,
        "steps": steps,
        "cfg": cfg,
        "offset_noise": 0.0,
        "width": width,
        "height": height,
        "clip_skip": 1,
        "loras": [{"name": "biggergirls_128", "strength": 1.0}],
        "embeddings": [{"name": "awaitingtongue", "strength": 1.0}],
        "vae": "vae-ft-mse-840000-ema-pruned.ckpt",
        "restore_faces": restore_faces,
        "fr_model": "CodeFormer",
        "codeformer_weight": 0.5,
        "enable_hr": False,
        "denoising_strength": 0.75,
        "hr_scale": 2,
        "hr_upscale": "None",
        "img2img_ref_img_type": "piece",
        "img2img_resize_mode": 0,
        "img2img_denoising_strength": 0.75,
        "controlnet_enabled": False,
        "controlnet_ref_img_type": "piece",
        "controlnet_guessmode": False,
        "controlnet_module": "canny",
        "controlnet_model": "control_v11p_sd15_softedge",
        "controlnet_weight": 1,
        "controlnet_guidance_start": 0,
        "controlnet_guidance_end": 1,
        "controlnet_ref_img_url": "https://upload.wikimedia.org/wikipedia/commons/d/d1/Image_not_available.png",
        "controlnet_lowvram": False,
        "controlnet_mask": [],
        "controlnet_processor_res": 512,
        "controlnet_resize_mode": "Scale to Fit (Inner Fit)",
        "controlnet_threshold_a": 100,
        "controlnet_threshold_b": 200,
    }

    # Send the request (note: TLS certificate verification is disabled for this call)
    response = requests.post(apiUrl, headers=headers, data=json.dumps(body), verify=False)

    # Check the response status
    if response.status_code == 200:
        # Get the image URL from the response
        response_json = response.json()
        if 'results' in response_json and isinstance(response_json['results'], list) and len(response_json['results']) > 0:
            image_url = response_json['results'][0]

            # Download the image from the URL
            image_response = requests.get(image_url)
            image = PIL.Image.open(BytesIO(image_response.content))

            return image
        else:
            raise Exception("Unexpected API response format")
    else:
        # Print the response body to aid debugging, then fail
        print(response.text)
        raise Exception("API request failed with status code " + str(response.status_code))

# Define the Gradio interface
iface = gr.Interface(
    fn=generate_image, 
    inputs=[
        gr.components.Textbox(label="Prompt"),
        gr.components.Textbox(value="ugly, tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of frame, extra limbs, disfigured, deformed, body out of frame, blurry, bad anatomy, blurred, watermark, grainy, signature, cut off, draft", label="Negative Prompt"),
        gr.components.Dropdown(choices=[
            "Euler a",
            "Euler",
            "LMS",
            "Heun",
            "DPM2",
            "DPM2 a",
            "DPM++ 2S a",
            "DPM++ 2M",
            "DPM++ SDE",
            "DPM fast",
            "DPM adaptive",
            "LMS Karras",
            "DPM2 Karras",
            "DPM2 a Karras",
            "DPM++ 2S a Karras",
            "DPM++ 2M Karras",
            "DPM++ SDE Karras",
            "DDIM",
            "PLMS"
        ], label="Scheduler", value="DPM++ SDE Karras"),
        gr.components.Slider(minimum=10, maximum=100, step=1.0, value=30, label="Steps"),
        gr.components.Slider(minimum=512, maximum=1600, value=768, label="Width"),
        gr.components.Slider(minimum=512, maximum=1600, value=768, label="Height"),
        # The input order below must match the parameter order of generate_image
        gr.components.Checkbox(label="Restore Faces", value=False),
        gr.components.Number(label="Seed", value=None),
        gr.components.Slider(minimum=4, maximum=12, step=0.5, value=7.0, label="CFG"),
    ],
    outputs=gr.components.Image(),
    title="Freedom Demonstration",
    description = """
# Testing environment for the Freedom Model
Finetuned model of SD 2.1 768X produced by [@artificialguybr](https://twitter.com/artificialguybr).

## Resources
- The weights were released [here](LINK_TO_WEIGHTS).
- You can find example prompts [here](LINK_TO_EXAMPLE_PROMPTS).

## Demonstration
This demonstration is running on the [makeai.run API](LINK_TO_MAKEAI.RUN_API).

## Acknowledgements
Thanks to [Redmond.ai](LINK_TO_REDMOND.AI) for providing GPU Time and sponsoring this model.
""",
    allow_flagging='never'
)

# Launch the app
iface.launch(debug=True)
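
# To run locally, set the API_URL and API_TOKEN environment variables before launching.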