Update app.py
app.py
CHANGED
@@ -17,7 +17,7 @@ def download_image(url):
 hf_token = os.environ.get("HF_TOKEN_API_DEMO") # we get it from a secret env variable, such that it's private
 auth_headers = {"api_token": hf_token}

-
+aspect_ratios = ["1:1","2:3","3:2","3:4","4:3","4:5","5:4","9:16","16:9"]

 # Ng
 default_negative_prompt= "Logo,Watermark,Text,Ugly,Morbid,Extra fingers,Poorly drawn hands,Mutation,Blurry,Extra limbs,Gross proportions,Missing arms,Mutated hands,Long neck,Duplicate,Mutilated,Mutilated hands,Poorly drawn face,Deformed,Bad anatomy,Cloned face,Malformed limbs,Missing legs,Too many fingers"
@@ -28,7 +28,7 @@ default_negative_prompt= "Logo,Watermark,Text,Ugly,Morbid,Extra fingers,Poorly d
 # pipe.to(device="cuda")

 # @spaces.GPU(enable_queue=True)
-def infer(prompt,negative_prompt,seed,resolution):
+def infer(prompt,negative_prompt,seed,aspect_ratio):
     print(f"""
     —/n
     {prompt}
@@ -46,8 +46,7 @@ def infer(prompt,negative_prompt,seed,resolution):
     except:
         generator=None

-
-    w,h = int(w),int(h)
+
     # image = pipe(prompt,num_inference_steps=30, negative_prompt=negative_prompt,generator=generator,width=w,height=h).images[0]
     url = "http://engine.int.bria-api.com/v1/text-to-image/base/3.2"

@@ -62,7 +61,8 @@ def infer(prompt,negative_prompt,seed,resolution):
         "include_generation_prefix": False,
         "negative_prompt": negative_prompt,
         "num_inference_steps": 30,
-        "seed": seed
+        "seed": seed,
+        "aspect_ratio": aspect_ratio
     })
     response = requests.request("POST", url, headers=auth_headers, data=payload)
     print('1',response)
@@ -104,7 +104,7 @@ with gr.Blocks(css=css) as demo:
     with gr.Group():
         with gr.Column():
             prompt_in = gr.Textbox(label="Prompt", value='''photo of mystical dragon eating sushi, text bubble says "Sushi Time".''')
-
+            aspect_ratio = gr.Dropdown(value=aspect_ratios[0], show_label=True, label="Aspect Ratio", choices=aspect_ratios)
             seed = gr.Textbox(label="Seed", value=-1)
             negative_prompt = gr.Textbox(label="Negative Prompt", value=default_negative_prompt)
             submit_btn = gr.Button("Generate")
@@ -133,7 +133,7 @@ with gr.Blocks(css=css) as demo:
             prompt_in,
             negative_prompt,
             seed,
-
+            aspect_ratio
         ],
         outputs = [
             result
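
In short, the change swaps the old resolution handling (the removed w,h = int(w),int(h) line) for a single aspect_ratio string that is forwarded in the Bria text-to-image request body. Below is a minimal sketch of that request path, assuming json and requests are imported at the top of app.py and that the payload also carries the prompt itself (a field not visible in the hunks above); infer_sketch is an illustrative stand-in for the app's infer, not the full function.

import json
import os

import requests

hf_token = os.environ.get("HF_TOKEN_API_DEMO")  # secret token, read from the environment
auth_headers = {"api_token": hf_token}

aspect_ratios = ["1:1", "2:3", "3:2", "3:4", "4:3", "4:5", "5:4", "9:16", "16:9"]
url = "http://engine.int.bria-api.com/v1/text-to-image/base/3.2"

def infer_sketch(prompt, negative_prompt, seed, aspect_ratio):
    # Build the request body; "aspect_ratio" replaces the old explicit width/height.
    payload = json.dumps({
        "prompt": prompt,                    # assumed field, not shown in the diff
        "include_generation_prefix": False,
        "negative_prompt": negative_prompt,
        "num_inference_steps": 30,
        "seed": seed,
        "aspect_ratio": aspect_ratio,        # one of aspect_ratios, e.g. "16:9"
    })
    # Same call the app makes; returns the raw HTTP response for the caller to unpack.
    return requests.request("POST", url, headers=auth_headers, data=payload)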
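
On the UI side, the new Aspect Ratio dropdown is the only added control, and it is appended to the click handler's inputs so infer receives it as a fourth argument. A cut-down sketch of that Gradio wiring follows, with infer stubbed out and a result Image plus launch call added for completeness; the real app builds a larger layout with its own css.

import gradio as gr

aspect_ratios = ["1:1", "2:3", "3:2", "3:4", "4:3", "4:5", "5:4", "9:16", "16:9"]

def infer(prompt, negative_prompt, seed, aspect_ratio):
    # Stand-in for the real function, which posts to the Bria endpoint as sketched above.
    return None

with gr.Blocks() as demo:
    with gr.Group():
        with gr.Column():
            prompt_in = gr.Textbox(label="Prompt")
            # New control: its value flows straight into the API payload's "aspect_ratio".
            aspect_ratio = gr.Dropdown(value=aspect_ratios[0], show_label=True,
                                       label="Aspect Ratio", choices=aspect_ratios)
            seed = gr.Textbox(label="Seed", value=-1)
            negative_prompt = gr.Textbox(label="Negative Prompt")
            submit_btn = gr.Button("Generate")
    result = gr.Image(label="Result")

    # The dropdown joins the existing inputs, matching infer's updated signature.
    submit_btn.click(
        fn=infer,
        inputs=[prompt_in, negative_prompt, seed, aspect_ratio],
        outputs=[result],
    )

demo.launch()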