EmoCube committed
Commit ad36cb3 · verified · 1 Parent(s): 8d99aae

Update app.py

Files changed (1)
  1. app.py +20 -7
app.py CHANGED
@@ -10,10 +10,17 @@ from deep_translator import GoogleTranslator
  import json
  from random import randint
 
- # Project by Nymbo
- alto_api= "https://api-inference.huggingface.co/models/enhanceaiteam/Flux-uncensored"
- API_URL_ORIG = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"
- API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Canopus-LoRA-Flux-FaceRealism"
+ API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"
+
+ mod_list = {
+     "Default": API_URL,
+     "Face Realism": "https://api-inference.huggingface.co/models/prithivMLmods/Canopus-LoRA-Flux-FaceRealism",
+     "Midjourney": "https://api-inference.huggingface.co/models/Jovie/Midjourney_Schnell",
+     "Anime2": "https://api-inference.huggingface.co/models/Jovie/Anime2_Schnell",
+     "Anime3": "https://api-inference.huggingface.co/models/Jovie/Anime3_Schnell",
+     "Anime4": "https://api-inference.huggingface.co/models/Jovie/Anime4_Schnell",
+
+ }
  API_TOKEN = os.getenv("HF_READ_TOKEN")
  headers = {"Authorization": f"Bearer {API_TOKEN}"}
  timeout = 100
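Every value in the new mod_list repeats the Inference API base URL. As a minimal refactoring sketch (not part of the commit; BASE_URL and endpoint() are hypothetical names), the endpoints could be derived from the bare Hub repo ids instead:

    # Hypothetical sketch, not code from app.py: build each Inference API
    # endpoint from the Hub repo id so the base URL is written only once.
    BASE_URL = "https://api-inference.huggingface.co/models/"

    def endpoint(repo_id: str) -> str:
        return BASE_URL + repo_id

    mod_list = {
        "Default": endpoint("black-forest-labs/FLUX.1-schnell"),
        "Face Realism": endpoint("prithivMLmods/Canopus-LoRA-Flux-FaceRealism"),
        "Midjourney": endpoint("Jovie/Midjourney_Schnell"),
        "Anime2": endpoint("Jovie/Anime2_Schnell"),
        "Anime3": endpoint("Jovie/Anime3_Schnell"),
        "Anime4": endpoint("Jovie/Anime4_Schnell"),
    }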
@@ -92,7 +99,7 @@ def compress_image(image, quality=50):
      compressed_image = Image.open(buffer)
      return compressed_image
 
- def query(prompt, is_realistic, num_inference_steps, width, height):
+ def query(prompt, is_realistic, num_inference_steps, width, height, mod = None):
      if prompt == "" or prompt == None:
          return None
 
@@ -107,7 +114,12 @@ def query(prompt, is_realistic, num_inference_steps, width, height):
          "parameters": {"num_inference_steps": num_inference_steps, "width": width, "height": height}
      }
 
-     response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
+     model = API_URL
+     for mod_name, mod_link in mod_list.items():
+         if (mod == mod_name):
+             model = mod_link
+
+     response = requests.post(model, headers=headers, json=payload, timeout=timeout)
      if response.status_code != 200:
          print(f"Error: Failed to get image. Response status: {response.status_code}")
          print(f"Response content: {response.text}")
@@ -172,12 +184,13 @@ with gr.Blocks(theme='gstaff/xkcd', css=css) as app:
      with gr.Row():
          width = gr.Slider(label="Width", minimum = 128, maximum = 1024, step = 32, value = 480)
          height = gr.Slider(label="Height", minimum = 128, maximum = 1024, step = 32, value = 640)
+         mod_choice = gr.Dropdown(list(mod_list.keys()), label="Mod")
 
      with gr.Row():
          text_button = gr.Button("Run", variant='primary', elem_id="gen-button")
      with gr.Row():
          image_output = gr.Image(type="pil", label="Image Output", elem_id="gallery")
 
-     text_button.click(query, inputs=[text_prompt, is_realistic, num_inference_steps, width, height], outputs=image_output)
+     text_button.click(query, inputs=[text_prompt, is_realistic, num_inference_steps, width, height, mod_choice], outputs=image_output)
 
  app.launch(show_api=True, share=True)
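The final hunk adds the Mod dropdown and routes its value into query() through the click handler's inputs list. A stripped-down Blocks sketch of the same wiring (names mirror app.py, but the mods list and the reduced demo_query signature are stand-ins, not the app's real code):

    import gradio as gr

    mods = ["Default", "Face Realism", "Midjourney", "Anime2", "Anime3", "Anime4"]

    def demo_query(prompt, mod):
        # Stand-in for query(): just echoes what would be sent.
        return f"mod={mod!r}, prompt={prompt!r}"

    with gr.Blocks() as demo:
        text_prompt = gr.Textbox(label="Prompt")
        mod_choice = gr.Dropdown(mods, value="Default", label="Mod")
        text_button = gr.Button("Run")
        out = gr.Textbox(label="Result")
        # Every value the handler needs must be listed in inputs=[...]; the
        # dropdown goes there rather than as an extra argument to click().
        text_button.click(demo_query, inputs=[text_prompt, mod_choice], outputs=out)

    demo.launch()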
 
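The diff shows only fragments of query(). For reference, a minimal, self-contained sketch of the round trip it performs against the Inference API; the function name, defaults, and error handling here are assumptions rather than code from app.py:

    import io
    import os
    import requests
    from PIL import Image

    API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"
    headers = {"Authorization": f"Bearer {os.getenv('HF_READ_TOKEN')}"}

    def generate(prompt, steps=4, width=480, height=640):
        # POST the prompt and generation parameters to the chosen endpoint,
        # then decode the returned bytes as a PIL image.
        payload = {
            "inputs": prompt,
            "parameters": {"num_inference_steps": steps, "width": width, "height": height},
        }
        resp = requests.post(API_URL, headers=headers, json=payload, timeout=100)
        if resp.status_code != 200:
            raise RuntimeError(f"Inference API returned {resp.status_code}: {resp.text}")
        return Image.open(io.BytesIO(resp.content))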