nevreal committed on
Commit 234658e · verified · 1 Parent(s): c2a8649

Update app.py

Files changed (1)
  1. app.py +19 -11
app.py CHANGED
@@ -1,6 +1,7 @@
 import gradio as gr
 import requests
 import os
+import time
 
 # Environment variables for API details
 API_TOKEN = os.getenv("HUGGINGFACE_API_TOKEN") # Fetching the API token from environment variable
@@ -12,19 +13,25 @@ def query_huggingface_api(api_url, prompt):
     response = requests.post(api_url, headers=headers, json=data)
 
     if response.status_code == 200:
-        # Assuming the API returns binary image data
-        return response.content
+        return response.content, None # Return the image and no error
     else:
-        return None, f"Error {response.status_code}: {response.text}"
+        return None, f"Error {response.status_code}: {response.text}" # Return None and the error message
 
 # Gradio function for generating the image
 def generate_image(api_url, prompt):
-    result, error = query_huggingface_api(f"https://api-inference.huggingface.co/models/{api_url}", prompt)
-
-    if result:
-        return result, None
-    else:
-        return None, error
+    # Attempt to query the API with retry logic for loading models
+    for attempt in range(5): # Try up to 5 times
+        result, error = query_huggingface_api(f"https://api-inference.huggingface.co/models/{api_url}", prompt)
+
+        if result:
+            return result, None
+        elif "Model is currently loading" in error:
+            estimated_time = float(error.split("estimated_time\":")[1].split("}")[0]) # Extract estimated time from error message
+            time.sleep(estimated_time + 5) # Wait for the model to load, with an additional buffer time
+        else:
+            return None, error # Return the error if it's not a loading issue
+
+    return None, "Model is still loading after multiple attempts. Please try again later." # Final error if all attempts fail
 
 # Create Gradio Blocks Interface
 with gr.Blocks() as demo:
@@ -39,12 +46,13 @@ with gr.Blocks() as demo:
     with gr.Column():
         text_input = gr.Textbox(
             label="Enter your prompt",
-            placeholder="Type something here..."
+            placeholder="Type something here...",
+            value="psg stocking, blue eyes, blue hair, colored inner hair, hair bow, long hair, multicolored hair, pink hair, two-tone hair, bangs, blunt bangs,"
         )
         model_input = gr.Textbox(
             label="Model URL",
             placeholder="Enter the model URL...",
-            value="user/sdwarm"
+            value=""
        )
         generate_btn = gr.Button("Generate Image")
 
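The retry branch added in this commit recovers `estimated_time` by splitting the raw error string. For comparison, here is a minimal sketch of the same retry idea that reads the value from the response's JSON body instead; it assumes the Inference API reports a cold model as a JSON payload containing `error` and `estimated_time` fields (behaviour not shown in this diff), and `query_with_retry` is a hypothetical helper, not part of the commit.

import os
import time
import requests

API_TOKEN = os.getenv("HUGGINGFACE_API_TOKEN")

def query_with_retry(model_id, prompt, attempts=5):
    # Hypothetical variant of generate_image: same retry loop, but estimated_time
    # is read from the JSON error body instead of being sliced out of a string.
    url = f"https://api-inference.huggingface.co/models/{model_id}"
    headers = {"Authorization": f"Bearer {API_TOKEN}"}
    for _ in range(attempts):
        response = requests.post(url, headers=headers, json={"inputs": prompt})
        if response.status_code == 200:
            return response.content, None  # raw image bytes, no error
        try:
            payload = response.json()  # loading errors are assumed to come back as JSON
        except ValueError:
            return None, f"Error {response.status_code}: {response.text}"
        if isinstance(payload, dict) and "loading" in str(payload.get("error", "")):
            # Wait for the reported estimated_time plus a small buffer, then retry
            time.sleep(float(payload.get("estimated_time", 10)) + 5)
            continue
        return None, f"Error {response.status_code}: {response.text}"
    return None, "Model is still loading after multiple attempts. Please try again later."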
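The diff shows the prompt and model Textboxes and the Generate button, but not the output components or the click wiring. Below is a sketch of how the raw bytes returned by `generate_image` could be displayed; it assumes `generate_image` from app.py is in scope, and the component names `image_out` and `error_out`, as well as the PIL conversion step, are illustrative assumptions, not taken from the commit.

import io

import gradio as gr
from PIL import Image

def run(model_id, prompt):
    # Call the helper defined earlier in app.py and convert the returned bytes
    # into a PIL image that gr.Image can display.
    image_bytes, error = generate_image(model_id, prompt)
    image = Image.open(io.BytesIO(image_bytes)) if image_bytes else None
    return image, error or ""

with gr.Blocks() as demo:
    with gr.Column():
        text_input = gr.Textbox(label="Enter your prompt")
        model_input = gr.Textbox(label="Model URL")
        generate_btn = gr.Button("Generate Image")
    image_out = gr.Image(label="Generated image")              # hypothetical output component
    error_out = gr.Textbox(label="Error", interactive=False)   # hypothetical error display

    generate_btn.click(run, inputs=[model_input, text_input], outputs=[image_out, error_out])

demo.launch()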