Zhofang committed
Commit c0308f7 · verified · 1 Parent(s): c172e36

Update app.py

Files changed (1)
  1. app.py +152 -151
app.py CHANGED
@@ -6,148 +6,128 @@ import os
  from PIL import Image
  from deep_translator import GoogleTranslator
  
- # Create assets directory if it doesn't exist (though not strictly needed by this script anymore)
  # os.makedirs('assets', exist_ok=True)
-
- # Download icon if it doesn't exist
  if not os.path.exists('icon.jpg'):
-     print("Downloading icon...")
      try:
-         icon_url = "https://i.pinimg.com/564x/64/49/88/644988c59447eb00286834c2e70fdd6b.jpg"
-         response = requests.get(icon_url)
          response.raise_for_status() # Raise an exception for HTTP errors
          with open('icon.jpg', 'wb') as f:
-             f.write(response.content)
          print("Icon downloaded successfully.")
      except requests.exceptions.RequestException as e:
-         print(f"Failed to download icon.jpg: {e}. Please ensure you have internet access or place icon.jpg manually.")
-         # As a fallback, we can proceed without the icon if download fails.
-         # The gr.Image for the icon will show a broken image if 'icon.jpg' is missing.
  
  API_URL_DEV = "https://lol-v2.mxflower.eu.org/api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
  API_URL = "https://lol-v2.mxflower.eu.org/api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"
  timeout = 100
  
- def query(prompt_text, negative_prompt_text, steps=30, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, huggingface_api_key_ui=None, use_dev=False):
      api_url = API_URL_DEV if use_dev else API_URL
  
-     # Determine the API token to use
-     final_api_key = None
-     if huggingface_api_key_ui and huggingface_api_key_ui.strip(): # Check if textbox has a non-empty value
-         final_api_key = huggingface_api_key_ui.strip()
-         print("Using API key from Gradio UI input.")
      else:
-         env_token = os.getenv("HF_READ_TOKEN")
-         if env_token and env_token.strip():
-             final_api_key = env_token.strip()
              print("Using API key from HF_READ_TOKEN environment variable.")
          else:
              raise gr.Error("Hugging Face API Key is required. Please provide it in the 'Hugging Face API Key' field or set the HF_READ_TOKEN environment variable.")
  
-     headers = {"Authorization": f"Bearer {final_api_key}"}
  
-     if not prompt_text or prompt_text.strip() == "":
-         raise gr.Error("Prompt cannot be empty.")
  
-     key = random.randint(0, 99999) # Increased range for more uniqueness
  
-     # Translate prompt if it seems to be in Russian (basic check, can be improved)
-     # For simplicity, let's assume we always try to translate.
-     # If it's already English, GoogleTranslator often returns it as is.
      try:
-         translated_prompt = GoogleTranslator(source='auto', target='en').translate(prompt_text)
-         if translated_prompt:
-             print(f'\033[1mGeneration {key} translation (auto -> en):\033[0m {translated_prompt}')
-             prompt_to_use = translated_prompt
          else:
-             print(f'\033[1mGeneration {key} (no translation needed or failed, using original):\033[0m {prompt_text}')
-             prompt_to_use = prompt_text
      except Exception as e:
          print(f"Error during translation: {e}. Using original prompt.")
-         prompt_to_use = prompt_text
-
-
-     # Add quality enhancers
-     prompt_to_use = f"{prompt_to_use} | ultra detail, ultra elaboration, ultra quality, perfect."
-     print(f'\033[1mGeneration {key} (final prompt):\033[0m {prompt_to_use}')
-
-     if seed == -1:
-         seed = random.randint(1, 1000000000)
-
      payload = {
-         "inputs": prompt_to_use,
-         "steps": int(steps), # Ensure steps is an int
-         "cfg_scale": float(cfg_scale), # Ensure cfg_scale is a float
-         "seed": int(seed), # Ensure seed is an int
-         "strength": float(strength) # Ensure strength is a float
-         # The 'sampler' parameter is not standard in basic HF Inference API for diffusers.
-         # It's often part of "parameters" or specific to certain model endpoints.
-         # For now, we'll omit it unless the custom proxy explicitly handles it.
-         # If "sampler" is needed, it would typically be: "parameters": {"scheduler": sampler} or similar.
      }
  
-     # Add negative_prompt to payload if provided
-     if negative_prompt_text and negative_prompt_text.strip():
-         payload["negative_prompt"] = negative_prompt_text.strip()
-         print(f'\033[1mGeneration {key} (negative prompt):\033[0m {negative_prompt_text.strip()}')
-
-
-     print(f"Sending payload to {api_url}: {payload}")
  
      try:
          response = requests.post(api_url, headers=headers, json=payload, timeout=timeout)
          response.raise_for_status() # This will raise an HTTPError for bad responses (4xx or 5xx)
      except requests.exceptions.Timeout:
-         raise gr.Error(f"Request timed out after {timeout} seconds. The model might be too busy or the request too complex.")
      except requests.exceptions.HTTPError as e:
-         status_code = e.response.status_code
-         error_message = f"API Error: {status_code}."
-         try:
-             error_detail = e.response.json() # Try to get JSON error detail
-             if 'error' in error_detail:
-                 error_message += f" Detail: {error_detail['error']}"
-             if 'warnings' in error_detail:
-                 error_message += f" Warnings: {error_detail['warnings']}"
-         except ValueError: # If response is not JSON
-             error_message += f" Content: {e.response.text[:200]}" # Show first 200 chars of text response
-
-         if status_code == 503: # Model loading
-             error_message = f"{status_code}: The model is currently loading. Please try again in a few moments."
-         elif status_code == 401: # Unauthorized
-             error_message = f"{status_code}: Unauthorized. Check your API Key."
-         elif status_code == 422: # Unprocessable Entity
-             error_message = f"{status_code}: Unprocessable Entity. There might be an issue with the prompt or parameters. Details: {e.response.text[:200]}"
-
-         print(f"Error: Failed to get image. Response status: {status_code}")
          print(f"Response content: {e.response.text}")
-         raise gr.Error(error_message)
-     except requests.exceptions.RequestException as e:
-         # For other network errors (DNS failure, connection refused, etc.)
-         print(f"Network error: {e}")
          raise gr.Error(f"A network error occurred: {e}")
  
  
      try:
          image_bytes = response.content
          image = Image.open(io.BytesIO(image_bytes))
-         print(f'\033[1mGeneration {key} completed!\033[0m (Prompt: {prompt_to_use})')
  
          # Save the image to a file and return the file path and seed
-         # Create output directory if it doesn't exist
-         os.makedirs('outputs', exist_ok=True)
-         output_path = f"./outputs/flux_output_{key}_{seed}.png"
          image.save(output_path)
  
-         return output_path, seed
-     except UnidentifiedImageError:
-         print(f"Error: The response from the API was not a valid image. Response text: {response.text[:500]}")
-         raise gr.Error("The API did not return a valid image. This might happen if the model is still loading or if there was an error with the request.")
      except Exception as e:
-         print(f"Error when trying to open or save the image: {e}")
-         # Log the raw response if it's not an image for debugging
-         if 'image_bytes' not in locals(): # if response.content was never assigned
-             print(f"Raw response was: {response.text[:500]}")
-         raise gr.Error(f"An error occurred while processing the image: {e}")
  
  
  css = """
@@ -163,86 +143,107 @@ css = """
      margin-bottom: 10px; /* Add some space below title */
  }
  #title-icon {
-     width: 40px; /* Adjusted icon size */
      height: auto;
-     margin-right: 10px; /* Space between icon and title */
  }
  #title-text {
-     font-size: 28px; /* Adjusted font size */
      font-weight: bold;
  }
- .gr-input-label { /* Style labels for better visibility */
-     font-weight: bold;
  }
  """
  
- with gr.Blocks(theme='gradio/soft', css=css) as app: # Using a default theme for broader compatibility
-     with gr.Row(elem_id="title-container"):
-         if os.path.exists('icon.jpg'):
-             gr.Image(value='icon.jpg', width=40, height=40, show_label=False, interactive=False, elem_id="title-icon", container=False)
-         else:
-             gr.HTML("<span>🎨</span>", elem_id="title-icon") # Fallback if icon not found
-         gr.HTML("<h1 id='title-text'>FLUX Capacitor</h1>")
  
      with gr.Column(elem_id="app-container"):
-         gr.Markdown("Generate images using FLUX.1 models via a Hugging Face Inference API endpoint.")
-
          with gr.Row():
-             with gr.Column(scale=2): # Prompt column takes more space
                  text_prompt = gr.Textbox(
-                     label="Prompt",
-                     placeholder="Enter your creative vision here...",
-                     lines=3,
                      elem_id="prompt-text-input"
                  )
                  negative_prompt = gr.Textbox(
-                     label="Negative Prompt",
-                     placeholder="Describe what to avoid in the image...",
-                     value="(deformed, distorted, disfigured), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, misspellings, typos",
-                     lines=3,
                      elem_id="negative-prompt-text-input"
                  )
-             with gr.Column(scale=1): # Settings column
-                 with gr.Accordion("Advanced Settings & API Configuration", open=False):
-                     steps = gr.Slider(label="Sampling steps", value=30, minimum=1, maximum=100, step=1) # Adjusted default based on FLUX recommendations
-                     cfg = gr.Slider(label="CFG Scale (Guidance Scale)", value=7.0, minimum=0.0, maximum=20.0, step=0.1) # FLUX often uses lower CFG
-                     # Sampler method is often not directly controllable via basic HF Inf API unless proxy supports it
-                     # method = gr.Radio(label="Sampling method", value="DPM++ 2M Karras", choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "Euler", "Euler a", "Heun", "DDIM"])
-                     strength = gr.Slider(label="Strength (for img2img/variation, less relevant for txt2img)", value=0.7, minimum=0.0, maximum=1.0, step=0.01, info="Primarily for image-to-image tasks. May have limited effect here.")
-                     seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=2147483647, step=1, info="Use -1 for a random seed.")
-                     huggingface_api_key = gr.Textbox(
-                         label="Hugging Face API Key",
-                         placeholder="hf_xxx (Optional, uses HF_READ_TOKEN if empty)",
-                         type="password",
-                         elem_id="api-key"
-                     )
-                     use_dev = gr.Checkbox(label="Use FLUX.1-dev API (experimental, potentially slower)", value=False, elem_id="use-dev-checkbox")
  
          with gr.Row():
-             text_button = gr.Button("Generate Image", variant='primary', elem_id="gen-button", scale=2)
  
          gr.Markdown("### Output")
          with gr.Row():
-             image_output = gr.Image(type="filepath", label="Generated Image", elem_id="gallery", height=512) # filepath is good for saved images
-             seed_output = gr.Textbox(label="Seed Used", elem_id="seed-output", interactive=False)
  
          text_button.click(
-             query,
-             inputs=[text_prompt, negative_prompt, steps, cfg, seed, strength, huggingface_api_key, use_dev], # Removed 'method' as it's not used in payload
              outputs=[image_output, seed_output]
          )
-
-         gr.Markdown(
-             """
-             ---
-             *Notes:*
-             *- If the 'Hugging Face API Key' field is empty, the application will try to use the `HF_READ_TOKEN` environment variable.*
-             *- The `FLUX.1-schnell` model is used by default. Check 'Use FLUX.1-dev API' for the development version.*
-             *- Images are saved to an `outputs` subfolder in the directory where you run this script.*
-             *- Translation from any language to English is attempted for the prompt.*
-             """
-         )
  
- # Ensure the app uses show_api=False if you don't intend to expose the function as an API endpoint through Gradio
- # If you want to call this Gradio app's functions programmatically via its own API, set show_api=True
- app.launch(show_api=True, share=False)
  from PIL import Image
  from deep_translator import GoogleTranslator
  
  # os.makedirs('assets', exist_ok=True)
  if not os.path.exists('icon.jpg'):
+     print("Downloading icon.jpg...")
      try:
+         # Use a more robust way to download, requests is already imported
+         response = requests.get("https://i.pinimg.com/564x/64/49/88/644988c59447eb00286834c2e70fdd6b.jpg", stream=True)
          response.raise_for_status() # Raise an exception for HTTP errors
          with open('icon.jpg', 'wb') as f:
+             for chunk in response.iter_content(chunk_size=8192):
+                 f.write(chunk)
          print("Icon downloaded successfully.")
      except requests.exceptions.RequestException as e:
+         print(f"Error downloading icon.jpg: {e}. Please ensure you have internet access or place icon.jpg manually.")
+         # As a fallback, you might want to skip using the icon or use a placeholder
+         # For now, the app will proceed and might show a broken image if icon.jpg is missing.
  
  API_URL_DEV = "https://lol-v2.mxflower.eu.org/api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
  API_URL = "https://lol-v2.mxflower.eu.org/api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"
  timeout = 100
  
+ def query(prompt, negative_prompt_text, steps=30, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, huggingface_api_key_ui=None, use_dev=False):
+     # Determine which API URL to use
      api_url = API_URL_DEV if use_dev else API_URL
  
+     # Determine the API Token to use
+     # Priority: 1. UI input, 2. Environment variable HF_READ_TOKEN
+     auth_token = None
+     if huggingface_api_key_ui and huggingface_api_key_ui.strip(): # Check if UI key is provided and not just whitespace
+         auth_token = huggingface_api_key_ui.strip()
+         print("Using API key provided in the UI.")
      else:
+         auth_token = os.getenv("HF_READ_TOKEN")
+         if auth_token:
              print("Using API key from HF_READ_TOKEN environment variable.")
          else:
+             # If neither is available, raise an error.
              raise gr.Error("Hugging Face API Key is required. Please provide it in the 'Hugging Face API Key' field or set the HF_READ_TOKEN environment variable.")
  
+     headers = {"Authorization": f"Bearer {auth_token}"}
  
+     if not prompt or not prompt.strip(): # Check if prompt is None, empty, or just whitespace
+         # Optionally, return a placeholder or a message instead of None
+         # For now, returning None as per original logic for empty prompt
+         gr.Warning("Prompt cannot be empty.")
+         return None, seed # Return seed as well to match output structure
  
+     key = random.randint(0, 999)
  
+     # Translate prompt if it seems to be in Russian (simple check, can be improved)
+     # For simplicity, let's assume Russian if it contains Cyrillic characters
      try:
+         # A more robust check might be needed, but this is a common heuristic
+         if any('\u0400' <= char <= '\u04FF' for char in prompt):
+             translated_prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
+             print(f'\033[1mGeneration {key} RU->EN translation:\033[0m {translated_prompt}')
+             prompt = translated_prompt
          else:
+             print(f'\033[1mGeneration {key} using EN prompt (no translation needed).\033[0m')
      except Exception as e:
          print(f"Error during translation: {e}. Using original prompt.")
+         # Fallback to original prompt if translation fails
+
+     # Augment the prompt
+     augmented_prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
+     print(f'\033[1mGeneration {key} final prompt:\033[0m {augmented_prompt}')
+
+     # If seed is -1, generate a random seed and use it
+     current_seed = seed
+     if current_seed == -1:
+         current_seed = random.randint(1, 1000000000)
+
+     # Note: The 'sampler' variable is passed to this function but not used in the payload.
+     # The custom API might handle sampler selection server-side or not support it.
+     # The 'is_negative' key in payload might be expecting the negative_prompt_text.
+     # Assuming the custom API expects negative prompt text under the 'is_negative' key.
+     # If it expects a boolean, this part needs adjustment.
      payload = {
+         "inputs": augmented_prompt,
+         "is_negative": negative_prompt_text, # This sends the text of negative_prompt
+         "steps": steps,
+         "cfg_scale": cfg_scale,
+         "seed": current_seed,
+         "strength": strength
      }
  
+     print(f"Sending payload to {api_url}: {payload}") # For debugging
  
      try:
          response = requests.post(api_url, headers=headers, json=payload, timeout=timeout)
          response.raise_for_status() # This will raise an HTTPError for bad responses (4xx or 5xx)
      except requests.exceptions.Timeout:
+         raise gr.Error(f"Request timed out after {timeout} seconds. The model might be too busy or the server is slow.")
      except requests.exceptions.HTTPError as e:
+         print(f"Error: Failed to get image. Response status: {e.response.status_code}")
          print(f"Response content: {e.response.text}")
+         if e.response.status_code == 503:
+             raise gr.Error(f"{e.response.status_code}: Service Unavailable. The model might be loading or overloaded. Please try again later.")
+         elif e.response.status_code == 401:
+             raise gr.Error(f"{e.response.status_code}: Unauthorized. Please check your API Key.")
+         elif e.response.status_code == 400:
+             raise gr.Error(f"{e.response.status_code}: Bad Request. Please check your prompt and parameters. Details: {e.response.text[:200]}") # Show first 200 chars of error
+         else:
+             raise gr.Error(f"API Error: {e.response.status_code}. Details: {e.response.text[:200]}")
+     except requests.exceptions.RequestException as e: # Catch other network errors
          raise gr.Error(f"A network error occurred: {e}")
  
  
      try:
          image_bytes = response.content
          image = Image.open(io.BytesIO(image_bytes))
+         print(f'\033[1mGeneration {key} completed!\033[0m ({augmented_prompt})')
  
          # Save the image to a file and return the file path and seed
+         os.makedirs("outputs", exist_ok=True) # Ensure output directory exists
+         output_path = f"./outputs/output_{key}_{current_seed}.png" # Include seed in filename for uniqueness
          image.save(output_path)
  
+         return output_path, current_seed
      except Exception as e:
+         print(f"Error processing image response: {e}")
+         print(f"Response content that caused error: {response.content[:500]}") # Log first 500 bytes
+         raise gr.Error(f"Failed to process the image from API. The API might have returned an unexpected response. Details: {str(e)}")
  
  
  css = """
  
      margin-bottom: 10px; /* Add some space below title */
  }
  #title-icon {
+     width: 32px;
      height: auto;
+     margin-right: 10px;
  }
  #title-text {
+     font-size: 24px;
      font-weight: bold;
  }
+ .gr-box { /* Ensure accordion and other boxes have some padding */
+     padding: 10px;
  }
  """
  
+ with gr.Blocks(theme='Nymbo/Nymbo_Theme', css=css) as app:
+     gr.HTML("""
+     <div style="text-align: center; margin-bottom: 20px;">
+         <div id="title-container">
+             <img id="title-icon" src="file/icon.jpg" alt="Icon"> <!-- Use file/ prefix for local files in Gradio -->
+             <h1 id="title-text">FLUX Capacitor</h1>
+         </div>
+         <p>Generate images using FLUX models. Provide your API key or ensure HF_READ_TOKEN is set.</p>
+     </div>
+     """)
  
      with gr.Column(elem_id="app-container"):
          with gr.Row():
+             with gr.Column(scale=3): # Give more space to prompt
                  text_prompt = gr.Textbox(
+                     label="Prompt",
+                     placeholder="Enter your prompt here (Russian will be auto-translated)",
+                     lines=3, # Increased lines for prompt
                      elem_id="prompt-text-input"
                  )
                  negative_prompt = gr.Textbox(
+                     label="Negative Prompt",
+                     placeholder="What should not be in the image",
+                     value="(deformed, distorted, disfigured), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, misspellings, typos",
+                     lines=2, # Increased lines for negative prompt
                      elem_id="negative-prompt-text-input"
                  )
+             with gr.Column(scale=2): # Settings column
+                 huggingface_api_key = gr.Textbox(
+                     label="Hugging Face API Key (optional)",
+                     placeholder="Uses HF_READ_TOKEN env var if empty",
+                     type="password",
+                     elem_id="api-key"
+                 )
+                 use_dev = gr.Checkbox(label="Use Dev API (FLUX.1-dev)", value=False, elem_id="use-dev-checkbox")
+
+
+         with gr.Accordion("Advanced Generation Settings", open=False):
+             with gr.Row():
+                 steps = gr.Slider(label="Sampling steps", value=35, minimum=1, maximum=100, step=1)
+                 cfg = gr.Slider(label="CFG Scale", value=7, minimum=1, maximum=20, step=0.5) # Allow 0.5 steps
+             with gr.Row():
+                 # Sampler is not currently used in the payload. If your API supports it, add it to the payload.
+                 sampler_method = gr.Radio(
+                     label="Sampling method (Note: Not sent to API)",
+                     value="DPM++ 2M Karras",
+                     choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "Euler", "Euler a", "Heun", "DDIM"],
+                     # info="This setting is currently for UI only and not passed to the backend API."
+                 )
+             strength = gr.Slider(label="Strength (e.g., for img2img)", value=0.7, minimum=0, maximum=1, step=0.01) # Finer steps
+             seed = gr.Slider(label="Seed (-1 for random)", value=-1, minimum=-1, maximum=2147483647, step=1) # Max 32-bit signed int
  
          with gr.Row():
+             text_button = gr.Button("Generate Image", variant='primary', elem_id="gen-button", scale=1)
  
          gr.Markdown("### Output")
          with gr.Row():
+             image_output = gr.Image(type="filepath", label="Generated Image", elem_id="gallery") # Use filepath for saved images
+             seed_output = gr.Textbox(label="Seed Used", interactive=False, elem_id="seed-output") # interactive=False as it's an output
  
+         # Ensure the order of inputs matches the function signature of query
          text_button.click(
+             query,
+             inputs=[
+                 text_prompt,
+                 negative_prompt, # This is passed as `negative_prompt_text`
+                 steps,
+                 cfg,
+                 sampler_method, # Passed as `sampler`
+                 seed,
+                 strength,
+                 huggingface_api_key, # Passed as `huggingface_api_key_ui`
+                 use_dev
+             ],
              outputs=[image_output, seed_output]
          )
  
+ # To run this:
+ # 1. Make sure 'gradio', 'requests', 'Pillow', 'deep_translator' are installed:
+ # pip install gradio requests Pillow deep_translator
+ # 2. Optionally, set the HF_READ_TOKEN environment variable:
+ # export HF_READ_TOKEN="your_hf_api_token_here" (Linux/macOS)
+ # set HF_READ_TOKEN="your_hf_api_token_here" (Windows CMD)
+ # $env:HF_READ_TOKEN="your_hf_api_token_here" (Windows PowerShell)
+ # 3. Run the script: python your_script_name.py
+
+ if __name__ == "__main__":
+     # For local Gradio image serving, Gradio needs to know where the 'icon.jpg' is.
+     # If it's in the same directory, 'file/icon.jpg' should work.
+     # If you have an 'assets' folder, it would be 'file/assets/icon.jpg'.
+     app.launch(show_api=True, share=False)
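Because the new version launches with show_api=True, the generate handler can also be called programmatically once the script is running. The snippet below is an editorial sketch, not part of this commit: it assumes the app is served at Gradio's default local address and that the click handler is exposed under an endpoint named after the query() function; view_api() reports the exact endpoint name and argument order for your Gradio version.

# Illustrative sketch only - not part of app.py. Calls the running app via gradio_client.
# Assumptions: app reachable at http://127.0.0.1:7860 and endpoint name "/query".
from gradio_client import Client

client = Client("http://127.0.0.1:7860/")
client.view_api()  # prints the exposed endpoint(s) and expected argument order

image_path, used_seed = client.predict(
    "a watercolor fox in a snowy forest",  # text_prompt (Cyrillic input would be auto-translated)
    "blurry, deformed",                    # negative_prompt
    35,                                    # steps
    7,                                     # cfg
    "DPM++ 2M Karras",                     # sampler_method (UI-only, not sent to the backend)
    -1,                                    # seed (-1 = random)
    0.7,                                   # strength
    "",                                    # huggingface_api_key (empty falls back to HF_READ_TOKEN)
    False,                                 # use_dev
    api_name="/query",
)
print(image_path, used_seed)

A hosted Space can be targeted the same way by passing its repo id to Client instead of a local URL.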