awacke1 committed on
Commit
5d6f17d
·
verified ·
1 Parent(s): 4dab44a

Update backup1-app.py

Browse files
Files changed (1) hide show
  1. backup1-app.py +207 -144
backup1-app.py CHANGED
@@ -6,17 +6,56 @@ import os
6
  from PIL import Image
7
  import io
8
  import requests
 
 
 
9
 
10
- # Get token from environment variable
11
- HF_TOKEN = os.getenv('ArtToken')
12
- if not HF_TOKEN:
13
- raise ValueError("Please set the 'ArtToken' environment variable with your Hugging Face token")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
 
15
  class ModelGenerator:
16
- @staticmethod
17
- def generate_midjourney(prompt):
 
 
18
  try:
19
- client = Client("mukaist/Midjourney", hf_token=HF_TOKEN)
20
  result = client.predict(
21
  prompt=prompt,
22
  negative_prompt="(deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime:1.4), text, close up, cropped, out of frame, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck",
@@ -30,31 +69,23 @@ class ModelGenerator:
30
  api_name="/run"
31
  )
32
 
33
- # Handle the result based on its type
34
  if isinstance(result, list) and len(result) > 0:
35
- # If result is a list of file paths or URLs
36
  image_data = result[0]
37
  if isinstance(image_data, str):
38
  if image_data.startswith('http'):
39
- # If it's a URL, download the image
40
  response = requests.get(image_data)
41
- image = Image.open(io.BytesIO(response.content))
42
  else:
43
- # If it's a file path
44
- image = Image.open(image_data)
45
  else:
46
- # If it's already image data
47
- image = Image.open(io.BytesIO(image_data))
48
- return ("Midjourney", image)
49
- else:
50
- return ("Midjourney", f"Error: Unexpected result format: {type(result)}")
51
  except Exception as e:
52
  return ("Midjourney", f"Error: {str(e)}")
53
 
54
- @staticmethod
55
- def generate_stable_cascade(prompt):
56
  try:
57
- client = Client("multimodalart/stable-cascade", hf_token=HF_TOKEN)
58
  result = client.predict(
59
  prompt=prompt,
60
  negative_prompt=prompt,
@@ -68,14 +99,23 @@ class ModelGenerator:
68
  num_images_per_prompt=1,
69
  api_name="/run"
70
  )
71
- return ("Stable Cascade", result)
 
 
 
 
 
 
 
 
 
 
72
  except Exception as e:
73
  return ("Stable Cascade", f"Error: {str(e)}")
74
 
75
- @staticmethod
76
- def generate_stable_diffusion_3(prompt):
77
  try:
78
- client = Client("stabilityai/stable-diffusion-3-medium", hf_token=HF_TOKEN)
79
  result = client.predict(
80
  prompt=prompt,
81
  negative_prompt=prompt,
@@ -87,14 +127,15 @@ class ModelGenerator:
87
  num_inference_steps=28,
88
  api_name="/infer"
89
  )
90
- return ("SD 3 Medium", result)
 
 
91
  except Exception as e:
92
  return ("SD 3 Medium", f"Error: {str(e)}")
93
 
94
- @staticmethod
95
- def generate_stable_diffusion_35(prompt):
96
  try:
97
- client = Client("stabilityai/stable-diffusion-3.5-large", hf_token=HF_TOKEN)
98
  result = client.predict(
99
  prompt=prompt,
100
  negative_prompt=prompt,
@@ -106,14 +147,15 @@ class ModelGenerator:
106
  num_inference_steps=40,
107
  api_name="/infer"
108
  )
109
- return ("SD 3.5 Large", result)
 
 
110
  except Exception as e:
111
  return ("SD 3.5 Large", f"Error: {str(e)}")
112
 
113
- @staticmethod
114
- def generate_playground_v2_5(prompt):
115
  try:
116
- client = Client("https://playgroundai-playground-v2-5.hf.space/--replicas/ji5gy/", hf_token=HF_TOKEN)
117
  result = client.predict(
118
  prompt,
119
  prompt, # negative prompt
@@ -125,23 +167,30 @@ class ModelGenerator:
125
  True, # randomize seed
126
  api_name="/run"
127
  )
128
- # Result is a tuple (gallery, seed), we want just the first image from gallery
129
- if result and isinstance(result, tuple) and result[0]:
130
- return ("Playground v2.5", result[0][0]['image'])
 
 
 
 
 
 
131
  return ("Playground v2.5", "Error: No image generated")
132
  except Exception as e:
133
  return ("Playground v2.5", f"Error: {str(e)}")
134
 
135
- def generate_images(prompt, selected_models):
136
  results = []
137
  with concurrent.futures.ThreadPoolExecutor() as executor:
138
  futures = []
 
139
  model_map = {
140
- "Midjourney": ModelGenerator.generate_midjourney,
141
- "Stable Cascade": ModelGenerator.generate_stable_cascade,
142
- "SD 3 Medium": ModelGenerator.generate_stable_diffusion_3,
143
- "SD 3.5 Large": ModelGenerator.generate_stable_diffusion_35,
144
- "Playground v2.5": ModelGenerator.generate_playground_v2_5
145
  }
146
 
147
  for model in selected_models:
@@ -149,17 +198,20 @@ def generate_images(prompt, selected_models):
149
  futures.append(executor.submit(model_map[model], prompt))
150
 
151
  for future in concurrent.futures.as_completed(futures):
152
- results.append(future.result())
 
 
 
 
 
153
 
154
  return results
155
 
156
  def handle_prompt_click(prompt_text, key):
157
- if not HF_TOKEN:
158
- st.error("Environment variable 'ArtToken' is not set!")
159
  return
160
 
161
- st.session_state[f'selected_prompt_{key}'] = prompt_text
162
-
163
  selected_models = st.session_state.get('selected_models', [])
164
 
165
  if not selected_models:
@@ -167,113 +219,124 @@ def handle_prompt_click(prompt_text, key):
167
  return
168
 
169
  with st.spinner('Generating artwork...'):
170
- results = generate_images(prompt_text, selected_models)
171
- st.session_state[f'generated_images_{key}'] = results
172
- st.success("Artwork generated successfully!")
 
 
 
 
 
 
 
 
 
 
 
 
 
173
 
174
  def main():
175
  st.title("๐ŸŽจ Multi-Model Art Generator")
 
 
 
176
 
177
- with st.sidebar:
178
- st.header("Configuration")
179
-
180
- # Show token status
181
- if HF_TOKEN:
182
- st.success("โœ“ ArtToken loaded from environment")
183
- else:
184
- st.error("โš  ArtToken not found in environment")
185
-
186
- st.markdown("---")
187
- st.header("Model Selection")
188
- st.session_state['selected_models'] = st.multiselect(
189
- "Choose AI Models",
190
- ["Midjourney", "Stable Cascade", "SD 3 Medium", "SD 3.5 Large", "Playground v2.5"],
191
- default=["Midjourney"]
192
- )
193
-
194
- st.markdown("---")
195
- st.markdown("### Selected Models:")
196
- for model in st.session_state['selected_models']:
197
- st.write(f"โœ“ {model}")
198
-
199
- st.markdown("---")
200
- st.markdown("### Model Information:")
201
- st.markdown("""
202
- - **Midjourney**: Best for artistic and creative imagery
203
- - **Stable Cascade**: New architecture with high detail
204
- - **SD 3 Medium**: Fast and efficient generation
205
- - **SD 3.5 Large**: Highest quality, slower generation
206
- - **Playground v2.5**: Advanced model with high customization
207
- """)
208
 
209
- st.markdown("### Select a prompt style to generate artwork:")
210
 
211
- prompt_emojis = {
212
- "AIart/AIArtistCommunity": "๐Ÿค–",
213
- "Black & White": "โšซโšช",
214
- "Black & Yellow": "โšซ๐Ÿ’›",
215
- "Blindfold": "๐Ÿ™ˆ",
216
- "Break": "๐Ÿ’”",
217
- "Broken": "๐Ÿ”จ",
218
- "Christmas Celebrations art": "๐ŸŽ„",
219
- "Colorful Art": "๐ŸŽจ",
220
- "Crimson art": "๐Ÿ”ด",
221
- "Eyes Art": "๐Ÿ‘๏ธ",
222
- "Going out with Style": "๐Ÿ’ƒ",
223
- "Hooded Girl": "๐Ÿงฅ",
224
- "Lips": "๐Ÿ‘„",
225
- "MAEKHLONG": "๐Ÿฎ",
226
- "Mermaid": "๐Ÿงœโ€โ™€๏ธ",
227
- "Morning Sunshine": "๐ŸŒ…",
228
- "Music Art": "๐ŸŽต",
229
- "Owl": "๐Ÿฆ‰",
230
- "Pink": "๐Ÿ’—",
231
- "Purple": "๐Ÿ’œ",
232
- "Rain": "๐ŸŒง๏ธ",
233
- "Red Moon": "๐ŸŒ‘",
234
- "Rose": "๐ŸŒน",
235
- "Snow": "โ„๏ธ",
236
- "Spacesuit Girl": "๐Ÿ‘ฉโ€๐Ÿš€",
237
- "Steampunk": "โš™๏ธ",
238
- "Succubus": "๐Ÿ˜ˆ",
239
- "Sunlight": "โ˜€๏ธ",
240
- "Weird art": "๐ŸŽญ",
241
- "White Hair": "๐Ÿ‘ฑโ€โ™€๏ธ",
242
- "Wings art": "๐Ÿ‘ผ",
243
- "Woman with Sword": "โš”๏ธ"
244
- }
245
 
246
- col1, col2, col3 = st.columns(3)
247
-
248
- for idx, (prompt, emoji) in enumerate(prompt_emojis.items()):
249
- full_prompt = f"QT {prompt}"
250
- col = [col1, col2, col3][idx % 3]
251
 
252
- with col:
253
- if st.button(f"{emoji} {prompt}", key=f"btn_{idx}"):
254
- handle_prompt_click(full_prompt, idx)
255
-
256
- st.markdown("---")
257
- st.markdown("### Generated Artwork:")
258
-
259
- for key in st.session_state:
260
- if key.startswith('selected_prompt_'):
261
- idx = key.split('_')[-1]
262
- images_key = f'generated_images_{idx}'
263
 
264
- if images_key in st.session_state:
265
- st.write("Prompt:", st.session_state[key])
266
-
267
- cols = st.columns(len(st.session_state[images_key]))
 
 
 
 
 
 
 
 
268
 
269
- for col, (model_name, result) in zip(cols, st.session_state[images_key]):
270
- with col:
271
- st.markdown(f"**{model_name}**")
272
- if isinstance(result, str) and result.startswith("Error"):
273
- st.error(result)
274
- else:
275
- # Updated to use use_container_width instead of use_column_width
276
- st.image(result, use_container_width=True)
 
 
 
 
 
 
 
277
 
278
  if __name__ == "__main__":
279
  main()
 
6
  from PIL import Image
7
  import io
8
  import requests
9
+ from huggingface_hub import HfApi, login
10
+ from pathlib import Path
11
+ import json
12
 
13
+ def init_session_state():
14
+ """Initialize session state variables"""
15
+ if 'hf_token' not in st.session_state:
16
+ st.session_state.hf_token = None
17
+ if 'is_authenticated' not in st.session_state:
18
+ st.session_state.is_authenticated = False
19
+
20
+ def save_token(token):
21
+ """Save token to session state"""
22
+ st.session_state.hf_token = token
23
+ st.session_state.is_authenticated = True
24
+
25
+ def authenticate_user():
26
+ """Handle user authentication with HuggingFace"""
27
+ st.sidebar.markdown("## ๐Ÿ” Authentication")
28
+
29
+ if st.session_state.is_authenticated:
30
+ st.sidebar.success("โœ“ Logged in to HuggingFace")
31
+ if st.sidebar.button("Logout"):
32
+ st.session_state.hf_token = None
33
+ st.session_state.is_authenticated = False
34
+ st.experimental_rerun()
35
+ else:
36
+ token = st.sidebar.text_input("Enter HuggingFace Token", type="password",
37
+ help="Get your token from https://huggingface.co/settings/tokens")
38
+ if st.sidebar.button("Login"):
39
+ if token:
40
+ try:
41
+ # Verify token is valid
42
+ api = HfApi(token=token)
43
+ api.whoami()
44
+ save_token(token)
45
+ st.sidebar.success("Successfully logged in!")
46
+ st.experimental_rerun()
47
+ except Exception as e:
48
+ st.sidebar.error(f"Authentication failed: {str(e)}")
49
+ else:
50
+ st.sidebar.error("Please enter your HuggingFace token")
51
 
52
  class ModelGenerator:
53
+ def __init__(self, token):
54
+ self.token = token
55
+
56
+ def generate_midjourney(self, prompt):
57
  try:
58
+ client = Client("mukaist/Midjourney", hf_token=self.token)
59
  result = client.predict(
60
  prompt=prompt,
61
  negative_prompt="(deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime:1.4), text, close up, cropped, out of frame, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck",
 
69
  api_name="/run"
70
  )
71
 
 
72
  if isinstance(result, list) and len(result) > 0:
 
73
  image_data = result[0]
74
  if isinstance(image_data, str):
75
  if image_data.startswith('http'):
 
76
  response = requests.get(image_data)
77
+ return ("Midjourney", Image.open(io.BytesIO(response.content)))
78
  else:
79
+ return ("Midjourney", Image.open(image_data))
 
80
  else:
81
+ return ("Midjourney", Image.open(io.BytesIO(image_data)))
82
+ return ("Midjourney", "Error: No image generated")
 
 
 
83
  except Exception as e:
84
  return ("Midjourney", f"Error: {str(e)}")
85
 
86
+ def generate_stable_cascade(self, prompt):
 
87
  try:
88
+ client = Client("multimodalart/stable-cascade", hf_token=self.token)
89
  result = client.predict(
90
  prompt=prompt,
91
  negative_prompt=prompt,
 
99
  num_images_per_prompt=1,
100
  api_name="/run"
101
  )
102
+ if isinstance(result, list) and len(result) > 0:
103
+ image_data = result[0]
104
+ if isinstance(image_data, str):
105
+ if image_data.startswith('http'):
106
+ response = requests.get(image_data)
107
+ return ("Stable Cascade", Image.open(io.BytesIO(response.content)))
108
+ else:
109
+ return ("Stable Cascade", Image.open(image_data))
110
+ else:
111
+ return ("Stable Cascade", Image.open(io.BytesIO(image_data)))
112
+ return ("Stable Cascade", "Error: No image generated")
113
  except Exception as e:
114
  return ("Stable Cascade", f"Error: {str(e)}")
115
 
116
+ def generate_stable_diffusion_3(self, prompt):
 
117
  try:
118
+ client = Client("stabilityai/stable-diffusion-3-medium", hf_token=self.token)
119
  result = client.predict(
120
  prompt=prompt,
121
  negative_prompt=prompt,
 
127
  num_inference_steps=28,
128
  api_name="/infer"
129
  )
130
+ if isinstance(result, (str, bytes)):
131
+ return ("SD 3 Medium", Image.open(io.BytesIO(result) if isinstance(result, bytes) else result))
132
+ return ("SD 3 Medium", "Error: Unexpected result format")
133
  except Exception as e:
134
  return ("SD 3 Medium", f"Error: {str(e)}")
135
 
136
+ def generate_stable_diffusion_35(self, prompt):
 
137
  try:
138
+ client = Client("stabilityai/stable-diffusion-3.5-large", hf_token=self.token)
139
  result = client.predict(
140
  prompt=prompt,
141
  negative_prompt=prompt,
 
147
  num_inference_steps=40,
148
  api_name="/infer"
149
  )
150
+ if isinstance(result, (str, bytes)):
151
+ return ("SD 3.5 Large", Image.open(io.BytesIO(result) if isinstance(result, bytes) else result))
152
+ return ("SD 3.5 Large", "Error: Unexpected result format")
153
  except Exception as e:
154
  return ("SD 3.5 Large", f"Error: {str(e)}")
155
 
156
+ def generate_playground_v2_5(self, prompt):
 
157
  try:
158
+ client = Client("https://playgroundai-playground-v2-5.hf.space/--replicas/ji5gy/", hf_token=self.token)
159
  result = client.predict(
160
  prompt,
161
  prompt, # negative prompt
 
167
  True, # randomize seed
168
  api_name="/run"
169
  )
170
+ if isinstance(result, tuple) and result[0] and len(result[0]) > 0:
171
+ image_data = result[0][0].get('image')
172
+ if image_data:
173
+ if isinstance(image_data, str):
174
+ if image_data.startswith('http'):
175
+ response = requests.get(image_data)
176
+ return ("Playground v2.5", Image.open(io.BytesIO(response.content)))
177
+ return ("Playground v2.5", Image.open(image_data))
178
+ return ("Playground v2.5", Image.open(io.BytesIO(image_data)))
179
  return ("Playground v2.5", "Error: No image generated")
180
  except Exception as e:
181
  return ("Playground v2.5", f"Error: {str(e)}")
182
 
183
+ def generate_images(prompt, selected_models, token):
184
  results = []
185
  with concurrent.futures.ThreadPoolExecutor() as executor:
186
  futures = []
187
+ generator = ModelGenerator(token)
188
  model_map = {
189
+ "Midjourney": generator.generate_midjourney,
190
+ "Stable Cascade": generator.generate_stable_cascade,
191
+ "SD 3 Medium": generator.generate_stable_diffusion_3,
192
+ "SD 3.5 Large": generator.generate_stable_diffusion_35,
193
+ "Playground v2.5": generator.generate_playground_v2_5
194
  }
195
 
196
  for model in selected_models:
 
198
  futures.append(executor.submit(model_map[model], prompt))
199
 
200
  for future in concurrent.futures.as_completed(futures):
201
+ try:
202
+ result = future.result()
203
+ if result:
204
+ results.append(result)
205
+ except Exception as e:
206
+ st.error(f"Error during image generation: {str(e)}")
207
 
208
  return results
209
 
210
  def handle_prompt_click(prompt_text, key):
211
+ if not st.session_state.is_authenticated:
212
+ st.error("Please login with your HuggingFace account first!")
213
  return
214
 
 
 
215
  selected_models = st.session_state.get('selected_models', [])
216
 
217
  if not selected_models:
 
219
  return
220
 
221
  with st.spinner('Generating artwork...'):
222
+ results = generate_images(prompt_text, selected_models, st.session_state.hf_token)
223
+ if results:
224
+ st.session_state[f'generated_images_{key}'] = results
225
+ st.success("Artwork generated successfully!")
226
+
227
+ # Display images immediately
228
+ cols = st.columns(len(results))
229
+ for col, (model_name, result) in zip(cols, results):
230
+ with col:
231
+ st.markdown(f"**{model_name}**")
232
+ if isinstance(result, str) and result.startswith("Error"):
233
+ st.error(result)
234
+ elif isinstance(result, Image.Image):
235
+ st.image(result, use_container_width=True)
236
+ else:
237
+ st.error(f"Unexpected result type: {type(result)}")
238
 
239
  def main():
240
  st.title("๐ŸŽจ Multi-Model Art Generator")
241
+
242
+ init_session_state()
243
+ authenticate_user()
244
 
245
+ if st.session_state.is_authenticated:
246
+ with st.sidebar:
247
+ st.header("Model Selection")
248
+ st.session_state['selected_models'] = st.multiselect(
249
+ "Choose AI Models",
250
+ ["Midjourney", "Stable Cascade", "SD 3 Medium", "SD 3.5 Large", "Playground v2.5"],
251
+ default=["Midjourney"]
252
+ )
253
+
254
+ st.markdown("---")
255
+ st.markdown("### Selected Models:")
256
+ for model in st.session_state['selected_models']:
257
+ st.write(f"โœ“ {model}")
258
+
259
+ st.markdown("---")
260
+ st.markdown("### Model Information:")
261
+ st.markdown("""
262
+ - **Midjourney**: Best for artistic and creative imagery
263
+ - **Stable Cascade**: New architecture with high detail
264
+ - **SD 3 Medium**: Fast and efficient generation
265
+ - **SD 3.5 Large**: Highest quality, slower generation
266
+ - **Playground v2.5**: Advanced model with high customization
267
+ """)
 
 
 
 
 
 
 
 
268
 
269
+ st.markdown("### Select a prompt style to generate artwork:")
270
 
271
+ prompt_emojis = {
272
+ "AIart/AIArtistCommunity": "๐Ÿค–",
273
+ "Black & White": "โšซโšช",
274
+ "Black & Yellow": "โšซ๐Ÿ’›",
275
+ "Blindfold": "๐Ÿ™ˆ",
276
+ "Break": "๐Ÿ’”",
277
+ "Broken": "๐Ÿ”จ",
278
+ "Christmas Celebrations art": "๐ŸŽ„",
279
+ "Colorful Art": "๐ŸŽจ",
280
+ "Crimson art": "๐Ÿ”ด",
281
+ "Eyes Art": "๐Ÿ‘๏ธ",
282
+ "Going out with Style": "๐Ÿ’ƒ",
283
+ "Hooded Girl": "๐Ÿงฅ",
284
+ "Lips": "๐Ÿ‘„",
285
+ "MAEKHLONG": "๐Ÿฎ",
286
+ "Mermaid": "๐Ÿงœโ€โ™€๏ธ",
287
+ "Morning Sunshine": "๐ŸŒ…",
288
+ "Music Art": "๐ŸŽต",
289
+ "Owl": "๐Ÿฆ‰",
290
+ "Pink": "๐Ÿ’—",
291
+ "Purple": "๐Ÿ’œ",
292
+ "Rain": "๐ŸŒง๏ธ",
293
+ "Red Moon": "๐ŸŒ‘",
294
+ "Rose": "๐ŸŒน",
295
+ "Snow": "โ„๏ธ",
296
+ "Spacesuit Girl": "๐Ÿ‘ฉโ€๐Ÿš€",
297
+ "Steampunk": "โš™๏ธ",
298
+ "Succubus": "๐Ÿ˜ˆ",
299
+ "Sunlight": "โ˜€๏ธ",
300
+ "Weird art": "๐ŸŽญ",
301
+ "White Hair": "๐Ÿ‘ฑโ€โ™€๏ธ",
302
+ "Wings art": "๐Ÿ‘ผ",
303
+ "Woman with Sword": "โš”๏ธ"
304
+ }
305
 
306
+ col1, col2, col3 = st.columns(3)
 
 
 
 
307
 
308
+ for idx, (prompt, emoji) in enumerate(prompt_emojis.items()):
309
+ full_prompt = f"QT {prompt}"
310
+ col = [col1, col2, col3][idx % 3]
 
 
 
 
 
 
 
 
311
 
312
+ with col:
313
+ if st.button(f"{emoji} {prompt}", key=f"btn_{idx}"):
314
+ handle_prompt_click(full_prompt, idx)
315
+
316
+ st.markdown("---")
317
+ st.markdown("### Generated Artwork:")
318
+
319
+ # Display any previously generated images
320
+ for key in st.session_state:
321
+ if key.startswith('generated_images_'):
322
+ idx = key.split('_')[-1]
323
+ prompt_key = f'selected_prompt_{idx}'
324
 
325
+ if prompt_key in st.session_state:
326
+ st.write("Prompt:", st.session_state[prompt_key])
327
+
328
+ cols = st.columns(len(st.session_state[key]))
329
+ for col, (model_name, result) in zip(cols, st.session_state[key]):
330
+ with col:
331
+ st.markdown(f"**{model_name}**")
332
+ if isinstance(result, str) and result.startswith("Error"):
333
+ st.error(result)
334
+ elif isinstance(result, Image.Image):
335
+ st.image(result, use_container_width=True)
336
+ else:
337
+ st.error(f"Unexpected result type: {type(result)}")
338
+ else:
339
+ st.info("Please login with your HuggingFace account to use the app")
340
 
341
  if __name__ == "__main__":
342
  main()