Pijush2023 committed (verified)
Commit 9521104 · 1 parent: fc3b2a9

Update app.py

Files changed (1): app.py (+4, -54)

app.py CHANGED
@@ -1126,52 +1126,8 @@ def handle_model_choice_change(selected_model):
     # Default case: allow interaction
     return gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=True)
 
-# import gradio as gr
-# import torch
-# from diffusers import FluxPipeline
-# import os
-
-# # Set PYTORCH_CUDA_ALLOC_CONF to handle memory fragmentation
-# os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'expandable_segments:True'
-
-# # Check if CUDA (GPU) is available, otherwise fallback to CPU
-# device = "cuda:0" if torch.cuda.is_available() else "cpu"
-
-# # Function to initialize Flux bot model with GPU memory management
-# def initialize_flux_bot():
-#     try:
-#         torch.cuda.empty_cache() # Clear GPU memory cache
-#         pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=torch.float16) # Use FP16
-#         pipe.to(device) # Move the model to the correct device (GPU/CPU)
-#     except torch.cuda.OutOfMemoryError:
-#         print("CUDA out of memory, switching to CPU.")
-#         pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=torch.float32) # Use FP32 for CPU
-#         pipe.to("cpu")
-#     return pipe
-
-# # Function to generate image using Flux bot on the specified device
-# def generate_image_flux(prompt):
-#     pipe = initialize_flux_bot()
-#     image = pipe(
-#         prompt,
-#         guidance_scale=0.0,
-#         num_inference_steps=2, # Reduced steps to save memory
-#         max_sequence_length=128, # Reduced sequence length to save memory
-#         generator=torch.Generator(device).manual_seed(0)
-#     ).images[0]
-#     return image
-
-# # Hardcoded prompts for the images
-# hardcoded_prompt_1 = "A high quality cinematic image for Toyota Truck in Birmingham skyline shot in the style of Michael Mann"
-# hardcoded_prompt_2 = "A high quality cinematic image for Alabama Quarterback close up emotional shot in the style of Michael Mann"
-# hardcoded_prompt_3 = "A high quality cinematic image for Taylor Swift concert in Birmingham skyline style of Michael Mann"
-
-# # Function to update images
-# def update_images():
-#     image_1 = generate_image_flux(hardcoded_prompt_1)
-#     image_2 = generate_image_flux(hardcoded_prompt_2)
-#     image_3 = generate_image_flux(hardcoded_prompt_3)
-#     return image_1, image_2, image_3
+#Flux Coding
+

 # Existing prompts for the Flux API
 hardcoded_prompt_1 = "A high quality cinematic image for Toyota Truck in Birmingham skyline shot in the style of Michael Mann"
@@ -1180,7 +1136,7 @@ hardcoded_prompt_3 = "A high quality cinematic image for Taylor Swift concert in

 # Function to call the Flux API and generate images
 def generate_image_flux(prompt):
-    # client = Client("black-forest-labs/FLUX.1-schnell")
+    # client = Client("black-forest-labs/FLUX.1-schnell",hf_token=hf_token)
     client = Client("Pijush2023/radar_flux")
     result = client.predict(
         prompt=prompt,
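
The hunk above truncates the `client.predict(...)` call right after `prompt=prompt,`. For context, here is a minimal sketch of how the retained API path plausibly continues, assuming the `Pijush2023/radar_flux` Space exposes the same `/infer` endpoint shape as the public FLUX.1-schnell demo; the `api_name` and every parameter other than `prompt` are assumptions, not taken from this commit.

```python
# Hypothetical sketch of the retained API path; only prompt=prompt appears in the
# diff, so the endpoint name and the remaining keyword arguments are assumptions.
from gradio_client import Client
from PIL import Image

def generate_image_flux(prompt):
    # client = Client("black-forest-labs/FLUX.1-schnell", hf_token=hf_token)  # upstream Space (token required)
    client = Client("Pijush2023/radar_flux")  # duplicated Space used by the app
    result = client.predict(
        prompt=prompt,
        seed=0,                  # assumed parameter
        randomize_seed=True,     # assumed parameter
        width=400,               # assumed parameter
        height=400,              # assumed parameter
        num_inference_steps=2,   # assumed parameter
        api_name="/infer",       # assumed endpoint name
    )
    # The FLUX demo endpoint returns (image_filepath, seed); adjust if the
    # duplicated Space returns something different.
    image_path = result[0] if isinstance(result, (list, tuple)) else result
    return Image.open(image_path)
```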
@@ -1507,13 +1463,7 @@ with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
             events_output = gr.HTML(value=fetch_local_events())
 
         with gr.Column():
-            # image_output_1 = gr.Image(value=generate_image_flux(hardcoded_prompt_1), width=400, height=400)
-            # image_output_2 = gr.Image(value=generate_image_flux(hardcoded_prompt_2), width=400, height=400)
-            # image_output_3 = gr.Image(value=generate_image_flux(hardcoded_prompt_3), width=400, height=400)
-
-            # # Refresh button to update images
-            # refresh_button = gr.Button("Refresh Images")
-            # refresh_button.click(fn=update_images, inputs=None, outputs=[image_output_1, image_output_2, image_output_3])
+
 
             # Call update_images during the initial load to display images when the interface appears
             initial_images = update_images()
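
The commented-out local `update_images` is deleted in the first hunk, yet the layout still calls `update_images()` during the initial load, so an API-backed definition presumably remains elsewhere in app.py. The sketch below reconstructs that wiring from the deleted comments and the surviving `initial_images = update_images()` line; the `gr.Image` sizes come from the removed lines, and the indexing of `initial_images` is an assumption. It builds on the `generate_image_flux` sketch above and assumes the `hardcoded_prompt_*` strings from earlier in app.py are in scope.

```python
# Sketch only: plausible retained wiring, not the committed code.
import gradio as gr

def update_images():
    # Regenerate all three hardcoded images through the API-backed generate_image_flux.
    image_1 = generate_image_flux(hardcoded_prompt_1)
    image_2 = generate_image_flux(hardcoded_prompt_2)
    image_3 = generate_image_flux(hardcoded_prompt_3)
    return image_1, image_2, image_3

with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
    with gr.Column():
        # Call update_images during the initial load so the images appear
        # as soon as the interface is built (replaces the removed refresh button).
        initial_images = update_images()
        image_output_1 = gr.Image(value=initial_images[0], width=400, height=400)
        image_output_2 = gr.Image(value=initial_images[1], width=400, height=400)
        image_output_3 = gr.Image(value=initial_images[2], width=400, height=400)
```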