Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -17,16 +17,6 @@ url: str = os.getenv('SUPABASE_URL')
 key: str = os.getenv('SUPABASE_KEY')
 supabase: Client = create_client(url, key)
 
-# Initialize GSheet Connexion
-#Authorization
-gc = pygsheets.authorize(service_account_env_var='GSHEET_AUTH')
-
-#Open the google spreadsheet
-sh = gc.open('AndroFLUX-Logs')
-
-#Select the first sheet
-wks = sh[0]
-
 # Initialize the base model and specific LoRA
 base_model = "black-forest-labs/FLUX.1-dev"
 pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=torch.bfloat16)
@@ -46,9 +36,6 @@ def run_lora(prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora
     seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device="cuda").manual_seed(seed)
 
-    # Log prompt
-    print('PROMPT: ' + prompt + 'SEED:' + str(seed) + 'CFG: '+ str(cfg_scale))
-
     #Moderation
 
     moderation_client = client_gradio("duchaba/Friendly_Text_Moderation")
@@ -60,16 +47,14 @@ def run_lora(prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora
 
     if float(json.loads(result[1])['sexual_minors']) > 0.03 :
         print('Minors')
+        response_data = (supabase.table("requests")
+            .insert({"prompt":prompt, "cfg_scale":cfg_scale, "steps":steps, "randomized_seed": randomize_seed, "seed":seed, "lora_scale" : lora_scale, "moderated" : 'true'})
+            .execute()
         raise gr.Error("Unauthorized request 💥!")
 
     # Update progress bar (0% at start)
     progress(0, "Starting image generation...")
 
-    # Generate image with progress updates
-    for i in range(1, steps + 1):
-        # Simulate the processing step (in a real scenario, you would integrate this with your image generation process)
-        if i % (steps // 10) == 0: # Update every 10% of the steps
-            progress(i / steps * 100, f"Processing step {i} of {steps}...")
 
     # Generate image using the pipeline
     image = pipe(
@@ -96,32 +81,24 @@ def run_lora(prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora
     #Save the tmp image
     image.save(image_path, pnginfo=metadata)
 
-
-    response = supabase.storage.from_('generated_images').upload(image_filename, image_path,file_options={"content-type":"image/png;charset=UTF-8"})
-    print(response.dict)
-    #Log request in supabase
-    response_data = (supabase.table("requests")
-        .insert({"prompt":prompt, "cfg_scale":cfg_scale, "steps":steps, "randomized_seed": randomize_seed, "seed":seed, "lora_scale" : lora_scale, "image_url" : response.full_path})
-        .execute()
-    )
-
+
 
-    # Construct the URL to access the image
-    space_url = "https://killwithabass-flux-1-dev-lora-androflux.hf.space" # Replace with your actual space URL
-    image_url = f"{space_url}/gradio_api/file={image_path}"
 
     #Log queries
     try:
         if "girl" not in prompt and "woman" not in prompt:
-
+            #Save image in supabase
+            response = supabase.storage.from_('generated_images').upload(image_filename, image_path,file_options={"content-type":"image/png;charset=UTF-8"})
+            print(response.dict)
+            #Log request in supabase
+            response_data = (supabase.table("requests")
+                .insert({"prompt":prompt, "cfg_scale":cfg_scale, "steps":steps, "randomized_seed": randomize_seed, "seed":seed, "lora_scale" : lora_scale, "image_url" : response.full_path})
+                .execute()
+            )
 
     except Exception as error:
         # handle the exception
         print("An exception occurred:", error)
-        print(f"Image URL: {image_url}") # Log the file URL
-
-    # Final update (100%)
-    progress(100, "Completed!")
 
     yield image, seed
 
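The Supabase logging introduced by this commit is split across two hunks and interleaved with the removed pygsheets and simulated-progress code, so a consolidated sketch may help. The helpers below are hypothetical (app.py inlines this logic directly in run_lora); they assume the supabase Client created at the top of app.py and the variables run_lora already has in scope, and they close the call that the moderation-branch `response_data = (supabase.table("requests")` expression appears to leave unclosed in the added lines.

import gradio as gr

# Hypothetical helpers (not in app.py) sketching the two logging paths of this commit.
# `supabase` is the Client from create_client(url, key) at the top of app.py.

def log_moderated_request(supabase, prompt, cfg_scale, steps, randomize_seed, seed, lora_scale):
    """Runs before generation when the moderation score exceeds the threshold."""
    supabase.table("requests").insert({
        "prompt": prompt, "cfg_scale": cfg_scale, "steps": steps,
        "randomized_seed": randomize_seed, "seed": seed,
        "lora_scale": lora_scale, "moderated": 'true',
    }).execute()
    raise gr.Error("Unauthorized request 💥!")

def log_generated_image(supabase, prompt, cfg_scale, steps, randomize_seed, seed,
                        lora_scale, image_filename, image_path):
    """Runs after image.save(); uploads the PNG and records its storage path."""
    try:
        if "girl" not in prompt and "woman" not in prompt:
            response = supabase.storage.from_('generated_images').upload(
                image_filename, image_path,
                file_options={"content-type": "image/png;charset=UTF-8"},
            )
            supabase.table("requests").insert({
                "prompt": prompt, "cfg_scale": cfg_scale, "steps": steps,
                "randomized_seed": randomize_seed, "seed": seed,
                "lora_scale": lora_scale, "image_url": response.full_path,
            }).execute()
    except Exception as error:
        print("An exception occurred:", error)

Inserting the row before `raise gr.Error(...)` means moderated prompts are recorded even though generation never runs, while the storage upload and `image_url` row are only written for prompts that pass the "girl"/"woman" check.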