giorgio-caparvi committed on
Commit
9abe70d
·
1 Parent(s): 896542a

saving image in memory and sending via api

Browse files
api/app.py CHANGED
@@ -1,6 +1,10 @@
1
- from flask import Flask, render_template, send_from_directory
2
  import subprocess
3
  import os
 
 
 
 
4
 
5
  app = Flask(__name__)
6
 
@@ -14,7 +18,8 @@ def index():
14
 
15
  @app.route('/generate-design', methods=['POST'])
16
  def generate_design():
17
- # Esegui il comando di generazione
 
18
  command = [
19
  'python', '/api/model/src/eval.py',
20
  '--dataset_path', '/api/model/assets/data/vitonhd',
@@ -30,13 +35,37 @@ def generate_design():
30
  ]
31
 
32
  # Esegui il comando senza gestire il JSON, lascia che l'errore venga stampato
33
- subprocess.run(command, check=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34
 
35
- files = os.listdir(output_dir)
36
- for file in files:
37
- print(file)
38
-
39
- return '', 204 # Nessun contenuto
 
 
 
 
 
 
 
40
 
41
  # Route per servire l'immagine generata
42
  @app.route('/api/output/generato_paired_paired/images/<filename>')
 
1
+ from flask import Flask, render_template, send_from_directory, send_file
2
  import subprocess
3
  import os
4
+ import io
5
+ from model.src.utils.arg_parser import eval_parse_args # Nuovo import corretto
6
+
7
+ from model.src import eval
8
 
9
  app = Flask(__name__)
10
 
 
18
 
19
@app.route('/generate-design', methods=['POST'])
def generate_design():
    """Run the MGD evaluation pipeline in-process and return the result image.

    Builds a CLI-style argument list, parses it with the model's own
    ``eval_parse_args``, runs ``eval.main`` (which returns a PIL image),
    and streams that image back to the client as a downloadable JPEG.

    Returns:
        A ``send_file`` response with the generated JPEG on success,
        or ``(error-text, 500)`` if anything in the pipeline raises.
    """
    try:
        # Arguments exactly as they would be passed on the command line;
        # eval_parse_args expects an argv-style list of flag/value strings.
        args_list = [
            '--dataset_path', '/api/model/assets/data/vitonhd',
            '--batch_size', '1',
            '--mixed_precision', 'fp16',
            '--output_dir', '/api/output',  # kept in case eval still requires it
            '--save_name', 'generato_paired',
            '--num_workers_test', '4',
            '--sketch_cond_rate', '0.2',
            '--dataset', 'vitonhd',
            '--start_cond_rate', '0.0',
            '--test_order', 'paired',
        ]

        # Simulate the CLI parser with the in-memory argument list.
        args = eval_parse_args(args_list)

        # Run eval.py's main() directly; it returns the generated image
        # (a PIL Image) instead of writing it to disk.
        final_image = eval.main(args)

        # Serialize the image into an in-memory buffer so no temp file
        # touches disk, then rewind the buffer for send_file to read it.
        img_io = io.BytesIO()
        final_image.save(img_io, 'JPEG')
        img_io.seek(0)

        # NOTE: Flask >= 2.0 renamed `attachment_filename` to `download_name`
        # (the old name was removed in Flask 2.2) — using the current name.
        return send_file(
            img_io,
            mimetype='image/jpeg',
            as_attachment=True,
            download_name='generated_image.jpg',
        )
    except Exception as e:
        # Broad catch at the HTTP boundary: report the failure to the
        # client instead of letting the server return a bare 500 page.
        return str(e), 500
69
 
70
  # Route per servire l'immagine generata
71
  @app.route('/api/output/generato_paired_paired/images/<filename>')
api/model/src/eval.py CHANGED
@@ -19,6 +19,7 @@ from mgd_pipelines.mgd_pipe_disentangled import MGDPipeDisentangled
19
  from utils.arg_parser import eval_parse_args
20
  from utils.image_from_pipe import generate_images_from_mgd_pipe
21
  from utils.set_seeds import set_seed
 
22
 
23
  # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
24
  check_min_version("0.10.0.dev0")
@@ -133,7 +134,7 @@ def main() -> None:
133
 
134
  val_pipe.enable_attention_slicing()
135
  test_dataloader = accelerator.prepare(test_dataloader)
136
- generate_images_from_mgd_pipe(
137
  test_order=args.test_order,
138
  pipe=val_pipe,
139
  test_dataloader=test_dataloader,
@@ -149,7 +150,7 @@ def main() -> None:
149
  disentagle=False,
150
  seed=args.seed,
151
  )
152
-
153
 
154
  if __name__ == "__main__":
155
  main()
 
19
  from utils.arg_parser import eval_parse_args
20
  from utils.image_from_pipe import generate_images_from_mgd_pipe
21
  from utils.set_seeds import set_seed
22
+ from PIL import Image
23
 
24
  # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
25
  check_min_version("0.10.0.dev0")
 
134
 
135
  val_pipe.enable_attention_slicing()
136
  test_dataloader = accelerator.prepare(test_dataloader)
137
+ final_image = generate_images_from_mgd_pipe(
138
  test_order=args.test_order,
139
  pipe=val_pipe,
140
  test_dataloader=test_dataloader,
 
150
  disentagle=False,
151
  seed=args.seed,
152
  )
153
+ return final_image # Now returning the generated image
154
 
155
  if __name__ == "__main__":
156
  main()
api/model/src/utils/image_from_pipe.py CHANGED
@@ -6,6 +6,7 @@ import torchvision.transforms as T
6
  from diffusers.pipeline_utils import DiffusionPipeline
7
  from torch.utils.data import DataLoader
8
  from src.utils.image_composition import compose_img, compose_img_dresscode
 
9
 
10
 
11
  @torch.inference_mode()
@@ -46,12 +47,12 @@ def generate_images_from_mgd_pipe(
46
  Returns:
47
  None
48
  """
49
- assert(save_name != ""), "save_name must be specified"
50
- assert(output_dir != ""), "output_dir must be specified"
51
 
52
- path = os.path.join(output_dir, f"{save_name}_{test_order}", "images")
53
 
54
- os.makedirs(path, exist_ok=True)
55
  generator = torch.Generator("cuda").manual_seed(seed)
56
 
57
  for batch in tqdm(test_dataloader):
@@ -122,6 +123,7 @@ def generate_images_from_mgd_pipe(
122
  im_head = face
123
  )
124
 
125
- final_img = T.functional.to_pil_image(final_img)
126
- final_img.save(
127
- os.path.join(path, batch["im_name"][i].replace(".jpg", ext)))
 
 
6
  from diffusers.pipeline_utils import DiffusionPipeline
7
  from torch.utils.data import DataLoader
8
  from src.utils.image_composition import compose_img, compose_img_dresscode
9
+ from PIL import Image
10
 
11
 
12
  @torch.inference_mode()
 
47
  Returns:
48
  None
49
  """
50
+ #assert(save_name != ""), "save_name must be specified"
51
+ #assert(output_dir != ""), "output_dir must be specified"
52
 
53
+ #path = os.path.join(output_dir, f"{save_name}_{test_order}", "images")
54
 
55
+ #os.makedirs(path, exist_ok=True)
56
  generator = torch.Generator("cuda").manual_seed(seed)
57
 
58
  for batch in tqdm(test_dataloader):
 
123
  im_head = face
124
  )
125
 
126
+ return T.functional.to_pil_image(final_img)
127
+ #final_img = T.functional.to_pil_image(final_img)
128
+ #final_img.save(
129
+ # os.path.join(path, batch["im_name"][i].replace(".jpg", ext)))