MaxReimann committed on
Commit 92ac447 · unverified · 2 Parent(s): 6734357 9109365

Merge pull request #1 from MaxReimann/server_test

.gitignore ADDED
@@ -0,0 +1,2 @@
+ worker/img_received
+ worker/result
Whitebox_style_transfer.py CHANGED
@@ -9,6 +9,7 @@ import requests
  import torch
  import torch.nn.functional as F
  from PIL import Image
+ import time

  PACKAGE_PARENT = 'wise'
  SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
@@ -144,33 +145,15 @@ def optimize(effect, preset, result_image_placeholder):
      content = st.session_state["Content_im"]
      style = st.session_state["Style_im"]
      result_image_placeholder.text("<- Custom content/style needs to be style transferred")
+     st.sidebar.warning("Note: Optimizing takes up to 5 minutes.")
      optimize_button = st.sidebar.button("Optimize Style Transfer")
      if optimize_button:
-         if HUGGING_FACE:
-             result_image_placeholder.warning("NST optimization is currently disabled in this HuggingFace Space because it takes ~5min to optimize. To try it out, please clone the repo and change the huggingface variable in demo_config.py")
-             st.stop()
-
-         result_image_placeholder.text("Executing NST to create reference image..")
-         base_dir = f"result/{datetime.datetime.now().strftime(r'%Y-%m-%d %H.%Mh %Ss')}"
-         os.makedirs(base_dir)
-         with st.spinner(text="Running NST"):
-             reference = strotss(pil_resize_long_edge_to(content, 1024),
-                                 pil_resize_long_edge_to(style, 1024), content_weight=16.0,
-                                 device=torch.device("cuda"), space="uniform")
-         progress_bar = result_image_placeholder.progress(0.0)
-         ref_save_path = os.path.join(base_dir, "reference.jpg")
-         content_save_path = os.path.join(base_dir, "content.jpg")
-         resize_to = 720
-         reference = pil_resize_long_edge_to(reference, resize_to)
-         reference.save(ref_save_path)
-         content.save(content_save_path)
-         ST_CONFIG["n_iterations"] = 300
          with st.spinner(text="Optimizing parameters.."):
-             vp, content_img_cuda = single_optimize(effect, preset, "l1", content_save_path, str(ref_save_path),
-                                                    write_video=False, base_dir=base_dir,
-                                                    iter_callback=lambda i: progress_bar.progress(
-                                                        float(i) / ST_CONFIG["n_iterations"]))
-         return content_img_cuda.detach(), vp.cuda().detach()
+             if HUGGING_FACE:
+                 optimize_on_server(content, style, result_image_placeholder)
+             else:
+                 optimize_params(effect, preset, content, style, result_image_placeholder)
+         return st.session_state["effect_input"], st.session_state["result_vp"]
      else:
          if not "result_vp" in st.session_state:
              st.stop()
@@ -223,6 +206,15 @@ coll2.header("Global Edits")
  result_image_placeholder = coll1.empty()
  result_image_placeholder.markdown("## loading..")

+ from tasks import optimize_on_server, optimize_params, monitor_task
+
+ if "current_server_task_id" not in st.session_state:
+     st.session_state['current_server_task_id'] = None
+
+ if HUGGING_FACE and st.session_state['current_server_task_id'] is not None:
+     with st.spinner(text="Optimizing parameters.."):
+         monitor_task(result_image_placeholder)
+
  img_choice_panel("Content", content_urls, "portrait", expanded=True)
  img_choice_panel("Style", style_urls, "starry_night", expanded=True)
demo_config.py CHANGED
@@ -1 +1,2 @@
- HUGGING_FACE=True # if run in hugging face. Disables some things like full NST optimization
+ HUGGING_FACE=True # if run in hugging face. Huggingface uses extra server task for optim
+ WORKER_URL="http://ava.hpi3d.de:8600"
docker-compose.yml ADDED
@@ -0,0 +1,11 @@
+ version: "2.3"
+
+ services:
+   worker:
+     build:
+       context: ./
+       dockerfile: ./project/server/Dockerfile_worker
+     image: wise-worker
+     container_name: wise-eccv-optim-worker
+     ports:
+       - 8600:8600
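
A minimal reachability check, as a sketch rather than part of the commit: it assumes the compose service is up and that port 8600 is mapped on localhost (the host name is an assumption; substitute the machine running the container), and it only hits the GPU-free /queue_length route defined in worker/serve.py below.

import requests

# Sketch: verify the dockerized worker answers; "localhost:8600" is an assumption.
resp = requests.get("http://localhost:8600/queue_length", timeout=5)
resp.raise_for_status()
print("tasks queued or running:", resp.json()["length"])  # serve.py returns {"length": <int>}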
tasks.py ADDED
@@ -0,0 +1,135 @@
+ import base64
+ import datetime
+ import os
+ import sys
+ from io import BytesIO
+ from pathlib import Path
+ import numpy as np
+ import requests
+ import torch
+ import torch.nn.functional as F
+ from PIL import Image
+ import time
+ import streamlit as st
+ from demo_config import HUGGING_FACE, WORKER_URL
+
+
+ PACKAGE_PARENT = 'wise'
+ SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
+ sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
+
+ from parameter_optimization.parametric_styletransfer import single_optimize
+ from parameter_optimization.parametric_styletransfer import CONFIG as ST_CONFIG
+ from parameter_optimization.strotss_org import strotss, pil_resize_long_edge_to
+ from helpers import torch_to_np, np_to_torch
+
+ def retrieve_for_results_from_server():
+     task_id = st.session_state['current_server_task_id']
+     vp_res = requests.get(WORKER_URL + "/get_vp", params={"task_id": task_id})
+     image_res = requests.get(WORKER_URL + "/get_image", params={"task_id": task_id})
+     if vp_res.status_code != 200 or image_res.status_code != 200:
+         st.warning("got status for " + WORKER_URL + "/get_vp: " + str(vp_res.status_code))
+         st.warning("got status for " + WORKER_URL + "/get_image: " + str(image_res.status_code))
+         st.session_state['current_server_task_id'] = None
+         vp_res.raise_for_status()
+         image_res.raise_for_status()
+     else:
+         st.session_state['current_server_task_id'] = None
+         vp = np.load(BytesIO(vp_res.content))["vp"]
+         print("received vp from server")
+         print("got numpy array", vp.shape)
+         vp = torch.from_numpy(vp).cuda()
+         image = Image.open(BytesIO(image_res.content))
+         print("received image from server")
+         image = np_to_torch(np.asarray(image)).cuda()
+
+         st.session_state["effect_input"] = image
+         st.session_state["result_vp"] = vp
+
+
+ def monitor_task(progress_placeholder):
+     task_id = st.session_state['current_server_task_id']
+
+     started_time = time.time()
+     retries = 3
+     while True:
+         status = requests.get(WORKER_URL + "/get_status", params={"task_id": task_id})
+         if status.status_code != 200:
+             print("get_status got status_code", status.status_code)
+             st.warning(status.content)
+             retries -= 1
+             if retries == 0:
+                 return
+             else:
+                 time.sleep(2)
+                 continue
+         status = status.json()
+         print(status)
+         if status["status"] != "running" and status["status"] != "queued":
+             if status["msg"] != "":
+                 print("got error for task", task_id, ":", status["msg"])
+                 progress_placeholder.error(status["msg"])
+                 st.session_state['current_server_task_id'] = None
+                 st.stop()
+             if status["status"] == "finished":
+                 retrieve_for_results_from_server()
+                 return
+         elif status["status"] == "queued":
+             started_time = time.time()
+             queue_length = requests.get(WORKER_URL + "/queue_length").json()
+             progress_placeholder.write(f"There are {queue_length['length']} tasks in the queue")
+         elif status["progress"] == 0.0:
+             progressed = min(0.5 * (time.time() - started_time) / 80.0, 0.5)  # estimate 80s for strotss
+             progress_placeholder.progress(progressed)
+         else:
+             progress_placeholder.progress(min(0.5 + status["progress"] / 2.0, 1.0))
+
+         time.sleep(2)
+
+
+ def optimize_on_server(content, style, result_image_placeholder):
+     url = WORKER_URL + "/upload"
+     content_path = f"/tmp/content-wise-uploaded{str(datetime.datetime.timestamp(datetime.datetime.now()))}.jpg"
+     style_path = f"/tmp/style-wise-uploaded{str(datetime.datetime.timestamp(datetime.datetime.now()))}.jpg"
+     asp_c, asp_s = content.height / content.width, style.height / style.width
+     if any(a < 0.5 or a > 2.0 for a in (asp_c, asp_s)):
+         result_image_placeholder.error('aspect ratio must be between 0.5 and 2')
+         st.stop()
+     content = pil_resize_long_edge_to(content, 1024)
+     content.save(content_path)
+     style = pil_resize_long_edge_to(style, 1024)
+     style.save(style_path)
+     files = {'style-image': open(style_path, "rb"), "content-image": open(content_path, "rb")}
+     print("start-optimizing")
+     task_id_res = requests.post(url, files=files)
+     if task_id_res.status_code != 200:
+         result_image_placeholder.error(task_id_res.content)
+         st.stop()
+     else:
+         task_id = task_id_res.json()['task_id']
+         st.session_state['current_server_task_id'] = task_id
+
+     monitor_task(result_image_placeholder)
+
+ def optimize_params(effect, preset, content, style, result_image_placeholder):
+     result_image_placeholder.text("Executing NST to create reference image..")
+     base_dir = f"result/{datetime.datetime.now().strftime(r'%Y-%m-%d %H.%Mh %Ss')}"
+     os.makedirs(base_dir)
+     reference = strotss(pil_resize_long_edge_to(content, 1024),
+                         pil_resize_long_edge_to(style, 1024), content_weight=16.0,
+                         device=torch.device("cuda"), space="uniform")
+     progress_bar = result_image_placeholder.progress(0.0)
+     ref_save_path = os.path.join(base_dir, "reference.jpg")
+     content_save_path = os.path.join(base_dir, "content.jpg")
+     resize_to = 720
+     reference = pil_resize_long_edge_to(reference, resize_to)
+     reference.save(ref_save_path)
+     content.save(content_save_path)
+     ST_CONFIG["n_iterations"] = 300
+
+     vp, content_img_cuda = single_optimize(effect, preset, "l1", content_save_path, str(ref_save_path),
+                                            write_video=False, base_dir=base_dir,
+                                            iter_callback=lambda i: progress_bar.progress(
+                                                float(i) / ST_CONFIG["n_iterations"]))
+     st.session_state["effect_input"], st.session_state["result_vp"] = content_img_cuda.detach(), vp.cuda().detach()
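
For reference, a stand-alone sketch of the client protocol that tasks.py implements against the worker: POST both images to /upload, poll /get_status until the task leaves the queued/running states, then download the parameter archive from /get_vp and the stylized reference from /get_image. The worker URL and file paths below are placeholders, not values from this commit.

import time
from io import BytesIO

import numpy as np
import requests
from PIL import Image

WORKER_URL = "http://localhost:8600"  # assumption; see demo_config.py for the deployed URL

# Upload a content/style pair; the worker answers with a task id.
with open("content.jpg", "rb") as c, open("style.jpg", "rb") as s:
    task_id = requests.post(WORKER_URL + "/upload",
                            files={"content-image": c, "style-image": s}).json()["task_id"]

# Poll until the task is finished (or failed).
while True:
    status = requests.get(WORKER_URL + "/get_status", params={"task_id": task_id}).json()
    if status["status"] == "finished":
        break
    if status["status"] not in ("queued", "running"):
        raise RuntimeError(status["msg"])
    time.sleep(2)

# Fetch the optimized parameters (.npz with key "vp") and the stylized image.
vp = np.load(BytesIO(requests.get(WORKER_URL + "/get_vp", params={"task_id": task_id}).content))["vp"]
image = Image.open(BytesIO(requests.get(WORKER_URL + "/get_image", params={"task_id": task_id}).content))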
worker/Dockerfile_worker ADDED
@@ -0,0 +1,14 @@
+ FROM pytorch/pytorch:1.12.0-cuda11.3-cudnn8-runtime
+
+ WORKDIR /usr/app
+ ADD worker/requirements.txt .
+ RUN pip install -r requirements.txt
+
+ ADD wise .
+
+ WORKDIR /usr/app/worker
+ ADD worker/serve.py .
+
+ EXPOSE 8600
+
+ CMD ["python", "serve.py"]
worker/requirements.txt ADDED
@@ -0,0 +1,11 @@
+ imageio
+ imageio-ffmpeg
+ scipy
+ Pillow
+ numpy
+ matplotlib
+ --extra-index-url https://download.pytorch.org/whl/cu113
+ torch
+ torchvision
+ Flask
+ Flask-Reuploaded
worker/serve.py ADDED
@@ -0,0 +1,286 @@
+ import datetime
+ import os
+ from pathlib import Path
+ import sys
+ from flask import Flask, jsonify, request, send_file, abort
+ from flask_uploads import UploadSet, configure_uploads, IMAGES
+ from werkzeug.exceptions import default_exceptions
+ from werkzeug.exceptions import HTTPException, NotFound
+ import json
+ import torch
+ import time
+ import threading
+ import traceback
+ from PIL import Image
+ import numpy as np
+
+ PACKAGE_PARENT = '..'
+ WISE_DIR = '../wise/'
+ SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
+ sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
+ sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, WISE_DIR)))
+
+
+ from parameter_optimization.parametric_styletransfer import single_optimize
+ from parameter_optimization.parametric_styletransfer import CONFIG as ST_CONFIG
+ from parameter_optimization.strotss_org import strotss, pil_resize_long_edge_to
+ from helpers import torch_to_np, np_to_torch
+ from effects import get_default_settings, MinimalPipelineEffect
+
+ class JSONExceptionHandler(object):
+
+     def __init__(self, app=None):
+         if app:
+             self.init_app(app)
+
+     def std_handler(self, error):
+         response = jsonify(message=error.message)
+         response.status_code = error.code if isinstance(error, HTTPException) else 500
+         return response
+
+     def init_app(self, app):
+         self.app = app
+         self.register(HTTPException)
+         for code, v in default_exceptions.items():
+             self.register(code)
+
+     def register(self, exception_or_code, handler=None):
+         self.app.errorhandler(exception_or_code)(handler or self.std_handler)
+
+
+ app = Flask(__name__)
+ handler = JSONExceptionHandler(app)
+
+ image_folder = 'img_received'
+ photos = UploadSet('photos', IMAGES)
+ app.config['UPLOADED_PHOTOS_DEST'] = image_folder
+ configure_uploads(app, photos)
+
+ class Args(object):
+     def __init__(self, initial_data):
+         for key in initial_data:
+             setattr(self, key, initial_data[key])
+     def set_attributes(self, val_dict):
+         for key in val_dict:
+             setattr(self, key, val_dict[key])
+
+ default_args = {
+     "output_image": "output.jpg",
+     ## values always set by request ##
+     "content_image": "",
+     "style_image": "",
+     "output_vp": "",
+     "iters": 500
+ }
+
+
+ total_task_count = 0
+
+ class NeuralOptimizer():
+     def __init__(self, args) -> None:
+         self.cur_iteration = 0
+         self.args = args
+
+     def optimize(self):
+         base_dir = f"result/{datetime.datetime.now().strftime(r'%Y-%m-%d %H.%Mh %Ss')}"
+         os.makedirs(base_dir)
+
+         content = Image.open(self.args.content_image)
+         style = Image.open(self.args.style_image)
+
+         def set_iter(iter):
+             self.cur_iteration = iter
+
+         effect, preset, _ = get_default_settings("minimal_pipeline")
+         effect.enable_checkpoints()
+
+         reference = strotss(pil_resize_long_edge_to(content, 1024),
+                             pil_resize_long_edge_to(style, 1024), content_weight=16.0,
+                             device=torch.device("cuda"), space="uniform")
+
+         ref_save_path = os.path.join(base_dir, "reference.jpg")
+         resize_to = 720
+         reference = pil_resize_long_edge_to(reference, resize_to)
+         reference.save(ref_save_path)
+         ST_CONFIG["n_iterations"] = self.args.iters
+         vp, content_img_cuda = single_optimize(effect, preset, "l1", self.args.content_image, str(ref_save_path),
+                                                write_video=False, base_dir=base_dir,
+                                                iter_callback=set_iter)
+
+         output = Image.fromarray(torch_to_np(content_img_cuda.detach().cpu() * 255.0).astype(np.uint8))
+         output.save(self.args.output_image)
+         # torch.save(vp.detach().clone(), self.args.output_vp)
+         # preset_tensor = effect.vpd.preset_tensor(preset, np_to_torch(np.array(content)).cuda(), add_local_dims=True)
+         np.savez_compressed(self.args.output_vp, vp=vp.detach().cpu().numpy())
+
+
+ class StyleTask:
+     def __init__(self, task_id, style_filename, content_filename):
+         self.content_filename = content_filename
+         self.style_filename = style_filename
+
+         self.status = "queued"
+         self.task_id = task_id
+         self.error_msg = ""
+         self.output_filename = content_filename.split(".")[0] + "_output.jpg"
+         self.vp_output_filename = content_filename.split(".")[0] + "_output.npz"
+
+         # global neural_optimizer
+         # if neural_optimizer is None:
+         #     neural_optimizer = NeuralOptimizer(Args(default_args))
+
+         self.neural_optimizer = NeuralOptimizer(Args(default_args))
+
+     def start(self):
+         self.neural_optimizer.args.set_attributes(default_args)
+
+         self.neural_optimizer.args.style_image = os.path.join(image_folder, self.style_filename)
+         self.neural_optimizer.args.content_image = os.path.join(image_folder, self.content_filename)
+         self.neural_optimizer.args.output_image = os.path.join(image_folder, self.output_filename)
+         self.neural_optimizer.args.output_vp = os.path.join(image_folder, self.vp_output_filename)
+
+         thread = threading.Thread(target=self.run, args=())
+         thread.daemon = True  # Daemonize thread
+         thread.start()  # Start the execution
+
+     def run(self):
+         self.status = "running"
+         try:
+             self.neural_optimizer.optimize()
+         except Exception as e:
+             print("Error in task %d :" % (self.task_id), str(e))
+             traceback.print_exc()
+
+             self.status = "error"
+             self.error_msg = str(e)
+             return
+
+         self.status = "finished"
+         print("finished styling task: " + str(self.task_id))
+
+ class StylerQueue:
+     queued_tasks = []
+     finished_tasks = []
+     running_task = None
+
+     def __init__(self):
+         thread = threading.Thread(target=self.status_checker, args=())
+         thread.daemon = True  # Daemonize thread
+         thread.start()  # Start the execution
+
+     def queue_task(self, *args):
+         global total_task_count
+         total_task_count += 1
+         task_id = abs(hash(str(time.time())))
+         print("queued task num. ", total_task_count, "with ID", task_id)
+         task = StyleTask(task_id, *args)
+         self.queued_tasks.append(task)
+
+         return task_id
+
+     def get_task(self, task_id):
+         if self.running_task is not None and self.running_task.task_id == task_id:
+             return self.running_task
+         task = next((task for task in self.queued_tasks + self.finished_tasks if task.task_id == task_id), None)
+         return task
+
+     def status_checker(self):
+         while True:
+             time.sleep(0.3)
+
+             if self.running_task is None:
+                 if len(self.queued_tasks) > 0:
+                     self.running_task = self.queued_tasks[0]
+                     self.running_task.start()
+                     self.queued_tasks = self.queued_tasks[1:]
+             elif self.running_task.status == "finished" or self.running_task.status == "error":
+                 self.finished_tasks.append(self.running_task)
+                 if len(self.queued_tasks) > 0:
+                     self.running_task = self.queued_tasks[0]
+                     self.running_task.start()
+                     self.queued_tasks = self.queued_tasks[1:]
+                 else:
+                     self.running_task = None
+
+ styler_queue = StylerQueue()
+
+
+ @app.route('/upload', methods=['POST'])
+ def upload():
+     if 'style-image' in request.files and \
+         'content-image' in request.files:
+
+         style_filename = photos.save(request.files['style-image'])
+         content_filename = photos.save(request.files['content-image'])
+
+         job_id = styler_queue.queue_task(style_filename, content_filename)
+         print('added new stylization task', style_filename, content_filename)
+
+         return jsonify({"task_id": job_id})
+     abort(jsonify(message="request needs style, content image"), 400)
+
+ @app.route('/get_status')
+ def get_status():
+     task_id = int(request.args.get("task_id"))
+     task = styler_queue.get_task(task_id)
+
+     if task is None:
+         abort(jsonify(message="task with id %d not found" % task_id), 400)
+
+     status = {
+         "status": task.status,
+         "msg": task.error_msg
+     }
+
+     if task.status == "running":
+         if isinstance(task, StyleTask):
+             status["progress"] = float(task.neural_optimizer.cur_iteration) / float(default_args["iters"])
+
+     return jsonify(status)
+
+ @app.route('/queue_length')
+ def get_queue_length():
+     tasks = len(styler_queue.queued_tasks)
+     if styler_queue.running_task is not None:
+         tasks += 1
+
+     status = {
+         "length": tasks
+     }
+
+     return jsonify(status)
+
+
+ @app.route('/get_image')
+ def get_image():
+     task_id = int(request.args.get("task_id"))
+     task = styler_queue.get_task(task_id)
+
+     if task is None:
+         abort(jsonify(message="task with id %d not found" % task_id), 400)
+
+     if task.status != "finished":
+         abort(jsonify(message="task with id %d not in finished state" % task_id), 400)
+
+     return send_file(os.path.join(image_folder, task.output_filename), mimetype='image/jpg')
+
+ @app.route('/get_vp')
+ def get_vp():
+     task_id = int(request.args.get("task_id"))
+     task = styler_queue.get_task(task_id)
+
+     if task is None:
+         abort(jsonify(message="task with id %d not found" % task_id), 400)
+
+     if task.status != "finished":
+         abort(jsonify(message="task with id %d not in finished state" % task_id), 400)
+
+     return send_file(os.path.join(image_folder, task.vp_output_filename), mimetype='application/zip')
+
+
+ if __name__ == '__main__':
+     app.run(debug=False, host="0.0.0.0", port=8600)
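
A quick in-process check of the routes, given as a sketch: it assumes serve.py and its imports (torch, flask_uploads, and the wise/ package) resolve in the current environment, and it uses Flask's built-in test client to exercise only the GPU-free /queue_length endpoint.

# Sketch: in-process smoke test of the worker's Flask app (no HTTP server needed).
from serve import app  # assumes serve.py and its dependencies are importable

with app.test_client() as client:
    resp = client.get("/queue_length")
    assert resp.status_code == 200
    print(resp.get_json())  # e.g. {"length": 0}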