theanhntp committed
Commit f9165d8 · verified · Parent(s): 02b563c

Upload main.py

Files changed (1)
  1. Was_node_suite/main.py +301 -0
Was_node_suite/main.py ADDED
import atexit, requests, subprocess, time, re, os
from random import randint
from threading import Timer
from queue import Queue

def cloudflared(port, metrics_port, output_queue):
    # Start a cloudflared quick tunnel pointing at the local server, and
    # register cleanup so the tunnel process is terminated on exit.
    atexit.register(lambda p: p.terminate(), subprocess.Popen(['/workspace/cloudflared-linux-amd64', 'tunnel', '--url', f'http://127.0.0.1:{port}', '--metrics', f'127.0.0.1:{metrics_port}'], stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT))
    attempts, tunnel_url = 0, None
    # Poll the local metrics endpoint until the assigned *.trycloudflare.com
    # URL appears (up to 10 attempts, 3 seconds apart).
    while attempts < 10 and not tunnel_url:
        attempts += 1
        time.sleep(3)
        try:
            tunnel_url = re.search(r"(?P<url>https?://[^\s]+\.trycloudflare\.com)", requests.get(f'http://127.0.0.1:{metrics_port}/metrics').text).group("url")
        except Exception:
            pass
    if not tunnel_url:
        raise Exception("Can't connect to Cloudflare Edge")
    output_queue.put(tunnel_url)

output_queue, metrics_port = Queue(), randint(8100, 9000)
thread = Timer(2, cloudflared, args=(8188, metrics_port, output_queue))
thread.start()
thread.join()
tunnel_url = output_queue.get()
os.environ['webui_url'] = tunnel_url
print(tunnel_url)

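# ---------------------------------------------------------------------------
# Everything below appears to track upstream ComfyUI's main.py; the
# cloudflared tunnel bootstrap above is the only addition in this file.
# ---------------------------------------------------------------------------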
import comfy.options
comfy.options.enable_args_parsing()

import os
import importlib.util
import folder_paths
import time
from comfy.cli_args import args
from app.logger import setup_logger


setup_logger(verbose=args.verbose)

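# Custom node packages may ship a prestartup_script.py; it is executed here,
# before the nodes themselves are imported, and each script's runtime and
# success/failure are reported.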
def execute_prestartup_script():
    def execute_script(script_path):
        module_name = os.path.splitext(script_path)[0]
        try:
            spec = importlib.util.spec_from_file_location(module_name, script_path)
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)
            return True
        except Exception as e:
            print(f"Failed to execute startup-script: {script_path} / {e}")
        return False

    if args.disable_all_custom_nodes:
        return

    node_paths = folder_paths.get_folder_paths("custom_nodes")
    for custom_node_path in node_paths:
        possible_modules = os.listdir(custom_node_path)
        node_prestartup_times = []

        for possible_module in possible_modules:
            module_path = os.path.join(custom_node_path, possible_module)
            if os.path.isfile(module_path) or module_path.endswith(".disabled") or module_path == "__pycache__":
                continue

            script_path = os.path.join(module_path, "prestartup_script.py")
            if os.path.exists(script_path):
                time_before = time.perf_counter()
                success = execute_script(script_path)
                node_prestartup_times.append((time.perf_counter() - time_before, module_path, success))
    if len(node_prestartup_times) > 0:
        print("\nPrestartup times for custom nodes:")
        for n in sorted(node_prestartup_times):
            if n[2]:
                import_message = ""
            else:
                import_message = " (PRESTARTUP FAILED)"
            print("{:6.1f} seconds{}:".format(n[0], import_message), n[1])
        print()

execute_prestartup_script()


# Main code
import asyncio
import itertools
import shutil
import threading
import gc

import logging

if os.name == "nt":
    logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())

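# CUDA_VISIBLE_DEVICES and CUBLAS_WORKSPACE_CONFIG are exported here, before
# torch gets imported further down, so they take effect when CUDA initializes.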
if __name__ == "__main__":
    if args.cuda_device is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda_device)
        logging.info("Set cuda device to: {}".format(args.cuda_device))

    if args.deterministic:
        if 'CUBLAS_WORKSPACE_CONFIG' not in os.environ:
            os.environ['CUBLAS_WORKSPACE_CONFIG'] = ":4096:8"

    import cuda_malloc

if args.windows_standalone_build:
    try:
        import fix_torch
    except Exception:
        pass

import comfy.utils
import yaml

import execution
import server
from server import BinaryEventTypes
import nodes
import comfy.model_management

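# cuda_malloc_warning() warns when the detected GPU name matches cuda_malloc's
# blacklist of cards known to have trouble with cudaMallocAsync.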
def cuda_malloc_warning():
    device = comfy.model_management.get_torch_device()
    device_name = comfy.model_management.get_torch_device_name(device)
    cuda_malloc_warning = False
    if "cudaMallocAsync" in device_name:
        for b in cuda_malloc.blacklist:
            if b in device_name:
                cuda_malloc_warning = True
        if cuda_malloc_warning:
            logging.warning("\nWARNING: this card most likely does not support cuda-malloc, if you get \"CUDA error\" please run ComfyUI with: --disable-cuda-malloc\n")

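# The prompt worker runs on its own thread: it blocks on the prompt queue,
# executes each workflow, reports status back to the server, and garbage
# collects at most once per gc_collect_interval seconds.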
def prompt_worker(q, server):
    e = execution.PromptExecutor(server, lru_size=args.cache_lru)
    last_gc_collect = 0
    need_gc = False
    gc_collect_interval = 10.0

    while True:
        timeout = 1000.0
        if need_gc:
            timeout = max(gc_collect_interval - (current_time - last_gc_collect), 0.0)

        queue_item = q.get(timeout=timeout)
        if queue_item is not None:
            item, item_id = queue_item
            execution_start_time = time.perf_counter()
            prompt_id = item[1]
            server.last_prompt_id = prompt_id

            e.execute(item[2], prompt_id, item[3], item[4])
            need_gc = True
            q.task_done(item_id,
                        e.history_result,
                        status=execution.PromptQueue.ExecutionStatus(
                            status_str='success' if e.success else 'error',
                            completed=e.success,
                            messages=e.status_messages))
            if server.client_id is not None:
                server.send_sync("executing", { "node": None, "prompt_id": prompt_id }, server.client_id)

            current_time = time.perf_counter()
            execution_time = current_time - execution_start_time
            logging.info("Prompt executed in {:.2f} seconds".format(execution_time))

        flags = q.get_flags()
        free_memory = flags.get("free_memory", False)

        if flags.get("unload_models", free_memory):
            comfy.model_management.unload_all_models()
            need_gc = True
            last_gc_collect = 0

        if free_memory:
            e.reset()
            need_gc = True
            last_gc_collect = 0

        if need_gc:
            current_time = time.perf_counter()
            if (current_time - last_gc_collect) > gc_collect_interval:
                comfy.model_management.cleanup_models()
                gc.collect()
                comfy.model_management.soft_empty_cache()
                last_gc_collect = current_time
                need_gc = False

async def run(server, address='', port=8188, verbose=True, call_on_start=None):
    await asyncio.gather(server.start(address, port, verbose, call_on_start), server.publish_loop())


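# Install a global progress hook so per-node progress (and preview images,
# when available) is pushed to the connected client.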
def hijack_progress(server):
    def hook(value, total, preview_image):
        comfy.model_management.throw_exception_if_processing_interrupted()
        progress = {"value": value, "max": total, "prompt_id": server.last_prompt_id, "node": server.last_node_id}

        server.send_sync("progress", progress, server.client_id)
        if preview_image is not None:
            server.send_sync(BinaryEventTypes.UNENCODED_PREVIEW_IMAGE, preview_image, server.client_id)
    comfy.utils.set_progress_bar_global_hook(hook)


def cleanup_temp():
    temp_dir = folder_paths.get_temp_directory()
    if os.path.exists(temp_dir):
        shutil.rmtree(temp_dir, ignore_errors=True)


def load_extra_path_config(yaml_path):
    with open(yaml_path, 'r') as stream:
        config = yaml.safe_load(stream)
    for c in config:
        conf = config[c]
        if conf is None:
            continue
        base_path = None
        if "base_path" in conf:
            base_path = conf.pop("base_path")
        for x in conf:
            for y in conf[x].split("\n"):
                if len(y) == 0:
                    continue
                full_path = y
                if base_path is not None:
                    full_path = os.path.join(base_path, full_path)
                logging.info("Adding extra search path {} {}".format(x, full_path))
                folder_paths.add_model_folder_path(x, full_path)

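# A minimal sketch of the YAML layout load_extra_path_config() accepts
# (the names below are hypothetical examples, not shipped defaults):
#
#   my_paths:                   # any top-level config name
#       base_path: /workspace   # optional; joined onto every path below
#       checkpoints: models/checkpoints
#       loras: models/loras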

if __name__ == "__main__":
    if args.temp_directory:
        temp_dir = os.path.join(os.path.abspath(args.temp_directory), "temp")
        logging.info(f"Setting temp directory to: {temp_dir}")
        folder_paths.set_temp_directory(temp_dir)
    cleanup_temp()

    if args.windows_standalone_build:
        try:
            import new_updater
            new_updater.update_windows_updater()
        except Exception:
            pass

    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    server = server.PromptServer(loop)
    q = execution.PromptQueue(server)

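    # Model search paths: an extra_model_paths.yaml sitting next to this file
    # is loaded automatically, then any configs passed via
    # --extra-model-paths-config are merged on top.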
    extra_model_paths_config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "extra_model_paths.yaml")
    if os.path.isfile(extra_model_paths_config_path):
        load_extra_path_config(extra_model_paths_config_path)

    if args.extra_model_paths_config:
        for config_path in itertools.chain(*args.extra_model_paths_config):
            load_extra_path_config(config_path)

    nodes.init_extra_nodes(init_custom_nodes=not args.disable_all_custom_nodes)

    cuda_malloc_warning()

    server.add_routes()
    hijack_progress(server)

    threading.Thread(target=prompt_worker, daemon=True, args=(q, server,)).start()


    if args.output_directory:
        output_dir = os.path.abspath(args.output_directory)
        logging.info(f"Setting output directory to: {output_dir}")
        folder_paths.set_output_directory(output_dir)

    # These are the default folders that checkpoints, clip and vae models
    # will be saved to when using CheckpointSave, etc. nodes
    folder_paths.add_model_folder_path("checkpoints", os.path.join(folder_paths.get_output_directory(), "checkpoints"))
    folder_paths.add_model_folder_path("clip", os.path.join(folder_paths.get_output_directory(), "clip"))
    folder_paths.add_model_folder_path("vae", os.path.join(folder_paths.get_output_directory(), "vae"))
    folder_paths.add_model_folder_path("diffusion_models", os.path.join(folder_paths.get_output_directory(), "diffusion_models"))
    folder_paths.add_model_folder_path("loras", os.path.join(folder_paths.get_output_directory(), "loras"))

    if args.input_directory:
        input_dir = os.path.abspath(args.input_directory)
        logging.info(f"Setting input directory to: {input_dir}")
        folder_paths.set_input_directory(input_dir)

    if args.quick_test_for_ci:
        exit(0)


    call_on_start = None
    if args.auto_launch:
        def startup_server(scheme, address, port):
            import webbrowser
            if os.name == 'nt' and address == '0.0.0.0':
                address = '127.0.0.1'
            webbrowser.open(f"{scheme}://{address}:{port}")
        call_on_start = startup_server

    try:
        loop.run_until_complete(server.setup())
        loop.run_until_complete(run(server, address=args.listen, port=args.port, verbose=not args.dont_print_server, call_on_start=call_on_start))
    except KeyboardInterrupt:
        logging.info("\nStopped server")

    cleanup_temp()
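
# Typical invocation, assuming upstream ComfyUI's CLI flags (the args
# referenced above: --listen, --port, --output-directory, ...):
#   python main.py --listen 127.0.0.1 --port 8188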