theanhntp commited on
Commit
02b563c
·
verified ·
1 Parent(s): ae43577

Delete Was_node_suite/main.py

Browse files
Files changed (1) hide show
  1. Was_node_suite/main.py +0 -294
Was_node_suite/main.py DELETED
@@ -1,294 +0,0 @@
1
- import atexit, requests, subprocess, time, re, os
2
- from random import randint
3
- from threading import Timer
4
- from queue import Queue
5
def cloudflared(port, metrics_port, output_queue):
    """Start a cloudflared quick tunnel to http://127.0.0.1:{port} and
    publish the public *.trycloudflare.com URL on `output_queue`.

    port         -- local port the tunnel forwards to.
    metrics_port -- local port cloudflared exposes its metrics page on;
                    the tunnel URL is scraped from that page.
    output_queue -- queue.Queue the discovered URL is put on.

    Raises Exception if no tunnel URL appears within ~30 seconds.
    """
    proc = subprocess.Popen(
        ['/workspace/cloudflared-linux-amd64', 'tunnel',
         '--url', f'http://127.0.0.1:{port}',
         '--metrics', f'127.0.0.1:{metrics_port}'],
        stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
    # Kill the tunnel process when the interpreter exits.
    atexit.register(proc.terminate)

    tunnel_url = None
    for _ in range(10):
        time.sleep(3)
        try:
            # Timeout added so a hung metrics endpoint doesn't eat a retry
            # slot forever; the original call could block indefinitely.
            metrics = requests.get(
                f'http://127.0.0.1:{metrics_port}/metrics', timeout=10).text
        except requests.RequestException:
            # Metrics server not up yet -- retry. (Was a bare `except:`,
            # which also hid the AttributeError from a failed regex match.)
            continue
        # Raw string with escaped dots: the original pattern's bare `.`
        # before "trycloudflare" matched any character.
        match = re.search(r"(?P<url>https?://[^\s]+\.trycloudflare\.com)", metrics)
        if match:
            tunnel_url = match.group("url")
            break

    if not tunnel_url:
        raise Exception("Can't connect to Cloudflare Edge")
    output_queue.put(tunnel_url)
18
-
19
# Spin up the Cloudflare tunnel for the ComfyUI port (8188) after a short
# delay, wait for it to report its public URL, then export and print it.
output_queue = Queue()
metrics_port = randint(8100, 9000)

tunnel_thread = Timer(2, cloudflared, args=(8188, metrics_port, output_queue))
tunnel_thread.start()
tunnel_thread.join()

tunnel_url = output_queue.get()
os.environ['webui_url'] = tunnel_url
print(tunnel_url)
26
-
27
# Must run before `from comfy.cli_args import args` below: enables CLI
# argument parsing so `args` is populated when that module is imported.
import comfy.options
comfy.options.enable_args_parsing()
29
-
30
- import subprocess
31
- import os
32
- import importlib.util
33
- import folder_paths
34
- import time
35
- from comfy.cli_args import args
36
-
37
-
38
def execute_prestartup_script():
    """Execute each custom-node package's optional `prestartup_script.py`
    and print a per-package timing report.

    Scans every directory registered under the "custom_nodes" folder type,
    skipping plain files, `.disabled` packages and `__pycache__`.
    """
    def execute_script(script_path):
        # Import the script as a throwaway module so its top-level code runs.
        module_name = os.path.splitext(script_path)[0]
        try:
            spec = importlib.util.spec_from_file_location(module_name, script_path)
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)
            return True
        except Exception as e:
            print(f"Failed to execute startup-script: {script_path} / {e}")
            return False

    node_paths = folder_paths.get_folder_paths("custom_nodes")
    # FIX: accumulate timings across all custom-node paths. The original
    # re-initialized this list inside the loop, so only the last path's
    # timings were reported (and it was unbound when node_paths was empty).
    node_prestartup_times = []
    for custom_node_path in node_paths:
        possible_modules = os.listdir(custom_node_path)

        for possible_module in possible_modules:
            module_path = os.path.join(custom_node_path, possible_module)
            # FIX: compare the directory entry name against "__pycache__";
            # the original compared the full joined path, which can never
            # equal the bare name, so the check was dead.
            if os.path.isfile(module_path) or module_path.endswith(".disabled") or possible_module == "__pycache__":
                continue

            script_path = os.path.join(module_path, "prestartup_script.py")
            if os.path.exists(script_path):
                time_before = time.perf_counter()
                success = execute_script(script_path)
                node_prestartup_times.append((time.perf_counter() - time_before, module_path, success))

    if len(node_prestartup_times) > 0:
        print("\nPrestartup times for custom nodes:")
        for n in sorted(node_prestartup_times):
            if n[2]:
                import_message = ""
            else:
                import_message = " (PRESTARTUP FAILED)"
            print("{:6.1f} seconds{}:".format(n[0], import_message), n[1])
        print()

execute_prestartup_script()
76
-
77
-
78
- # Main code
79
- import asyncio
80
- import itertools
81
- import shutil
82
- import threading
83
- import gc
84
-
85
- import logging
86
-
87
# On Windows, xformers logs a noisy warning when a matching Triton build is
# unavailable; filter that specific message out of the "xformers" logger.
if os.name == "nt":
    logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())
89
-
90
if __name__ == "__main__":
    # Pin CUDA device selection *before* anything imports torch so it takes
    # effect process-wide.
    if args.cuda_device is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda_device)
        logging.info("Set cuda device to: {}".format(args.cuda_device))

    if args.deterministic:
        # Required by cuBLAS for deterministic kernels; respect an explicit
        # user-provided value if already set.
        if 'CUBLAS_WORKSPACE_CONFIG' not in os.environ:
            os.environ['CUBLAS_WORKSPACE_CONFIG'] = ":4096:8"

    import cuda_malloc

    if args.windows_standalone_build:
        try:
            import fix_torch
        except Exception:
            # Best-effort patch for the standalone build. FIX: narrowed the
            # original bare `except:` so SystemExit/KeyboardInterrupt are
            # not silently swallowed.
            pass
106
-
107
- import comfy.utils
108
- import yaml
109
-
110
- import execution
111
- import server
112
- from server import BinaryEventTypes
113
- import nodes
114
- import comfy.model_management
115
-
116
def cuda_malloc_warning():
    """Warn when the active torch device is on cuda_malloc's blacklist but
    cudaMallocAsync is in use, suggesting --disable-cuda-malloc."""
    device = comfy.model_management.get_torch_device()
    device_name = comfy.model_management.get_torch_device_name(device)
    # FIX: local renamed -- the original bool shadowed this function's own
    # name; the manual flag loop is replaced with any().
    needs_warning = False
    if "cudaMallocAsync" in device_name:
        needs_warning = any(b in device_name for b in cuda_malloc.blacklist)
    if needs_warning:
        logging.warning("\nWARNING: this card most likely does not support cuda-malloc, if you get \"CUDA error\" please run ComfyUI with: --disable-cuda-malloc\n")
126
-
127
def prompt_worker(q, server):
    """Daemon-thread loop: execute queued prompts and run periodic GC.

    q      -- execution.PromptQueue to pull work items and control flags from.
    server -- PromptServer used for completion/status notifications.
    Runs forever; never returns.
    """
    e = execution.PromptExecutor(server)
    last_gc_collect = 0
    need_gc = False
    gc_collect_interval = 10.0
    # FIX: `current_time` was unbound until the first prompt executed; a
    # flags-only wakeup (e.g. "unload_models") that set need_gc could then
    # raise NameError when computing the next timeout. Initialize upfront.
    current_time = time.perf_counter()

    while True:
        timeout = 1000.0
        if need_gc:
            # Wake in time for the next scheduled GC pass.
            timeout = max(gc_collect_interval - (current_time - last_gc_collect), 0.0)

        queue_item = q.get(timeout=timeout)
        if queue_item is not None:
            item, item_id = queue_item
            execution_start_time = time.perf_counter()
            prompt_id = item[1]
            server.last_prompt_id = prompt_id

            e.execute(item[2], prompt_id, item[3], item[4])
            need_gc = True
            q.task_done(item_id,
                        e.outputs_ui,
                        status=execution.PromptQueue.ExecutionStatus(
                            status_str='success' if e.success else 'error',
                            completed=e.success,
                            messages=e.status_messages))
            if server.client_id is not None:
                # Notify the client the prompt finished (node=None marks completion).
                server.send_sync("executing", { "node": None, "prompt_id": prompt_id }, server.client_id)

            current_time = time.perf_counter()
            execution_time = current_time - execution_start_time
            logging.info("Prompt executed in {:.2f} seconds".format(execution_time))

        flags = q.get_flags()
        free_memory = flags.get("free_memory", False)

        # free_memory implies unload_models (it is the default for the get).
        if flags.get("unload_models", free_memory):
            comfy.model_management.unload_all_models()
            need_gc = True
            last_gc_collect = 0

        if free_memory:
            e.reset()
            need_gc = True
            last_gc_collect = 0

        if need_gc:
            current_time = time.perf_counter()
            if (current_time - last_gc_collect) > gc_collect_interval:
                comfy.model_management.cleanup_models()
                gc.collect()
                comfy.model_management.soft_empty_cache()
                last_gc_collect = current_time
                need_gc = False
181
-
182
async def run(server, address='', port=8188, verbose=True, call_on_start=None):
    """Run the web server and its publish loop concurrently until both complete."""
    serve = server.start(address, port, verbose, call_on_start)
    publish = server.publish_loop()
    await asyncio.gather(serve, publish)
184
-
185
-
186
def hijack_progress(server):
    """Install a global progress-bar hook that relays progress (and preview
    images, when present) to the server's current client."""
    def on_progress(value, total, preview_image):
        # Bail out promptly if the user interrupted processing.
        comfy.model_management.throw_exception_if_processing_interrupted()
        server.send_sync(
            "progress",
            {
                "value": value,
                "max": total,
                "prompt_id": server.last_prompt_id,
                "node": server.last_node_id,
            },
            server.client_id,
        )
        if preview_image is not None:
            server.send_sync(BinaryEventTypes.UNENCODED_PREVIEW_IMAGE, preview_image, server.client_id)

    comfy.utils.set_progress_bar_global_hook(on_progress)
195
-
196
-
197
def cleanup_temp():
    """Remove ComfyUI's temp directory if present, ignoring deletion errors."""
    tmp = folder_paths.get_temp_directory()
    if os.path.exists(tmp):
        shutil.rmtree(tmp, ignore_errors=True)
201
-
202
-
203
def load_extra_path_config(yaml_path):
    """Register extra model search paths from a YAML config file.

    Each top-level section maps folder types to newline-separated paths; an
    optional "base_path" key in a section is prefixed to every path in it.
    Empty sections and empty path lines are skipped.
    """
    with open(yaml_path, 'r') as stream:
        config = yaml.safe_load(stream)
    for section_name in config:
        section = config[section_name]
        if section is None:
            continue
        # Pull out the optional base_path before walking the folder entries.
        base_path = section.pop("base_path") if "base_path" in section else None
        for folder_type in section:
            for candidate in section[folder_type].split("\n"):
                if not candidate:
                    continue
                full_path = candidate if base_path is None else os.path.join(base_path, candidate)
                logging.info("Adding extra search path {} {}".format(folder_type, full_path))
                folder_paths.add_model_folder_path(folder_type, full_path)
222
-
223
-
224
if __name__ == "__main__":
    # --- Startup sequence: order matters throughout this block. ---

    # Optional override of the temp directory; cleared now and again on exit.
    if args.temp_directory:
        temp_dir = os.path.join(os.path.abspath(args.temp_directory), "temp")
        logging.info(f"Setting temp directory to: {temp_dir}")
        folder_paths.set_temp_directory(temp_dir)
    cleanup_temp()

    # Best-effort self-update of the standalone Windows updater.
    if args.windows_standalone_build:
        try:
            import new_updater
            new_updater.update_windows_updater()
        except:
            pass

    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    # NOTE: rebinds the `server` module name to the PromptServer instance;
    # the module is unreachable under that name from here on.
    server = server.PromptServer(loop)
    q = execution.PromptQueue(server)

    # Load extra model search paths: first the file next to this script,
    # then any configs passed via --extra-model-paths-config.
    extra_model_paths_config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "extra_model_paths.yaml")
    if os.path.isfile(extra_model_paths_config_path):
        load_extra_path_config(extra_model_paths_config_path)

    if args.extra_model_paths_config:
        for config_path in itertools.chain(*args.extra_model_paths_config):
            load_extra_path_config(config_path)

    nodes.init_extra_nodes(init_custom_nodes=not args.disable_all_custom_nodes)

    cuda_malloc_warning()

    server.add_routes()
    hijack_progress(server)

    # Prompt execution happens on a daemon worker thread; the main thread
    # runs the asyncio server loop below.
    threading.Thread(target=prompt_worker, daemon=True, args=(q, server,)).start()

    if args.output_directory:
        output_dir = os.path.abspath(args.output_directory)
        logging.info(f"Setting output directory to: {output_dir}")
        folder_paths.set_output_directory(output_dir)

    # These are the default folders that checkpoints, clip and vae models
    # will be saved to when using CheckpointSave, etc. nodes.
    folder_paths.add_model_folder_path("checkpoints", os.path.join(folder_paths.get_output_directory(), "checkpoints"))
    folder_paths.add_model_folder_path("clip", os.path.join(folder_paths.get_output_directory(), "clip"))
    folder_paths.add_model_folder_path("vae", os.path.join(folder_paths.get_output_directory(), "vae"))

    if args.input_directory:
        input_dir = os.path.abspath(args.input_directory)
        logging.info(f"Setting input directory to: {input_dir}")
        folder_paths.set_input_directory(input_dir)

    # CI smoke test: exit once startup has succeeded.
    if args.quick_test_for_ci:
        exit(0)

    call_on_start = None
    if args.auto_launch:
        def startup_server(scheme, address, port):
            import webbrowser
            if os.name == 'nt' and address == '0.0.0.0':
                address = '127.0.0.1'
            webbrowser.open(f"{scheme}://{address}:{port}")
        call_on_start = startup_server


    try:
        loop.run_until_complete(run(server, address=args.listen, port=args.port, verbose=not args.dont_print_server, call_on_start=call_on_start))
        # NOTE(review): this runs an arbitrary "script.py" after the server
        # loop ends — not part of upstream ComfyUI, and combined with the
        # cloudflared tunnel above it looks like injected/untrusted code.
        # Confirm what script.py does before keeping this.
        subprocess.run(["python", "script.py"])
    except KeyboardInterrupt:
        logging.info("\nStopped server")

    cleanup_temp()