theanhntp commited on
Commit
e985e15
·
verified ·
1 Parent(s): 262f56a

Delete Was_node_suite/main.py.py

Browse files
Files changed (1) hide show
  1. Was_node_suite/main.py.py +0 -295
Was_node_suite/main.py.py DELETED
@@ -1,295 +0,0 @@
import atexit, requests, subprocess, time, re, os
from random import randint
from threading import Timer
from queue import Queue


def cloudflared(port, metrics_port, output_queue):
    """Start a cloudflared quick tunnel to http://127.0.0.1:<port> and put the public URL on output_queue.

    The tunnel process is registered with atexit so it is terminated when the
    interpreter exits.  The public URL is discovered by polling cloudflared's
    local metrics endpoint (up to 10 attempts, 3 s apart).

    Raises:
        RuntimeError: if no tunnel URL could be obtained after 10 attempts.
    """
    process = subprocess.Popen(
        ['/workspace/ComfyUI/cloudflared-linux-amd64', 'tunnel',
         '--url', f'http://127.0.0.1:{port}',
         '--metrics', f'127.0.0.1:{metrics_port}'],
        stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
    atexit.register(process.terminate)

    # Raw string, dots escaped: the original pattern used "\/" (invalid escape)
    # and unescaped dots, so "." matched any character.
    url_pattern = re.compile(r"(?P<url>https?://[^\s]+\.trycloudflare\.com)")

    tunnel_url = None
    for _ in range(10):
        time.sleep(3)
        try:
            metrics = requests.get(f'http://127.0.0.1:{metrics_port}/metrics').text
        except requests.RequestException:
            # Metrics endpoint not up yet; retry.  (Original used a bare except.)
            continue
        match = url_pattern.search(metrics)
        if match:
            tunnel_url = match.group("url")
            break
    if not tunnel_url:
        raise RuntimeError("Can't connect to Cloudflare Edge")
    output_queue.put(tunnel_url)


# Launch the tunnel for the ComfyUI port (8188) on a delayed thread, then
# block until the public URL is available and export it for other tools.
output_queue, metrics_port = Queue(), randint(8100, 9000)
thread = Timer(2, cloudflared, args=(8188, metrics_port, output_queue))
thread.start()
thread.join()
tunnel_url = output_queue.get()
os.environ['webui_url'] = tunnel_url
print(tunnel_url)
import comfy.options
# Must be called before `from comfy.cli_args import args` below so that the
# command-line arguments are actually parsed.
comfy.options.enable_args_parsing()

import os
import importlib.util
import folder_paths
import time
from comfy.cli_args import args
def execute_prestartup_script():
    """Run each custom node's prestartup_script.py before the heavy imports.

    Scans every configured custom_nodes folder; for each node directory
    (skipping plain files, "*.disabled" entries and __pycache__) executes its
    prestartup_script.py if present, then prints a timing summary.  Honors
    the --disable-all-custom-nodes flag.
    """
    def execute_script(script_path):
        # Import the script as a throwaway module; report failures but keep going.
        module_name = os.path.splitext(script_path)[0]
        try:
            spec = importlib.util.spec_from_file_location(module_name, script_path)
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)
            return True
        except Exception as e:
            print(f"Failed to execute startup-script: {script_path} / {e}")
            return False

    if args.disable_all_custom_nodes:
        return

    node_paths = folder_paths.get_folder_paths("custom_nodes")
    # Collected across ALL custom_nodes folders.  The original initialized this
    # inside the outer loop, so the summary only covered the last folder and
    # would NameError if node_paths were empty.
    node_prestartup_times = []
    for custom_node_path in node_paths:
        possible_modules = os.listdir(custom_node_path)

        for possible_module in possible_modules:
            module_path = os.path.join(custom_node_path, possible_module)
            # Compare the bare entry name: the original compared the full
            # joined path against "__pycache__", which could never match.
            if os.path.isfile(module_path) or possible_module.endswith(".disabled") or possible_module == "__pycache__":
                continue

            script_path = os.path.join(module_path, "prestartup_script.py")
            if os.path.exists(script_path):
                time_before = time.perf_counter()
                success = execute_script(script_path)
                node_prestartup_times.append((time.perf_counter() - time_before, module_path, success))

    if node_prestartup_times:
        print("\nPrestartup times for custom nodes:")
        for n in sorted(node_prestartup_times):
            if n[2]:
                import_message = ""
            else:
                import_message = " (PRESTARTUP FAILED)"
            print("{:6.1f} seconds{}:".format(n[0], import_message), n[1])
        print()

execute_prestartup_script()
# Main code
import asyncio
import itertools
import shutil
import threading
import gc

import logging

if os.name == "nt":
    # On Windows, silence xformers' warning about a missing Triton build;
    # it is expected there and only adds log noise.
    logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())
if __name__ == "__main__":
    # GPU/environment knobs must be set before torch is imported (which the
    # imports below trigger indirectly).
    if args.cuda_device is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda_device)
        logging.info("Set cuda device to: {}".format(args.cuda_device))

    if args.deterministic:
        # cuBLAS requires this for deterministic kernels; respect a value the
        # user already provided.
        if 'CUBLAS_WORKSPACE_CONFIG' not in os.environ:
            os.environ['CUBLAS_WORKSPACE_CONFIG'] = ":4096:8"

    import cuda_malloc

    if args.windows_standalone_build:
        try:
            import fix_torch
        except Exception:
            # Best-effort patch for the standalone build.  Narrowed from a
            # bare `except:` so KeyboardInterrupt/SystemExit are not swallowed.
            pass

import comfy.utils
import yaml

import execution
import server
from server import BinaryEventTypes
import nodes
import comfy.model_management
def cuda_malloc_warning():
    """Log a warning when the active torch device is blacklisted for cudaMallocAsync."""
    device = comfy.model_management.get_torch_device()
    device_name = comfy.model_management.get_torch_device_name(device)
    # Only relevant when the async allocator is actually in use.
    if "cudaMallocAsync" not in device_name:
        return
    if any(entry in device_name for entry in cuda_malloc.blacklist):
        logging.warning("\nWARNING: this card most likely does not support cuda-malloc, if you get \"CUDA error\" please run ComfyUI with: --disable-cuda-malloc\n")
def prompt_worker(q, server):
    """Blocking worker loop: pull queued prompts, execute them, report status.

    Runs on a daemon thread (started from the __main__ block).  Also performs
    throttled cleanup — model unload, gc.collect, cache emptying — at most
    once every `gc_collect_interval` seconds after work has happened.
    """
    e = execution.PromptExecutor(server, lru_size=args.cache_lru)
    last_gc_collect = 0          # perf_counter timestamp of the last GC pass
    need_gc = False              # set after executing a prompt or on queue flags
    gc_collect_interval = 10.0   # minimum seconds between GC passes

    while True:
        timeout = 1000.0
        if need_gc:
            # Shorten the queue wait so GC runs on schedule even when idle.
            # `current_time` is always bound here: need_gc only becomes True
            # on a path that assigns current_time later in the same iteration.
            timeout = max(gc_collect_interval - (current_time - last_gc_collect), 0.0)

        queue_item = q.get(timeout=timeout)
        if queue_item is not None:
            item, item_id = queue_item
            execution_start_time = time.perf_counter()
            prompt_id = item[1]
            server.last_prompt_id = prompt_id

            e.execute(item[2], prompt_id, item[3], item[4])
            need_gc = True
            # Report results + success/error status back to the queue.
            q.task_done(item_id,
                        e.history_result,
                        status=execution.PromptQueue.ExecutionStatus(
                            status_str='success' if e.success else 'error',
                            completed=e.success,
                            messages=e.status_messages))
            if server.client_id is not None:
                # node=None signals the client that the prompt is fully finished.
                server.send_sync("executing", { "node": None, "prompt_id": prompt_id }, server.client_id)

            current_time = time.perf_counter()
            execution_time = current_time - execution_start_time
            logging.info("Prompt executed in {:.2f} seconds".format(execution_time))

        # Out-of-band requests (e.g. from the API) piggybacked on the queue.
        flags = q.get_flags()
        free_memory = flags.get("free_memory", False)

        # free_memory implies unload_models (it is the default for the lookup).
        if flags.get("unload_models", free_memory):
            comfy.model_management.unload_all_models()
            need_gc = True
            last_gc_collect = 0  # force the GC pass below to run now

        if free_memory:
            e.reset()
            need_gc = True
            last_gc_collect = 0

        if need_gc:
            current_time = time.perf_counter()
            if (current_time - last_gc_collect) > gc_collect_interval:
                comfy.model_management.cleanup_models()
                gc.collect()
                comfy.model_management.soft_empty_cache()
                last_gc_collect = current_time
                need_gc = False
async def run(server, address='', port=8188, verbose=True, call_on_start=None):
    """Run the web server and its publish loop concurrently until both complete."""
    serve = server.start(address, port, verbose, call_on_start)
    publish = server.publish_loop()
    await asyncio.gather(serve, publish)
def hijack_progress(server):
    """Install a global progress-bar hook that relays progress (and previews) to the client."""
    def hook(value, total, preview_image):
        # Abort promptly if the user interrupted processing.
        comfy.model_management.throw_exception_if_processing_interrupted()
        payload = {
            "value": value,
            "max": total,
            "prompt_id": server.last_prompt_id,
            "node": server.last_node_id,
        }
        server.send_sync("progress", payload, server.client_id)
        if preview_image is None:
            return
        server.send_sync(BinaryEventTypes.UNENCODED_PREVIEW_IMAGE, preview_image, server.client_id)

    comfy.utils.set_progress_bar_global_hook(hook)
def cleanup_temp():
    """Remove the temp directory tree if it exists, ignoring removal errors."""
    temp_dir = folder_paths.get_temp_directory()
    if not os.path.exists(temp_dir):
        return
    shutil.rmtree(temp_dir, ignore_errors=True)
def load_extra_path_config(yaml_path):
    """Register extra model search paths from a YAML config file.

    Each top-level section maps folder-type names to newline-separated paths,
    optionally prefixed by a per-section "base_path" entry.

    Args:
        yaml_path: path to the YAML configuration file.
    """
    with open(yaml_path, 'r') as stream:
        config = yaml.safe_load(stream)
    if config is None:
        # Empty config file: nothing to register.
        # (The original iterated None here and raised TypeError.)
        return
    for section, conf in config.items():
        if conf is None:
            continue
        # Optional prefix applied to every path in this section.
        base_path = conf.pop("base_path", None)
        for x, paths in conf.items():
            for y in paths.split("\n"):
                if len(y) == 0:
                    continue
                full_path = y
                if base_path is not None:
                    full_path = os.path.join(base_path, full_path)
                logging.info("Adding extra search path {} {}".format(x, full_path))
                folder_paths.add_model_folder_path(x, full_path)
if __name__ == "__main__":
    # --- Directory setup -------------------------------------------------
    if args.temp_directory:
        temp_dir = os.path.join(os.path.abspath(args.temp_directory), "temp")
        logging.info(f"Setting temp directory to: {temp_dir}")
        folder_paths.set_temp_directory(temp_dir)
    # Clear any leftovers from a previous run before the server starts.
    cleanup_temp()

    if args.windows_standalone_build:
        try:
            import new_updater
            new_updater.update_windows_updater()
        except:
            # Best-effort updater refresh for the standalone build.
            pass

    # --- Core objects ----------------------------------------------------
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    # NOTE: deliberately rebinds the `server` MODULE name to the PromptServer
    # INSTANCE; the module is not referenced again after this point.
    server = server.PromptServer(loop)
    q = execution.PromptQueue(server)

    # --- Extra model paths (file next to this script, then CLI-provided) --
    extra_model_paths_config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "extra_model_paths.yaml")
    if os.path.isfile(extra_model_paths_config_path):
        load_extra_path_config(extra_model_paths_config_path)

    if args.extra_model_paths_config:
        # args.extra_model_paths_config is a list of lists (append action).
        for config_path in itertools.chain(*args.extra_model_paths_config):
            load_extra_path_config(config_path)

    nodes.init_extra_nodes(init_custom_nodes=not args.disable_all_custom_nodes)

    cuda_malloc_warning()

    server.add_routes()
    hijack_progress(server)

    # Prompt execution happens on a daemon thread so the event loop stays free.
    threading.Thread(target=prompt_worker, daemon=True, args=(q, server,)).start()

    if args.output_directory:
        output_dir = os.path.abspath(args.output_directory)
        logging.info(f"Setting output directory to: {output_dir}")
        folder_paths.set_output_directory(output_dir)

    #These are the default folders that checkpoints, clip and vae models will be saved to when using CheckpointSave, etc.. nodes
    folder_paths.add_model_folder_path("checkpoints", os.path.join(folder_paths.get_output_directory(), "checkpoints"))
    folder_paths.add_model_folder_path("clip", os.path.join(folder_paths.get_output_directory(), "clip"))
    folder_paths.add_model_folder_path("vae", os.path.join(folder_paths.get_output_directory(), "vae"))
    folder_paths.add_model_folder_path("diffusion_models", os.path.join(folder_paths.get_output_directory(), "diffusion_models"))

    if args.input_directory:
        input_dir = os.path.abspath(args.input_directory)
        logging.info(f"Setting input directory to: {input_dir}")
        folder_paths.set_input_directory(input_dir)

    # CI smoke test: everything above imported/initialized cleanly, stop here.
    if args.quick_test_for_ci:
        exit(0)

    call_on_start = None
    if args.auto_launch:
        def startup_server(scheme, address, port):
            # Open the UI in the default browser once the server is listening.
            import webbrowser
            if os.name == 'nt' and address == '0.0.0.0':
                address = '127.0.0.1'
            webbrowser.open(f"{scheme}://{address}:{port}")
        call_on_start = startup_server

    # --- Run until interrupted -------------------------------------------
    try:
        loop.run_until_complete(server.setup())
        loop.run_until_complete(run(server, address=args.listen, port=args.port, verbose=not args.dont_print_server, call_on_start=call_on_start))
    except KeyboardInterrupt:
        logging.info("\nStopped server")

    cleanup_temp()