new dockerfile
Browse files- Dockerfile +54 -0
- entrypoint.sh +14 -0
- requirements.txt +23 -0
- wsj-api-rnd-v2.json +162 -0
- wsj-server.py +107 -0
Dockerfile
ADDED
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
FROM nvidia/cuda:11.8.0-cudnn8-runtime-ubuntu22.04

ENV DEBIAN_FRONTEND=noninteractive \
    TZ=America/Los_Angeles

# wget is required by the model-download step below; it was missing from the
# original package list, which made that later RUN fail.
RUN apt-get update && apt-get install -y \
    git \
    wget \
    python3 \
    python3-pip \
    ffmpeg \
    libsm6 \
    libxext6 \
    libgl1-mesa-glx \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /code

COPY ./requirements.txt /code/requirements.txt

# Run as an unprivileged user (uid 1000) from here on.
RUN useradd -m -u 1000 user
USER user
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH

RUN pip install --no-cache-dir --upgrade pip setuptools wheel
RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt

WORKDIR $HOME/app

# Clone ComfyUI and install its own requirements.
RUN git clone https://github.com/comfyanonymous/ComfyUI.git . && \
    pip install --no-cache-dir -r requirements.txt

# Download specific models.
# NOTE(review): FLUX.1-dev is a gated Hugging Face repo; anonymous wget will
# likely get 401/403 — confirm whether an auth header
# (e.g. --header="Authorization: Bearer $HF_TOKEN") must be threaded in.
# The VAE is saved as diffusion_pytorch_model_flux.safetensors because that is
# the exact filename the wsj-api-rnd-v2.json VAELoader node expects.
RUN mkdir -p ./models/unet ./models/vae ./models/clip && \
    wget -c https://huggingface.co/black-forest-labs/FLUX.1-dev/resolve/main/flux1-dev.safetensors -P ./models/unet/ && \
    wget -c https://huggingface.co/black-forest-labs/FLUX.1-dev/resolve/main/vae/diffusion_pytorch_model.safetensors -O ./models/vae/diffusion_pytorch_model_flux.safetensors && \
    wget -c https://huggingface.co/comfyanonymous/flux_text_encoders/resolve/main/clip_l.safetensors -P ./models/clip/ && \
    wget -c https://huggingface.co/comfyanonymous/flux_text_encoders/resolve/main/t5xxl_fp16.safetensors -P ./models/clip/ && \
    wget -c https://huggingface.co/comfyanonymous/flux_text_encoders/resolve/main/t5xxl_fp8_e4m3fn.safetensors -P ./models/clip/

# Copy the API server AND the workflow it opens at request time
# (wsj-server.py reads wsj-api-rnd-v2.json from its working directory,
# so the image must contain it).
COPY --chown=user wsj-server.py wsj-api-rnd-v2.json ./

# Expose ports for ComfyUI (8188) and wsj-server (7860)
EXPOSE 8188 7860

# Create entrypoint script. printf is used instead of echo because echo's
# backslash-escape handling is shell-dependent; python3 is used because the
# ubuntu22.04 base image provides no bare `python` executable.
RUN printf '%s\n' \
    '#!/bin/bash' \
    'python3 main.py --listen 0.0.0.0 --port 8188 --use-split-cross-attention &' \
    'python3 wsj-server.py &' \
    'wait -n' > entrypoint.sh && chmod +x entrypoint.sh

CMD ["./entrypoint.sh"]
|
entrypoint.sh
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/bin/bash
# Launch ComfyUI and the wsj FastAPI server side by side; exit as soon as
# either one dies so the container supervisor can restart it.
set -e

# Start ComfyUI in the background. python3, not python: the ubuntu22.04
# base image installs python3 only and provides no `python` alias.
python3 main.py --listen &

# Start the wsj-server API in the background.
python3 wsj-server.py &

# Block until whichever background process exits first.
wait -n

# Propagate that process's exit status as the container's exit status.
exit $?
|
requirements.txt
ADDED
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
fastapi
|
2 |
+
uvicorn[standard]
|
3 |
+
pillow
|
4 |
+
torch
|
5 |
+
torchsde
|
6 |
+
torchvision
|
7 |
+
torchaudio
|
8 |
+
einops
|
9 |
+
transformers>=4.28.1
|
10 |
+
tokenizers>=0.13.3
|
11 |
+
sentencepiece
|
12 |
+
safetensors>=0.4.2
|
13 |
+
aiohttp
|
14 |
+
pyyaml
|
15 |
+
# Pillow removed: duplicate of "pillow" listed above
|
16 |
+
scipy
|
17 |
+
tqdm
|
18 |
+
psutil
|
19 |
+
|
20 |
+
#non essential dependencies:
|
21 |
+
kornia>=0.7.1
|
22 |
+
spandrel
|
23 |
+
soundfile
|
wsj-api-rnd-v2.json
ADDED
@@ -0,0 +1,162 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"5": {
|
3 |
+
"inputs": {
|
4 |
+
"width": 1024,
|
5 |
+
"height": 1024,
|
6 |
+
"batch_size": 1
|
7 |
+
},
|
8 |
+
"class_type": "EmptyLatentImage",
|
9 |
+
"_meta": {
|
10 |
+
"title": "Empty Latent Image"
|
11 |
+
}
|
12 |
+
},
|
13 |
+
"6": {
|
14 |
+
"inputs": {
|
15 |
+
"text": "Create a painting in the style of American Realism or American Gothic, inspired by a political landscape reflecting the growing influence of evangelical movements in the U.S. Depict a large revival tent under a darkened sky, with a preacher, Lance Wallnau, standing among a crowd of around 2,000 people. The preacher's posture should convey urgency as he lays hands on those seeking prayer, emphasizing the idea of political and spiritual mobilization. In the distance, banners with slogans supporting Donald Trump are visible, alongside imagery evoking the tension between religion and politics, with Christian symbols intertwined with political messages. The atmosphere should evoke a somber, yet intense energy, blending realism with a sense of looming uncertainty and urgency. Include the presence of JD Vance addressing the crowd in the background, emphasizing the merging of faith and politics",
|
16 |
+
"clip": [
|
17 |
+
"11",
|
18 |
+
0
|
19 |
+
]
|
20 |
+
},
|
21 |
+
"class_type": "CLIPTextEncode",
|
22 |
+
"_meta": {
|
23 |
+
"title": "articlePrompt"
|
24 |
+
}
|
25 |
+
},
|
26 |
+
"8": {
|
27 |
+
"inputs": {
|
28 |
+
"samples": [
|
29 |
+
"13",
|
30 |
+
0
|
31 |
+
],
|
32 |
+
"vae": [
|
33 |
+
"10",
|
34 |
+
0
|
35 |
+
]
|
36 |
+
},
|
37 |
+
"class_type": "VAEDecode",
|
38 |
+
"_meta": {
|
39 |
+
"title": "VAE Decode"
|
40 |
+
}
|
41 |
+
},
|
42 |
+
"9": {
|
43 |
+
"inputs": {
|
44 |
+
"filename_prefix": "ComfyUI",
|
45 |
+
"images": [
|
46 |
+
"8",
|
47 |
+
0
|
48 |
+
]
|
49 |
+
},
|
50 |
+
"class_type": "SaveImage",
|
51 |
+
"_meta": {
|
52 |
+
"title": "Save Image"
|
53 |
+
}
|
54 |
+
},
|
55 |
+
"10": {
|
56 |
+
"inputs": {
|
57 |
+
"vae_name": "diffusion_pytorch_model_flux.safetensors"
|
58 |
+
},
|
59 |
+
"class_type": "VAELoader",
|
60 |
+
"_meta": {
|
61 |
+
"title": "Load VAE"
|
62 |
+
}
|
63 |
+
},
|
64 |
+
"11": {
|
65 |
+
"inputs": {
|
66 |
+
"clip_name1": "t5xxl_fp16.safetensors",
|
67 |
+
"clip_name2": "clip_l.safetensors",
|
68 |
+
"type": "flux"
|
69 |
+
},
|
70 |
+
"class_type": "DualCLIPLoader",
|
71 |
+
"_meta": {
|
72 |
+
"title": "DualCLIPLoader"
|
73 |
+
}
|
74 |
+
},
|
75 |
+
"12": {
|
76 |
+
"inputs": {
|
77 |
+
"unet_name": "flux1-dev.safetensors",
|
78 |
+
"weight_dtype": "default"
|
79 |
+
},
|
80 |
+
"class_type": "UNETLoader",
|
81 |
+
"_meta": {
|
82 |
+
"title": "Load Diffusion Model"
|
83 |
+
}
|
84 |
+
},
|
85 |
+
"13": {
|
86 |
+
"inputs": {
|
87 |
+
"noise": [
|
88 |
+
"25",
|
89 |
+
0
|
90 |
+
],
|
91 |
+
"guider": [
|
92 |
+
"22",
|
93 |
+
0
|
94 |
+
],
|
95 |
+
"sampler": [
|
96 |
+
"16",
|
97 |
+
0
|
98 |
+
],
|
99 |
+
"sigmas": [
|
100 |
+
"17",
|
101 |
+
0
|
102 |
+
],
|
103 |
+
"latent_image": [
|
104 |
+
"5",
|
105 |
+
0
|
106 |
+
]
|
107 |
+
},
|
108 |
+
"class_type": "SamplerCustomAdvanced",
|
109 |
+
"_meta": {
|
110 |
+
"title": "SamplerCustomAdvanced"
|
111 |
+
}
|
112 |
+
},
|
113 |
+
"16": {
|
114 |
+
"inputs": {
|
115 |
+
"sampler_name": "euler"
|
116 |
+
},
|
117 |
+
"class_type": "KSamplerSelect",
|
118 |
+
"_meta": {
|
119 |
+
"title": "KSamplerSelect"
|
120 |
+
}
|
121 |
+
},
|
122 |
+
"17": {
|
123 |
+
"inputs": {
|
124 |
+
"scheduler": "simple",
|
125 |
+
"steps": 20,
|
126 |
+
"denoise": 1,
|
127 |
+
"model": [
|
128 |
+
"12",
|
129 |
+
0
|
130 |
+
]
|
131 |
+
},
|
132 |
+
"class_type": "BasicScheduler",
|
133 |
+
"_meta": {
|
134 |
+
"title": "BasicScheduler"
|
135 |
+
}
|
136 |
+
},
|
137 |
+
"22": {
|
138 |
+
"inputs": {
|
139 |
+
"model": [
|
140 |
+
"12",
|
141 |
+
0
|
142 |
+
],
|
143 |
+
"conditioning": [
|
144 |
+
"6",
|
145 |
+
0
|
146 |
+
]
|
147 |
+
},
|
148 |
+
"class_type": "BasicGuider",
|
149 |
+
"_meta": {
|
150 |
+
"title": "BasicGuider"
|
151 |
+
}
|
152 |
+
},
|
153 |
+
"25": {
|
154 |
+
"inputs": {
|
155 |
+
"noise_seed": 533166222602070
|
156 |
+
},
|
157 |
+
"class_type": "RandomNoise",
|
158 |
+
"_meta": {
|
159 |
+
"title": "RandomNoise"
|
160 |
+
}
|
161 |
+
}
|
162 |
+
}
|
wsj-server.py
ADDED
@@ -0,0 +1,107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#This is an example that uses the websockets api to know when a prompt execution is done
|
2 |
+
#Once the prompt execution is done it downloads the images using the /history endpoint
|
3 |
+
|
4 |
+
import websocket #NOTE: websocket-client (https://github.com/websocket-client/websocket-client)
|
5 |
+
import uuid
|
6 |
+
import json
|
7 |
+
import urllib.request
|
8 |
+
import urllib.parse
|
9 |
+
import random
|
10 |
+
import io
|
11 |
+
from fastapi import FastAPI, Response
|
12 |
+
from pydantic import BaseModel
|
13 |
+
from PIL import Image
|
14 |
+
|
15 |
+
server_address = "127.0.0.1:8188"
|
16 |
+
client_id = str(uuid.uuid4())
|
17 |
+
|
18 |
+
def queue_prompt(prompt):
    """Submit *prompt* (a ComfyUI workflow dict) to the /prompt endpoint.

    Returns the decoded JSON response, which contains the ``prompt_id``
    used to track execution progress.
    """
    payload = {"prompt": prompt, "client_id": client_id}
    data = json.dumps(payload).encode('utf-8')
    req = urllib.request.Request("http://{}/prompt".format(server_address), data=data)
    # Use a context manager so the HTTP response is closed deterministically;
    # the original leaked the connection (and was inconsistent with
    # get_image/get_history, which both already use `with`).
    with urllib.request.urlopen(req) as response:
        return json.loads(response.read())
|
23 |
+
|
24 |
+
def get_image(filename, subfolder, folder_type):
    """Fetch one rendered image from ComfyUI's /view endpoint as raw bytes."""
    params = {"filename": filename, "subfolder": subfolder, "type": folder_type}
    query = urllib.parse.urlencode(params)
    view_url = "http://{}/view?{}".format(server_address, query)
    with urllib.request.urlopen(view_url) as response:
        return response.read()
|
29 |
+
|
30 |
+
def get_history(prompt_id):
    """Return the ComfyUI /history record for *prompt_id* as a dict."""
    history_url = "http://{}/history/{}".format(server_address, prompt_id)
    with urllib.request.urlopen(history_url) as response:
        return json.loads(response.read())
|
33 |
+
|
34 |
+
def get_images(ws, prompt):
    """Queue *prompt* and block until ComfyUI finishes executing it, then
    download every output image.

    Parameters
    ----------
    ws : an already-connected websocket to ComfyUI's /ws endpoint.
    prompt : the workflow dict to execute.

    Returns a dict mapping output node id -> list of raw image bytes
    (an empty list for output nodes that produced no images).
    """
    prompt_id = queue_prompt(prompt)['prompt_id']

    # Wait on websocket status messages until ComfyUI reports that *our*
    # prompt has finished (an 'executing' message with node == None).
    while True:
        frame = ws.recv()
        if not isinstance(frame, str):
            # Binary frames carry latent previews; to decode one:
            #   preview = Image.open(BytesIO(frame[8:]))
            continue
        message = json.loads(frame)
        if message['type'] == 'executing':
            data = message['data']
            if data['node'] is None and data['prompt_id'] == prompt_id:
                break  # execution is done

    # Pull the finished outputs from the /history record.
    history = get_history(prompt_id)[prompt_id]
    output_images = {}
    for node_id, node_output in history['outputs'].items():
        downloaded = []
        if 'images' in node_output:
            downloaded = [
                get_image(img['filename'], img['subfolder'], img['type'])
                for img in node_output['images']
            ]
        output_images[node_id] = downloaded

    return output_images
|
62 |
+
|
63 |
+
app = FastAPI()
|
64 |
+
|
65 |
+
class PromptRequest(BaseModel):
    """Request body for POST /generate-image: the text prompt to render."""
    prompt: str
|
67 |
+
|
68 |
+
@app.post("/generate-image")
def generate_image(prompt_request: PromptRequest):
    """Render an image for the given text prompt via the local ComfyUI server.

    Declared as a plain ``def`` (not ``async def``) on purpose: every call in
    here — urllib, the blocking websocket wait, PIL — is synchronous, and an
    ``async def`` body would stall the event loop for the whole generation.
    FastAPI runs sync endpoints in a threadpool, so the HTTP interface is
    unchanged.

    Returns the first image of the first output node as a PNG response.
    """
    # Load the workflow template shipped alongside this script.
    with open("wsj-api-rnd-v2.json", "r", encoding="utf-8") as f:
        jsonwf = json.load(f)

    # Node "6" is the CLIPTextEncode ("articlePrompt") node in the workflow.
    jsonwf["6"]["inputs"]["text"] = prompt_request.prompt

    # Node "25" is the RandomNoise node; randomize the seed per request so
    # identical prompts don't always yield identical images.
    jsonwf["25"]["inputs"]["noise_seed"] = random.randint(0, 10000)

    # Connect to ComfyUI's websocket to observe execution progress.
    ws = websocket.WebSocket()
    ws.connect(f"ws://{server_address}/ws?clientId={client_id}")
    try:
        images = get_images(ws, jsonwf)
    finally:
        # Always close the socket — the original leaked the connection when
        # generation raised.
        ws.close()

    # Return the first image produced by the first output node.
    first_node = next(iter(images))
    image_data = images[first_node][0]

    # Round-trip through PIL: validates the bytes and normalizes to PNG.
    image = Image.open(io.BytesIO(image_data))
    img_byte_arr = io.BytesIO()
    image.save(img_byte_arr, format='PNG')

    return Response(content=img_byte_arr.getvalue(), media_type="image/png")
|
104 |
+
|
105 |
+
if __name__ == "__main__":
    # Serve the FastAPI app on all interfaces at port 7860 — the port the
    # Dockerfile EXPOSEs for wsj-server.
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)
|