JayIsLearning committed
Commit 5c1a453 · verified · 1 Parent(s): cf243d1

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+cache/1709866575_76532584.png filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,12 +1,6 @@
 ---
-title: FaceID
-emoji: 😻
-colorFrom: green
-colorTo: gray
+title: faceID
+app_file: websockets_api_v1_3_0229_debug_beta.py
 sdk: gradio
-sdk_version: 4.20.1
-app_file: app.py
-pinned: false
+sdk_version: 3.41.2
 ---
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
cache/1709864908_36857950.png ADDED
cache/1709866309_18671109.png ADDED
cache/1709866575_76532584.png ADDED

Git LFS Details

  • SHA256: 9623b06a2d8bfc35e824b68e6ef8ba65d32bcecb3bdad6ae1523d1e5f29ce400
  • Pointer size: 132 Bytes
  • Size of remote file: 1.03 MB
cache/1709867129_81267182.png ADDED
cache/1709867218_99613356.png ADDED
websockets_api_v1_3_0229_debug_beta.py ADDED
@@ -0,0 +1,134 @@
+import os
+import random
+import websocket  # from the websocket-client package
+import uuid
+import json
+import urllib.request
+import urllib.parse
+import gradio as gr
+from glob import glob
+import requests
+from pathlib import Path
+import base64
+from PIL import Image
+import time
+import io
+
+server_address = "127.0.0.1:8188"
+client_id = str(uuid.uuid4())
+
+
+def queue_prompt(prompt):
+    # Submit the workflow graph to ComfyUI's /prompt endpoint.
+    p = {"prompt": prompt, "client_id": client_id}
+    data = json.dumps(p).encode('utf-8')
+    req = urllib.request.Request("http://{}/prompt".format(server_address), data=data)
+    return json.loads(urllib.request.urlopen(req).read())
+
+
+def get_image(filename, subfolder, folder_type):
+    # Download one rendered image via ComfyUI's /view endpoint.
+    data = {"filename": filename, "subfolder": subfolder, "type": folder_type}
+    url_values = urllib.parse.urlencode(data)
+    with urllib.request.urlopen("http://{}/view?{}".format(server_address, url_values)) as response:
+        return response.read()
+
+
+def get_history(prompt_id):
+    with urllib.request.urlopen("http://{}/history/{}".format(server_address, prompt_id)) as response:
+        return json.loads(response.read())
+
+
+def get_images(ws, prompt):
+    # Queue the prompt, block on the websocket until execution finishes,
+    # then collect every output image recorded in the history.
+    prompt_id = queue_prompt(prompt)['prompt_id']
+    output_images = {}
+    while True:
+        out = ws.recv()
+        if isinstance(out, str):
+            message = json.loads(out)
+            if message['type'] == 'executing':
+                data = message['data']
+                if data['node'] is None and data['prompt_id'] == prompt_id:
+                    break  # execution is done
+        else:
+            continue  # previews are binary data
+
+    history = get_history(prompt_id)[prompt_id]
+    for node_id in history['outputs']:
+        node_output = history['outputs'][node_id]
+        if 'images' in node_output:
+            images_output = []
+            for image in node_output['images']:
+                image_data = get_image(image['filename'], image['subfolder'], image['type'])
+                images_output.append(image_data)
+            output_images[node_id] = images_output
+
+    return output_images
+
+
+def detect(image):
+    # Unused by the demo below: pet-head detection via an internal service.
+    img = Path(image).read_bytes()
+    rsp = requests.post('http://cv.bytedance.net/aipet_head_det/run/predict', json={
+        'data': ['data:image/png;base64,' +
+                 base64.b64encode(img).decode('utf-8'),
+                 ]
+    })
+    return rsp.json()['data'][1]
+
+
+def clip_save(img_in, coords, path="img.png"):
+    # Unused by the demo below: crop to (left, top, right, bottom) and save.
+    img = Image.open(img_in)
+    img2 = img.crop((int(coords[0]), int(coords[1]), int(coords[2]), int(coords[3])))
+    img2.save(path)
+
+
+def load_template(img_in, seed):
+    # Load the workflow JSON and patch in the input image and the seed.
+    seed = int(seed)
+    with open(workflow_base, encoding='utf-8') as file:
+        template = json.load(file)
+    template["14"]["inputs"]["image"] = img_in
+    # template["7"]["inputs"]["text"] = animal + templates[style]
+    template["3"]["inputs"]["seed"] = seed if seed > 0 else random.randint(1, 10**8)
+    # template["31"]["inputs"]["seed"] = seed if seed > 0 else random.randint(1, 10**8)
+    # template["30"]["inputs"]["lora_name"] = loras[style]
+    # template["30"]["inputs"]["strength_model"] = w_lora
+    # template["30"]["inputs"]["strength_clip"] = w_lora
+    # if debug:
+    #     print(template["6"]["inputs"]["image"], template["7"]["inputs"]["text"], template["9"]["inputs"]["seed"], template["30"]["inputs"]["lora_name"], template["30"]["inputs"]["strength_model"], template["30"]["inputs"]["strength_clip"])
+    return template
+
+
+def generate(img_in, seed):
+    seed = int(seed)
+    template = load_template(img_in, seed)
+    ws = websocket.WebSocket()
+    ws.connect("ws://{}/ws?clientId={}".format(server_address, client_id))
+    images = get_images(ws, template)
+    ws.close()
+
+    for node_id in images:
+        for image_data in images[node_id]:
+            image = Image.open(io.BytesIO(image_data))
+            path_out = "{}/{}_{}.png".format(dir_cache, int(time.time()), template["3"]["inputs"]["seed"])
+            image.save(path_out)
+
+    return image  # the last decoded image is what the UI shows
+
+
+if __name__ == '__main__':
+    workflow_base = "D:/faceID/workflow_api_anime_0306.json"
+    dir_cache = "D:/faceID/cache"
+    seed = -1
+    # debug = True
+    demo = gr.Interface(
+        fn=generate,
+        inputs=[
+            gr.Image(type='filepath'),
+            # gr.Textbox(label="Custom breed", value="", info="custom breed, for internal debugging"),
+            # gr.Radio(["发财麻将","东北大花","情人玫瑰","天使丘比特","爱心丘比特","美式证件照","新年工笔画","新年唐装","新年糖葫芦","宠物礼盒","生日快乐","雪地工笔画","破壳纪念","爱读书的学霸","米其林大厨","疯狂赛车手","工笔画","圣诞树","圣诞雪人","圣诞老人"], label="Style", info="more styles are planned, stay tuned~"),
+            # gr.Slider(0, 1, value=0.5, step=0.05, label='Stylization strength', info='suggested values: 0.3 low, 0.5 medium, 0.7 high'),
+            gr.Textbox(label="Random seed", value=-1, info="-1 picks a random seed; values greater than 0 use a fixed seed")
+        ],
+        outputs=["image"]
+    )
+
+    demo.queue(max_size=2)
+    demo.launch(share=True)
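
For quick local debugging, the same queue-and-wait round trip can be exercised without the Gradio UI. A minimal sketch, assuming a ComfyUI server on 127.0.0.1:8188 and the workflow JSON in the working directory; the fixed seed of 42 and the file path are illustrative stand-ins, not part of this commit:

import json
import uuid
import urllib.request
import websocket  # pip install websocket-client

server = "127.0.0.1:8188"
cid = str(uuid.uuid4())

with open("workflow_api_anime_0306.json", encoding="utf-8") as f:
    wf = json.load(f)
wf["3"]["inputs"]["seed"] = 42  # pin the KSampler seed for reproducibility

ws = websocket.WebSocket()
ws.connect(f"ws://{server}/ws?clientId={cid}")  # connect before queueing

req = urllib.request.Request(
    f"http://{server}/prompt",
    data=json.dumps({"prompt": wf, "client_id": cid}).encode("utf-8"),
)
prompt_id = json.loads(urllib.request.urlopen(req).read())["prompt_id"]

while True:
    msg = ws.recv()
    if isinstance(msg, str):  # binary frames are previews; skip them
        m = json.loads(msg)
        if (m["type"] == "executing"
                and m["data"]["node"] is None
                and m["data"]["prompt_id"] == prompt_id):
            break  # the graph finished executing
ws.close()
print("finished prompt", prompt_id)

Connecting the websocket before queueing mirrors generate() above and avoids missing the completion message on fast workflows.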
workflow_api_anime_0306.json ADDED
@@ -0,0 +1,194 @@
+{
+  "3": {
+    "inputs": {
+      "seed": 0,
+      "steps": 20,
+      "cfg": 4,
+      "sampler_name": "dpmpp_2m_sde",
+      "scheduler": "karras",
+      "denoise": 1,
+      "model": [
+        "10",
+        0
+      ],
+      "positive": [
+        "6",
+        0
+      ],
+      "negative": [
+        "7",
+        0
+      ],
+      "latent_image": [
+        "5",
+        0
+      ]
+    },
+    "class_type": "KSampler",
+    "_meta": {
+      "title": "KSampler"
+    }
+  },
+  "4": {
+    "inputs": {
+      "ckpt_name": "ghostxl_v10BakedVAE.safetensors"
+    },
+    "class_type": "CheckpointLoaderSimple",
+    "_meta": {
+      "title": "Load Checkpoint"
+    }
+  },
+  "5": {
+    "inputs": {
+      "width": 768,
+      "height": 1024,
+      "batch_size": 1
+    },
+    "class_type": "EmptyLatentImage",
+    "_meta": {
+      "title": "Empty Latent Image"
+    }
+  },
+  "6": {
+    "inputs": {
+      "text": "masterpiece, 8K, best quality, clean background",
+      "clip": [
+        "4",
+        1
+      ]
+    },
+    "class_type": "CLIPTextEncode",
+    "_meta": {
+      "title": "CLIP Text Encode (Prompt)"
+    }
+  },
+  "7": {
+    "inputs": {
+      "text": "nsfw,blurry, low quality, distorted, photo, frame, naked, horror,embedding:EasyNegative",
+      "clip": [
+        "4",
+        1
+      ]
+    },
+    "class_type": "CLIPTextEncode",
+    "_meta": {
+      "title": "CLIP Text Encode (Prompt)"
+    }
+  },
+  "8": {
+    "inputs": {
+      "samples": [
+        "3",
+        0
+      ],
+      "vae": [
+        "4",
+        2
+      ]
+    },
+    "class_type": "VAEDecode",
+    "_meta": {
+      "title": "VAE Decode"
+    }
+  },
+  "9": {
+    "inputs": {
+      "filename_prefix": "ComfyUI",
+      "images": [
+        "8",
+        0
+      ]
+    },
+    "class_type": "SaveImage",
+    "_meta": {
+      "title": "Save Image"
+    }
+  },
+  "10": {
+    "inputs": {
+      "weight": 1,
+      "noise": 0,
+      "weight_type": "original",
+      "start_at": 0,
+      "end_at": 1,
+      "faceid_v2": true,
+      "weight_v2": 1,
+      "unfold_batch": false,
+      "ipadapter": [
+        "11",
+        0
+      ],
+      "clip_vision": [
+        "41",
+        0
+      ],
+      "insightface": [
+        "13",
+        0
+      ],
+      "image": [
+        "14",
+        0
+      ],
+      "model": [
+        "39",
+        0
+      ]
+    },
+    "class_type": "IPAdapterApplyFaceID",
+    "_meta": {
+      "title": "Apply IPAdapter FaceID"
+    }
+  },
+  "11": {
+    "inputs": {
+      "ipadapter_file": "ip-adapter-faceid-plusv2_sdxl.bin"
+    },
+    "class_type": "IPAdapterModelLoader",
+    "_meta": {
+      "title": "Load IPAdapter Model"
+    }
+  },
+  "13": {
+    "inputs": {
+      "provider": "CUDA"
+    },
+    "class_type": "InsightFaceLoader",
+    "_meta": {
+      "title": "Load InsightFace"
+    }
+  },
+  "14": {
+    "inputs": {
+      "image": "comfyworkflows_f0942efd-fb40-422b-8cd4-cbaa39529fab (3).png",
+      "upload": "image"
+    },
+    "class_type": "LoadImage",
+    "_meta": {
+      "title": "Load Image"
+    }
+  },
+  "39": {
+    "inputs": {
+      "lora_name": "ip-adapter-faceid-plusv2_sdxl_lora.safetensors",
+      "strength_model": 1,
+      "model": [
+        "4",
+        0
+      ]
+    },
+    "class_type": "LoraLoaderModelOnly",
+    "_meta": {
+      "title": "LoraLoaderModelOnly"
+    }
+  },
+  "41": {
+    "inputs": {
+      "clip_name": "ipadpter1.5.safetensors"
+    },
+    "class_type": "CLIPVisionLoader",
+    "_meta": {
+      "title": "Load CLIP Vision"
+    }
+  }
+}
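
In this API-format graph, any input given as a two-element list is a link: the first element is the source node id, the second is that node's output slot (so the KSampler's "model": ["10", 0] consumes output 0 of the IPAdapterApplyFaceID node). A small sketch of walking those links; the upstream helper is an illustrative addition, not anything shipped in this repo:

import json

def upstream(graph, node_id, seen=None):
    # Follow ComfyUI's [source_node_id, output_index] link convention
    # to collect every node the given node depends on.
    seen = set() if seen is None else seen
    for value in graph[node_id]["inputs"].values():
        if isinstance(value, list) and len(value) == 2 and isinstance(value[0], str):
            src = value[0]
            if src not in seen:
                seen.add(src)
                upstream(graph, src, seen)
    return seen

with open("workflow_api_anime_0306.json", encoding="utf-8") as f:
    graph = json.load(f)

# The sampler (node "3") should depend on the checkpoint, the two prompts,
# the empty latent, and the entire FaceID branch (nodes 10/11/13/14/39/41).
print(sorted(upstream(graph, "3"), key=int))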