clone3 committed · verified
Commit 28cd9fd · 1 Parent(s): ba5bbe9

Update app.py

Files changed (1): app.py (+15 -214)
app.py CHANGED
@@ -1,218 +1,19 @@
- #!/usr/bin/env python
- from __future__ import annotations
-
- import requests
- import re
- import threading
-
- import os
- import random
- import time
-
- import gradio as gr
- import numpy as np
- import PIL.Image
-
- from huggingface_hub import snapshot_download
- from diffusers import DiffusionPipeline
-
- from lcm_scheduler import LCMScheduler
- from lcm_ov_pipeline import OVLatentConsistencyModelPipeline
-
- from optimum.intel.openvino.modeling_diffusion import OVModelVaeDecoder, OVBaseModel
-
- import os
- from tqdm import tqdm
-
- from concurrent.futures import ThreadPoolExecutor
- import uuid
-
- DESCRIPTION = '''# Latent Consistency Model OpenVino CPU
- Based on [Latency Consistency Model](https://huggingface.co/spaces/SimianLuo/Latent_Consistency_Model) HF space
-
- <p>Running on CPU 🥶.</p>
- '''
-
- MAX_SEED = np.iinfo(np.int32).max
- CACHE_EXAMPLES = os.getenv("CACHE_EXAMPLES") == "1"
-
- model_id = "Kano001/Dreamshaper_v7-Openvino"
- batch_size = 1
- width = int(os.getenv("IMAGE_WIDTH", "512"))
- height = int(os.getenv("IMAGE_HEIGHT", "512"))
- num_images = int(os.getenv("NUM_IMAGES", "1"))
-
- class CustomOVModelVaeDecoder(OVModelVaeDecoder):
-     def __init__(
-         self, model: openvino.runtime.Model, parent_model: OVBaseModel, ov_config: Optional[Dict[str, str]] = None, model_dir: str = None,
-     ):
-         super(OVModelVaeDecoder, self).__init__(model, parent_model, ov_config, "vae_decoder", model_dir)
-
- scheduler = LCMScheduler.from_pretrained(model_id, subfolder="scheduler")
- pipe = OVLatentConsistencyModelPipeline.from_pretrained(model_id, scheduler = scheduler, compile = False, ov_config = {"CACHE_DIR":""})
-
- # Inject TAESD
-
- taesd_dir = snapshot_download(repo_id="Kano001/taesd-openvino")
- pipe.vae_decoder = CustomOVModelVaeDecoder(model = OVBaseModel.load_model(f"{taesd_dir}/vae_decoder/openvino_model.xml"), parent_model = pipe, model_dir = taesd_dir)
-
- pipe.reshape(batch_size=batch_size, height=height, width=width, num_images_per_prompt=num_images)
- pipe.compile()
-
- # Personal Thing-----------------------------------
- api_url = None
- def make_api_request():
-     global api_url
-     response = requests.get("https://genielamp-image7.hf.space/")
-     api_url = response.text
-     match = re.search(r'"root"\s*:\s*"([^"]+)"', response.text)
-     api_url = match.group(1) + "/file="
-     print(api_url)
-
-
- def delayed_api_request():
-     threading.Timer(10, make_api_request).start()
- #------------------------------------------------------
-
- def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
-     if randomize_seed:
-         seed = random.randint(0, MAX_SEED)
-     return seed
-
- def save_image(img, profile: gr.OAuthProfile | None, metadata: dict):
-     unique_name = str(uuid.uuid4()) + '.png'
-     img.save(unique_name)
-     return unique_name
-
- def save_images(image_array, profile: gr.OAuthProfile | None, metadata: dict):
-     paths = []
-     with ThreadPoolExecutor() as executor:
-         paths = list(executor.map(save_image, image_array, [profile]*len(image_array), [metadata]*len(image_array)))
-     return paths
-
- def generate(
-     prompt: str,
-     url: str,
-     seed: int = 0,
-     guidance_scale: float = 8.0,
-     num_inference_steps: int = 4,
-     randomize_seed: bool = False,
-     progress = gr.Progress(track_tqdm=True),
-     profile: gr.OAuthProfile | None = None,
- ) -> PIL.Image.Image:
-     global batch_size
-     global width
-     global height
-     global num_images
-
-     seed = randomize_seed_fn(seed, randomize_seed)
-     np.random.seed(seed)
-     start_time = time.time()
-     url = api_url
-     result = pipe(
-         prompt=prompt,
-         width=width,
-         height=height,
-         guidance_scale=guidance_scale,
-         num_inference_steps=num_inference_steps,
-         num_images_per_prompt=num_images,
-         output_type="pil",
-     ).images
-     paths = save_images(result, profile, metadata={"prompt": prompt, "seed": seed, "width": width, "height": height, "guidance_scale": guidance_scale, "num_inference_steps": num_inference_steps})
-     print(time.time() - start_time)
-     return paths, seed, url
-
- examples = [
-     "portrait photo of a girl, photograph, highly detailed face, depth of field, moody light, golden hour, style by Dan Winters, Russell James, Steve McCurry, centered, extremely detailed, Nikon D850, award winning photography",
-     "Self-portrait oil painting, a beautiful cyborg with golden hair, 8k",
-     "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
-     "A photo of beautiful mountain with realistic sunset and blue lake, highly detailed, masterpiece",
- ]
-
- with gr.Blocks(css="style.css") as demo:
-     gr.Markdown(DESCRIPTION)
-     gr.DuplicateButton(
-         value="Duplicate Space for private use",
-         elem_id="duplicate-button",
-         visible=os.getenv("SHOW_DUPLICATE_BUTTON") == "1",
-     )
-
-     with gr.Group():
-         with gr.Row():
-             prompt = gr.Text(
-                 label="Prompt",
-                 show_label=False,
-                 max_lines=1,
-                 placeholder="Enter your prompt",
-                 container=False,
-             )
-             run_button = gr.Button("Run", scale=0)
-         result = gr.Gallery(
-             label="Generated images", show_label=False, elem_id="gallery", grid=[1]
-         )
-     with gr.Accordion("Advanced options", open=False):
-         seed = gr.Slider(
-             label="Seed",
-             minimum=0,
-             maximum=MAX_SEED,
-             step=1,
-             value=0,
-             randomize=True
-         )
-         randomize_seed = gr.Checkbox(label="Randomize seed across runs", value=True)
-         with gr.Row():
-             guidance_scale = gr.Slider(
-                 label="Guidance scale for base",
-                 minimum=2,
-                 maximum=14,
-                 step=0.1,
-                 value=8.0,
-             )
-             num_inference_steps = gr.Slider(
-                 label="Number of inference steps for base",
-                 minimum=1,
-                 maximum=8,
-                 step=1,
-                 value=4,
-             )
-         url = gr.Text(
-             label="url",
-             value="Null",
-             show_label=False,
-             placeholder="Null",
-             max_lines=1,
-             container=False,
-             interactive=False,
-         )
-
-     gr.Examples(
-         examples=examples,
-         inputs=prompt,
-         outputs=result,
-         fn=generate,
-         cache_examples=CACHE_EXAMPLES,
-     )
-
-     gr.on(
-         triggers=[
-             prompt.submit,
-             run_button.click,
-         ],
-         fn=generate,
-         inputs=[
-             prompt,
-             seed,
-             url,
-             guidance_scale,
-             num_inference_steps,
-             randomize_seed
-         ],
-         outputs=[result, seed, url],
-         api_name="run",
-     )
-
- if __name__ == "__main__":
-     demo.queue(api_open=False)
-     delayed_api_request()
-     # demo.queue(max_size=20).launch()
-     demo.launch()
+ import asyncio
+ import websockets
+ from transformers import pipeline
+
+ # Load a Hugging Face model
+ nlp = pipeline("sentiment-analysis")
+
+ async def handle_client(websocket, path):
+     async for message in websocket:
+         # Process the message using the Hugging Face model
+         result = nlp(message)
+         # Send the result back to the client
+         await websocket.send(str(result))
+
+ # Start the WebSocket server
+ start_server = websockets.serve(handle_client, "localhost", 8765)
+
+ asyncio.get_event_loop().run_until_complete(start_server)
+ asyncio.get_event_loop().run_forever()
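
Note on the added code: the two-argument `handle_client(websocket, path)` handler and the `asyncio.get_event_loop()` startup calls follow older releases of the `websockets` library; current releases pass only the connection object to the handler, and the library's docs recommend `asyncio.run` with `serve` used as an async context manager. Below is a minimal sketch of the same server against that newer API; the `asyncio.to_thread` call is an added suggestion, not part of this commit, to keep the blocking pipeline call from stalling the event loop:

    import asyncio

    import websockets
    from transformers import pipeline

    # Load the same default sentiment-analysis pipeline as the commit
    nlp = pipeline("sentiment-analysis")

    async def handle_client(websocket):
        # Newer websockets releases pass only the connection object
        async for message in websocket:
            # Run the blocking model call in a worker thread so the
            # event loop can keep serving other connections
            result = await asyncio.to_thread(nlp, message)
            await websocket.send(str(result))

    async def main():
        async with websockets.serve(handle_client, "localhost", 8765):
            await asyncio.Future()  # run until cancelled

    asyncio.run(main())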
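
For quick testing, here is a client sketch against the host and port hard-coded above; `websockets.connect` is the library's standard client entry point, and the sample prompt and the shape of the printed reply are illustrative:

    import asyncio

    import websockets

    async def query(text: str) -> str:
        # Open a connection to the server from app.py, send one message,
        # and return the stringified pipeline output it sends back
        async with websockets.connect("ws://localhost:8765") as ws:
            await ws.send(text)
            return await ws.recv()

    if __name__ == "__main__":
        # Expected reply shape: "[{'label': 'POSITIVE', 'score': ...}]"
        print(asyncio.run(query("This space is great!")))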