Commit 0b89080 · Parent: 261fcb3
Create app.py
app.py ADDED
@@ -0,0 +1,174 @@
# -*- coding: utf-8 -*-
"""ImagePromtGenerator.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/14IVhWCKpCLQnMrb4wuAqYRM4a6j14Dyt

# CLIP Interrogator 2.2 by [@pharmapsychotic](https://twitter.com/pharmapsychotic)

Want to figure out what a good prompt might be to create new images like an existing one? The CLIP Interrogator is here to get you answers!

<br>

For Stable Diffusion 1.X choose the **ViT-L** model and for Stable Diffusion 2.0+ choose the **ViT-H** CLIP Model.

This version is specialized for producing nice prompts for use with Stable Diffusion and achieves higher alignment between generated text prompt and source image. You can try out the old [version 1](https://colab.research.google.com/github/pharmapsychotic/clip-interrogator/blob/v1/clip_interrogator.ipynb) to see how different CLIP models rank terms.

You can also run this on HuggingFace and Replicate<br>
[Hugging Face Spaces](https://huggingface.co/spaces/pharma/CLIP-Interrogator) [Replicate](https://replicate.com/pharmapsychotic/clip-interrogator)

<br>

If this notebook is helpful to you please consider buying me a coffee via [ko-fi](https://ko-fi.com/pharmapsychotic) or following me on [twitter](https://twitter.com/pharmapsychotic) for more cool Ai stuff. 🙂

And if you're looking for more Ai art tools check out my [Ai generative art tools list](https://pharmapsychotic.com/tools.html).
"""

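# The Setup cell below installs everything this script imports later on: gradio for the UI,
# open_clip_torch and clip-interrogator for the prompt search, and pharmapsychotic's BLIP fork
# for image captioning. Shelling out to pip at startup keeps the original Colab workflow, at
# the cost of a slow cold start.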

#@title Setup
import os, subprocess

def setup():
    install_cmds = [
        ['pip', 'install', 'gradio'],
        ['pip', 'install', 'open_clip_torch'],
        ['pip', 'install', 'clip-interrogator'],
        ['pip', 'install', 'git+https://github.com/pharmapsychotic/BLIP.git'],
    ]
    for cmd in install_cmds:
        print(subprocess.run(cmd, stdout=subprocess.PIPE).stdout.decode('utf-8'))

setup()

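# Model choice (see the docstring above): ViT-L-14/openai pairs with Stable Diffusion 1.x,
# ViT-H-14/laion2b_s32b_b79k with Stable Diffusion 2.0+. This Space uses the ViT-L default.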

clip_model_name = 'ViT-L-14/openai' #@param ["ViT-L-14/openai", "ViT-H-14/laion2b_s32b_b79k"]

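# The .pkl files below are precomputed CLIP text-embedding caches for the term lists the
# interrogator searches (artists, flavors, mediums, movements, trendings), published per CLIP
# model. Downloading them into cache/ up front means the Interrogator does not have to embed
# thousands of terms itself on first run.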
print("Download preprocessed cache files...")
CACHE_URLS = [
    'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-L-14_openai_artists.pkl',
    'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-L-14_openai_flavors.pkl',
    'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-L-14_openai_mediums.pkl',
    'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-L-14_openai_movements.pkl',
    'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-L-14_openai_trendings.pkl',
] if clip_model_name == 'ViT-L-14/openai' else [
    'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_artists.pkl',
    'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_flavors.pkl',
    'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_mediums.pkl',
    'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_movements.pkl',
    'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_trendings.pkl',
]
os.makedirs('cache', exist_ok=True)
for url in CACHE_URLS:
    print(subprocess.run(['wget', url, '-P', 'cache'], stdout=subprocess.PIPE).stdout.decode('utf-8'))

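# Config bundles the BLIP caption settings and the CLIP model name; constructing Interrogator
# loads both models into memory, which is the slow, GPU-hungry part of startup.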

import gradio as gr
from clip_interrogator import Config, Interrogator

config = Config()
config.blip_num_beams = 64
config.blip_offload = False
config.clip_model_name = clip_model_name
ci = Interrogator(config)

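# inference() is the single entry point used by both the Gradio UI and the batch cell below.
# 'best' runs the full flavor search (slowest, highest quality), 'classic' uses the library's
# classic interrogation, and any other value falls through to the fast mode. The chunk sizes
# are halved for ViT-H to keep memory usage in check with the larger model.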
def inference(image, mode, best_max_flavors=32):
    ci.config.chunk_size = 2048 if ci.config.clip_model_name == "ViT-L-14/openai" else 1024
    ci.config.flavor_intermediate_count = 2048 if ci.config.clip_model_name == "ViT-L-14/openai" else 1024
    image = image.convert('RGB')
    if mode == 'best':
        return ci.interrogate(image, max_flavors=int(best_max_flavors))
    elif mode == 'classic':
        return ci.interrogate_classic(image)
    else:
        return ci.interrogate_fast(image)

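# Hypothetical local check (not part of the app), assuming some example.jpg sits next to app.py:
#   from PIL import Image
#   print(inference(Image.open('example.jpg'), 'fast'))
# Note that the interface below only exposes 'best' and 'fast', so the 'classic' branch is
# reachable only by calling inference() directly.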
#@title Image to prompt! 🖼️ -> 📝

inputs = [
    gr.inputs.Image(type='pil'),
    gr.Radio(['best', 'fast'], label='', value='best'),
    gr.Number(value=16, label='best mode max flavors'),
]
outputs = [
    gr.outputs.Textbox(label="Output"),
]

io = gr.Interface(
    inference,
    inputs,
    outputs,
    allow_flagging=False,
)
io.launch(debug=False, share=True)

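# The batch cell below is carried over from the Colab notebook: it assumes an IPython display
# and a Colab-style /content folder. On a Space with no such folder it simply reports that no
# images were found.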
#@title Batch process a folder of images 📁 -> 📝

#@markdown This will generate prompts for every image in a folder and either save results
#@markdown to a desc.csv file in the same folder or rename the files to contain their prompts.
#@markdown The renamed files work well for [DreamBooth extension](https://github.com/d8ahazard/sd_dreambooth_extension)
#@markdown in the [Stable Diffusion Web UI](https://github.com/AUTOMATIC1111/stable-diffusion-webui).
#@markdown You can use the generated csv in the [Stable Diffusion Finetuning](https://colab.research.google.com/drive/1vrh_MUSaAMaC5tsLWDxkFILKJ790Z4Bl?usp=sharing)

import csv
import os
from IPython.display import clear_output, display
from PIL import Image
from tqdm import tqdm

folder_path = "/content/my_images" #@param {type:"string"}
prompt_mode = 'best' #@param ["best","fast"]
output_mode = 'rename' #@param ["desc.csv","rename"]
max_filename_len = 128 #@param {type:"integer"}
best_max_flavors = 16 #@param {type:"integer"}

def sanitize_for_filename(prompt: str, max_len: int) -> str:
    name = "".join(c for c in prompt if (c.isalnum() or c in ",._-! "))
    name = name.strip()[:(max_len-4)]  # extra space for extension
    return name

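# For example, a prompt like 'portrait of a cat, "oil on canvas" by greg rutkowski' becomes
# 'portrait of a cat, oil on canvas by greg rutkowski': quotes and other filesystem-unsafe
# characters are dropped, then the name is truncated to max_len-4 to leave room for the extension.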
ci.config.quiet = True

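# The loop below prints each prompt with a 256px thumbnail, clears the notebook output every
# 100 files so it doesn't grow unbounded, and in 'rename' mode appends _1, _2, ... when two
# sanitized prompts would collide on the same filename.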
files = [f for f in os.listdir(folder_path) if f.endswith('.jpg') or f.endswith('.png')] if os.path.exists(folder_path) else []
prompts = []
for idx, file in enumerate(tqdm(files, desc='Generating prompts')):
    if idx > 0 and idx % 100 == 0:
        clear_output(wait=True)

    image = Image.open(os.path.join(folder_path, file)).convert('RGB')
    prompt = inference(image, prompt_mode, best_max_flavors=best_max_flavors)
    prompts.append(prompt)

    print(prompt)
    thumb = image.copy()
    thumb.thumbnail([256, 256])
    display(thumb)

    if output_mode == 'rename':
        name = sanitize_for_filename(prompt, max_filename_len)
        ext = os.path.splitext(file)[1]
        filename = name + ext
        idx = 1
        while os.path.exists(os.path.join(folder_path, filename)):
            print(f'File {filename} already exists, trying {name}_{idx}{ext}...')
            filename = f"{name}_{idx}{ext}"
            idx += 1
        os.rename(os.path.join(folder_path, file), os.path.join(folder_path, filename))

if len(prompts):
    if output_mode == 'desc.csv':
        csv_path = os.path.join(folder_path, 'desc.csv')
        with open(csv_path, 'w', encoding='utf-8', newline='') as f:
            w = csv.writer(f, quoting=csv.QUOTE_MINIMAL)
            w.writerow(['image', 'prompt'])
            for file, prompt in zip(files, prompts):
                w.writerow([file, prompt])

        print(f"\n\n\n\nGenerated {len(prompts)} prompts and saved to {csv_path}, enjoy!")
    else:
        print(f"\n\n\n\nGenerated {len(prompts)} prompts and renamed your files, enjoy!")
else:
    print(f"Sorry, I couldn't find any images in {folder_path}")