hlydecker committed on
Commit dbd1ef8 · 1 Parent(s): e26d1ed

Create app.py

Files changed (1)
  1. app.py +42 -0
app.py ADDED
@@ -0,0 +1,42 @@
+ import gradio as gr
+ from huggingface_hub import hf_hub_download
+ import torch, open_clip
+
+ # Download the RemoteCLIP checkpoints from the Hugging Face Hub.
+ for model_name in ['RN50', 'ViT-B-32', 'ViT-L-14']:
+     checkpoint_path = hf_hub_download("chendelong/RemoteCLIP", f"RemoteCLIP-{model_name}.pt", cache_dir='checkpoints')
+     print(f'{model_name} is downloaded to {checkpoint_path}.')
+
+ model_name = 'RN50' # 'RN50' or 'ViT-B-32' or 'ViT-L-14'
+ model, _, preprocess = open_clip.create_model_and_transforms(model_name)
+ tokenizer = open_clip.get_tokenizer(model_name)
+
+ # hf_hub_download returns the cached local path, so reuse it rather than hard-coding a snapshot directory.
+ checkpoint_path = hf_hub_download("chendelong/RemoteCLIP", f"RemoteCLIP-{model_name}.pt", cache_dir='checkpoints')
+ ckpt = torch.load(checkpoint_path, map_location="cpu")
+ model.load_state_dict(ckpt)
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ model = model.to(device).eval()
+
+ def remote_clip(input_image, input_text):
+     # Treat the text box as a comma-separated list of queries so the softmax ranks several captions.
+     text_queries = [q.strip() for q in input_text.split(',') if q.strip()]
+     text = tokenizer(text_queries).to(device)
+
+     # input_image is already a PIL image because of gr.Image(type="pil").
+     image = preprocess(input_image).unsqueeze(0).to(device)
+
+     with torch.no_grad():
+         image_features = model.encode_image(image)
+         text_features = model.encode_text(text)
+         image_features /= image_features.norm(dim=-1, keepdim=True)
+         text_features /= text_features.norm(dim=-1, keepdim=True)
+
+     text_probs = (100.0 * image_features @ text_features.T).softmax(dim=-1).cpu().numpy()[0]
+
+     return '\n'.join(f"{query:<40} {prob * 100:5.1f}%" for query, prob in zip(text_queries, text_probs))
+
+ demo = gr.Interface(fn=remote_clip, inputs=[gr.Image(type="pil"), gr.Text()], outputs="text")
+
+ demo.launch()