909ahmed committed (verified)
Commit 1e62ffa · Parent(s): 1d631ad

Update app.py

Files changed (1): app.py (+10 -9)
app.py CHANGED
@@ -7,18 +7,19 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
 model, preprocess = clip.load("ViT-B/32", device=device)
 
 def clip(image, text):
-
-    image = preprocess(image).unsqueeze(0).to(device)
-    text = clip.tokenize([text]).to(device)
 
-    with torch.no_grad():
-        image_features = model.encode_image(image)
-        text_features = model.encode_text(text)
+    return "UWU"
+    # image = preprocess(image).unsqueeze(0).to(device)
+    # text = clip.tokenize([text]).to(device)
+
+    # with torch.no_grad():
+    #     image_features = model.encode_image(image)
+    #     text_features = model.encode_text(text)
 
-        logits_per_image, logits_per_text = model(image, text)
-        probs = logits_per_image.softmax(dim=-1).cpu().numpy()
+    #     logits_per_image, logits_per_text = model(image, text)
+    #     probs = logits_per_image.softmax(dim=-1).cpu().numpy()
 
-    return probs[0]
+    # return probs[0]
 
 demo = gr.Interface(fn=clip, inputs=["text", "image"], outputs="text")
 demo.launch()
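
For reference, the body that this commit comments out follows the standard OpenAI CLIP zero-shot example, but as written it had two problems even before being stubbed out with return "UWU": the function is named clip, which shadows the imported clip module and breaks the clip.tokenize call, and gr.Interface passes inputs in the order ["text", "image"] while the function signature is (image, text). Below is a minimal working sketch under those observations; the name clip_compare, the PIL image input, the comma-separated caption list, and the JSON output are assumptions for illustration, not part of the original app.

import torch
import clip
import gradio as gr

device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load("ViT-B/32", device=device)

def clip_compare(image, text):
    # `image` is a PIL image from the Gradio Image component; `text` is treated
    # as a comma-separated list of candidate captions (an assumption made here so
    # the softmax over captions is not trivially 1.0 for a single caption).
    captions = [c.strip() for c in text.split(",") if c.strip()]
    image_input = preprocess(image).unsqueeze(0).to(device)
    text_input = clip.tokenize(captions).to(device)

    with torch.no_grad():
        # logits_per_image has shape [1, num_captions]; softmax turns it into
        # a probability over the candidate captions.
        logits_per_image, _ = model(image_input, text_input)
        probs = logits_per_image.softmax(dim=-1).cpu().numpy()

    # Pair each caption with its probability so the output is readable.
    return {caption: float(p) for caption, p in zip(captions, probs[0])}

demo = gr.Interface(
    fn=clip_compare,
    inputs=[gr.Image(type="pil"), gr.Textbox(label="Candidate captions, comma-separated")],
    outputs="json",
)
demo.launch()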