Vidushee committed on
Commit
c8b2ee9
1 Parent(s): 957b993

Update Zocket_ImageBind.py

Files changed (1)
  1. Zocket_ImageBind.py +20 -23
Zocket_ImageBind.py CHANGED
@@ -2,14 +2,11 @@ from imagebind import data
 import torch
 from imagebind.models import imagebind_model
 from imagebind.models.imagebind_model import ModalityType
-import gradio as gr
-
-# command = "pip install git+https://github.com/facebookresearch/pytorchvideo.git@28fe037d212663c6a24f373b94cc5d478c8c1a1d timm==0.6.7 ftfy regex einops fvcore decord==0.6.0"
-# process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
-# process.wait()
-# print(process.returncode) # should print 0 if installation was successful
-
+import os
 
+from PIL import Image
+import streamlit as st
+import tempfile
 
 device = "cuda:0" if torch.cuda.is_available() else "cpu"
 
@@ -21,17 +18,25 @@ model.to(device)
 text_list = ["An Advertisement(branding, text, promotions, lifestyle depiction, contextual cues, and visual composition)","Not an Advertisement"]
 image_paths = []
 
+text = ['Advertisement Creative(Contains Text)', 'Not an Advertisement Creative(Contains No Text)', 'Simple Product Image and not an Advertisement)']
+
+
 
-with gr.Blocks() as demo:
-    image = gr.File()
-    image_paths.append(image)
+st.title("Advertisement Detection using CLIP")
 
+# Upload image
+uploaded_image = st.file_uploader("Choose an image...", type=["png", "jpg", "jpeg"])
 
 
-    gr.Markdown(
-        """
-        Zocket ImageBind made AdBind
-        """)
+if uploaded_image is not None:
+    temp_dir = tempfile.mkdtemp()
+    path = os.path.join(temp_dir, uploaded_image.name)
+    with open(path, "wb") as f:
+        f.write(uploaded_image.getvalue())
+
+    image_paths.append(path)
+    image = Image.open(uploaded_image)
+    st.image(image, caption="Uploaded Image.", use_column_width=True)
 
 
 inputs = {
@@ -47,12 +52,4 @@ with gr.Blocks() as demo:
     torch.softmax(embeddings[ModalityType.VISION] @ embeddings[ModalityType.TEXT].T, dim=-1),
 )
 
-out = f"""Output = {torch.softmax(embeddings[ModalityType.VISION] @ embeddings[ModalityType.TEXT].T, dim=-1)}"""
-gr.Markdown(out)
-
-
-
-demo.launch()
-
-
-# Load data
+st.write(torch.softmax(embeddings[ModalityType.VISION] @ embeddings[ModalityType.TEXT].T, dim=-1))
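
The diff shows only the edges of the file: the model-loading middle section (visible only as "model.to(device)" in the second hunk header, plus the "inputs = {" and embeddings context lines) is unchanged and therefore not displayed. Below is a minimal sketch of how the updated Streamlit script plausibly fits together, assuming the hidden section follows the standard ImageBind usage (imagebind_huge, data.load_and_transform_text, data.load_and_transform_vision_data); everything not visible in the diff above is an assumption, not the committed code.

from imagebind import data
import torch
from imagebind.models import imagebind_model
from imagebind.models.imagebind_model import ModalityType
import os

from PIL import Image
import streamlit as st
import tempfile

device = "cuda:0" if torch.cuda.is_available() else "cpu"

# Assumed from the hunk header "model.to(device)": standard ImageBind loading.
model = imagebind_model.imagebind_huge(pretrained=True)
model.eval()
model.to(device)

text_list = ["An Advertisement(branding, text, promotions, lifestyle depiction, contextual cues, and visual composition)", "Not an Advertisement"]
image_paths = []

st.title("Advertisement Detection using CLIP")

# Upload image
uploaded_image = st.file_uploader("Choose an image...", type=["png", "jpg", "jpeg"])

if uploaded_image is not None:
    # Persist the upload to disk, since ImageBind's vision loader expects file paths.
    temp_dir = tempfile.mkdtemp()
    path = os.path.join(temp_dir, uploaded_image.name)
    with open(path, "wb") as f:
        f.write(uploaded_image.getvalue())

    image_paths.append(path)
    st.image(Image.open(uploaded_image), caption="Uploaded Image.", use_column_width=True)

    # Assumed unchanged middle section, per the standard ImageBind example.
    inputs = {
        ModalityType.TEXT: data.load_and_transform_text(text_list, device),
        ModalityType.VISION: data.load_and_transform_vision_data(image_paths, device),
    }
    with torch.no_grad():
        embeddings = model(inputs)

    # Softmax over image-text similarity: one probability per text label.
    st.write(torch.softmax(embeddings[ModalityType.VISION] @ embeddings[ModalityType.TEXT].T, dim=-1))

Two notes on the sketch: the inference block is placed under the "if uploaded_image is not None:" guard so the model is not run against an empty image_paths on first page load (the diff's context lines do not show where that block actually sits), and the app would be launched with "streamlit run Zocket_ImageBind.py".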