AbdulBasit2007 committed on
Commit
a88d3cb
·
verified ·
1 Parent(s): a285f0d
Files changed (1) hide show
  1. app.py +13 -0
app.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Load the COCO dataset and the CLIP processor used to preprocess it."""
from datasets import load_dataset
from transformers import CLIPProcessor

# NOTE(review): assumes a dataset named 'coco' is resolvable on the Hub in
# this environment — confirm the exact dataset id/config before running.
dataset = load_dataset('coco', split='train')

# Processor bundling the CLIP tokenizer (for captions) and image processor
# (for pixel values) of the base ViT-B/32 checkpoint.
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
def preprocess_data(examples):
    """Run the CLIP processor over one mapped item from the dataset.

    Args:
        examples: mapping with "caption" and "image" entries, as handed in
            by ``datasets.Dataset.map``.

    Returns:
        The processor's encoding (presumably token ids, attention mask and
        pixel values — confirm against the CLIPProcessor docs).
    """
    # NOTE(review): return_tensors="pt" output is round-tripped through
    # Arrow by Dataset.map, so tensors likely come back as lists — verify
    # this is intended.
    encoded = processor(
        text=examples["caption"],
        images=examples["image"],
        return_tensors="pt",
        padding=True,
    )
    return encoded
# Apply CLIP preprocessing across the whole split. batched=True hands the
# processor lists of captions/images per call instead of one example at a
# time — far fewer processor invocations, and it makes padding=True
# meaningful (pads captions within each batch rather than per example).
dataset = dataset.map(preprocess_data, batched=True)