Commit ee2def5
Parent(s): bb3e8f2
Update README.md
README.md CHANGED
@@ -19,8 +19,10 @@ tokenizer = AutoTokenizer.from_pretrained('SajjadAyoubi/clip-fa-text')
 text = 'something'
 image = PIL.Image.open('my_favorite_image.jpg')
 # compute embeddings
-text_embedding = text_encoder(**tokenizer(text, return_tensors='pt')).pooler_output
-image_embedding = vision_encoder(**preprocessor(image, return_tensors='pt')).pooler_output
+text_embedding = text_encoder(**tokenizer(text,
+                                          return_tensors='pt')).pooler_output
+image_embedding = vision_encoder(**preprocessor(image,
+                                                return_tensors='pt')).pooler_output
 text_embedding.shape == image_embedding.shape
 ```
 
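For context, the snippet this hunk rewraps is CLIPfa's basic usage example. Below is a minimal runnable sketch of the whole example; only `AutoTokenizer` and the `'SajjadAyoubi/clip-fa-text'` checkpoint appear in this diff, so the vision-side classes, the `'SajjadAyoubi/clip-fa-vision'` checkpoint name, and the final similarity score are assumptions filled in for illustration.

```python
# Hedged sketch of the full usage example around the hunk above.
# Assumed (not in this diff): RobertaModel / CLIPVisionModel / CLIPFeatureExtractor
# as the encoder classes, and 'SajjadAyoubi/clip-fa-vision' as the vision checkpoint.
import PIL.Image
import torch
from transformers import (AutoTokenizer, CLIPFeatureExtractor,
                          CLIPVisionModel, RobertaModel)

text_encoder = RobertaModel.from_pretrained('SajjadAyoubi/clip-fa-text')
tokenizer = AutoTokenizer.from_pretrained('SajjadAyoubi/clip-fa-text')  # from the hunk header
vision_encoder = CLIPVisionModel.from_pretrained('SajjadAyoubi/clip-fa-vision')
preprocessor = CLIPFeatureExtractor.from_pretrained('SajjadAyoubi/clip-fa-vision')

text = 'something'
image = PIL.Image.open('my_favorite_image.jpg')

with torch.no_grad():
    # pooler_output collapses each input to a single vector, so text and
    # image come out with matching shapes, as the README's last line checks
    text_embedding = text_encoder(**tokenizer(text,
                                              return_tensors='pt')).pooler_output
    image_embedding = vision_encoder(**preprocessor(image,
                                                    return_tensors='pt')).pooler_output

assert text_embedding.shape == image_embedding.shape
# cosine similarity between the two vectors scores the text-image match
score = torch.nn.functional.cosine_similarity(text_embedding, image_embedding)
```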
@@ -30,7 +32,7 @@ The followings are just some use cases of CLIPfa on 25K [`Unsplash images`](http
 ```python
 from clipfa import CLIPDemo
 demo = CLIPDemo(vision_encoder, text_encoder, tokenizer)
-demo.compute_text_embeddings(['
+demo.compute_text_embeddings(['گاو' ,'اسب' ,'ماهی'])
 demo.compute_image_embeddings(test_df.image_path.to_list())
 ```
 ### Image Search:
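The added line supplies the demo's Persian example labels: 'گاو' (cow), 'اسب' (horse), and 'ماهی' (fish). CLIPDemo's internals are not shown in this diff, so the following is a purely illustrative sketch of the same zero-shot idea using the raw encoders from the sketch above: precompute the label embeddings once, then score an image against them.

```python
# Hypothetical zero-shot sketch mirroring compute_text_embeddings /
# compute_image_embeddings; CLIPDemo's real internals are not in this diff.
import torch

labels = ['گاو', 'اسب', 'ماهی']  # cow, horse, fish

with torch.no_grad():
    # embed each candidate label once, like demo.compute_text_embeddings(...)
    label_embeddings = torch.cat([
        text_encoder(**tokenizer(label, return_tensors='pt')).pooler_output
        for label in labels
    ])
    image_embedding = vision_encoder(**preprocessor(image,
                                                    return_tensors='pt')).pooler_output

# the best label is the one whose embedding is closest to the image's
scores = torch.nn.functional.cosine_similarity(image_embedding, label_embeddings)
print(labels[scores.argmax().item()])
```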