Spaces:
Runtime error
Runtime error
anas-awadalla
committed on
Commit
·
130a7e8
1
Parent(s):
5a051fc
more stuff
Browse files
- app.py +13 -9
- requirements.txt +1 -0
app.py
CHANGED
@@ -1,17 +1,18 @@
|
|
1 |
import gradio as gr
|
2 |
import torch
|
3 |
from PIL import Image
|
|
|
4 |
|
5 |
demo_imgs = [
|
6 |
-
["chinchilla_web-1024x683.jpg", "shiba-inu-dog-in-the-snow.jpg"],
|
7 |
-
["4645808729_2dfc59b6a5_z.jpg", "5944609705_4664531909_z.jpg"],
|
8 |
-
["COCO_train2014_000000310472.jpg", "COCO_train2014_000000194806.jpg"],
|
9 |
[
|
10 |
-
"bcee7a-20190225-a-london-underground-sign.jpg",
|
11 |
-
"istockphoto-622434332-1024x1024.jpg",
|
12 |
],
|
13 |
-
["dogs.jpeg", "pandas.jpg"],
|
14 |
-
["11887_pesto-pasta_Rita-1x1-1-501c953b29074ab193e2b5ad36e64648.jpg", "hummus.jpg"],
|
15 |
]
|
16 |
demo_texts = [
|
17 |
[
|
@@ -48,11 +49,14 @@ with open("bad_words.txt", "r") as f:
|
|
48 |
model, image_processor, tokenizer = create_model_and_transforms(
|
49 |
clip_vision_encoder_pretrained="openai",
|
50 |
clip_vision_encoder_path="ViT-L-14",
|
51 |
-
lang_encoder_path="togethercomputer/RedPajama-INCITE-
|
52 |
-
tokenizer_path="togethercomputer/RedPajama-INCITE-
|
53 |
cross_attn_every_n_layers=2,
|
54 |
)
|
55 |
|
|
|
|
|
|
|
56 |
model.eval()
|
57 |
|
58 |
|
|
|
1 |
import gradio as gr
|
2 |
import torch
|
3 |
from PIL import Image
|
4 |
+
from huggingface_hub import hf_hub_download
|
5 |
|
6 |
demo_imgs = [
|
7 |
+
["images/chinchilla_web-1024x683.jpg", "images/shiba-inu-dog-in-the-snow.jpg"],
|
8 |
+
["images/4645808729_2dfc59b6a5_z.jpg", "images/5944609705_4664531909_z.jpg"],
|
9 |
+
["images/COCO_train2014_000000310472.jpg", "images/COCO_train2014_000000194806.jpg"],
|
10 |
[
|
11 |
+
"images/bcee7a-20190225-a-london-underground-sign.jpg",
|
12 |
+
"images/istockphoto-622434332-1024x1024.jpg",
|
13 |
],
|
14 |
+
["images/dogs.jpeg", "images/pandas.jpg"],
|
15 |
+
["images/11887_pesto-pasta_Rita-1x1-1-501c953b29074ab193e2b5ad36e64648.jpg", "images/hummus.jpg"],
|
16 |
]
|
17 |
demo_texts = [
|
18 |
[
|
|
|
49 |
model, image_processor, tokenizer = create_model_and_transforms(
|
50 |
clip_vision_encoder_pretrained="openai",
|
51 |
clip_vision_encoder_path="ViT-L-14",
|
52 |
+
lang_encoder_path="togethercomputer/RedPajama-INCITE-Instruct-3B-v1",
|
53 |
+
tokenizer_path="togethercomputer/RedPajama-INCITE-Instruct-3B-v1",
|
54 |
cross_attn_every_n_layers=2,
|
55 |
)
|
56 |
|
57 |
+
checkpoint_path = hf_hub_download("openflamingo/OpenFlamingo-4B-vitl-rpj3b-langinstruct", "checkpoint.pt")
|
58 |
+
model.load_state_dict(torch.load(checkpoint_path), strict=False)
|
59 |
+
|
60 |
model.eval()
|
61 |
|
62 |
|
requirements.txt
CHANGED
@@ -1,3 +1,4 @@
|
|
1 |
gradio
|
2 |
torch
|
3 |
pillow
|
|
|
|
1 |
gradio
|
2 |
torch
|
3 |
pillow
|
4 |
+
huggingface_hub
|