Commit · 9ae2aa9
Parent(s): a915791

update
app_idefics2.py CHANGED (+3 -2)
@@ -1,17 +1,18 @@
 import gradio as gr
 import spaces
 import time
+import torch
 from PIL import Image
 from transformers import AutoProcessor, AutoModelForVision2Seq
 from transformers.image_utils import load_image
 from typing import List
 processor = AutoProcessor.from_pretrained("TIGER-Lab/Mantis-8B-Idefics2")
-model = AutoModelForVision2Seq.from_pretrained("TIGER-Lab/Mantis-8B-Idefics2")
+model = AutoModelForVision2Seq.from_pretrained("TIGER-Lab/Mantis-8B-Idefics2", torch_dtype=torch.bfloat16)
 
 @spaces.GPU
 def generate_stream(text:str, images:List[Image.Image], history: List[dict], **kwargs):
     global processor, model
-    model
+    model.to("cuda")
     if not images:
         images = None
 
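
The commit loads the checkpoint in bfloat16 and moves it to CUDA only inside the @spaces.GPU-decorated handler, where ZeroGPU attaches a device. Below is a minimal standalone sketch of that loading pattern; the model name and dtype come from the diff, while the image, prompt, and generation call are assumptions based on the standard transformers Idefics2 chat-template API, not code from app_idefics2.py.

# Sketch of the loading pattern introduced by this commit: load in bfloat16,
# then move to CUDA once a GPU is available (in the Space this happens inside
# the @spaces.GPU handler). The generation part is illustrative only.
import torch
from PIL import Image
from transformers import AutoProcessor, AutoModelForVision2Seq

processor = AutoProcessor.from_pretrained("TIGER-Lab/Mantis-8B-Idefics2")
model = AutoModelForVision2Seq.from_pretrained(
    "TIGER-Lab/Mantis-8B-Idefics2", torch_dtype=torch.bfloat16
)
model.to("cuda")

# Hypothetical single-image query to show the flow end to end.
image = Image.open("example.jpg")
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image"},
            {"type": "text", "text": "Describe this image."},
        ],
    }
]
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
inputs = processor(text=prompt, images=[image], return_tensors="pt").to("cuda")

with torch.inference_mode():
    generated = model.generate(**inputs, max_new_tokens=256)
print(processor.batch_decode(generated, skip_special_tokens=True)[0])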