Update app.py
app.py CHANGED
@@ -47,6 +47,7 @@ ocr_model = ocr_predictor(
 
 
 if torch.cuda.is_available():
+    # Load the processor and model
     processor = LlavaNextProcessor.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf")
     vision_model = LlavaNextForConditionalGeneration.from_pretrained(
         "llava-hf/llava-v1.6-mistral-7b-hf",
@@ -55,6 +56,11 @@ if torch.cuda.is_available():
         load_in_4bit=True,
     )
 
+    # Move the processor and model to the GPU
+    processor = processor.to("cuda")
+    vision_model = vision_model.to("cuda")
+
+
 @spaces.GPU
 def get_image_description(image):
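
For context, a rough, hypothetical sketch of the pattern around this code: only the checkpoint name, the load_in_4bit flag, the torch.cuda.is_available() guard, and the @spaces.GPU decorator on get_image_description come from the diff; the imports, the prompt, and the generation/decoding logic below are assumptions, not the Space's actual implementation.

# Hypothetical sketch -- everything except the checkpoint name, load_in_4bit,
# the CUDA guard, and the @spaces.GPU decorator is assumed, not taken from app.py.
import spaces
import torch
from transformers import LlavaNextProcessor, LlavaNextForConditionalGeneration

if torch.cuda.is_available():
    # Load the processor and the 4-bit quantized LLaVA-NeXT model at import time
    processor = LlavaNextProcessor.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf")
    vision_model = LlavaNextForConditionalGeneration.from_pretrained(
        "llava-hf/llava-v1.6-mistral-7b-hf",
        load_in_4bit=True,  # remaining kwargs are elided in the diff
    )

@spaces.GPU
def get_image_description(image):
    # Assumed body: the diff only shows the decorator and the signature.
    prompt = "[INST] <image>\nDescribe this image in detail. [/INST]"
    # Inputs are moved to the GPU inside the decorated call, which is when
    # a ZeroGPU device is attached.
    inputs = processor(text=prompt, images=image, return_tensors="pt").to("cuda")
    output = vision_model.generate(**inputs, max_new_tokens=128)
    return processor.decode(output[0], skip_special_tokens=True)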