Fix (#2)
Commit 088953aedbc45b660577728b8b5de0f9c5614349
Co-authored-by: hysts <[email protected]>
- README.md +1 -1
- app.py +4 -5
- requirements.txt +4 -4
README.md
CHANGED
@@ -4,7 +4,7 @@ emoji: 🌖
 colorFrom: blue
 colorTo: pink
 sdk: gradio
-sdk_version: 3.
+sdk_version: 3.50.2
 python_version: 3.10.11
 app_file: app.py
 pinned: false
app.py
CHANGED
@@ -18,19 +18,18 @@ if not torch.cuda.is_available():
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
-
-MODEL_ID_FLAN_T5_XXL = "Salesforce/blip2-flan-t5-xxl"
-MODEL_ID = os.getenv("MODEL_ID", MODEL_ID_FLAN_T5_XXL)
+MODEL_ID = os.getenv("MODEL_ID", "Salesforce/blip2-opt-6.7b")
 
 
 processor = AutoProcessor.from_pretrained(MODEL_ID)
 
 if torch.cuda.is_available():
     model = Blip2ForConditionalGeneration.from_pretrained(MODEL_ID, device_map="auto", load_in_8bit=True)
-else:
+else:
     model = Blip2ForConditionalGeneration.from_pretrained(MODEL_ID)
 
 
+@spaces.GPU
 def generate_caption(
     image: PIL.Image.Image,
     decoding_method: str,
@@ -54,7 +53,7 @@ def generate_caption(
     return result
 
 
-
+@spaces.GPU
 def answer_question(
     image: PIL.Image.Image,
     text: str,
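For reference, a minimal consolidated sketch of how the touched part of app.py reads after this commit. The import lines and the elided function parameters/bodies are assumptions reconstructed from the diff context (os.getenv, PIL.Image.Image, spaces.GPU, torch, AutoProcessor, Blip2ForConditionalGeneration); they are not part of the change itself.

# Sketch of the model-loading section of app.py after the commit
# (imports and elided bodies are assumed, not shown in the diff).
import os

import PIL.Image
import spaces
import torch
from transformers import AutoProcessor, Blip2ForConditionalGeneration

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# New default model; still overridable through the MODEL_ID environment variable.
MODEL_ID = os.getenv("MODEL_ID", "Salesforce/blip2-opt-6.7b")

processor = AutoProcessor.from_pretrained(MODEL_ID)

if torch.cuda.is_available():
    # 8-bit loading relies on bitsandbytes and accelerate, both pinned in requirements.txt.
    model = Blip2ForConditionalGeneration.from_pretrained(MODEL_ID, device_map="auto", load_in_8bit=True)
else:
    model = Blip2ForConditionalGeneration.from_pretrained(MODEL_ID)


@spaces.GPU  # have the Spaces runtime allocate a GPU for each call
def generate_caption(image: PIL.Image.Image, decoding_method: str):
    # Remaining parameters and the body are unchanged by this commit and elided here.
    ...


@spaces.GPU
def answer_question(image: PIL.Image.Image, text: str):
    # Remaining parameters and the body are unchanged by this commit and elided here.
    ...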
requirements.txt
CHANGED
@@ -1,9 +1,9 @@
 accelerate==0.23.0
 bitsandbytes==0.41.1
-gradio==3.
-huggingface-hub==0.17.
-Pillow==10.0
+gradio==3.50.2
+huggingface-hub==0.17.3
+Pillow==10.1.0
 scipy==1.11.2
 torch==2.0.0
 torchvision==0.15.1
-transformers==4.
+transformers==4.34.1
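The new pins line the runtime up with the README: gradio==3.50.2 matches the sdk_version now declared there. A small hypothetical sanity check, assuming the pinned environment is installed, that the versions the Space expects are the ones actually present:

# Hypothetical local check; the version strings come from the pins above.
import gradio
import transformers

assert gradio.__version__ == "3.50.2", f"unexpected gradio {gradio.__version__}"
assert transformers.__version__ == "4.34.1", f"unexpected transformers {transformers.__version__}"
print("gradio and transformers match the pinned versions")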