fixes
Browse files
- app.py +3 -3
- requirements.txt +4 -8
app.py
CHANGED
@@ -1,5 +1,5 @@
|
|
1 |
import torch
|
2 |
-
from transformers import AutoProcessor,
|
3 |
from PIL import Image
|
4 |
import gradio as gr
|
5 |
|
@@ -8,7 +8,7 @@ saved_folder_path = "model_folder" # Replace with the path to your model folder
|
|
8 |
|
9 |
# Load processor and model
|
10 |
processor = AutoProcessor.from_pretrained(saved_folder_path) # Processor (e.g., feature extractor + tokenizer)
|
11 |
-
model =
|
12 |
model.eval() # Set model to evaluation mode
|
13 |
|
14 |
# Define the caption generation function
|
@@ -35,4 +35,4 @@ interface = gr.Interface(
|
|
35 |
)
|
36 |
|
37 |
# Launch the Gradio app
|
38 |
-
interface.launch()
|
|
|
1 |
import torch
|
2 |
+
from transformers import AutoProcessor, AutoModelForVision2Seq
|
3 |
from PIL import Image
|
4 |
import gradio as gr
|
5 |
|
|
|
8 |
|
9 |
# Load processor and model
|
10 |
processor = AutoProcessor.from_pretrained(saved_folder_path) # Processor (e.g., feature extractor + tokenizer)
|
11 |
+
model = AutoModelForVision2Seq.from_pretrained(saved_folder_path) # Pre-trained BLIP model
|
12 |
model.eval() # Set model to evaluation mode
|
13 |
|
14 |
# Define the caption generation function
|
|
|
35 |
)
|
36 |
|
37 |
# Launch the Gradio app
|
38 |
+
interface.launch()
|
requirements.txt
CHANGED
@@ -1,8 +1,4 @@
|
|
1 |
-
|
2 |
-
|
3 |
-
|
4 |
-
|
5 |
-
torch==2.1.0
|
6 |
-
nltk==3.8.1
|
7 |
-
numpy==1.23.5
|
8 |
-
transformers>=4.26.0
|
|
|
1 |
+
torch
|
2 |
+
transformers
|
3 |
+
gradio
|
4 |
+
pillow
|
|
|
|
|
|
|
|