ayoubkirouane committed
Commit 378b359 · 1 Parent(s): 0e394f5

Create app.py

Files changed (1)
app.py +27 -0
app.py ADDED
@@ -0,0 +1,27 @@
+ import gradio as gr
+ import torch
+ from transformers import AutoProcessor, AutoModelForCausalLM
+
+ # Load the fine-tuned GIT captioning checkpoint and keep the model on the
+ # same device that the preprocessed inputs are moved to.
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ checkpoint1 = "ayoubkirouane/git-base-One-Piece"
+ processor = AutoProcessor.from_pretrained(checkpoint1)
+ model1 = AutoModelForCausalLM.from_pretrained(checkpoint1).to(device)
+
+
+ def img2cap(image):
+     # Preprocess the PIL image, generate caption token ids, and decode them.
+     input1 = processor(images=image, return_tensors="pt").to(device)
+     pixel_values1 = input1.pixel_values
+     generated_id1 = model1.generate(pixel_values=pixel_values1, max_length=50)
+     generated_caption1 = processor.batch_decode(generated_id1, skip_special_tokens=True)[0]
+     return generated_caption1
+
+
+ examples = ["1.png", "2.png", "3.png"]
+
+ gr.Interface(
+     img2cap,
+     inputs=gr.Image(type="pil", label="Original Image"),
+     outputs=gr.Textbox(label="Caption from pre-trained model"),
+     title="Image Captioning using the git-base-One-Piece Model",
+     description="git-base-One-Piece is used to generate a caption for the uploaded image.",
+     examples=examples,
+     theme="huggingface",
+ ).launch(debug=True)
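
For reference, the captioning step can also be exercised outside the Gradio UI. Below is a minimal standalone sketch that mirrors what img2cap does, assuming the same checkpoint and a hypothetical local test image at "sample.png":

import torch
from PIL import Image
from transformers import AutoProcessor, AutoModelForCausalLM

# Same checkpoint as in app.py; "sample.png" is a hypothetical local image path.
device = "cuda" if torch.cuda.is_available() else "cpu"
checkpoint = "ayoubkirouane/git-base-One-Piece"
processor = AutoProcessor.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint).to(device)

# Preprocess the image, generate caption token ids, and decode them to text.
image = Image.open("sample.png").convert("RGB")
pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)
generated_ids = model.generate(pixel_values=pixel_values, max_length=50)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])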