import gradio as gr
from PIL import Image
from transformers import pipeline, AutoModel, AutoProcessor
import torch

# Load the OpenGVLab/InternVL-Chat-V1-5 model and its processor.
# The checkpoint ships custom code (trust_remote_code=True); if AutoProcessor cannot
# resolve a processor for it, fall back to the preprocessing utilities from the model card.
model = AutoModel.from_pretrained(
    "OpenGVLab/InternVL-Chat-V1-5",
    torch_dtype=torch.bfloat16,  # half precision to keep memory usage manageable
    trust_remote_code=True,
)
processor = AutoProcessor.from_pretrained("OpenGVLab/InternVL-Chat-V1-5", trust_remote_code=True)

# Load the Llama3 model for text processing
#llama_model = pipeline("text2text-generation", model="llama3")

def process_image(image_path, action):
    # The interface passes two values: the uploaded file path and the selected action.
    image = Image.open(image_path).convert("RGB")

    # Extract the recipe text from the image with InternVL-Chat-V1-5.
    inputs = processor(images=image, return_tensors="pt")
    generated_ids = model.generate(**inputs)
    extracted_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]

    # The chosen action (Double/Triple/Half/Third) is not applied yet; the
    # text-rewriting step (see the commented-out Llama pipeline above) remains a TODO.
    return extracted_text
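
# --- Sketch (not wired into the app above): applying the selected action ---
# A minimal, hedged example of how the chosen action could be applied to the
# extracted recipe text with a text2text-generation pipeline. The
# "google/flan-t5-base" checkpoint, the instruction mapping, and the prompt
# wording are illustrative stand-ins, not part of the original app; the helper
# below is defined here but never called by the interface.
ACTION_TO_INSTRUCTION = {
    "Double": "multiply every ingredient quantity by 2",
    "Triple": "multiply every ingredient quantity by 3",
    "Half": "divide every ingredient quantity by 2",
    "Third": "divide every ingredient quantity by 3",
}

def modify_recipe(extracted_text: str, action: str) -> str:
    # Build the pipeline lazily so the extra model is only downloaded if this
    # sketch is actually used.
    rewriter = pipeline("text2text-generation", model="google/flan-t5-base")
    prompt = (
        f"Rewrite the following recipe and {ACTION_TO_INSTRUCTION[action]}, "
        "keeping all other text unchanged:\n" + extracted_text
    )
    return rewriter(prompt, max_new_tokens=512)[0]["generated_text"]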
    
iface = gr.Interface(
    fn=process_image,
    inputs=[
        gr.Image(type="filepath", label="Recipe Image"),
        gr.Radio(choices=["Double", "Triple", "Half", "Third"], label="Action")
    ],
    outputs="text",
    title="Recipe Modifier",
    description="Upload an image of a recipe and choose how to modify the measurements.",
)

if __name__ == "__main__":
    iface.launch()