File size: 1,530 Bytes
6d4f8a8
454e48d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6d4f8a8
 
 
454e48d
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
import gradio as gr
from PIL import Image
from transformers import pipeline, AutoModelForVision2Seq, AutoProcessor
import torch

# Load the OpenGVLab/InternVL-Chat-V1-5 model and processor.
# BUG FIX: this checkpoint ships custom modeling/processing code on the Hub,
# so trust_remote_code=True is required for the Auto* classes to load it.
processor = AutoProcessor.from_pretrained(
    "OpenGVLab/InternVL-Chat-V1-5", trust_remote_code=True
)
model = AutoModelForVision2Seq.from_pretrained(
    "OpenGVLab/InternVL-Chat-V1-5", trust_remote_code=True
)

# Load a Llama 3 model for text processing.
# BUG FIX: "llama3" is not a valid Hugging Face Hub repo id, and Llama 3 is a
# causal (decoder-only) LM, so the pipeline task must be "text-generation",
# not "text2text-generation" (which is for encoder-decoder models like T5).
# The "text-generation" pipeline also returns [{"generated_text": ...}], so
# downstream indexing in adjust_recipe() is unchanged.
llama_model = pipeline("text-generation", model="meta-llama/Meta-Llama-3-8B-Instruct")

def process_image(image):
    """Extract recipe text from *image* via the vision-to-text model.

    Args:
        image: A PIL image (as produced by the Gradio image input).

    Returns:
        The decoded text generated by the model for this image.
    """
    # Preprocess the image into model-ready tensors.
    model_inputs = processor(images=image, return_tensors="pt")
    # Generate token ids, then decode the first (and only) sequence.
    output_ids = model.generate(**model_inputs)
    decoded = processor.batch_decode(output_ids, skip_special_tokens=True)
    return decoded[0]

def adjust_recipe(extracted_text, adjustment):
    """Ask the language model to scale the recipe up or down.

    Args:
        extracted_text: Recipe text extracted from the uploaded image.
        adjustment: The requested operation, e.g. "double" or "halve"
            (fed into the prompt verbatim).

    Returns:
        The text the language model generated for the adjusted recipe.
    """
    prompt = f"Here is a recipe: {extracted_text}. Please {adjustment} the recipe."
    results = llama_model(prompt)
    first_result = results[0]
    return first_result['generated_text']

def app(image, adjustment):
    """Gradio entry point: recipe image + adjustment -> adjusted recipe text.

    Chains the two stages: text extraction from the image, then
    LLM-based scaling of the extracted recipe.
    """
    return adjust_recipe(process_image(image), adjustment)

# Create the Gradio interface.
# BUG FIX: the `gr.inputs` namespace was removed in Gradio 3.x/4.x — the
# component classes now live directly on the top-level `gr` module, and
# Dropdown takes its options via the `choices` keyword argument.
interface = gr.Interface(
    fn=app,
    inputs=[
        gr.Image(type="pil"),
        gr.Dropdown(choices=["double", "halve"]),
    ],
    outputs="text",
    title="Recipe Adjuster",
    description="Upload an image of a recipe, and this app will double or halve the recipe.",
)

if __name__ == "__main__":
    # launch() starts the local web server; guarded so importing this module
    # does not start the app as a side effect.
    interface.launch()