Nechama committed on
Commit
e375416
·
verified ·
1 Parent(s): c15410b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -22
app.py CHANGED
@@ -1,34 +1,18 @@
1
  import gradio as gr
2
- from PIL import Image
3
- from transformers import pipeline, AutoModelForVision2Seq, AutoProcessor
4
- import torch
5
-
6
- # Load the OpenGVLab/InternVL-Chat-V1-5 model and processor
7
- from transformers import AutoModel
8
- model = AutoModel.from_pretrained("OpenGVLab/InternVL-Chat-V1-5", trust_remote_code=True)
9
-
10
- # Load the Llama3 model for text processing
11
- #llama_model = pipeline("text2text-generation", model="llama3")
12
-
13
- def process_image(image):
14
- # Process the image to extract the recipe using OpenGVLab
15
- inputs = processor(images=image, return_tensors="pt")
16
- generated_ids = model.generate(**inputs)
17
- extracted_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
18
-
19
- return extracted_text
20
 
 
 
 
21
  iface = gr.Interface(
22
- fn=process_image,
23
  inputs=[
24
  gr.components.Image(type="filepath", label="Recipe Image"),
25
- #gr.components.Radio(choices=["Double", "Triple", "Half", "Third"], label="Action")
26
  ],
27
  outputs="text",
28
  title="Recipe Modifier",
29
  description="Upload an image of a recipe and choose how to modify the measurements.",
30
  )
31
 
32
-
33
  if __name__ == "__main__":
34
- iface.launch(share=True)
 
1
  import gradio as gr
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
 
3
def process_recipe(i, j):
    """Placeholder handler for the Gradio interface.

    Both arguments (the uploaded recipe-image filepath and the selected
    action) are currently ignored; real recipe processing is not yet
    implemented, so a fixed acknowledgement string is returned.
    """
    acknowledgement = "Thank you"
    return acknowledgement
6
# Build the Gradio UI: a recipe image upload plus an action choice,
# wired to the placeholder handler; output is rendered as plain text.
iface = gr.Interface(
    fn=process_recipe,
    inputs=[
        gr.components.Image(type="filepath", label="Recipe Image"),
        gr.components.Radio(choices=["Double","Triple", "Half", "Third"], label="Action"),
    ],
    outputs="text",
    title="Recipe Modifier",
    description="Upload an image of a recipe and choose how to modify the measurements.",
)
16
 
 
17
# Launch the Gradio app when run as a script (no public share link;
# this revision removed the earlier share=True).
if __name__ == "__main__":
    iface.launch()