Update app.py
app.py (CHANGED)
@@ -1,7 +1,8 @@
 import subprocess
-subprocess.run('pip install -r requirements.txt', shell = True)
+#subprocess.run('pip install -r requirements.txt', shell = True)

 import gradio as gr
+import cv2
 import os
 from PIL import Image
 import numpy as np
@@ -17,7 +18,6 @@ from langchain_core.runnables import RunnablePassthrough
 from langchain_fireworks import ChatFireworks
 from langchain_community.llms import Ollama
 from langchain_core.prompts import ChatPromptTemplate
-from transformers import AutoModelForImageClassification, AutoImageProcessor
 from rich.console import Console
 from rich.markdown import Markdown

@@ -34,7 +34,9 @@ def image_to_query(image):
     output: Query for the LLM
     """
     #image = Image.open(image)
-    image =
+    #image = cv2.imread(image)
+    #image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+    image = Image.fromarray(image)

     model = AutoModelForImageClassification.from_pretrained("nprasad24/bean_classifier", from_tf=True)
     image_processor = AutoImageProcessor.from_pretrained("nprasad24/bean_classifier")
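For context on this hunk: Gradio's "image" input hands the wrapped function an RGB NumPy array, so Image.fromarray(image) is sufficient and the cv2 path can stay commented out. Below is a minimal, self-contained sketch of the classification step this sets up; classify_bean_leaf is an illustrative name, not a function in the app, and note that the earlier hunk drops the transformers import, so these Auto* classes must still be imported somewhere for the file to run.

# Sketch only (not the commit's code): how the Gradio image input reaches the bean classifier.
import numpy as np
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

def classify_bean_leaf(image_array: np.ndarray) -> str:
    # Gradio's "image" component delivers an (H, W, 3) RGB array.
    image = Image.fromarray(image_array)

    model = AutoModelForImageClassification.from_pretrained(
        "nprasad24/bean_classifier", from_tf=True
    )
    image_processor = AutoImageProcessor.from_pretrained("nprasad24/bean_classifier")

    inputs = image_processor(images=image, return_tensors="pt")
    logits = model(**inputs).logits
    return model.config.id2label[logits.argmax(-1).item()]
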
@@ -125,7 +127,8 @@ def generate_response(rag_chain, query):

     output: generated response by the llm
     """
-    return Markdown(rag_chain.invoke(f"{query}"))
+    #return Markdown(rag_chain.invoke(f"{query}"))
+    return rag_chain.invoke(f"{query}")

 def main(image):
     console = Console()
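The change above is what makes the Gradio "text" output usable: rich's Markdown(...) is a console renderable, not a string, while gr.Interface(..., outputs="text") expects plain text. A minimal sketch of the same idea, assuming the chain ends in a string-producing step such as StrOutputParser:

# Sketch only: return the raw string so Gradio's "text" output can display it.
def generate_response(rag_chain, query: str) -> str:
    answer = rag_chain.invoke(query)  # a str when the chain ends with StrOutputParser
    # Wrapping it as rich's Markdown(answer) would return a console renderable
    # that Gradio cannot display, which is why the old return was dropped.
    return answer
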
@@ -135,8 +138,10 @@ def main(image):
     output = generate_response(chain, query)
     return output

+#main('test2.jpeg')
+
 title = "Bean Classifier and Instructor"
 description = "Professor Bean is an agricultural expert. He will guide you on how to protect your plants from bean diseases"
 app = gr.Interface(fn=main, inputs="image", outputs="text", title=title,
                    description=description)
-app.launch(share=True)
+app.launch(share=True)
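Since Gradio now supplies the image as a NumPy array rather than a file path, the commented-out main('test2.jpeg') call would only work once the file is loaded and converted first. A hedged sketch of such a local smoke test follows; 'test2.jpeg' is just the name referenced in the commit and may not ship with the Space.

# Sketch only: a local smoke test outside Gradio, mirroring the commented-out main('test2.jpeg').
import numpy as np
from PIL import Image

if __name__ == "__main__":
    test_image = np.array(Image.open("test2.jpeg").convert("RGB"))
    print(main(test_image))  # main() as defined in app.py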