# chat_vision / app.py
# Hugging Face Space by Core23 (commit db3e332, ~1.7 kB)
# fastai image classifier fronted by Gradio, with OpenAI-generated chat text.
# Standard library
import os

# Third-party
import gradio as gr
import openai
import skimage  # required by fastai's image pipeline
from fastai.vision.all import *

# API key must be set in the environment; generate_text/handle_query fail without it.
# (Original code used os.getenv without importing os -> NameError at import time.)
openai.api_key = os.getenv("OPENAI_API_KEY")

# Load the exported fastai learner and its class vocabulary.
learn = load_learner('model.pkl')
labels = learn.dls.vocab
def generate_text(prompt):
    """Send *prompt* to the legacy OpenAI Completion API and return the text.

    Uses the davinci engine with a 1024-token budget and temperature 0.7;
    the single returned completion is stripped of surrounding whitespace.
    """
    completion = openai.Completion.create(
        engine="davinci",
        prompt=prompt,
        max_tokens=1024,
        n=1,
        stop=None,
        temperature=0.7,
    )
    text = completion.choices[0].text
    return text.strip()
def handle_query(query, chat_history):
    """Answer *query* with gpt-3.5-turbo, given prior *chat_history* messages.

    chat_history is a list of {"role": ..., "content": ...} dicts.
    Returns the assistant's reply text.

    Fix: prior turns now go between the system prompt and the new user
    message, so the model sees the conversation in chronological order.
    (The original appended chat_history AFTER the latest query.)
    """
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=(
            [{"role": "system", "content": "You are a helpful assistant."}]
            + chat_history
            + [{"role": "user", "content": query}]
        ),
    )
    return response.choices[0].message['content']
def predict(img):
    """Classify *img* and return {class name: probability} for gr.Label.

    Returns a dict of float confidences over all classes, which is the shape
    gr.outputs.Label requires.

    Fix: the original merged a free-text chat_response string into this dict;
    Label expects numeric confidences, so the string value broke rendering —
    and cost one OpenAI Completion API call per prediction. Removed.
    """
    img = PILImage.create(img)
    pred, pred_idx, probs = learn.predict(img)
    return {labels[i]: float(probs[i]) for i in range(len(labels))}
def chat(query, chat_history):
    """Thin wrapper: delegate *query* and *chat_history* to handle_query."""
    return handle_query(query, chat_history)
# Example image(s) shown under the input widget.
examples = ['image.jpg']
# Interface options (legacy gradio 2.x keyword API).
interpretation = 'default'
enable_queue = True

# Build the classifier UI and start serving it.
demo = gr.Interface(
    fn=predict,
    inputs=gr.inputs.Image(shape=(512, 512)),
    outputs=gr.outputs.Label(num_top_classes=3),
    examples=examples,
    interpretation=interpretation,
    enable_queue=enable_queue,
)
demo.launch()