import gradio as gr
from fastai.vision.all import *
import openai
import os

# Read the OpenAI API key from the environment
openai.api_key = os.getenv("OPENAI_API_KEY")

# Load the exported fastai learner
learn = load_learner('model.pkl')

# Class labels come from the learner's DataLoaders vocabulary
labels = learn.dls.vocab

# Generate free-form commentary from a prompt (legacy openai<1.0 Completion API)
def generate_text(prompt):
    response = openai.Completion.create(
        engine="davinci",
        prompt=prompt,
        max_tokens=1024,
        n=1,
        stop=None,
        temperature=0.7,
    )
    return response.choices[0].text.strip()

# Answer a user query with the ChatCompletion endpoint; chat_history is a list
# of {"role": ..., "content": ...} dicts and must precede the new user message
def handle_query(query, chat_history):
    messages = [{"role": "system", "content": "You are a helpful assistant."}]
    messages += chat_history
    messages.append({"role": "user", "content": query})
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages,
    )
    return response.choices[0].message['content']

# Classify an image, then ask the language model to comment on the result
def predict(img):
    img = PILImage.create(img)
    pred, pred_idx, probs = learn.predict(img)
    prediction = {labels[i]: float(probs[i]) for i in range(len(labels))}
    chat_prompt = f"The model predicted {prediction}."
    chat_response = generate_text(chat_prompt)
    # Return the confidences and the commentary separately: a Label output can
    # only render a label -> float dict, not the free-form text
    return prediction, chat_response

# Thin wrapper so the chat handler can be used directly as a Gradio callback
def chat(query, chat_history):
    return handle_query(query, chat_history)
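# The chat() helper above is defined but never wired into the UI below. A
# minimal sketch of one way to expose it (hypothetical, using the same legacy
# Gradio API as the rest of this script), threading the message history
# through a "state" value:
#
#   def chat_turn(query, history):
#       history = history or []
#       answer = chat(query, history)
#       history = history + [{"role": "user", "content": query},
#                            {"role": "assistant", "content": answer}]
#       return answer, history
#
#   gr.Interface(fn=chat_turn,
#                inputs=[gr.inputs.Textbox(label="Question"), "state"],
#                outputs=[gr.outputs.Textbox(label="Answer"), "state"]).launch()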

# Example input shown in the UI
examples = ['image.jpg']

# Interface options (part of the legacy Gradio Interface API)
interpretation = 'default'
enable_queue = True

# Build and launch the interface (legacy gr.inputs/gr.outputs API, Gradio < 4);
# the two outputs match the two values returned by predict()
gr.Interface(
    fn=predict,
    inputs=gr.inputs.Image(shape=(512, 512)),
    outputs=[gr.outputs.Label(num_top_classes=3),
             gr.outputs.Textbox(label="Model commentary")],
    examples=examples,
    interpretation=interpretation,
    enable_queue=enable_queue,
).launch()
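# Usage sketch (assumes model.pkl and the example image.jpg sit next to this
# script, and that the API key is exported in the shell):
#   export OPENAI_API_KEY=sk-...
#   python app.py
# Gradio then serves the app on http://127.0.0.1:7860 by default.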