import gradio as gr
from fastai.vision.all import *
from openai import OpenAI  # requires openai>=1.0
import os


# Create the OpenAI client; it authenticates with the OPENAI_API_KEY environment variable
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

# Load your trained model (you should replace 'model.pkl' with the path to your model file)
learn = load_learner('model.pkl')
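# If you trained the model yourself, 'model.pkl' is the file produced by fastai's
# export step, e.g. (a sketch, assuming a fitted Learner named `learn`):
#   learn.export('model.pkl')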

# Define the labels for the output
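# (learn.dls.vocab lists the class names in the same order as the model's output probabilities)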
labels = learn.dls.vocab

# Define the prediction function
def predict(img):
    img = PILImage.create(img)
    pred, pred_idx, probs = learn.predict(img)
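    # Map each class label to its predicted probability so gr.Label can show the top classes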
    prediction = {labels[i]: float(probs[i]) for i in range(len(labels))}

    # Now generate a chat/text response based on the model's prediction.
    chat_prompt = f"The image likely depicts the following: {pred}. What can I help you with next?"

    # Ensure that the OPENAI_API_KEY environment variable is set, as it is used to
    # authenticate the request to OpenAI's chat completions API. (The legacy
    # Completion endpoint and the text-davinci-003 model have been retired.)
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",  # adjust the model as needed for your use case
        messages=[{"role": "user", "content": chat_prompt}],
        max_tokens=1024,
        temperature=0.7,
    )
    text_response = response.choices[0].message.content.strip()

    return prediction, text_response

# Create examples list by specifying the paths to the example images
examples = ["path/to/example1.jpg", "path/to/example2.jpg"]  # replace with actual image paths

# Define the Gradio interface
iface = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="pil"),  # the `shape` argument was removed in Gradio 4
    outputs=[gr.Label(num_top_classes=3), gr.Textbox(label="GPT Response")],
    examples=examples,
)

# Queue requests (optional, but helpful under heavy traffic; replaces the removed
# `enable_queue` argument) and launch the Gradio app
iface.queue()
iface.launch()
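# To run locally (a sketch; assumes this file is saved as app.py with model.pkl beside it):
#   export OPENAI_API_KEY="sk-..."
#   python app.py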