import gradio as gr
import os
import openai
import base64

# Read API key from environment variable
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
if not OPENAI_API_KEY:
    raise ValueError("API key not found. Please set the OPENAI_API_KEY environment variable.")

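# Pre-1.0 openai SDK style; openai.ChatCompletion.create below also assumes openai<1.0.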
openai.api_key = OPENAI_API_KEY

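# Defaults for the system prompt and model. Image inputs require a vision-capable model
# (e.g. 'gpt-4o' or 'gpt-4-turbo'); plain 'gpt-4' does not accept image_url content parts.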
global_system_prompt = None
global_model = 'gpt-4'

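# Encode a local image file as base64 so it can be embedded in a data: URL for the API.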
def encode_image(image_path):
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode('utf-8')

def convert_history_to_openai_format(history):
    """
    Convert chat history to OpenAI format.
    
    Parameters:
    history (list of tuples): The chat history, where each tuple is (user_message, assistant_message);
        a user_message may itself be a (file_path, caption) tuple for an uploaded image.

    Returns:
    list of dict: The history in OpenAI chat format, starting with a "system" message and using
        "user"/"assistant" roles for the remaining turns.
    """
    global global_system_prompt
    if global_system_prompt is None:
        global_system_prompt = "You are a helpful assistant."
        
    formatted_history = [{"role": "system", "content": global_system_prompt}]
    for user_msg, assistant_msg in history:
        # File uploads are stored as (file_path, caption) pairs; plain text turns are strings.
        if isinstance(user_msg, (tuple, list)) and str(user_msg[0]).lower().endswith(('.png', '.jpg', '.jpeg')):
            encoded_image = encode_image(user_msg[0])
            caption = user_msg[1] if len(user_msg) > 1 and user_msg[1] else 'Help me based on the image'
            media_type = 'image/png' if str(user_msg[0]).lower().endswith('.png') else 'image/jpeg'
            content = [
                {"type": "text", "text": caption},
                {"type": "image_url", "image_url": {"url": f"data:{media_type};base64,{encoded_image}"}},
            ]
            formatted_history.append({"role": "user", "content": content})
        else:
            formatted_history.append({"role": "user", "content": user_msg})
        # The turn currently being answered has assistant_msg = None, so only append completed replies.
        if isinstance(assistant_msg, str):
            formatted_history.append({"role": "assistant", "content": assistant_msg})
    
    return formatted_history

def bot(history):
    """Send the conversation to the OpenAI API and fill in the assistant's reply."""
    global global_model
    response = openai.ChatCompletion.create(
        model=global_model,
        messages=convert_history_to_openai_format(history)
    )

    chatbot_message = response.choices[0].message['content'].strip()
    # add_message left the latest turn's reply as None; replace the pair with the completed turn.
    history[-1] = (history[-1][0], chatbot_message)
    return history

def add_message(history, message):
    """Append the user's turn (text and/or uploaded files) to the history with an empty reply slot."""
    if len(message["files"]) > 0:
        for x in message["files"]:
            # Store each upload as a (file_path, caption) pair so the Chatbot renders the image with its text.
            history.append(((x, message["text"]), None))
    else:
        if message["text"] != '':
            history.append((message["text"], None))
    # Clear and disable the textbox while the bot responds; it is re-enabled after bot() finishes.
    return history, gr.MultimodalTextbox(value=None, interactive=False)

# Define the Gradio interface
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### Chatbot Playground")
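            # Avatar images assume user.png and ai.png exist in the current working directory.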
            chatbot = gr.Chatbot(label="Chatbot:", bubble_full_width=False, show_copy_button=True, min_width=400,
                                 avatar_images=(os.path.join(os.getcwd(), 'user.png'), os.path.join(os.getcwd(), 'ai.png')))
            chat_input = gr.MultimodalTextbox(interactive=True, placeholder="Enter message or upload file...", show_label=False)

    # Chain the events: append the user's turn, run the bot, then re-enable the input box.
    chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input])
    bot_msg = chat_msg.then(bot, chatbot, chatbot)
    bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])

# Launch the Gradio interface
demo.launch()