import gradio as gr
import os
from openai import OpenAI
import base64

# Read API key from environment variable
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
if not OPENAI_API_KEY:
    raise ValueError("API key not found. Please set the OPENAI_API_KEY environment variable.")
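# Example shell setup before running this script (illustrative only; the key value
# and the filename "app.py" are placeholders, not part of the original project):
#   export OPENAI_API_KEY="sk-..."
#   python app.py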

client = OpenAI(api_key=OPENAI_API_KEY)

global_system_prompt = '''You are a fitness assistant. First, ask the user to upload their picture to create a personalized workout plan.
Ask about their goal. Ask about their height and weight and calculate their BMI. Then, based on that information, give them a 10-day workout plan and explain
the reasoning behind each recommendation using their picture or the other information they provided, for example: "Because your pectoralis muscles look small in the picture, you should work on them."
'''

def encode_image(image_path):
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode('utf-8')

def convert_history_to_openai_format(history):
    """
    Convert chat history to OpenAI format.
    
    Parameters:
    history (list of tuples): The chat history where each tuple consists of (message, sender).
    
    Returns:
    list of dict: The formatted history for OpenAI with "role" as either "user" or "assistant".
    """
    global global_system_prompt
    if global_system_prompt is None:
        global_system_prompt = "You are a helpful assistant."
    formatted_history = [{"role": "system", "content": global_system_prompt},]
    for user_msg, assistant_msg in history:
        # Image messages are stored as (file_path, caption) tuples; plain text messages are strings.
        if not isinstance(user_msg, str) and (('.png' in user_msg[0]) or ('.jpg' in user_msg[0])):
            encoded_image = encode_image(user_msg[0])
            text = user_msg[1] if user_msg[1] != '' else 'help me based on the image'
            content = [
                {'type': 'text', 'text': text},
                {'type': 'image_url', 'image_url': {'url': f'data:image/jpeg;base64,{encoded_image}'}}
            ]
            formatted_history.append({"role": 'user', "content": content})
        else:
            formatted_history.append({"role": 'user', "content": user_msg})
        if isinstance(assistant_msg,str):
            formatted_history.append({"role": 'assistant', "content": assistant_msg})
    return formatted_history
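# Illustrative example of the conversion above (values are made up):
#   history = [
#       (("photo.jpg", "here is my picture"), "Thanks! What is your goal?"),
#       ("I want to build muscle", None),
#   ]
# becomes:
#   [
#       {"role": "system", "content": global_system_prompt},
#       {"role": "user", "content": [{"type": "text", "text": "here is my picture"},
#                                    {"type": "image_url", "image_url": {"url": "data:image/jpeg;base64,..."}}]},
#       {"role": "assistant", "content": "Thanks! What is your goal?"},
#       {"role": "user", "content": "I want to build muscle"},
#   ]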

def bot(history):
    # Generate the assistant's reply from the full conversation history.
    response = client.chat.completions.create(
        model='gpt-4o',
        messages=convert_history_to_openai_format(history)
    )
    print(response)  # Debug: dump the raw API response
    chatbot_message = response.choices[0].message.content.strip()
    history[-1][1] = chatbot_message
    return history

def add_message(history, message):
    if len(message["files"]) > 0:
        for x in message["files"]:
            history.append(((x, message["text"]), None))
    else:
        if message["text"] != '':
            history.append((message["text"], None))
    return history, gr.MultimodalTextbox(value=None, interactive=False)
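# Note: gr.MultimodalTextbox submits a dict of the form {"text": "...", "files": [...]}
# (file entries are local paths created by Gradio). Image uploads are stored in the
# history as ((path, caption), None) so convert_history_to_openai_format() can detect them.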

# Define the Gradio interface
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### Chatbot Playground")
            chatbot = gr.Chatbot(label="Chatbot:", bubble_full_width=False, show_copy_button=True, min_width=400,
                                 avatar_images=(os.path.join(os.getcwd(), 'user.png'), os.path.join(os.getcwd(), 'ai.png')))
            chat_input = gr.MultimodalTextbox(interactive=True, placeholder="Enter message or upload file...", show_label=False)

    chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input])
    bot_msg = chat_msg.then(bot, chatbot, chatbot, api_name="bot_response")
    bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])


# Launch the Gradio interface
demo.launch(share=True)  # Enable public sharing