Medvira committed on
Commit
6379d4c
·
verified ·
1 Parent(s): 31d602d

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +83 -0
app.py ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import os
3
+ import openai
4
+ import base64
5
+
6
# Read API key from environment variable
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
# Fail fast at import time rather than on the first API call.
if not OPENAI_API_KEY:
    raise ValueError("API key not found. Please set the OPENAI_API_KEY environment variable.")

openai.api_key = OPENAI_API_KEY

# Module-level chat configuration: convert_history_to_openai_format fills in
# a default system prompt on first use; bot reads the model name.
global_system_prompt = None
global_model = 'gpt-4'
15
+
16
def encode_image(image_path):
    """Return the contents of the file at *image_path* as a base64 string."""
    with open(image_path, "rb") as fh:
        raw = fh.read()
    return base64.b64encode(raw).decode('utf-8')
19
+
20
def convert_history_to_openai_format(history):
    """
    Convert Gradio chat history to the OpenAI chat-completion message format.

    Parameters:
        history (list): Chat turns as (user_msg, assistant_msg) pairs. A user
            message is either a plain string, or a (file_path, caption) tuple
            for an image upload.

    Returns:
        list of dict: Messages with "role"/"content" keys, starting with the
        system prompt, with images embedded as base64 data URLs.
    """
    global global_system_prompt
    if global_system_prompt is None:
        global_system_prompt = "You are a helpful assistant."

    formatted_history = [{"role": "system", "content": global_system_prompt}]
    for user_msg, assistant_msg in history:
        if isinstance(user_msg, tuple) and ('.png' in user_msg[0] or '.jpg' in user_msg[0]):
            encoded_image = encode_image(user_msg[0])
            # Bug fix: declare the correct MIME type in the data URL instead
            # of hard-coding "jpeg" for PNG uploads as well.
            mime = 'image/png' if '.png' in user_msg[0] else 'image/jpeg'
            text = user_msg[1] if user_msg[1] != '' else 'Help me based on the image'
            content = [
                {'type': 'text', 'text': text},
                {'type': 'image_url', 'image_url': {'url': f'data:{mime};base64,{encoded_image}'}},
            ]
            formatted_history.append({"role": 'user', "content": content})
        else:
            formatted_history.append({"role": 'user', "content": user_msg})
        # The pending turn has assistant_msg=None; skip it until bot fills it.
        if isinstance(assistant_msg, str):
            formatted_history.append({"role": 'assistant', "content": assistant_msg})

    return formatted_history
49
+
50
def bot(history):
    """
    Generate an assistant reply for the latest turn in *history*.

    Sends the full conversation to the OpenAI chat-completion endpoint and
    stores the stripped reply text in the last history entry.

    Parameters:
        history (list): Gradio chat history; the last entry is the pending
            user turn whose assistant slot is None.

    Returns:
        list: The updated history.
    """
    global global_model
    response = openai.ChatCompletion.create(
        model=global_model,
        messages=convert_history_to_openai_format(history)
    )

    chatbot_message = response.choices[0].message['content'].strip()
    # Bug fix: replace the last turn rather than assigning into it —
    # add_message appends tuples, and tuples do not support item assignment.
    history[-1] = (history[-1][0], chatbot_message)
    return history
60
+
61
def add_message(history, message):
    """
    Append the user's multimodal input to the chat history.

    Parameters:
        history (list): Existing chat turns.
        message (dict): MultimodalTextbox value with "text" and "files" keys.

    Returns:
        tuple: (updated history, a cleared and disabled MultimodalTextbox).
    """
    if len(message["files"]) > 0:
        for path in message["files"]:
            # Bug fix: use an outer list (not a tuple) so bot can later fill
            # in the assistant slot; the inner (path, caption) tuple is what
            # marks this turn as an image upload for the history converter.
            history.append([(path, message["text"]), None])
    else:
        if message["text"] != '':
            history.append([message["text"], None])
    return history, gr.MultimodalTextbox(value=None, interactive=False)
69
+
70
# Define the Gradio interface
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### Chatbot Playground")
            chatbot = gr.Chatbot(label="Chatbot:", bubble_full_width=False, show_copy_button=True, min_width=400,
                                 avatar_images=(os.path.join(os.getcwd(), 'user.png'), os.path.join(os.getcwd(), 'ai.png')))
            chat_input = gr.MultimodalTextbox(interactive=True, placeholder="Enter message or upload file...", show_label=False)

    # Bug fix: chain the two steps instead of composing them in one lambda.
    # add_message returns (history, textbox) while bot expects only the
    # history list, and the original lambda returned a single value where two
    # outputs were declared.
    chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input])
    bot_msg = chat_msg.then(bot, chatbot, chatbot)
    # Re-enable the input box once the bot has answered (add_message disables it).
    bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])

# Launch the Gradio interface
demo.launch()