import os
from dotenv import load_dotenv, find_dotenv
import gradio as gr
import openai
import requests
from PIL import Image
from io import BytesIO

# load the secrets if running locally
_ = load_dotenv(find_dotenv(filename="secrets.env", raise_error_if_not_found=False))
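
# The app expects the following variables, either in a local `secrets.env`
# file or in the real environment (e.g. the host's secret store):
# AUTH_USERNAME=<login for the Gradio auth screen>
# AUTH_PASSWORD=<matching password>
# OPENAI_API_KEY=<your OpenAI API key>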

# Global variable
AUTH_USERNAME = os.environ["AUTH_USERNAME"]
AUTH_PASSWORD = os.environ["AUTH_PASSWORD"]

# Load credentials
openai.api_key = os.environ["OPENAI_API_KEY"]

SYSTEM_PROMPT = "You are a helpful assistant and do your best to answer the user's questions.\
	You do not make up answers."

# define the function that will make the API calls for the chatbot
def chatBotCompletionApiCall(prompt: list, temperature=0.7, max_tokens=1024, model="GPT-3.5", stream=True):
	if model == "GPT-3.5":
		model = "gpt-3.5-turbo-0125"
	else:
		model = "gpt-4-turbo-preview"
	# make the API call with the given parameters
	response = openai.chat.completions.create(
		model=model,
		messages=prompt,
		max_tokens=max_tokens,
		temperature=temperature,
		stream=stream,
	)

	# yield the completed text
	if stream:
		for chunk in response:
			output = chunk.choices[0].delta.content # may be None on the final chunk
			yield output
	else:
		# this function is a generator either way, so yield the full answer once
		yield response.choices[0].message.content # when stream is set to False
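# Example usage (illustrative only, not part of the app): print a streamed answer.
# for piece in chatBotCompletionApiCall([{"role": "user", "content": "Hello"}]):
# 	print(piece or "", end="")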

# Helper function: format the prompt to include history for the chatbot
def chatBotFormatPrompt(newMsg:str, chatHistory, instruction):
	
	# start with the system prompt
	messages = []
	messages.append({
		"role": "system",
		"content": instruction
	})

	# add the history
	for turn in chatHistory:
		# retrieve the user and assistant messages from history
		userMsg, assistantMsg = turn
		
		# add the user message
		messages.append({
			"role": "user",
			"content": userMsg
		})

		# add the assistant message
		messages.append({
			"role": "assistant",
			"content": AssistantMsg
		})
	
	# add the last message that needs to be answered
	messages.append({
		"role": "user",
		"content": newMsg
	})

	# return the formatted messages
	return messages
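# For illustration, with one prior turn the returned list looks like:
# [{"role": "system", "content": instruction},
#  {"role": "user", "content": "<previous user message>"},
#  {"role": "assistant", "content": "<previous assistant answer>"},
#  {"role": "user", "content": newMsg}]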

# def the response function (to get the answer as one block after generation)
def responseChatBot(newMsg:str, chatHistory, instruction, temperature, max_tokens, model, stream=False):
	prompt = chatBotFormatPrompt(newMsg=newMsg, chatHistory=chatHistory, instruction=instruction)
	# consume the one-item generator to get the full answer at once (stream=False)
	response = next(chatBotCompletionApiCall(prompt=prompt, temperature=temperature, max_tokens=max_tokens, model=model, stream=stream))
	chatHistory.append([newMsg, response])
	return "", chatHistory

# def the streamResponse function, to stream the results as they are generated
def streamResponseChatBot(newMsg:str, chatHistory, instruction, temperature, max_tokens, model, stream=True):
	# format the prompt before appending the placeholder turn, so newMsg is not duplicated in it
	prompt = chatBotFormatPrompt(newMsg=newMsg, chatHistory=chatHistory, instruction=instruction)
	chatHistory.append([newMsg, ""])
	tokenStream = chatBotCompletionApiCall(prompt=prompt, temperature=temperature, max_tokens=max_tokens, model=model, stream=stream)
	for chunk in tokenStream:
		if chunk is not None:
			chatHistory[-1][1] += chunk
			yield "", chatHistory

# helper function for image generation
def generateImageOpenAI(prompt, size = "1024x1024", quality = "standard", model = "dall-e-3", n=1):
	'''
	Make an API call to OpenAI's DALL-E model and return the generated image in PIL format
	'''
	print("request sent")
	openAIresponse = openai.images.generate(model=model, prompt=prompt, size=size, quality=quality, n=n)
	image_url = openAIresponse.data[0].url

	# get the image in Bytes format
	imageResponse = requests.get(url=image_url)
	imageBytes = imageResponse.content
	
	# convert it to PIL format
	image = Image.open(BytesIO(imageBytes))

	print("image received!")
	# return the result
	return image
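# Example usage (hypothetical prompt):
# img = generateImageOpenAI("a watercolor fox in the snow", size="1024x1792", quality="hd")
# img.save("fox.png")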

# Define some components
model = gr.Dropdown(
	choices=["GPT-3.5", "GPT-4"],
	value="GPT-3.5",
	multiselect=False,
	label="Model",
	info="Choose the model you want to chat with.\nGo easy on GPT-4: it costs 500 times more than GPT 3.5!"
)
instruction = gr.Textbox(
	value=SYSTEM_PROMPT,
	label="System instructions",
	lines=4,)
temperature = gr.Slider(
	minimum=0,
	maximum=2,
	step=0.1,
	value=0.7,
	label="Temperature",
	info="The higher, the more random the results will be"
)
max_token = gr.Slider(
	minimum=64,
	maximum=2048,
	step=64,
	value=1024,
	label="Max Token",
	info="Maximum number of token the model will take into consideration"
)

# Components for Image generator 
genImage = gr.Image(
	label="Result",
	type="pil",
	render=False
) # Box for generated image

# def helper function to update and render the component
def generateAndRender(prompt:str, size, quality):
	'''
	Send the request to the API endpoint and update the components. Outputs:
	- oldPrompt
	- genImage
	- promptBox
	'''

	# get the image
	image = generateImageOpenAI(prompt, size, quality)
	
	# update the components
	oldPrompt = gr.Textbox(value=prompt, label="Your prompt", render=True)
	genImage = gr.Image(value=image, label="Result", type="pil", render=True)
	promptBox = gr.Textbox(label="Enter your prompt", lines=3)
	
	# return the components
	return oldPrompt, genImage, promptBox
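# Note: generateAndRender is defined but never wired to an event below;
# the click/submit handlers call generateImageOpenAI directly.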

# Build the app
with gr.Blocks(theme='Insuz/Mocha', css="style.css") as app:
	
	# First tab: chatbot
	with gr.Tab(label="ChatBot"):
		with gr.Row():
			with gr.Column(scale = 8, elem_classes=["float-left"]):
				gr.Markdown("# Private GPT")
				gr.Markdown("This chatbot is powered by the openAI GPT series.\
						The default model is `GPT-3.5`, but `GPT-4` can be selected in the advanced options.\
						\nAs it uses the openAI API, user data is not used to train openAI models (see their official [website](https://help.openai.com/en/articles/5722486-how-your-data-is-used-to-improve-model-performance)).")
				chatbot = gr.Chatbot() # Associated variable: chatHistory
				msg = gr.Textbox(label="Message")
				with gr.Row():
					with gr.Column(scale=4):
						submitButton = gr.Button(value="Submit")
					with gr.Column(scale=4):
						clearButton = gr.ClearButton([chatbot, msg])
				msg.submit(
					fn=streamResponseChatBot,
					inputs=[msg, chatbot, instruction, temperature, max_token, model],
					outputs=[msg, chatbot]
				)
				submitButton.click(
					fn=streamResponseChatBot,
					inputs=[msg, chatbot, instruction, temperature, max_token, model],
					outputs=[msg, chatbot]
				)
			with gr.Column(scale = 1, elem_classes=["float-right"]):
				with gr.Accordion(label="Advanced options", open=True):
					model.render()
					instruction.render()
					temperature.render()
					max_token.render()

	# Second Tab: image generation
	with gr.Tab(label="Image Creation"):
		# Title and description
		gr.Markdown("# Image generation")
		gr.Markdown("Powered by OpenAI's `DALL-E 3` Model under the hood.\n\
				You can change the `size` as well as the `quality`.")
		
		# First row: prompt
		with gr.Row():
			prompt = gr.Textbox(label="Enter your prompt", lines=3)
		
		# Second row: allow for advanced customization
		with gr.Accordion(label="Advanced option", open=False): # should not be visible by default

			# Three columns of advanced options
			with gr.Row():
				with gr.Column():
					size = gr.Dropdown(
						choices=["1024x1024", "1024x1792", "1792x1024"],
						value="1024x1024",
						label="Size",
						info="Choose the size of the image",
					)
				with gr.Column():
					quality = gr.Dropdown(
						choices=["standard", "hd"],
						value="standard",
						label="Quality",
						info="Define the quality of the image",
					)
				# hidden defaults; renamed from `model` to avoid shadowing the chatbot's model dropdown
				imageModel = gr.Text(value="dall-e-3", render=False)
				n = gr.Text(value="1", render=False)
		
		# Button
		# Submit and clear
		with gr.Row():
			with gr.Column():
				button = gr.Button(value="submit", min_width=30, )
			with gr.Column():
				clearImageButton = gr.ClearButton(components=[prompt, genImage])
		
		# Generated Image
		genImage.render()

		# Event listeners (not rendered) - the logic of the app
		button.click(
			fn=generateImageOpenAI,
			inputs=[prompt, size, quality],
			outputs=[genImage],
		)
		prompt.submit(
			fn=generateImageOpenAI,
			inputs=[prompt, size, quality],
			outputs=[genImage],
		)
		
gr.close_all()
app.queue().launch(auth=(AUTH_USERNAME, AUTH_PASSWORD))
# app.queue().launch(share=False)