# Private GPT — Gradio app (HuggingFace Space) with a GPT-3.5/GPT-4 chatbot tab
# and a DALL-E 3 image-generation tab, both backed by the OpenAI API.
import os
from dotenv import load_dotenv, find_dotenv
import gradio as gr
import openai
import requests
from PIL import Image
from io import BytesIO

# Load the secrets if running locally; raise_error_if_not_found=False makes
# this a silent no-op when secrets.env is absent (e.g. in the deployed Space,
# where the variables come from the environment instead).
_ = load_dotenv(find_dotenv(filename="secrets.env", raise_error_if_not_found=False))

# Credentials for Gradio's built-in basic-auth login screen (see launch() at
# the bottom of the file). KeyError here means the env vars are not set.
AUTH_USERNAME = os.environ["AUTH_USERNAME"]
AUTH_PASSWORD = os.environ["AUTH_PASSWORD"]

# API key shared by the chat-completions and image-generation endpoints.
openai.api_key = os.environ["OPENAI_API_KEY"]

# Default system prompt; editable in the chatbot's "Advanced options" box.
SYSTEM_PROMPT = "You are a helpful assistant and do your best to answer the user's questions.\
You do not make up answers."
# define the function that will make the API calls for the chatbot
def chatBotCompletionApiCall(prompt: list, temperature=0.7, max_tokens=1024, model="GPT-3.5", stream=True):
    """Call the OpenAI chat-completions API and yield the answer text.

    Args:
        prompt: list of {"role": ..., "content": ...} message dicts
            (the full conversation, as built by chatBotFormatPrompt).
        temperature: sampling temperature forwarded to the API.
        max_tokens: completion length cap forwarded to the API.
        model: UI alias; "GPT-3.5" maps to gpt-3.5-turbo-0125, anything
            else to gpt-4-turbo-preview.
        stream: when True, yield text fragments as they arrive; when
            False, yield the complete answer once.

    Yields:
        str | None: streamed delta contents (the final streamed chunk may
        carry None), or one complete answer string when stream is False.
    """
    # Map the UI-facing model alias to the concrete API model name.
    if model == "GPT-3.5":
        model = "gpt-3.5-turbo-0125"
    else:
        model = "gpt-4-turbo-preview"
    # make the API call with the given parameters
    response = openai.chat.completions.create(
        model=model,
        messages=prompt,
        max_tokens=max_tokens,
        temperature=temperature,
        stream=stream,
    )
    if stream:
        # Relay each delta as it arrives (content can be None at stream end).
        for chunk in response:
            yield chunk.choices[0].delta.content
    else:
        # BUG FIX: the original computed the full answer here but never
        # returned or yielded it (the function is a generator because of the
        # `yield` above), so non-streaming callers received nothing.
        yield response.choices[0].message.content
# Helper function: format the prompt to include history for the chatbot
def chatBotFormatPrompt(newMsg: str, chatHistory, instruction):
    """Build the OpenAI `messages` payload for one chat turn.

    The payload is: the system instruction first, then every past
    (user, assistant) pair from chatHistory in order, and finally the
    new user message that still awaits an answer.

    Args:
        newMsg: the user message to be answered.
        chatHistory: iterable of (user message, assistant message) pairs.
        instruction: the system prompt text.

    Returns:
        list[dict]: role/content dicts ready for the chat-completions API.
    """
    # System instruction always leads the conversation.
    messages = [{"role": "system", "content": instruction}]
    # Replay the previous turns, alternating user and assistant roles.
    for userTurn, assistantTurn in chatHistory:
        messages.append({"role": "user", "content": userTurn})
        messages.append({"role": "assistant", "content": assistantTurn})
    # The message that needs answering goes last.
    messages.append({"role": "user", "content": newMsg})
    return messages
# def the response function (to get the answer as one block after generation)
def responseChatBot(newMsg: str, chatHistory, instruction, temperature, max_tokens, model, stream=False):
    """Answer newMsg in a single block (non-streaming UI path).

    Returns:
        tuple: ("", chatHistory) — the empty string clears the input
        textbox, and chatHistory gains [newMsg, answer] for the Chatbot
        component to render.
    """
    prompt = chatBotFormatPrompt(newMsg=newMsg, chatHistory=chatHistory, instruction=instruction)
    # chatBotCompletionApiCall is a generator: it must be consumed here.
    # BUG FIX: the original appended the un-iterated generator object itself
    # to the history, so no API call happened and the Chatbot had no text.
    chunks = chatBotCompletionApiCall(prompt=prompt, temperature=temperature, max_tokens=max_tokens, model=model)
    response = "".join(chunk for chunk in chunks if chunk is not None)
    chatHistory.append([newMsg, response])
    return "", chatHistory
# def the streamResponse function, to stream the results as they are generated
def streamResponseChatBot(newMsg: str, chatHistory, instruction, temperature, max_tokens, model, stream=True):
    """Stream the answer chunk-by-chunk into the chat history.

    Yields:
        tuple: ("", chatHistory) after each received chunk, so Gradio
        re-renders the Chatbot as the answer grows; the empty string
        keeps the input textbox cleared.
    """
    # BUG FIX: build the prompt BEFORE appending the placeholder turn.
    # The original appended [newMsg, ""] first, so the API prompt contained
    # the new question twice plus a spurious empty assistant message.
    prompt = chatBotFormatPrompt(newMsg=newMsg, chatHistory=chatHistory, instruction=instruction)
    # Placeholder turn that the streamed chunks will progressively fill.
    chatHistory.append([newMsg, ""])
    stream = chatBotCompletionApiCall(prompt=prompt, temperature=temperature, max_tokens=max_tokens, model=model)
    for chunk in stream:
        if chunk is not None:
            chatHistory[-1][1] += chunk
            yield "", chatHistory
        else:
            # A None delta marks the end of the stream.
            return "", chatHistory
# helper function for image generation
def generateImageOpenAI(prompt, size="1024x1024", quality="standard", model="dall-e-3", n=1):
    '''
    Make an API call to OpenAI's DALL-E model and return the generated image in PIL format.

    Args:
        prompt: text description of the desired image.
        size: one of the DALL-E-supported sizes, e.g. "1024x1024".
        quality: "standard" or "hd".
        model: image model name (default "dall-e-3").
        n: number of images to request (only the first is returned).

    Returns:
        PIL.Image.Image: the first generated image.

    Raises:
        requests.HTTPError: if downloading the generated image fails.
    '''
    print("request sent")
    openAIresponse = openai.images.generate(model=model, prompt=prompt, size=size, quality=quality, n=n)
    # The API returns a temporary URL; download the image from it.
    image_url = openAIresponse.data[0].url
    imageResponse = requests.get(url=image_url)
    # ROBUSTNESS FIX: fail loudly on a bad download instead of handing
    # non-image bytes (e.g. an error page) to PIL, which raises cryptically.
    imageResponse.raise_for_status()
    # convert the raw bytes to PIL format
    image = Image.open(BytesIO(imageResponse.content))
    print("image received!")
    # return the result
    return image
# Define some components up front with render=False semantics in mind:
# they are instantiated here and .render()-ed inside the Blocks layout below.
# Chat model selector (UI aliases are mapped to API names in chatBotCompletionApiCall).
model = gr.Dropdown(
    choices=["GPT-3.5", "GPT-4"],
    value="GPT-3.5",
    multiselect=False,
    label="Model",
    info="Choose the model you want to chat with.\nGo easy on GPT-4: it costs 500 times more than GPT 3.5!"
)
# Editable system prompt, pre-filled with the module-level default.
instruction = gr.Textbox(
    value=SYSTEM_PROMPT,
    label="System instructions",
    lines=4,)
# Sampling temperature forwarded to the chat API.
temperature = gr.Slider(
    minimum=0,
    maximum=2,
    step=0.1,
    value=0.7,
    label="Temperature",
    info="The higher, the more random the results will be"
)
# Completion-length cap forwarded to the chat API.
max_token = gr.Slider(
    minimum=64,
    maximum=2048,
    step=64,
    value=1024,
    label="Max Token",
    info="Maximum number of token the model will take into consideration"
)
# Components for Image generator: created unrendered so the ClearButton can
# reference it before it appears in the layout; rendered in the image tab.
genImage = gr.Image(
    label="Result",
    type="pil",
    render = False
) # Box for generated image
# def helper function to update and render the component
def generateAndRender(prompt: str, size, quality,):
    '''
    Send the request to the API endpoint and update the components. Outputs:
    - oldPrompt
    - genImage
    - promptBox
    '''
    # Ask the API for the picture first.
    generated = generateImageOpenAI(prompt, size, quality)
    # Rebuild the three components with fresh values/state.
    oldPrompt = gr.Textbox(value=prompt, label="Your prompt", render=True)
    genImage = gr.Image(value=generated, label="Result", type="pil", render=True)
    promptBox = gr.Textbox(label="Enter your prompt", lines=3)
    # Hand the updated components back to Gradio.
    return oldPrompt, genImage, promptBox
# Build the app. gr.Blocks gives explicit layout control; the `app` handle
# is launched at the bottom of the file behind basic auth.
with gr.Blocks(theme='Insuz/Mocha', css="style.css") as app:
    # First tab: chatbot
    with gr.Tab(label="ChatBot"):
        with gr.Row():
            # Main (left) column: title, chat window, message box, buttons.
            with gr.Column(scale = 8, elem_classes=["float-left"]):
                gr.Markdown("# Private GPT")
                gr.Markdown("This chatbot is powered by the openAI GPT series.\
                The default model is `GPT-3.5`, but `GPT-4` can be selected in the advanced options.\
                \nAs it uses the openAI API, user data is not used to train openAI models (see their official [website](https://help.openai.com/en/articles/5722486-how-your-data-is-used-to-improve-model-performance)).")
                chatbot = gr.Chatbot() # Associated variable: chatHistory
                msg = gr.Textbox(label="Message")
                with gr.Row():
                    with gr.Column(scale=4):
                        Button = gr.Button(value="Submit")
                    with gr.Column(scale=4):
                        clearButton = gr.ClearButton([chatbot, msg])
                # Pressing Enter in the textbox streams the answer into the chat.
                msg.submit(
                    fn=streamResponseChatBot,
                    inputs=[msg, chatbot, instruction, temperature, max_token, model],
                    outputs=[msg, chatbot]
                )
                # The Submit button triggers the same streaming handler.
                Button.click(
                    fn=streamResponseChatBot,
                    inputs=[msg, chatbot, instruction, temperature, max_token, model],
                    outputs=[msg, chatbot]
                )
            # Side (right) column: render the option components built above.
            with gr.Column(scale = 1, elem_classes=["float-right"]):
                with gr.Accordion(label="Advanced options", open=True):
                    model.render()
                    instruction.render()
                    temperature.render()
                    max_token.render()
    # Second Tab: image generation
    with gr.Tab(label="Image Creation"):
        # Title and description
        gr.Markdown("# Image generation")
        gr.Markdown("Powered by OpenAI's `DALL-E 3` Model under the hood.\n\
        You can change the `size` as well as the `quality`.")
        # First row: prompt
        with gr.Row():
            prompt = gr.Textbox(label="Enter your prompt", lines=3)
        # Second row: allow for advanced customization
        with gr.Accordion(label="Advanced option", open=False): # should not be visible by default
            # Three columns of advanced options
            with gr.Row():
                with gr.Column():
                    size = gr.Dropdown(
                        choices = ["1024x1024", "1024x1792","1792x1024"],
                        value = "1024x1024",
                        info = "Choose the size of the image",
                    )
                with gr.Column():
                    quality = gr.Dropdown(
                        choices = ["standard", "hd"],
                        value = "standard",
                        info="Define the quality of the image",
                    )
                # NOTE(review): these two components are never rendered or
                # wired to an event, and this `model` assignment shadows the
                # chat-model Dropdown name (the chat events above already hold
                # a reference, so they still work) — confirm before removing.
                model = gr.Text(value="dall-e-3", render=False)
                n = gr.Text(value=1, render=False)
        # Button
        # Submit and clear
        with gr.Row():
            with gr.Column():
                button = gr.Button(value="submit", min_width=30, )
            with gr.Column():
                clearImageButton = gr.ClearButton(components=[prompt, genImage])
        # Generated Image (component was created earlier with render=False)
        genImage.render()
        # Not rendered - logic of the app: button click and Enter in the
        # prompt box both call the image-generation API.
        button.click(
            fn=generateImageOpenAI,
            inputs=[prompt, size, quality],
            outputs=[genImage],
        )
        prompt.submit(
            fn=generateImageOpenAI,
            inputs=[prompt, size, quality],
            outputs=[genImage],
        )
# Close any previously running Gradio instances before launching this one.
gr.close_all()
# queue() is required so generator handlers (streaming) work; auth shows
# Gradio's built-in login screen using the env-provided credentials.
app.queue().launch(auth=(AUTH_USERNAME, AUTH_PASSWORD))
# app.queue().launch(share=False)