sylvainHellin committed on
Commit
18b090f
·
verified ·
1 Parent(s): ee999a7

Add app and requirements files

Browse files
Files changed (2) hide show
  1. app.py +164 -0
  2. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# %% [markdown]
# # ChatBot app with Gradio

# %%
import os

from dotenv import load_dotenv, find_dotenv
import gradio as gr
import openai

# Load the secrets file; fail fast at startup if it cannot be found.
_ = load_dotenv(find_dotenv(filename="secrets.env", raise_error_if_not_found=True))

# Global configuration read from the environment.
ROOT_DIR = os.environ["ROOT_DIR"]
AUTH_USERNAME = os.environ["AUTH_USERNAME"]
AUTH_PASSWORD = os.environ["AUTH_PASSWORD"]

# Credentials for the OpenAI client.
openai.api_key = os.environ["OPENAI_API_KEY"]

SYSTEM_PROMPT = "You are a helpful assistant and do your best to answer the user's questions.\
You do not make up answers."

# %% [markdown]
# ## Define and test the API calls

# %%
# define the function that will make the API calls
def APIcall(prompt: list, temperature=0.7, max_tokens=1024, model="GPT-3.5", stream=True):
    """Call the OpenAI chat-completions API and yield the answer text.

    Args:
        prompt: list of message dicts ({"role": ..., "content": ...}) as built
            by ``formatPrompt``.
        temperature: sampling temperature passed to the API.
        max_tokens: completion-length cap passed to the API.
        model: UI label, either "GPT-3.5" or "GPT-4"; mapped to the real
            API model name below.
        stream: when True, yield incremental text deltas as they arrive;
            when False, yield the complete answer once.

    Yields:
        str or None: text fragments (streaming deltas may be None, e.g. the
        initial role-only chunk), or the full completion when ``stream`` is
        False.
    """
    # Map the UI label to the actual API model identifier.
    if model == "GPT-3.5":
        model = "gpt-3.5-turbo"
    else:
        model = "gpt-4"

    # make the API call with the given parameters
    response = openai.chat.completions.create(
        model=model,
        messages=prompt,
        max_tokens=max_tokens,
        temperature=temperature,
        stream=stream,
    )

    if stream:
        # Streaming: each chunk carries an incremental delta.
        for chunk in response:
            yield chunk.choices[0].delta.content
    else:
        # BUG FIX: the original assigned the completed text to a local
        # variable and never returned or yielded it, so stream=False
        # produced no output at all (the function is a generator).
        yield response.choices[0].message.content


# %% [markdown]
# ## Building the ChatBot with Gradio

# %%
# Helper function: format the prompt to include history
def formatPrompt(newMsg: str, chatHistory, instruction):
    """Build the full message list for the API from the system instruction,
    the prior conversation turns, and the new user message.

    Args:
        newMsg: the user message awaiting an answer.
        chatHistory: iterable of (user message, assistant message) pairs.
        instruction: system prompt placed first in the list.

    Returns:
        list of {"role": ..., "content": ...} dicts in API order.
    """
    # System prompt always comes first.
    messages = [{"role": "system", "content": instruction}]

    # Replay each past turn as a user/assistant pair.
    for userMsg, assistantMsg in chatHistory:
        messages.append({"role": "user", "content": userMsg})
        messages.append({"role": "assistant", "content": assistantMsg})

    # Finally, the message that still needs an answer.
    messages.append({"role": "user", "content": newMsg})
    return messages

# def the response function (to get the answer as one block after generation)
def response(newMsg: str, chatHistory, instruction, temperature, max_tokens, model, stream=False):
    """Answer ``newMsg`` in one block and append the turn to the history.

    Args:
        newMsg: user message to answer.
        chatHistory: gradio Chatbot history, list of [user, assistant] pairs.
        instruction: system prompt.
        temperature, max_tokens, model: forwarded to ``APIcall``.
        stream: unused here; kept for signature compatibility with the UI wiring.

    Returns:
        ("", updated chatHistory) so gradio clears the textbox and refreshes
        the chat display.
    """
    prompt = formatPrompt(newMsg=newMsg, chatHistory=chatHistory, instruction=instruction)
    # BUG FIX: the original appended the raw generator object returned by
    # APIcall into the history instead of the generated text. Drain the
    # generator and join the text deltas (skipping None deltas such as the
    # role-only first chunk).
    completion = "".join(
        chunk
        for chunk in APIcall(prompt=prompt, temperature=temperature, max_tokens=max_tokens, model=model)
        if chunk is not None
    )
    chatHistory.append([newMsg, completion])
    return "", chatHistory

# def the streamResponse function, to stream the results as they are generated
def streamResponse(newMsg: str, chatHistory, instruction, temperature, max_tokens, model, stream=True):
    """Answer ``newMsg`` incrementally, yielding the growing history so the
    gradio Chatbot updates live.

    Args:
        newMsg: user message to answer.
        chatHistory: gradio Chatbot history, list of [user, assistant] pairs.
        instruction: system prompt.
        temperature, max_tokens, model: forwarded to ``APIcall``.
        stream: unused here; kept for signature compatibility with the UI wiring.

    Yields:
        ("", updated chatHistory) after every received text delta.
    """
    # BUG FIX: build the prompt BEFORE appending the new turn to the history;
    # the original appended first, so formatPrompt included newMsg twice
    # (once from the history, once as the final message).
    prompt = formatPrompt(newMsg=newMsg, chatHistory=chatHistory, instruction=instruction)
    chatHistory.append([newMsg, ""])
    for chunk in APIcall(prompt=prompt, temperature=temperature, max_tokens=max_tokens, model=model):
        # BUG FIX: skip None deltas instead of returning. OpenAI streams start
        # with a role-only chunk whose delta.content is None, so the original
        # `return` on the first None chunk could end the stream immediately.
        if chunk is not None:
            chatHistory[-1][1] += chunk
            yield "", chatHistory

# Build the app
with gr.Blocks(theme='Insuz/Mocha') as app:
    # Page header and short explanation of the privacy model.
    gr.Markdown("# Private GPT")
    gr.Markdown("This chatbot is powered by the openAI GPT series.\
    \nThe default model is `GPT-3.5`, but `GPT-4` can be selected in the advanced options.\
    \nAs it uses the openAI API, user data is not used to train openAI models.")

    # Main chat surface and the message entry box.
    chatbot = gr.Chatbot()  # Associated variable: chatHistory
    msg = gr.Textbox(label="Message")

    # Advanced options are collapsed by default.
    with gr.Accordion(label="Advanced options", open=False):
        model = gr.Dropdown(
            choices=["GPT-3.5", "GPT-4"],
            value="GPT-3.5",
            multiselect=False,
            label="Model",
            info="Choose the model you want to chat with",
        )
        instruction = gr.Textbox(
            value=SYSTEM_PROMPT,
            label="System instructions",
            lines=2,
        )
        temperature = gr.Slider(
            minimum=0,
            maximum=2,
            step=0.1,
            value=0.7,
            label="Temperature",
            info="The higher, the more random the results will be",
        )
        max_token = gr.Slider(
            minimum=64,
            maximum=2048,
            step=64,
            value=1024,
            label="Max Token",
            info="Maximum number of token the model will take into consideration",
        )

    submit_button = gr.Button(value="Submit")

    # Both pressing Enter in the textbox and clicking the button trigger
    # the same streaming handler.
    stream_inputs = [msg, chatbot, instruction, temperature, max_token, model]
    msg.submit(fn=streamResponse, inputs=stream_inputs, outputs=[msg, chatbot])
    submit_button.click(fn=streamResponse, inputs=stream_inputs, outputs=[msg, chatbot])

gr.close_all()
app.queue().launch(auth=(AUTH_USERNAME, AUTH_PASSWORD))

# %%

requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ gradio == 4.14.1
2
+ openai == 1.3.6
3
+ python-dotenv