TheBlueberry-AI committed on
Commit
230b211
·
1 Parent(s): 9954049

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +216 -0
app.py ADDED
@@ -0,0 +1,216 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import time
3
+ from typing import List, Tuple, Optional, Dict
4
+
5
+ import google.generativeai as genai
6
+ import gradio as gr
7
+ from PIL import Image
8
+
9
+ print("google-generativeai:", genai.__version__)
10
+
11
+ GGL_API_KEY = os.environ.get("GGL_API_KEY")
12
+ gglusr = os.environ.get("GGL_USR")
13
+ gglpwd = os.environ.get("GGL_PWD")
14
+
15
+ TITLE = """<h2 align="center">🫐Blueberry-AI Buruburu Chat🫐</h2>"""
16
+ IMAGE_WIDTH = 256
17
+
18
+
19
+ def preprocess_stop_sequences(stop_sequences: str) -> Optional[List[str]]:
20
+ if not stop_sequences:
21
+ return None
22
+ return [sequence.strip() for sequence in stop_sequences.split(",")]
23
+
24
+
25
+ def preprocess_image(image: Image.Image) -> Optional[Image.Image]:
26
+ image_height = int(image.height * IMAGE_WIDTH / image.width)
27
+ return image.resize((IMAGE_WIDTH, image_height))
28
+
29
+
30
+ def preprocess_chat_history(
31
+ history: List[Tuple[Optional[str], Optional[str]]]
32
+ ) -> List[Dict[str, List[str]]]:
33
+ messages = []
34
+ for user_message, model_message in history:
35
+ if user_message is not None:
36
+ messages.append({'role': 'user', 'parts': [user_message]})
37
+ if model_message is not None:
38
+ messages.append({'role': 'model', 'parts': [model_message]})
39
+ return messages
40
+
41
+
42
+ def user(text_prompt: str, chatbot: List[Tuple[str, str]]):
43
+ return "", chatbot + [[text_prompt, None]]
44
+
45
+
46
+ def bot(
47
+ image_prompt: Optional[Image.Image],
48
+ temperature: float,
49
+ max_output_tokens: int,
50
+ stop_sequences: str,
51
+ top_k: int,
52
+ top_p: float,
53
+ chatbot: List[Tuple[str, str]]
54
+ ):
55
+
56
+ text_prompt = chatbot[-1][0]
57
+ genai.configure(api_key=GGL_API_KEY)
58
+ generation_config = genai.types.GenerationConfig(
59
+ temperature=temperature,
60
+ max_output_tokens=max_output_tokens,
61
+ stop_sequences=preprocess_stop_sequences(stop_sequences=stop_sequences),
62
+ top_k=top_k,
63
+ top_p=top_p)
64
+
65
+ if image_prompt is None:
66
+ model = genai.GenerativeModel('gemini-pro')
67
+ response = model.generate_content(
68
+ preprocess_chat_history(chatbot),
69
+ stream=True,
70
+ generation_config=generation_config)
71
+ response.resolve()
72
+ else:
73
+ image_prompt = preprocess_image(image_prompt)
74
+ model = genai.GenerativeModel('gemini-pro-vision')
75
+ response = model.generate_content(
76
+ contents=[text_prompt, image_prompt],
77
+ stream=True,
78
+ generation_config=generation_config)
79
+ response.resolve()
80
+
81
+ # streaming effect
82
+ chatbot[-1][1] = ""
83
+ for chunk in response:
84
+ for i in range(0, len(chunk.text), 10):
85
+ section = chunk.text[i:i + 10]
86
+ chatbot[-1][1] += section
87
+ time.sleep(0.01)
88
+ yield chatbot
89
+
90
+
91
+ image_prompt_component = gr.Image(type="pil", label="Image", scale=1, height=350)
92
+ chatbot_component = gr.Chatbot(
93
+ label='Blueberry-AI',
94
+ bubble_full_width=False,
95
+ avatar_images=("./usr.png", "./bot.png"),
96
+ likeable=True,
97
+ show_copy_button=True,
98
+ scale=5,
99
+ height=350
100
+ )
101
+ text_prompt_component = gr.Textbox(
102
+ placeholder="Ask me anything and press Enter",
103
+ show_label=False,
104
+ scale=5
105
+ )
106
+ run_button_component = gr.Button( scale=1)
107
+ temperature_component = gr.Slider(
108
+ minimum=0,
109
+ maximum=1.0,
110
+ value=0.4,
111
+ step=0.05,
112
+ label="Temperature",
113
+ info=(
114
+ "Temperature controls the degree of randomness in token selection. Lower "
115
+ "temperatures are good for prompts that expect a true or correct response, "
116
+ "while higher temperatures can lead to more diverse or unexpected results. "
117
+ ))
118
+ max_output_tokens_component = gr.Slider(
119
+ minimum=1,
120
+ maximum=2048,
121
+ value=1024,
122
+ step=1,
123
+ label="Token limit",
124
+ info=(
125
+ "Token limit determines the maximum amount of text output from one prompt. A "
126
+ "token is approximately four characters. The default value is 2048."
127
+ ))
128
+ stop_sequences_component = gr.Textbox(
129
+ label="Add stop sequence",
130
+ value="",
131
+ type="text",
132
+ placeholder="STOP, END",
133
+ info=(
134
+ "A stop sequence is a series of characters (including spaces) that stops "
135
+ "response generation if the model encounters it. The sequence is not included "
136
+ "as part of the response. You can add up to five stop sequences."
137
+ ))
138
+ top_k_component = gr.Slider(
139
+ minimum=1,
140
+ maximum=40,
141
+ value=32,
142
+ step=1,
143
+ label="Top-K",
144
+ info=(
145
+ "Top-k changes how the model selects tokens for output. A top-k of 1 means the "
146
+ "selected token is the most probable among all tokens in the model’s "
147
+ "vocabulary (also called greedy decoding), while a top-k of 3 means that the "
148
+ "next token is selected from among the 3 most probable tokens (using "
149
+ "temperature)."
150
+ ))
151
+ top_p_component = gr.Slider(
152
+ minimum=0,
153
+ maximum=1,
154
+ value=1,
155
+ step=0.01,
156
+ label="Top-P",
157
+ info=(
158
+ "Top-p changes how the model selects tokens for output. Tokens are selected "
159
+ "from most probable to least until the sum of their probabilities equals the "
160
+ "top-p value. For example, if tokens A, B, and C have a probability of .3, .2, "
161
+ "and .1 and the top-p value is .5, then the model will select either A or B as "
162
+ "the next token (using temperature). "
163
+ ))
164
+
165
+ user_inputs = [
166
+ text_prompt_component,
167
+ chatbot_component
168
+ ]
169
+
170
+ bot_inputs = [
171
+ image_prompt_component,
172
+ temperature_component,
173
+ max_output_tokens_component,
174
+ stop_sequences_component,
175
+ top_k_component,
176
+ top_p_component,
177
+ chatbot_component
178
+ ]
179
+
180
+ with gr.Blocks() as demo:
181
+ gr.HTML(TITLE)
182
+ with gr.Column():
183
+ with gr.Row():
184
+ image_prompt_component.render()
185
+ chatbot_component.render()
186
+
187
+ with gr.Row():
188
+ text_prompt_component.render()
189
+ run_button_component.render()
190
+ with gr.Accordion("Parameters", open=False):
191
+ temperature_component.render()
192
+ max_output_tokens_component.render()
193
+ stop_sequences_component.render()
194
+ with gr.Accordion("Advanced", open=False):
195
+ top_k_component.render()
196
+ top_p_component.render()
197
+
198
+ run_button_component.click(
199
+ fn=user,
200
+ inputs=user_inputs,
201
+ outputs=[text_prompt_component, chatbot_component],
202
+ queue=False
203
+ ).then(
204
+ fn=bot, inputs=bot_inputs, outputs=[chatbot_component],
205
+ )
206
+
207
+ text_prompt_component.submit(
208
+ fn=user,
209
+ inputs=user_inputs,
210
+ outputs=[text_prompt_component, chatbot_component],
211
+ queue=False
212
+ ).then(
213
+ fn=bot, inputs=bot_inputs, outputs=[chatbot_component],
214
+ )
215
+
216
+ demo.queue(max_size=99).launch(auth=(gglusr, gglpwd),show_api=False, debug=False, show_error=True)