yyasso committed on
Commit
1b87f37
·
verified ·
1 Parent(s): 7ce48cc

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +1 -231
README.md CHANGED
@@ -101,237 +101,7 @@ Use the code below to get started with the model.
101
  #### Preprocessing [optional]
102
 
103
  [More Information Needed]
104
- ###### code:
105
-
106
- import os
107
- import time
108
- import uuid
109
- from typing import List, Tuple, Optional, Dict, Union
110
-
111
- import google.generativeai as genai
112
- import gradio as gr
113
- from PIL import Image
114
-
115
- print("google-generativeai:", genai.__version__)
116
-
117
- GOOGLE_API_KEY = "your_gemini_api" # ضع مفتاح API هنا مباشرة
118
-
119
- TITLE = """<h1 align="center">ReffidGPT Chat</h1>"""
120
-
121
- AVATAR_IMAGES = (
122
- None,
123
- "https://cdn-icons-png.flaticon.com/512/17115/17115944.png"
124
- )
125
-
126
- IMAGE_CACHE_DIRECTORY = "/tmp"
127
- IMAGE_WIDTH = 511
128
- CHAT_HISTORY = List[Tuple[Optional[Union[Tuple[str], str]], Optional[str]]]
129
-
130
- SYSTEM_PROMPT = "You are ReffidGPT, a helpful assistant. Respond in a friendly and informative manner. Your Name ReffidGPT & Your Creator Is Groqcin Technologies Inc."
131
-
132
- def preprocess_stop_sequences(stop_sequences: str) -> Optional[List[str]]:
133
- if not stop_sequences:
134
- return None
135
- return [sequence.strip() for sequence in stop_sequences.split(",")]
136
-
137
- def preprocess_image(image: Image.Image) -> Optional[Image.Image]:
138
- image_height = int(image.height * IMAGE_WIDTH / image.width)
139
- return image.resize((IMAGE_WIDTH, image_height))
140
-
141
- def cache_pil_image(image: Image.Image) -> str:
142
- image_filename = f"{uuid.uuid4()}.jpeg"
143
- os.makedirs(IMAGE_CACHE_DIRECTORY, exist_ok=True)
144
- image_path = os.path.join(IMAGE_CACHE_DIRECTORY, image_filename)
145
- image.save(image_path, "JPEG")
146
- return image_path
147
-
148
- def preprocess_chat_history(
149
- history: CHAT_HISTORY
150
- ) -> List[Dict[str, Union[str, List[str]]]]:
151
- messages = []
152
- for user_message, model_message in history:
153
- if isinstance(user_message, tuple):
154
- pass
155
- elif user_message is not None:
156
- messages.append({'role': 'user', 'parts': [user_message]})
157
- if model_message is not None:
158
- messages.append({'role': 'user', 'parts': [model_message]})
159
- return messages
160
-
161
- def upload(files: Optional[List[str]], chatbot: CHAT_HISTORY) -> CHAT_HISTORY:
162
- for file in files:
163
- image = Image.open(file).convert('RGB')
164
- image = preprocess_image(image)
165
- image_path = cache_pil_image(image)
166
- chatbot.append(((image_path,), None))
167
- return chatbot
168
-
169
- def user(text_prompt: str, chatbot: CHAT_HISTORY):
170
- if text_prompt:
171
- chatbot.append((text_prompt, None))
172
- return "", chatbot
173
-
174
- def bot(
175
- files: Optional[List[str]],
176
- temperature: float,
177
- max_output_tokens: int,
178
- stop_sequences: str,
179
- top_k: int,
180
- top_p: float,
181
- chatbot: CHAT_HISTORY
182
- ):
183
- if len(chatbot) == 0:
184
- return chatbot
185
-
186
- if not GOOGLE_API_KEY:
187
- raise ValueError(
188
- "GOOGLE_API_KEY is not set. "
189
- "Please set it in the code."
190
- )
191
-
192
- genai.configure(api_key=GOOGLE_API_KEY)
193
- generation_config = genai.types.GenerationConfig(
194
- temperature=temperature,
195
- max_output_tokens=max_output_tokens,
196
- stop_sequences=preprocess_stop_sequences(stop_sequences=stop_sequences),
197
- top_k=top_k,
198
- top_p=top_p)
199
-
200
- system_prompt_message = [{'role': 'user', 'parts': [SYSTEM_PROMPT]}]
201
-
202
- if files:
203
- text_prompt = [chatbot[-1][0]] \
204
- if chatbot[-1][0] and isinstance(chatbot[-1][0], str) \
205
- else []
206
- image_prompt = [Image.open(file).convert('RGB') for file in files]
207
- model = genai.GenerativeModel('gemini-1.5-flash-8b')
208
- response = model.generate_content(
209
- text_prompt + image_prompt,
210
- stream=True,
211
- generation_config=generation_config)
212
- else:
213
- messages = preprocess_chat_history(chatbot)
214
- messages = system_prompt_message + messages
215
- model = genai.GenerativeModel('gemini-1.5-flash-8b')
216
- response = model.generate_content(
217
- messages,
218
- stream=True,
219
- generation_config=generation_config)
220
-
221
- chatbot[-1][1] = ""
222
- for chunk in response:
223
- for i in range(0, len(chunk.text), 10):
224
- section = chunk.text[i:i + 10]
225
- chatbot[-1][1] += section
226
- time.sleep(0.01)
227
- yield chatbot
228
-
229
- chatbot_component = gr.Chatbot(
230
- label='ReffidGPT',
231
- bubble_full_width=False,
232
- avatar_images=AVATAR_IMAGES,
233
- scale=2,
234
- height=400
235
- )
236
- text_prompt_component = gr.Textbox(
237
- placeholder="Hey ReffidGPT! [press Enter or Send]", show_label=False, autofocus=True, scale=8
238
- )
239
- upload_button_component = gr.UploadButton(
240
- label="Upload Images", file_count="multiple", file_types=["image"], scale=1
241
- )
242
- run_button_component = gr.Button(value="Run", variant="primary", scale=1)
243
- temperature_component = gr.Slider(
244
- minimum=0,
245
- maximum=1.0,
246
- value=0.4,
247
- step=0.05,
248
- label="Temperature",
249
- )
250
- max_output_tokens_component = gr.Slider(
251
- minimum=1,
252
- maximum=2048,
253
- value=1024,
254
- step=1,
255
- label="Token limit",
256
- )
257
- stop_sequences_component = gr.Textbox(
258
- label="Add stop sequence",
259
- value="",
260
- type="text",
261
- placeholder="STOP, END",
262
- )
263
- top_k_component = gr.Slider(
264
- minimum=1,
265
- maximum=40,
266
- value=32,
267
- step=1,
268
- label="Top-K",
269
- )
270
- top_p_component = gr.Slider(
271
- minimum=0,
272
- maximum=1,
273
- value=1,
274
- step=0.01,
275
- label="Top-P",
276
- )
277
-
278
- user_inputs = [
279
- text_prompt_component,
280
- chatbot_component
281
- ]
282
-
283
- bot_inputs = [
284
- upload_button_component,
285
- temperature_component,
286
- max_output_tokens_component,
287
- stop_sequences_component,
288
- top_k_component,
289
- top_p_component,
290
- chatbot_component
291
- ]
292
-
293
- with gr.Blocks() as demo:
294
- gr.HTML(TITLE)
295
- with gr.Column():
296
- chatbot_component.render()
297
- with gr.Row():
298
- text_prompt_component.render()
299
- upload_button_component.render()
300
- run_button_component.render()
301
- with gr.Accordion("Parameters", open=False):
302
- temperature_component.render()
303
- max_output_tokens_component.render()
304
- stop_sequences_component.render()
305
- with gr.Accordion("Advanced", open=False):
306
- top_k_component.render()
307
- top_p_component.render()
308
-
309
- run_button_component.click(
310
- fn=user,
311
- inputs=user_inputs,
312
- outputs=[text_prompt_component, chatbot_component],
313
- queue=False
314
- ).then(
315
- fn=bot, inputs=bot_inputs, outputs=[chatbot_component],
316
- )
317
-
318
- text_prompt_component.submit(
319
- fn=user,
320
- inputs=user_inputs,
321
- outputs=[text_prompt_component, chatbot_component],
322
- queue=False
323
- ).then(
324
- fn=bot, inputs=bot_inputs, outputs=[chatbot_component],
325
- )
326
-
327
- upload_button_component.upload(
328
- fn=upload,
329
- inputs=[upload_button_component, chatbot_component],
330
- outputs=[chatbot_component],
331
- queue=False
332
- )
333
-
334
- demo.queue(max_size=99).launch(debug=False, show_error=True)
335
 
336
  #### ______________________________________________________________________
337
 
 
101
  #### Preprocessing [optional]
102
 
103
  [More Information Needed]
104
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
105
 
106
  #### ______________________________________________________________________
107