yyasso committed on
Commit
2f80e30
·
verified ·
1 Parent(s): 60d04ef

Update ReffidGPT-1.0-speedTrubo.app.py

Browse files
Files changed (1) hide show
  1. ReffidGPT-1.0-speedTrubo.app.py +229 -0
ReffidGPT-1.0-speedTrubo.app.py CHANGED
@@ -0,0 +1,229 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import time
3
+ import uuid
4
+ from typing import List, Tuple, Optional, Dict, Union
5
+
6
+ import google.generativeai as genai
7
+ import gradio as gr
8
+ from PIL import Image
9
+
10
# Report the installed client-library version at startup (handy when debugging API issues).
print("google-generativeai:", genai.__version__)

# Read the Gemini API key from the environment instead of hard-coding it in source.
# The original hard-coded placeholder ("your_gemini_api") was truthy, which silently
# defeated the "is the key set?" guard in bot(); an empty default keeps that guard
# meaningful and keeps secrets out of the repository.
GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY", "")

TITLE = """<h1 align="center">ReffidGPT Chat</h1>"""

# (user avatar, bot avatar) for the Gradio chatbot component.
AVATAR_IMAGES = (
    None,
    "https://cdn-icons-png.flaticon.com/512/17115/17115944.png"
)

IMAGE_CACHE_DIRECTORY = "/tmp"  # uploaded images are cached here as JPEGs
IMAGE_WIDTH = 511               # uploaded images are scaled to this width

# One history entry is (user message, bot message); the user part may be a
# (file_path,) tuple for an uploaded image, a plain string, or None.
CHAT_HISTORY = List[Tuple[Optional[Union[Tuple[str], str]], Optional[str]]]

SYSTEM_PROMPT = "You are ReffidGPT, a helpful assistant. Respond in a friendly and informative manner. Your Name ReffidGPT & Your Creator Is Groqcin Technologies Inc."
26
+
27
def preprocess_stop_sequences(stop_sequences: str) -> Optional[List[str]]:
    """Parse a comma-separated string into a list of stop sequences.

    Returns None for an empty input so the generation config can omit the
    parameter entirely. Bug fix: blank entries (e.g. from "a,,b" or a
    trailing comma) previously produced empty-string stop sequences; they
    are now dropped, and an all-blank input also yields None.
    """
    if not stop_sequences:
        return None
    sequences = [seq.strip() for seq in stop_sequences.split(",") if seq.strip()]
    return sequences or None
31
+
32
def preprocess_image(image: Image.Image) -> Optional[Image.Image]:
    """Scale *image* to IMAGE_WIDTH pixels wide, preserving its aspect ratio."""
    scaled_height = int(image.height * IMAGE_WIDTH / image.width)
    return image.resize((IMAGE_WIDTH, scaled_height))
35
+
36
def cache_pil_image(image: Image.Image) -> str:
    """Persist *image* as a uniquely-named JPEG in IMAGE_CACHE_DIRECTORY and return its path."""
    os.makedirs(IMAGE_CACHE_DIRECTORY, exist_ok=True)
    target_path = os.path.join(IMAGE_CACHE_DIRECTORY, f"{uuid.uuid4()}.jpeg")
    image.save(target_path, "JPEG")
    return target_path
42
+
43
def preprocess_chat_history(
    history: "CHAT_HISTORY"
) -> List[Dict[str, Union[str, List[str]]]]:
    """Convert Gradio chat history into the Gemini messages format.

    Image entries (tuple user messages) are skipped here — images are sent
    separately by bot(). Bug fix: model replies are now tagged with the
    'model' role; the original tagged both sides as 'user', which collapses
    the turn structure the API relies on for multi-turn conversations.
    """
    messages = []
    for user_message, model_message in history:
        if isinstance(user_message, tuple):
            pass  # image upload: handled via the files path, not as text
        elif user_message is not None:
            messages.append({'role': 'user', 'parts': [user_message]})
        if model_message is not None:
            messages.append({'role': 'model', 'parts': [model_message]})
    return messages
55
+
56
def upload(files: Optional[List[str]], chatbot: "CHAT_HISTORY") -> "CHAT_HISTORY":
    """Resize, cache, and append each uploaded image to the chat history."""
    for file_path in files:
        pil_image = Image.open(file_path).convert('RGB')
        cached_path = cache_pil_image(preprocess_image(pil_image))
        chatbot.append(((cached_path,), None))
    return chatbot
63
+
64
def user(text_prompt: str, chatbot: "CHAT_HISTORY"):
    """Record a submitted prompt in the history and clear the textbox.

    Empty submissions are ignored; the textbox is cleared either way.
    """
    if text_prompt:
        chatbot.append((text_prompt, None))
    return "", chatbot
68
+
69
def bot(
    files: Optional[List[str]],
    temperature: float,
    max_output_tokens: int,
    stop_sequences: str,
    top_k: int,
    top_p: float,
    chatbot: "CHAT_HISTORY"
):
    """Stream a Gemini reply for the latest chat turn.

    Yields the updated history repeatedly so Gradio renders the reply as it
    streams in. With uploaded files, the images (plus the last text prompt,
    if any) are sent as a one-shot multimodal request; otherwise the full
    text history, prefixed with the system prompt, is sent.

    Raises:
        ValueError: if GOOGLE_API_KEY is unset.
    """
    if not chatbot:
        return

    if not GOOGLE_API_KEY:
        raise ValueError(
            "GOOGLE_API_KEY is not set. "
            "Please set it in the code."
        )

    genai.configure(api_key=GOOGLE_API_KEY)
    generation_config = genai.types.GenerationConfig(
        temperature=temperature,
        max_output_tokens=max_output_tokens,
        stop_sequences=preprocess_stop_sequences(stop_sequences=stop_sequences),
        top_k=top_k,
        top_p=top_p)

    system_prompt_message = [{'role': 'user', 'parts': [SYSTEM_PROMPT]}]
    model = genai.GenerativeModel('gemini-1.5-flash-8b')

    if files:
        # Multimodal turn: include the last history entry only when it is text.
        text_prompt = [chatbot[-1][0]] \
            if chatbot[-1][0] and isinstance(chatbot[-1][0], str) \
            else []
        image_prompt = [Image.open(file).convert('RGB') for file in files]
        response = model.generate_content(
            text_prompt + image_prompt,
            stream=True,
            generation_config=generation_config)
    else:
        messages = system_prompt_message + preprocess_chat_history(chatbot)
        response = model.generate_content(
            messages,
            stream=True,
            generation_config=generation_config)

    # Bug fix: history entries may be tuples (user() appends tuples), which do
    # not support item assignment; the original `chatbot[-1][1] = ""` /
    # `chatbot[-1][1] += section` would raise TypeError. Rebuild the last
    # entry instead of mutating it in place.
    reply = ""
    for chunk in response:
        # Re-emit in 10-character sections with a short pause for a typing effect.
        for i in range(0, len(chunk.text), 10):
            reply += chunk.text[i:i + 10]
            chatbot[-1] = (chatbot[-1][0], reply)
            time.sleep(0.01)
            yield chatbot
123
+
124
# --- UI components (constructed up front, rendered later inside the layout) ---

chatbot_component = gr.Chatbot(
    label='ReffidGPT',
    bubble_full_width=False,
    avatar_images=AVATAR_IMAGES,
    scale=2,
    height=400,
)
text_prompt_component = gr.Textbox(
    placeholder="Hey ReffidGPT! [press Enter or Send]",
    show_label=False,
    autofocus=True,
    scale=8,
)
upload_button_component = gr.UploadButton(
    label="Upload Images",
    file_count="multiple",
    file_types=["image"],
    scale=1,
)
run_button_component = gr.Button(value="Run", variant="primary", scale=1)

# Sampling controls.
temperature_component = gr.Slider(
    minimum=0, maximum=1.0, value=0.4, step=0.05, label="Temperature",
)
max_output_tokens_component = gr.Slider(
    minimum=1, maximum=2048, value=1024, step=1, label="Token limit",
)
stop_sequences_component = gr.Textbox(
    label="Add stop sequence", value="", type="text", placeholder="STOP, END",
)
top_k_component = gr.Slider(
    minimum=1, maximum=40, value=32, step=1, label="Top-K",
)
top_p_component = gr.Slider(
    minimum=0, maximum=1, value=1, step=0.01, label="Top-P",
)

# Input lists wired to the user() and bot() callbacks.
user_inputs = [
    text_prompt_component,
    chatbot_component,
]

bot_inputs = [
    upload_button_component,
    temperature_component,
    max_output_tokens_component,
    stop_sequences_component,
    top_k_component,
    top_p_component,
    chatbot_component,
]
187
+
188
with gr.Blocks() as demo:
    gr.HTML(TITLE)
    with gr.Column():
        chatbot_component.render()
        with gr.Row():
            text_prompt_component.render()
            upload_button_component.render()
            run_button_component.render()
        with gr.Accordion("Parameters", open=False):
            temperature_component.render()
            max_output_tokens_component.render()
            stop_sequences_component.render()
            with gr.Accordion("Advanced", open=False):
                top_k_component.render()
                top_p_component.render()

    # The Run button and pressing Enter share the same two-step flow:
    # user() records the prompt, then bot() streams the model reply.
    for trigger in (run_button_component.click, text_prompt_component.submit):
        trigger(
            fn=user,
            inputs=user_inputs,
            outputs=[text_prompt_component, chatbot_component],
            queue=False,
        ).then(
            fn=bot, inputs=bot_inputs, outputs=[chatbot_component],
        )

    upload_button_component.upload(
        fn=upload,
        inputs=[upload_button_component, chatbot_component],
        outputs=[chatbot_component],
        queue=False,
    )

demo.queue(max_size=99).launch(debug=False, show_error=True)