NotASI committed
Commit 13d3b4a · 1 Parent(s): f157c84

Update stable branch

.gitignore CHANGED
@@ -1,2 +1,4 @@
 .env
-env/
+env/
+.python-version
+Google-AI-Playground/
Tabs/Gemini_Chabot_Stable.py CHANGED
@@ -6,7 +6,7 @@ from dotenv import load_dotenv
 load_dotenv()
 
 GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
-model_name = "gemini-1.5-flash-exp-0827"
+# model_name = "gemini-1.5-flash-exp-0827"
 
 TITLE = """<h1 align="center">🎮Chat with Gemini 1.5🔥</h1>"""
 NOTICE = """
@@ -15,10 +15,10 @@ NOTICE = """
 - Some features may not work as expected
 """
 ABOUT = """
-**Updates (2024-8-28)** 📈: Upgrade model to SOTA Gemini 1.5 Flash Experimental 0827
+**Updates (2024-9-25)** 📈: Upgrade model to SOTA Gemini 1.5 Flash Experimental 0924
 
 **Info** 📄:
-- Model: Gemini 1.5 Flash Experimental 0827
+- Model: Gemini 1.5 Flash Experimental 0924 and other Gemini models
 - Chat with Gemini 1.5 Flash model with images and documents
 """
 ERRORS = """
@@ -26,40 +26,99 @@ Known errors ⚠️:
 """
 FUTURE_IMPLEMENTATIONS = """
 Future features 🚀:
-- Select other Gemini / Gemma models
 - More tools such as web search
 """
 
+# genai.configure(api_key=GEMINI_API_KEY)
+# model = genai.GenerativeModel(
+#     model_name,
+#     safety_settings=[
+#         {
+#             "category": "HARM_CATEGORY_HARASSMENT",
+#             "threshold": "BLOCK_NONE"
+#         },
+#         {
+#             "category": "HARM_CATEGORY_HATE_SPEECH",
+#             "threshold": "BLOCK_NONE"
+#         },
+#         {
+#             "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+#             "threshold": "BLOCK_NONE"
+#         },
+#         {
+#             "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
+#             "threshold": "BLOCK_NONE"
+#         }
+#     ],
+#     generation_config={
+#         "temperature": 1,
+#         "top_p": 0.95,
+#         "top_k": 64,
+#         "max_output_tokens": 8192,
+#         "response_mime_type": "text/plain",
+#     }
+# )
+# chat = model.start_chat(history=[])
+
+# def clear_chat_history():
+#     chat.history = []
+
+# def undo_chat():
+#     last_send, last_received = chat.rewind()
+
+# def transform_history(history):
+#     new_history = []
+#     for user_msg, model_msg in history:
+#         new_history.append({"role": "user", "parts": [user_msg]})
+#         new_history.append({"role": "model", "parts": [model_msg]})
+#     return new_history
+
+# def chatbot_stable(message, history):
+#     message_text = message["text"]
+#     message_files = message["files"]
+#     if message_files:
+#         image_uris = [genai.upload_file(path=file_path["path"]) for file_path in message_files]
+#         message_content = [message_text] + image_uris
+#     else:
+#         message_content = [message_text]
+
+#     response = chat.send_message(message_content, stream=True)
+#     response.resolve()
+
+#     return response.text
+
+# gemini_chatbot_interface = gr.Chatbot(
+#     height=400,
+#     likeable=True,
+#     avatar_images=(
+#         None,
+#         "https://media.roboflow.com/spaces/gemini-icon.png"
+#     ),
+#     show_copy_button=True,
+#     show_share_button=True,
+#     render_markdown=True
+# )
+
+# clear_chat_button = gr.ClearButton(
+#     components=[gemini_chatbot_interface],
+#     value="🗑️ Clear"
+# )
+
+# undo_chat_button = gr.Button(
+#     value="↩️ Undo"
+# )
+
+# gemini_chatbot = gr.ChatInterface(
+#     fn=chatbot_stable,
+#     chatbot=gemini_chatbot_interface,
+#     multimodal=True,
+#     clear_btn=clear_chat_button,
+#     undo_btn=undo_chat_button
+# )
+
+model_list = ["gemini-1.5-pro", "gemini-1.5-pro-002", "gemini-1.5-pro-exp-0827", "gemini-1.5-flash", "gemini-1.5-flash-002", "gemini-1.5-flash-8b-exp-0924"]
+
 genai.configure(api_key=GEMINI_API_KEY)
-model = genai.GenerativeModel(
-    model_name,
-    safety_settings=[
-        {
-            "category": "HARM_CATEGORY_HARASSMENT",
-            "threshold": "BLOCK_NONE"
-        },
-        {
-            "category": "HARM_CATEGORY_HATE_SPEECH",
-            "threshold": "BLOCK_NONE"
-        },
-        {
-            "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
-            "threshold": "BLOCK_NONE"
-        },
-        {
-            "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
-            "threshold": "BLOCK_NONE"
-        }
-    ],
-    generation_config={
-        "temperature": 1,
-        "top_p": 0.95,
-        "top_k": 64,
-        "max_output_tokens": 8192,
-        "response_mime_type": "text/plain",
-    }
-)
-chat = model.start_chat(history=[])
 
 def clear_chat_history():
     chat.history = []
@@ -74,7 +133,38 @@ def transform_history(history):
         new_history.append({"role": "model", "parts": [model_msg]})
     return new_history
 
-def chatbot_stable(message, history):
+def chatbot_stable(message, history, model_id, system_message, max_tokens, temperature, top_p,):
+    global model, chat
+    model = genai.GenerativeModel(
+        model_name=model_id,
+        system_instruction=system_message,
+        safety_settings=[
+            {
+                "category": "HARM_CATEGORY_HARASSMENT",
+                "threshold": "BLOCK_NONE"
+            },
+            {
+                "category": "HARM_CATEGORY_HATE_SPEECH",
+                "threshold": "BLOCK_NONE"
+            },
+            {
+                "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+                "threshold": "BLOCK_NONE"
+            },
+            {
+                "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
+                "threshold": "BLOCK_NONE"
+            }
+        ],
+        generation_config={
+            "temperature": temperature,
+            "top_p": top_p,
+            "top_k": 64,
+            "max_output_tokens": max_tokens,
+            "response_mime_type": "text/plain",
+        }
+    )
+    chat = model.start_chat(history=[])
     message_text = message["text"]
     message_files = message["files"]
     if message_files:
@@ -89,7 +179,7 @@ def chatbot_stable(message, history):
     return response.text
 
 gemini_chatbot_interface = gr.Chatbot(
-    height=400,
+    height=500,
     likeable=True,
     avatar_images=(
         None,
@@ -114,5 +204,22 @@ gemini_chatbot = gr.ChatInterface(
     chatbot=gemini_chatbot_interface,
     multimodal=True,
     clear_btn=clear_chat_button,
-    undo_btn=undo_chat_button
+    undo_btn=undo_chat_button,
+    additional_inputs=[
+        gr.Dropdown(
+            choices=model_list,
+            value="gemini-1.5-flash-002",
+            label="Models"
+        ),
+        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
+        gr.Slider(minimum=1, maximum=8192, value=4096, step=1, label="Max new tokens"),
+        gr.Slider(minimum=0.1, maximum=1.0, value=1, step=0.1, label="Temperature"),
+        gr.Slider(
+            minimum=0.1,
+            maximum=1.0,
+            value=0.95,
+            step=0.05,
+            label="Top-p (nucleus sampling)",
+        ),
+    ],
 )
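Net effect of the Tabs/Gemini_Chabot_Stable.py change: the model is no longer built once at import time. `chatbot_stable` now receives the dropdown and slider values declared in `additional_inputs` and rebuilds the `GenerativeModel` (and a fresh chat session) on every message. A condensed sketch of that wiring, assuming `google-generativeai` and a Gradio 4.x `ChatInterface`; the safety settings and file uploads from the committed file are omitted here:

```python
# Condensed sketch of the per-request model construction above
# (assumes google-generativeai and Gradio 4.x; not the committed file verbatim).
import os
import google.generativeai as genai
import gradio as gr

genai.configure(api_key=os.getenv("GEMINI_API_KEY"))

MODELS = ["gemini-1.5-flash-002", "gemini-1.5-pro-002"]

def respond(message, history, model_id, system_message, max_tokens, temperature, top_p):
    # A fresh model/chat is built on every call so the dropdown and sliders
    # take effect immediately.
    model = genai.GenerativeModel(
        model_name=model_id,
        system_instruction=system_message,
        generation_config={
            "temperature": temperature,
            "top_p": top_p,
            "max_output_tokens": max_tokens,
        },
    )
    chat = model.start_chat(history=[])
    reply = chat.send_message(message["text"])  # multimodal messages arrive as a dict
    return reply.text

demo = gr.ChatInterface(
    fn=respond,
    multimodal=True,  # message is {"text": ..., "files": [...]}
    additional_inputs=[
        gr.Dropdown(choices=MODELS, value=MODELS[0], label="Models"),
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(1, 8192, value=4096, step=1, label="Max new tokens"),
        gr.Slider(0.1, 1.0, value=1.0, step=0.1, label="Temperature"),
        gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)

if __name__ == "__main__":
    demo.launch()
```

Because `start_chat(history=[])` runs on every call, earlier turns live only in the visible Gradio history, not in the model-side chat session.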
Tabs/Gemini_Chatbot_Preview.py CHANGED
@@ -6,7 +6,7 @@ from dotenv import load_dotenv
 load_dotenv()
 
 GEMINI_API_KEY_PREVIEW = os.getenv("GEMINI_API_KEY_PREVIEW")
-model_list = ["gemini-1.5-pro", "gemini-1.5-pro-exp-0801", "gemini-1.5-pro-exp-0827", "gemini-1.5-flash", "gemini-1.5-flash-exp-0827", "gemini-1.5-flash-8b-exp-0827"]
+model_list = ["gemini-1.5-pro", "gemini-1.5-pro-002", "gemini-1.5-pro-exp-0827", "gemini-1.5-flash", "gemini-1.5-flash-002", "gemini-1.5-flash-8b-exp-0924"]
 
 genai.configure(api_key=GEMINI_API_KEY_PREVIEW)
 
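The preview tab only refreshes `model_list` to the `-002` and `0924` releases. If the list drifts again, the available IDs can be read from the API itself; a small helper (not part of this commit), assuming the same `google-generativeai` SDK and key:

```python
# Optional check: list the model IDs the configured key can actually call,
# to keep model_list in sync with what the API serves.
import os
import google.generativeai as genai

genai.configure(api_key=os.getenv("GEMINI_API_KEY_PREVIEW"))

for m in genai.list_models():
    # Only chat-capable models are useful for the chatbot tabs.
    if "generateContent" in m.supported_generation_methods:
        print(m.name)  # e.g. "models/gemini-1.5-flash-002"
```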
 
Tabs/Gemma_Chatbot.py DELETED
@@ -1,82 +0,0 @@
1
- import gradio as gr
2
- from huggingface_hub import InferenceClient
3
- import os
4
- from dotenv import load_dotenv
5
-
6
- load_dotenv()
7
- HF_TOKEN = os.getenv("HF_TOKEN")
8
-
9
- model_list = ["google/gemma-2-2b-it", "google/gemma-2-9b-it", "google/gemma-2-27b-it"]
10
-
11
- def respond(
12
- message,
13
- history: list[tuple[str, str]],
14
- model_id,
15
- system_message,
16
- max_tokens,
17
- temperature,
18
- top_p,
19
- ):
20
- client = InferenceClient(
21
- model_id,
22
- token=HF_TOKEN,
23
- )
24
- messages = [{"role": "system", "content": system_message}]
25
-
26
- for val in history:
27
- if val[0]:
28
- messages.append({"role": "user", "content": val[0]})
29
- if val[1]:
30
- messages.append({"role": "assistant", "content": val[1]})
31
-
32
- messages.append({"role": "user", "content": message})
33
-
34
- response = ""
35
-
36
- for message in client.chat_completion(
37
- messages,
38
- max_tokens=max_tokens,
39
- stream=True,
40
- temperature=temperature,
41
- top_p=top_p,
42
- ):
43
- token = message.choices[0].delta.content
44
-
45
- response += token
46
- yield response
47
-
48
- gemma_chatbot = gr.ChatInterface(
49
- respond,
50
- additional_inputs=[
51
- gr.Dropdown(
52
- choices=model_list,
53
- label="Model",
54
- value="google/gemma-2-27b-it",
55
- ),
56
- gr.Textbox(
57
- value="You are a friendly Chatbot.",
58
- label="System message"
59
- ),
60
- gr.Slider(
61
- minimum=1,
62
- maximum=4096,
63
- value=512,
64
- step=1,
65
- label="Max new tokens"
66
- ),
67
- gr.Slider(
68
- minimum=0.1,
69
- maximum=4.0,
70
- value=0.7,
71
- step=0.1,
72
- label="Temperature"
73
- ),
74
- gr.Slider(
75
- minimum=0.1,
76
- maximum=1.0,
77
- value=0.95,
78
- step=0.05,
79
- label="Top-p (nucleus sampling)",
80
- ),
81
- ],
82
- )
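Tabs/Gemma_Chatbot.py streamed its replies through `huggingface_hub` rather than `google-generativeai`. For reference, a self-contained sketch (illustrative, not from the commit) of the pattern the deleted `respond` generator used: `chat_completion(..., stream=True)` yields deltas, and each `yield` hands Gradio the full text so far:

```python
# Standalone illustration of the streaming pattern used by the deleted tab.
import os
from huggingface_hub import InferenceClient

client = InferenceClient("google/gemma-2-2b-it", token=os.getenv("HF_TOKEN"))

def stream_reply(prompt: str):
    partial = ""
    for chunk in client.chat_completion(
        [{"role": "user", "content": prompt}],
        max_tokens=256,
        stream=True,
    ):
        delta = chunk.choices[0].delta.content or ""  # final chunk may carry no content
        partial += delta
        yield partial  # each yield is the full text so far, as gr.ChatInterface expects

if __name__ == "__main__":
    final = ""
    for final in stream_reply("Hello!"):
        pass
    print(final)
```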
app.py CHANGED
@@ -1,7 +1,6 @@
 import gradio as gr
 from Tabs.Gemini_Chabot_Stable import gemini_chatbot, clear_chat_button, undo_chat_button, clear_chat_history, undo_chat, TITLE, NOTICE, ERRORS, FUTURE_IMPLEMENTATIONS, ABOUT
 from Tabs.Gemini_Chatbot_Preview import gemini_chatbot_preview, clear_chat_button_preview, undo_chat_button_preview, clear_chat_history_preview, undo_chat_preview
-from Tabs.Gemma_Chatbot import gemma_chatbot
 
 with gr.Blocks(theme=gr.themes.Soft()) as demo:
     gr.HTML("""<h3 align="center">I strongly recommond duplicate this space for intensive uses!!!</h3>""")
@@ -23,16 +22,14 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
     # ============================== Stable - END ==============================
     # ============================== Nightly - START ==============================
     with gr.Tab("Chat with Gemini 1.5 - Preview"):
-        gemini_chatbot_preview.render()
-        clear_chat_button_preview.click(
-            fn=clear_chat_history_preview
-        )
-        undo_chat_button_preview.click(
-            fn=undo_chat_preview
-        )
+        # gemini_chatbot_preview.render()
+        # clear_chat_button_preview.click(
+        #     fn=clear_chat_history_preview
+        # )
+        # undo_chat_button_preview.click(
+        #     fn=undo_chat_preview
+        # )
+        gr.HTML("<h1 align='center'>🚧 Work in progress 🚧</h1>")
     # ============================== Nightly - END ==============================
-    with gr.Tab("Chat with Gemma 2"):
-        # gemma_chatbot.render()
-        gr.HTML("""<h3 align="center">This feature is under development. Please check back later.</h3>""")
 
 demo.queue().launch(debug=True, show_error=True)
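For orientation, app.py composes the tabs by rendering the pre-built interfaces from the Tabs/ modules inside `gr.Tab` blocks and wiring their buttons with `.click()`, which is exactly what the commented-out preview lines above did for the preview tab. A minimal sketch of that pattern (illustrative; only the imports are taken from the committed app.py, and the Stable tab's label is assumed since it is not shown in this diff):

```python
# Sketch of the tab-composition pattern used by app.py (not the committed file).
import gradio as gr
from Tabs.Gemini_Chabot_Stable import (
    gemini_chatbot, clear_chat_button, undo_chat_button,
    clear_chat_history, undo_chat, TITLE,
)

with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.HTML(TITLE)
    with gr.Tab("Chat with Gemini 1.5 - Stable"):  # label assumed for this sketch
        gemini_chatbot.render()                    # pre-built gr.ChatInterface from the Tabs module
        clear_chat_button.click(fn=clear_chat_history)
        undo_chat_button.click(fn=undo_chat)
    with gr.Tab("Chat with Gemini 1.5 - Preview"):
        gr.HTML("<h1 align='center'>🚧 Work in progress 🚧</h1>")

demo.queue().launch()
```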