Artin2009 commited on
Commit
e3d0541
1 Parent(s): e807623

Upload chain_app.py

Browse files
Files changed (1) hide show
  1. chain_app.py +717 -0
chain_app.py ADDED
@@ -0,0 +1,717 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os

import requests
from gradio_client import Client
from groq import Groq
from openai import OpenAI

import chainlit as cl
from chainlit.input_widget import Select, Slider

# BUG FIX: `os` was used below but never imported, which made the module fail
# with NameError on import. All secrets come from the environment.
hf_token = os.environ.get("HF_TOKEN")
openai_api_key = os.environ.get('OPENAI_API_KEY')
groq_api_key = os.environ.get('GROQ_API_KEY')

# Backend clients for the chat profiles defined below.
hf_text_client = Client("Artin2009/text-generation", hf_token=hf_token)
# Image-generation Space client is currently disabled.
# SECURITY: the original commented-out line embedded a hard-coded HF token in
# source; it has been removed — re-enable with `hf_token=hf_token` only.
# hf_image_client = Client('Artin2009/image-generation', hf_token=hf_token)
openai_client = OpenAI(api_key=openai_api_key)
groq_client = Groq(api_key=groq_api_key)

# Hosted-inference endpoint for the TookaBERT fill-mask model (Toka-353M profile).
API_URL = "https://api-inference.huggingface.co/models/PartAI/TookaBERT-Large"
headers = {"Authorization": f"Bearer {hf_token}"}
21
def query(payload):
    """POST *payload* to the hosted TookaBERT endpoint and return the parsed JSON.

    No retry or error handling: on HTTP errors the API's JSON error body is
    returned unchanged, matching the original best-effort behaviour.
    """
    # ROBUSTNESS: a timeout prevents a hung connection from blocking the
    # async chat handler indefinitely (the original had none).
    response = requests.post(API_URL, headers=headers, json=payload, timeout=30)
    return response.json()
24
+
25
@cl.set_chat_profiles
async def chat_profile():
    """Declare the selectable model profiles shown in the Chainlit UI.

    Profile names are matched literally (case-sensitive) in `on_chat_start`
    and `main`, so they must not be renamed without updating both handlers.
    """
    return [
        cl.ChatProfile(
            name="None",
            markdown_description="None",
        ),
        cl.ChatProfile(
            name="neural-brain-AI",
            markdown_description="The main model of neural brain",
        ),
        cl.ChatProfile(
            name="Dorna-AI",
            markdown_description="One of the open-sourced models that neural brain team fine-tuned",
        ),
        cl.ChatProfile(
            name='Image-Generation',
            markdown_description='Our image generation model, has a performance like midjourney',
        ),
        cl.ChatProfile(
            name="GPT-4",
            markdown_description="OpenAI's GPT-4 model",
        ),
        cl.ChatProfile(
            name="gpt-3.5-turbo",
            markdown_description="OpenAI's GPT-3.5 Turbo model",
        ),
        cl.ChatProfile(
            name="GPT-3.5-turbo-0125",
            markdown_description="OpenAI's GPT-3.5 Turbo 0125 model",
        ),
        cl.ChatProfile(
            name="gpt-3.5-turbo-1106",
            markdown_description="OpenAI's GPT-3.5 Turbo 1106 model",
        ),
        cl.ChatProfile(
            name="davinci-002",
            markdown_description="OpenAI's Davinci-002 model",
        ),
        cl.ChatProfile(
            name="TTS",
            markdown_description="OpenAI's Text-to-Speech model",
        ),
        # BUG FIX: the descriptions below said "Llama-2" (and "7B" for the 8B
        # model) although both profiles serve Llama-3 via Groq.
        cl.ChatProfile(
            name="Llama-3-70B",
            markdown_description="Meta Open Source model Llama-3 with 70B parameters",
        ),
        cl.ChatProfile(
            name="Llama-3-8B",
            markdown_description="Meta Open Source model Llama-3 with 8B parameters",
        ),
        cl.ChatProfile(
            name="gemma-7B",
            markdown_description='Google Open Source LLM',
        ),
        cl.ChatProfile(
            name="zephyr-7B",
            markdown_description="Open Source model Zephyr with 7B parameters",
        ),
        cl.ChatProfile(
            name='mistral-7B',
            markdown_description='mistral open source LLM with 7B parameters',
        ),
        cl.ChatProfile(
            name="Toka-353M",
            markdown_description="PartAI Open Source model Toka with 353M parameters",
        ),
    ]
93
+
94
# Table of profiles that share the standard settings panel (one model Select
# plus the common Temperature slider). Each entry maps the profile name to
# (select_id, select_label, select_value, greeting_message).
_STANDARD_PROFILES = {
    'GPT-4': (
        "OpenAI-Model", "OpenAI - Model", "gpt-4",
        "Im OpenAI's latest and biggest model. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? ",
    ),
    'gpt-3.5-turbo': (
        "OpenAI-Model", "OpenAI - Model", "gpt-3.5-turbo",
        "Im one of the OpenAI's models. one of the best models. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? ",
    ),
    'GPT-3.5-turbo-0125': (
        "OpenAI-Model", "OpenAI - Model", "gpt-3.5-turbo-0125",
        "Im one of the OpenAI's models. one of the best models. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? ",
    ),
    'gpt-3.5-turbo-1106': (
        "OpenAI-Model", "OpenAI - Model", "gpt-3.5-turbo-1106",
        "Im one of the OpenAI's models. one of the best models. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? ",
    ),
    'davinci-002': (
        "OpenAI-Model", "OpenAI - Model", "davinci-002",
        "Im one of the OpenAI's models. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? ",
    ),
    'Llama-3-70B': (
        "Meta-Model", "Meta - Model", "Llama-3-70B",
        "Im the big Llama!. one of the best open source models released by Meta! i am the Big version of meta's open source LLMs., i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? ",
    ),
    'Llama-3-8B': (
        "Meta-Model", "Meta - Model", "Llama-3-8B",
        "Im The small Llama!. one of the best open source models released by Meta! i am the small version of meta's open source LLMs. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? ",
    ),
    'gemma-7B': (
        "Google-Model", "Google - Model", "Gemma-7B",
        "Im Gemma. the small version of google open source LLMs. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? ",
    ),
    'zephyr-7B': (
        "zephyr-Model", "zephyr - Model", "zephyr-7B",
        "Im Zephyr. One of the best open source LLMs. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? ",
    ),
    'mistral-7B': (
        "Mistral-Model", "Mistral - Model", "Mistral-7B",
        "Im Mistral. the small version of Mistral Family. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? ",
    ),
    'Toka-353M': (
        "PartAI-Model", "PartAI - Model", "TokaBert-353M",
        "Im Toka. An opens source persian LLM . i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? you should ask me your questions like : the capital of england is <mask> ",
    ),
}


async def _send_standard_settings(select_id, label, value, with_temperature=True):
    """Render the shared settings panel: one single-value model Select and,
    unless disabled, the common Temperature slider (0-1, default 0.7)."""
    widgets = [Select(id=select_id, label=label, values=[value], initial_index=0)]
    if with_temperature:
        widgets.append(
            Slider(id="Temperature", label="Model Temperature",
                   initial=0.7, min=0, max=1, step=0.1)
        )
    await cl.ChatSettings(widgets).send()


@cl.on_chat_start
async def on_chat_start():
    """Show the settings panel and greeting for the selected chat profile.

    The original handler repeated the same Select+Slider boilerplate for
    eleven profiles; those now go through `_STANDARD_PROFILES`. The profiles
    with custom panels (Dorna, Image-Generation) and the settings-free TTS
    profile keep their dedicated branches. Profiles are mutually exclusive,
    so the if/elif chain is equivalent to the original sequential ifs.
    """
    chat_profile = cl.user_session.get("chat_profile")

    if chat_profile in _STANDARD_PROFILES:
        select_id, label, value, greeting = _STANDARD_PROFILES[chat_profile]
        await _send_standard_settings(select_id, label, value)
        await cl.Message(content=greeting).send()
    elif chat_profile == 'neural-brain-AI':
        # The neural-brain profile has no temperature control.
        await _send_standard_settings(
            "NB-Model", "NeuralBrain - Models", "Neural Brain AI",
            with_temperature=False,
        )
        await cl.Message(
            content="Hello, I am the main model of neural brain team, i am an instance of ChatGPT-4, This team finetuned me and i am ready to help you"
        ).send()
    elif chat_profile == 'Dorna-AI':
        # Dorna exposes its raw Space parameters as fixed single-value Selects.
        await cl.ChatSettings(
            [
                Select(id="param_3", label="Parameter 3", values=["512"],
                       initial_index=0, tooltip="Config parameter 3 (e.g., max tokens)"),
                Select(id="param_4", label="Parameter 4", values=["0.7"],
                       initial_index=0, tooltip="Config parameter 4 (e.g., temperature)"),
                Select(id="param_5", label="Parameter 5", values=["0.95"],
                       initial_index=0, tooltip="Config parameter 5 (e.g., top_p)"),
                Select(id="api_name", label="API Name", values=["/chat"],
                       initial_index=0),
            ]
        ).send()
        await cl.Message(
            content='my name is Dorna, Your AI Assistant designed by neural nexus team. i was made by Artin Daneshvar and Sadra Noadoust, 2 iranian students!'
        ).send()
    elif chat_profile == 'Image-Generation':
        await cl.ChatSettings(
            [
                Slider(id="Image_Width", label="Image Width", initial=512,
                       min=256, max=2048, step=64, tooltip="Measured in pixels"),
                Slider(id="Image_Height", label="Image Height", initial=512,
                       min=256, max=2048, step=64, tooltip="Measured in pixels"),
            ]
        ).send()
        # 'cat.png' is a bundled sample image — it must ship next to the app.
        image = cl.Image(path='cat.png', name="result", display="inline")
        await cl.Message(
            content="I can make high quality & resoloution images for you, This is an example of what i can do!",
            elements=[image],
        ).send()
    elif chat_profile == 'TTS':
        # TTS has no configurable settings, only a greeting.
        await cl.Message(
            content="Im TTS. of the best models OpenAI ever created. i can convert text to speech! . i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "
        ).send()
427
+
428
+
429
# Shared system prompt for every OpenAI chat profile.
_SYSTEM_PROMPT = "You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust"


async def _openai_chat_reply(model, user_content):
    """Run a non-streaming OpenAI chat completion and send the answer."""
    completion = openai_client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": _SYSTEM_PROMPT},
            {"role": "user", "content": user_content},
        ],
    )
    await cl.Message(content=completion.choices[0].message.content).send()


async def _groq_stream_reply(model, user_content):
    """Stream a Groq chat completion, accumulate the chunks, send one message."""
    completion = groq_client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": user_content}],
        temperature=1,
        max_tokens=1024,
        top_p=1,
        stream=True,
        stop=None,
    )
    complete_content = ""
    for chunk in completion:
        # delta.content is None on role/metadata chunks — skip those.
        content = chunk.choices[0].delta.content
        if content is not None:
            complete_content += content
    await cl.Message(content=complete_content).send()


async def _hf_text_reply(persona, user_content):
    """Query the hosted text-generation Space with a persona instruction."""
    result = hf_text_client.predict(
        message=user_content,
        request=persona,
        param_3=512,
        param_4=0.7,
        param_5=0.95,
        api_name="/chat",
    )
    # BUG FIX: the original used result.strip("</s>"), which strips any of the
    # characters '<', '/', 's', '>' from BOTH ends of the text (e.g. a leading
    # "s" would be eaten). Only the trailing end-of-sequence marker is removed.
    if result.endswith("</s>"):
        result = result[:-len("</s>")]
    await cl.Message(content=result).send()


async def _image_generation_reply(prompt):
    """Show the image-size settings panel and run the image-generation Space."""
    await cl.ChatSettings(
        # BUG FIX: ChatSettings takes a list of widgets; the original passed
        # the two sliders as separate positional arguments, which raises.
        [
            Slider(id="Image_Width", label="Image Width", initial=512,
                   min=256, max=2048, step=64, tooltip="Measured in pixels"),
            Slider(id="Image_Height", label="Image Height", initial=512,
                   min=256, max=2048, step=64, tooltip="Measured in pixels"),
        ]
    ).send()
    # BUG FIX: hf_image_client is commented out at module level, so the
    # original raised NameError here; fail soft with a clear message instead.
    client = globals().get("hf_image_client")
    if client is None:
        await cl.Message(content="Image generation is currently unavailable.").send()
        return
    result = client.predict(
        prompt=prompt,
        negative_prompt="",
        seed=0,
        randomize_seed=True,
        width=512,
        height=512,
        guidance_scale=0,
        num_inference_steps=2,
        api_name="/infer",
    )
    image = cl.Image(path=result, name="result", display="inline")
    await cl.Message(
        content="This message has an image!",
        elements=[image],
    ).send()


@cl.on_message
async def main(message: cl.Message):
    """Route the incoming user message to the backend of the active profile.

    Dispatches on the (case-sensitive) profile name chosen in the UI. The
    original repeated the OpenAI and Groq call bodies per profile; those now
    share `_openai_chat_reply` / `_groq_stream_reply` / `_hf_text_reply`.
    """
    chat_profile = cl.user_session.get("chat_profile")
    if not chat_profile or chat_profile == 'None':
        await cl.Message(content="Please select a model first.").send()
        return

    if chat_profile == 'neural-brain-AI':
        await _openai_chat_reply("ft:gpt-3.5-turbo-1106:nb:aria1:9UWDrLJK", message.content)
    elif chat_profile == "Dorna-AI":
        await _hf_text_reply(
            "your name is Dorna,An AI Assistant designed by neural nexus team. i was made by Artin Daneshvar and Sadra Noadoust, 2 iranian students!",
            message.content,
        )
    elif chat_profile == "zephyr-7B":
        await _hf_text_reply(
            "your name is zephyr,An AI Assistant designed by neural nexus team. i was made by Artin Daneshvar and Sadra Noadoust, 2 iranian students!",
            message.content,
        )
    elif chat_profile == 'Image-Generation':
        await _image_generation_reply(message.content)
    elif chat_profile == 'GPT-4':
        await _openai_chat_reply("gpt-4", message.content)
    elif chat_profile == 'gpt-3.5-turbo':
        await _openai_chat_reply("gpt-3.5-turbo", message.content)
    elif chat_profile == 'GPT-3.5-turbo-0125':
        # BUG FIX: the API expects the lowercase model id; the original sent
        # "GPT-3.5-turbo-0125", which the OpenAI API rejects as unknown.
        await _openai_chat_reply("gpt-3.5-turbo-0125", message.content)
    elif chat_profile == 'gpt-3.5-turbo-1106':
        await _openai_chat_reply("gpt-3.5-turbo-1106", message.content)
    elif chat_profile == 'davinci-002':
        # NOTE(review): davinci-002 is a completions-only base model; the chat
        # endpoint likely rejects it — confirm against the OpenAI model docs.
        await _openai_chat_reply("davinci-002", message.content)
    elif chat_profile == 'TTS':
        response = openai_client.audio.speech.create(
            model="tts-1",
            voice="alloy",
            input=message.content,
        )
        # NOTE(review): stream_to_file is deprecated in newer openai SDKs in
        # favour of the streaming-response helpers — confirm the pinned version.
        response.stream_to_file("output.mp3")
        elements = [
            cl.Audio(name="output.mp3", path="./output.mp3", display="inline"),
        ]
        await cl.Message(
            content="Here it is the response!",
            elements=elements,
        ).send()
    elif chat_profile == 'Llama-3-70B':
        await _groq_stream_reply("llama3-70b-8192", message.content)
    elif chat_profile == 'Llama-3-8B':
        await _groq_stream_reply("llama3-8b-8192", message.content)
    elif chat_profile == 'gemma-7B':
        await _groq_stream_reply("gemma-7b-it", message.content)
    elif chat_profile == 'mistral-7B':
        await _groq_stream_reply("mixtral-8x7b-32768", message.content)
    elif chat_profile == 'Toka-353M':
        output = query({
            "inputs": message.content,
        })
        # presumably a fill-mask response: a list of candidates each carrying a
        # 'sequence' key — verify; an API error returns a dict and raises here.
        await cl.Message(content=output[0]['sequence']).send()
714
+
715
@cl.on_settings_update
async def setup_agent(settings):
    """Chainlit hook fired when the user changes values in the settings panel.

    Currently a stub: the received settings are only printed for debugging
    and are not applied to any model call.
    """
    print("on_settings_update", settings)